diff --git "a/3639.jsonl" "b/3639.jsonl" new file mode 100644--- /dev/null +++ "b/3639.jsonl" @@ -0,0 +1,394 @@ +{"seq_id":"2837369312","text":"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport pickle\n\ndef imputation(df):\n \"\"\" Missing value imputation.\n \n Parameters:\n -----------\n df: Pandas dataframe\n \n Yields:\n -------\n df_new: Pandas dataframe\n \"\"\"\n \n print(\"Start missing value imputation ...\")\n genes = df.columns[4:-1]\n #ave = {g:df[g].sum()/df[g].count() for g in genes} # compute average gene's non-missing values\n # fill missing values with gene's average exp levels\n df_new = df.copy(deep = True)\n for i in df_new.index:\n ave = df.loc[i, genes].sum()/df.loc[i, genes].count()\n #print(df.loc[i, genes].count())\n df_new.loc[i,df_new.loc[i].isnull()] = ave\n #print(df_new)\n return df_new\n\ndef quantile_normalization(df1, df2):\n \"\"\" Quantile normalization cross platforms\n Parameters:\n -----------\n df1: first Pandas dataframe after imputation\n df2: second Pandas dataframe after imputation\n Yields:\n -------\n df1_new: Pandas dataframe\n df2_new: Pandas dataframe\n \"\"\"\n \n print(\"Start quantile normalization ...\")\n genes = df1.columns[4:-1]\n df = pd.concat([df1[genes], df2[genes]])\n sorted_matrix = np.array([sorted(row[genes].tolist()) for _,row in df.iterrows()])\n quantiled_ave = {i:j for i, j in enumerate(list(np.mean(sorted_matrix,axis = 0)))} #{rank: value}\n df1_new = []\n for i,row in df1.iterrows():\n vals = list(row[genes])\n order = {}\n for j,k in enumerate(np.argsort(vals)):\n order.update({vals[k]:j})\n ranked_genes = [quantiled_ave[order[v]] for v in vals]\n df1_new.append(list(row.iloc[:4])+ranked_genes+list(row.iloc[-1:]))\n df1_new = pd.DataFrame(df1_new, columns = list(df1.columns))\n df2_new = []\n for i,row in df2.iterrows():\n vals = list(row[genes])\n order = {}\n for j,k in enumerate(np.argsort(vals)):\n order.update({vals[k]:j})\n ranked_genes = [quantiled_ave[order[v]] for v in vals]\n df2_new.append(list(row.iloc[:4])+ranked_genes+list(row.iloc[-1:]))\n df2_new = pd.DataFrame(df2_new, columns = list(df2.columns))\n return df1_new, df2_new\n\ndef preprocess_in_vivo(df):\n \"\"\" Preprocess raw in vivo dataset\n \"\"\"\n # preprocess prediction features\n df_new = imputation(df)\n # binarize prediction labels\n assert (list(sorted(set(df_new['ClearanceRate']))) == ['Fast', 'Slow']), \"Clearance Rate in in vivo dataset is not correct!\"\n df_new['ClearanceRate_binary'] = [0 if x == 'Fast' else 1 for x in df_new['ClearanceRate']]\n df_new = df_new.drop(columns = ['ClearanceRate'])\n return df_new\n\ndef preprocess_in_vitro(df):\n \"\"\" Preprocess raw in vitro dataset\n \"\"\"\n df_new = imputation(df)\n return df_new\n\ndef data_preparation(df_invivo, df_invitro, no_quantile = False, common_genes = None):\n \"\"\" Preprocess of in vivo and in vitro datasets\n Parameters:\n -----------\n df_invivo: Pandas Dataframe\n in vivo dataset of gene expression levels\n extra columns: 'Sample_Names', 'Country', 'Asexual.stage..hpi.', 'Kmeans.Grp'\n label: 'ClearanceRate'\n df_invitro: Pandas Dataframe\n in vitro dataset of gene expression levels\n extra columns: 'Sample_Name', 'Isolate', 'Timepoint', 'Treatment', 'BioRep'\n label: 'DHA_IC50'\n if_quantile: boolean\n if quantile normalization or not\n common_genes: list\n if specified, use common_genes only as feature set. 
else use the whole genesets shared by both in vivo and in vitro dataset.\n Yields:\n -------\n df_invivo: Pandas Dataframe\n processed in vivo dataset\n df_invitro: Pandas Dataframe\n processed in vitro dataset\n \"\"\"\n # remove samples without labels\n df_invivo = df_invivo.dropna(subset = ['ClearanceRate'])\n df_invitro = df_invitro.dropna(subset = ['DHA_IC50'])\n \n # find common genes\n if common_genes == None:\n common_genes = sorted(list(set(df_invivo.columns[4:-1])&set(df_invitro.columns[5:-1])))\n print(\"Shared genes between in vivo and in vitro datasets: \",len(common_genes))\n \n # select common columns; attach four extra informatio columns at front\n df_invivo = df_invivo[['Sample_Names', 'Country', 'Asexual.stage..hpi.', 'Kmeans.Grp']+common_genes+['ClearanceRate']]\n df_invitro = df_invitro[['Isolate', 'Timepoint', 'Treatment', 'BioRep']+common_genes+['DHA_IC50']]\n \n # preprocess in invo and in vitro datasets respectively since they contain differernt labels\n df_invivo = preprocess_in_vivo(df_invivo)\n df_invitro = preprocess_in_vitro(df_invitro)\n \n # quantile normalization on both datasets\n if no_quantile:\n pass\n else:\n df_invivo, df_invitro = quantile_normalization(df_invivo, df_invitro)\n \n return df_invivo, df_invitro\n","repo_name":"GuanLab/Predict-Malaria-ART-Resistance","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18690685339","text":"import sys\nimport csv\nfrom datetime import datetime\n\n'''índices de columnas del archivo para su búsqueda'''\nINDICE_NroCheque = 0\nINDICE_CodigoBanco = 1\nINDICE_CodigoScurusal = 2\nINDICE_NumeroCuentaOrigen = 3\nINDICE_NumeroCuentaDestino = 4\nINDICE_Valor = 5\nINDICE_FechaOrigen = 6\nINDICE_FechaPago = 7\nINDICE_DNI = 8\nINDICE_Tipo = 9\nINDICE_Estado = 10\n\n\ndef obtendatos(nombreArchivo):\n with open(nombreArchivo) as archivo:\n lector = csv.reader(archivo)\n datos = list(lector)\n\n cabecera = datos[0]\n\n return datos, cabecera \n''' La función devuelve una lista de listas a partir del archivo, \n\"cabera\" es una lista aparte con la primer fila de datos. '''\n\ndef fecha2timestamp(value:str) -> int:\n dt=datetime.strptime(value,'%d-%m-%Y')\n return int(dt.timestamp())\n''' Convierte la fecha de formato str en timestamp '''\n\ndef duplicadoError(listaCheques, dni):\n errorNroCheque=False\n for i in range(1,len(listaCheques)):\n for j in range(i+1,len(listaCheques)):\n if listaCheques[i][0]==listaCheques[j][0]:\n if listaCheques[i][3]==listaCheques[j][3]:\n if dni == listaCheques[i][8]:\n print('\\n*** ERROR: Nro Cheque:',listaCheques[i][0],' de cuenta origen ',listaCheques[i][3],' se encuentra repetido. 
Datos no válidos***')\n errorNroCheque=True\n return errorNroCheque\n\n''' Retorna un error de ser que se encuntre un cheque con número repetido y cuenta de origen con número repetido '''\n\n\ndef consulta(datos, DNI, tipoCheques, estadoCheques, strFechaInicio, strFechaFin):\n\n fechaInicio = str(fecha2timestamp(strFechaInicio))\n fechaFin = str(fecha2timestamp(strFechaFin))\n\n if tipoCheques == 'DEPOSITADO':\n INDICE_fecha = INDICE_FechaPago\n else:\n INDICE_fecha = INDICE_FechaOrigen\n #selecciona la columna de fechas que va a ser usada en la consulta\n\n cheques_dni=[]\n for i in datos:\n if (i[INDICE_fecha]>=fechaInicio) and (i[INDICE_fecha]<=fechaFin):\n if i[INDICE_DNI]==DNI: \n if (tipoCheques==i[INDICE_Tipo]):\n if (estadoCheques==i[INDICE_Estado]) or (estadoCheques=='TODOS'):\n cheques_dni.append(i)\n return cheques_dni\n\n\ndef pedidoUsuario():\n print(\" * Valor por defecto: 'file.csv' *\")\n print(\"->\")\n nombreArchivo = str(input(\"Ingrese el nombre del archivo: \"))\n if nombreArchivo == '':\n nombreArchivo = 'file.csv'\n \n dniNoValido = True\n print(\"\\n->\")\n while dniNoValido:\n dni = str(input(\"Ingrese el DNI a consultar: \"))\n if dni.isnumeric() and (len(dni) == 7 or len(dni) == 8):\n dniNoValido = False\n else:\n dniNoValido = True\n print('*** Por favor, ingrese de nuevo. \\nEl DNI debe tener entre 7 y 8 dígitos ***')\n\n\n tipoChequeNoValido = True\n print(\"\\n->\")\n while tipoChequeNoValido:\n opcion = str(input(\"Tipos de cheque: \\n 1. Emitido\\n 2. Depositado\\n Ingrese el Tipo de cheque a consultar: \"))\n if opcion == '1':\n tipoCheque = \"EMITIDO\"\n tipoChequeNoValido = False\n print(tipoCheque)\n elif opcion == '2':\n tipoCheque = \"DEPOSITADO\"\n tipoChequeNoValido = False\n print(tipoCheque)\n else:\n tipoChequeNoValido = True\n print('\\n*** Elija una opción válida: ***\\n')\n\n\n print(\"\\n->\")\n opcion = input(\"Estado de cheques: \\n 1. PENDIENTE\\n 2. APROBADO\\n 3. RECHAZADO\\n* Presionando ENTER selecciona todos por defecto * \\n Ingrese el Estado a consultar: \")\n if opcion == '1':\n estadoCheque = \"PENDIENTE\"\n elif opcion == '2':\n estadoCheque = \"APROBADO\"\n elif opcion == '3':\n estadoCheque = \"RECHAZADO\"\n else:\n estadoCheque = \"TODOS\"\n\n\n strFechaInicio = str('1-1-2017') # fecha de Inicio por defecto\n strFechaFin = str('1-8-2022') # fecha de Finalización por defecto\n\n print(\"\\n->\")\n solicitaRango = True if str(input(\"¿Desea seleccionar un rango de fecha? S/N \")).upper() == \"S\" else False\n \n\n if solicitaRango:\n print(\"\\n->\")\n test_str = str(input(\"Seleccione fecha de Inicio(dd-mm-aaaa): \"))\n try:\n res = bool(datetime.strptime(test_str, '%d-%m-%Y'))\n except ValueError:\n res = False\n if res:\n strFechaInicio = test_str\n else:\n strFechaInicio = str('1-1-2017') # fecha de Inicio por defecto\n print(strFechaInicio)\n\n print(\"\\n->\")\n test_str = str(input(\"Seleccione fecha de Finalización(dd-mm-aaaa): \"))\n try:\n res = bool(datetime.strptime(test_str, '%d-%m-%Y'))\n except ValueError:\n res = False\n if res:\n strFechaFin = test_str\n else:\n strFechaFin = str('1-8-2022') # fecha de Finalización por defecto\n print(strFechaFin)\n\n print(\"\\n->\")\n salida = str(input(\"¿Desea impresión por PANTALLA (por defecto) o en un ARCHIVO CSV?\\n 1. PANTALLA\\n 2. 
ARCHIVO CSV \\nIngrese una opción: \"))\n # Variable a evaluar sobre preferencia del Usuario\n\n return nombreArchivo, dni, tipoCheque, estadoCheque, strFechaInicio, strFechaFin, salida\n''' Función para obtener datos del usuario '''\n\ndef salidaPantalla(lista_cheques, cabecera):\n formato = \"{:<10} {:<12} {:<15} {:<19} {:<20} {:<6} {:<12} {:<10} {:<10} {:<11} {:<10}\"\n print(formato.format(cabecera[0], cabecera[1], cabecera[2], cabecera[3], cabecera[4], cabecera[5], cabecera[6], cabecera[7], cabecera[8], cabecera[9], cabecera[10]))\n for lista in lista_cheques:\n print(formato.format(lista[0], lista[1], lista[2], lista[3], lista[4], lista[5], lista[6], lista[7], lista[8], lista[9], lista[10]))\n\n print(\"\\n\")\n\ndef salidaCsv(lista_cheques, cabecera):\n nombre = nombreCsv(lista_cheques)\n with open(nombre, 'w', newline='') as archivoCsv:\n writer=csv.writer(archivoCsv)\n writer.writerow(cabecera[3:8])\n for line in lista_cheques:\n seleccion = line[3:8]\n writer.writerow(seleccion)\n\n\ndef nombreCsv(lista_cheques):\n dni = str(lista_cheques[0][8]) \n fecha = str(int(datetime.now().timestamp()))\n return dni + '_' + fecha + '.csv'\n","repo_name":"lxrpetrosino/itbank_hb","sub_path":"backend/SPRINT 4/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71054150121","text":"#soma dos numeros de valor de 4 digitos\nprint('')\nnumero = int(input('Digite um número de 4 digitos: '))\n\nm = numero // 1000 % 10\nc = numero // 100 % 10\nd = numero // 10 % 10\nu = numero // 1 % 10\n\nsoma = m + c + d + u\n\nprint('')\nprint('O valor digitado corresponde:', soma)","repo_name":"CarlosDaniel0/atividades_tds","sub_path":"Lista1 Fábio/questao35.py","file_name":"questao35.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40755264909","text":"# 복호화 암호화\n\ndef test2(sentence, keyword, skip) :\n answer = ''\n keywords = keyword*1000\n\n char = list(map(str, sentence))\n key = list(map(str, keywords))\n\n for idx, s in enumerate(skip) :\n #skip==0인경우\n if idx == 0 :\n answer+=key.pop(0)\n continue\n\n word1 = key.pop(0)\n for _ in range(s):\n if len(char) != 0 :\n word2 = char.pop(0)\n #같은 경우 뒤에\n if word2 == word1 :\n answer+=word2\n break\n else :\n answer+=word2\n elif len(char) == 0:\n return answer\n answer+=word1\n\n #skip마지막\n if idx == len(skip)-1 :\n answer+=''.join(char)\n break\n return answer\n\n\n","repo_name":"hcw3737/algorithms","sub_path":"cote/N_test2.py","file_name":"N_test2.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28296121104","text":"import configparser\nimport gettext\nimport hashlib\nimport lzma\nimport os\nimport shutil\nimport tarfile\nimport tempfile\n\n\nfrom io import StringIO\n\nfrom otopi import plugin\nfrom otopi import util\n\nfrom ovirt_hosted_engine_setup import constants as ohostedcons\nfrom ovirt_hosted_engine_setup import engineapi\n\n\ndef _(m):\n return gettext.dgettext(message=m, domain='ovirt-hosted-engine-setup')\n\n\n@util.export\nclass Plugin(plugin.PluginBase):\n \"\"\"\n VM configuration plugin.\n \"\"\"\n\n def __init__(self, context):\n super(Plugin, self).__init__(context=context)\n self._tmp_files_directory_name = None\n\n def _validate_authz(self, files_tar):\n self.logger.info(_(\"Validating 
authentication plugins\"))\n authz_ext = set([])\n flist = files_tar.getmembers()\n self.logger.debug('Content:')\n self.logger.debug([f.name for f in flist])\n authplist = [\n f for f in flist if f.isfile() and\n 'etc/ovirt-engine/extensions.d' in f.name and\n f.name.endswith('.properties')\n ]\n self.logger.debug('Configured plugins:')\n self.logger.debug([ap.name for ap in authplist])\n for authp in authplist:\n authp_file = files_tar.extractfile(authp)\n auth_f_str = '[section]\\n' + authp_file.read()\n auth_fp = StringIO(unicode(auth_f_str))\n config = configparser.RawConfigParser()\n try:\n config.readfp(auth_fp)\n except configparser.Error as ex:\n msg = _(\n 'The extension configuration file \\'{authp}\\' inside '\n 'the backup seams invalid, '\n 'please check its content on the engine VM and fix: {ex}'\n ).format(\n authp=authp,\n ex=ex\n )\n self.logger.error(msg)\n return False\n if (\n config.has_section('section') and\n config.has_option(\n 'section',\n 'ovirt.engine.extension.provides'\n ) and\n config.has_option(\n 'section',\n 'ovirt.engine.extension.name'\n )\n ):\n provides = config.get(\n 'section',\n 'ovirt.engine.extension.provides'\n )\n name = config.get(\n 'section',\n 'ovirt.engine.extension.name'\n )\n self.logger.debug(\n 'Extension {n} provides {p}'.format(\n n=name,\n p=provides\n )\n )\n if provides == 'org.ovirt.engine.api.extensions.aaa.Authz':\n authz_ext.add(name)\n else:\n msg = _(\n 'The extension configuration file \\'{authp}\\' inside '\n 'the backup seams invalid, '\n 'please check its content on the engine VM and fix.'\n ).format(\n authp=authp,\n )\n self.logger.error(msg)\n return False\n self.logger.debug(\n 'Authz extensions configured on fs: {l}'.format(l=authz_ext)\n )\n engine_api = engineapi.get_engine_api(self)\n eng_authz_domains = set([\n d.get_name() for d in engine_api.domains.list()\n ])\n self.logger.debug(\n 'Authz domains configured on the engine: {l}'.format(\n l=eng_authz_domains\n )\n )\n if eng_authz_domains > authz_ext:\n to_be_fixed = eng_authz_domains - authz_ext\n msg = _(\n '{tbf}: such AAA domains are still configured in a '\n 'deprecated way that is not compatible with the current '\n 'release; please upgrade them to ovirt-engine-extension '\n 'mechanism before proceeding.'\n ).format(\n tbf=[d for d in to_be_fixed],\n )\n self.logger.error(msg)\n raise RuntimeError('Unsupported AAA mechanism')\n return True\n\n def _validate_backup_file(self, backup_file_path):\n self.logger.info(\n _(\"Validating backup file '{backup_file_path}'\").format(\n backup_file_path=backup_file_path,\n )\n )\n if not os.path.isfile(backup_file_path):\n self.logger.error(\n _(\"Unable to open '{path}'\").format(\n path=backup_file_path\n )\n )\n return False\n try:\n tar = tarfile.open(backup_file_path, 'r:*')\n except tarfile.ReadError as ex:\n self.logger.error(\n _(\"'{path}' is not a tar.gz archive: {m}\").format(\n path=backup_file_path,\n m=ex.message,\n )\n )\n return False\n files = tar.getnames()\n self.logger.debug('backup contents: {files}'.format(files=files))\n if (\n './files' not in files or\n './version' not in files or\n './md5sum' not in files or\n './db/engine_backup.db' not in files or\n './config' not in files\n ):\n self.logger.error(\n _(\"'{path}' is not a complete backup\").format(\n path=backup_file_path\n )\n )\n tar.close()\n return False\n if './db/dwh_backup.db' in files:\n self.environment[ohostedcons.Upgrade.RESTORE_DWH] = True\n self.logger.info(_(\n 'The provided file contains also a DWH DB backup: '\n 'it will 
be restored as well'\n ))\n if './db/reports_backup.db' in files:\n self.environment[ohostedcons.Upgrade.RESTORE_REPORTS] = True\n self.logger.info(_(\n 'The provided file contains also a Reports DB backup: '\n 'it will be restored as well'\n ))\n\n md5_f = tar.extractfile(tar.getmember('./md5sum'))\n md5_lines = md5_f.readlines()\n md5_list = [(x[0], './'+x[1].replace('\\n', '')) for x in (\n x.split(' ')\n for x in md5_lines\n )]\n self.logger.debug('md5_list: {ml}'.format(ml=md5_list))\n for cfile in md5_list:\n self.logger.debug('checking {f}'.format(f=cfile[1]))\n fo = tar.extractfile(tar.getmember(cfile[1]))\n hash_md5 = hashlib.md5()\n for chunk in iter(lambda: fo.read(4096), b\"\"):\n hash_md5.update(chunk)\n calc_md5 = hash_md5.hexdigest()\n self.logger.debug(\n 'calculated {f} - stored {s}'.format(\n f=calc_md5,\n s=cfile[0],\n )\n )\n if calc_md5 != cfile[0]:\n self.logger.error(\n _(\"'{path}' is corrupted\").format(\n path=backup_file_path\n )\n )\n tar.close()\n return False\n\n self._tmp_files_directory_name = tempfile.mkdtemp()\n\n tar.extract(\n tar.getmember('./files'),\n path=self._tmp_files_directory_name\n )\n\n # tarfile on Python 2 doesn't natively support xz compression\n # which is the engine-backup default\n try:\n uncompressed_file = lzma.LZMAFile(\n os.path.join(\n self._tmp_files_directory_name,\n './files'\n )\n )\n try:\n files_tar = tarfile.open(\n fileobj=uncompressed_file,\n mode='r:'\n )\n except tarfile.ReadError as ex:\n self.logger.error(\n _(\"'{path}' is not a valid archive: {m}\").format(\n path='./files',\n m=ex.message,\n )\n )\n tar.close()\n return False\n except lzma.error:\n self.logger.debug('Not lzma')\n try:\n files_tar = tarfile.open(\n fileobj=tar.extractfile(tar.getmember('./files')),\n mode='r:*'\n )\n except tarfile.ReadError as ex:\n self.logger.error(\n _(\n \"'{path}' is not a valid archive: {m} - please try \"\n \"recreating the backup with \"\n \"'--files-compressor=gzip' option.\"\n ).format(\n path='./files',\n m=ex.message,\n )\n )\n tar.close()\n return False\n\n auth_valid = self._validate_authz(files_tar)\n files_tar.close()\n if not auth_valid:\n tar.close()\n return False\n\n self.logger.info(\n _(\"'{backup_file_path}' is a sane backup file\").format(\n backup_file_path=backup_file_path\n )\n )\n tar.close()\n return True\n\n @plugin.event(\n stage=plugin.Stages.STAGE_INIT,\n )\n def _init(self):\n self.environment.setdefault(\n ohostedcons.NetworkEnv.BRIDGE_NAME,\n None,\n )\n self.environment.setdefault(\n ohostedcons.EngineEnv.ADMIN_PASSWORD,\n None\n )\n self.environment.setdefault(\n ohostedcons.VMEnv.VM_UUID,\n None,\n )\n self.environment.setdefault(\n ohostedcons.Upgrade.RESTORE_DWH,\n False,\n )\n self.environment.setdefault(\n ohostedcons.Upgrade.RESTORE_REPORTS,\n False,\n )\n\n @plugin.event(\n stage=plugin.Stages.STAGE_CUSTOMIZATION,\n after=(\n ohostedcons.Stages.CONFIG_OVF_IMPORT,\n ohostedcons.Stages.UPGRADE_CHECK_UPGRADE_VERSIONS,\n ),\n name=ohostedcons.Stages.CONFIG_BACKUP_FILE,\n )\n def _customization(self):\n valid = False\n interactive = self.environment[ohostedcons.Upgrade.BACKUP_FILE] is None\n backup_file_path = self.environment[ohostedcons.Upgrade.BACKUP_FILE]\n while not valid:\n # TODO: do it automatically\n self.dialog.note(_(\n 'Please take a backup of the current engine running this '\n 'command on the engine VM:\\n'\n ' engine-backup --mode=backup --archive-compressor=gzip '\n '--file=engine_backup.tar.gz --log=engine_backup.log\\n'\n 'Then copy the backup archive to this host and input 
here '\n 'its path when ready.\\n'\n ))\n if interactive:\n backup_file_path = self.dialog.queryString(\n name='OVEHOSTED_CONFIGURATION_BACKUPFILE',\n note=_(\n 'Please specify path to engine backup archive '\n 'you would like to restore on the new appliance: '\n ),\n prompt=True,\n caseSensitive=True,\n )\n backup_file_path = self.resolveFile(backup_file_path)\n valid = self._validate_backup_file(backup_file_path)\n if valid:\n self.environment[\n ohostedcons.Upgrade.BACKUP_FILE\n ] = backup_file_path\n if not self.environment[ohostedcons.Upgrade.DST_BACKUP_FILE]:\n self.environment[\n ohostedcons.Upgrade.DST_BACKUP_FILE\n ] = os.path.join(\n '/root/',\n os.path.basename(backup_file_path)\n )\n if not valid and not interactive:\n raise RuntimeError(_('Invalid backup file'))\n\n @plugin.event(\n stage=plugin.Stages.STAGE_CLEANUP,\n )\n def _cleanup(self):\n if self._tmp_files_directory_name is not None:\n shutil.rmtree(self._tmp_files_directory_name)\n\n# vim: expandtab tabstop=4 shiftwidth=4\n","repo_name":"wenzt/my_hosted_engine_setup","sub_path":"plugins/gr-he-upgradeappliance/vm/configurevm.py","file_name":"configurevm.py","file_ext":"py","file_size_in_byte":12340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"70814341159","text":"from collections import UserList\n\nfrom utils import translate, get_info\nfrom score import ScoreFast\n\nfrom tqdm import tqdm\nfrom PIL import Image\nimport numpy as np\n\n\nclass DPNode:\n def __init__(self, score, sequence):\n self.score = score\n self.sequence = sequence\n\n\nclass SymbolInfo:\n def __init__(self, symbol, width, colormap):\n self.symbol = symbol\n self.width = width\n self.colormap = colormap\n\n def __repr__(self):\n return f'{self.symbol}: {self.width},\\n{self.colormap}'\n\n\nclass ModList(UserList):\n def __init__(self, mod):\n super().__init__()\n self.mod = mod\n self.data = [None] * mod\n\n def __getitem__(self, item):\n item = item % self.mod\n return super().__getitem__(item)\n\n def __setitem__(self, key, value):\n key = key % self.mod\n super().__setitem__(key, value)\n\n\n# any whitespace is bad\n# letters = list('qwertyuiopasdfghjklzxcvbnm'\n# 'йцукенгшщзхъфывапролджэячсмитьбю'\n# '!?1234567890-+—.,'\n# 'QWERTYUIOPASDFGHJKLZXCVBNM'\n# 'ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ')\n# letters = list('qwertyiopasdfghjklzxcvbnm')\n# letters = list('ЙlSf!im-+—vWHabcde')\n# letters = list('l—L')\nletters = list('.kнRqpмщ6KЮхiOftVYлlушпДzЬчЪu,2-жBвHНZТгgзPAйь4XЭoDMъ+тx1n78IФ?!aдwЧиdыЯc—ЙhкГvИCEц')\n\nnice_len = 36400 # around 0.55 pt\n# nice_in_page = 620\nnice_in_page = 1000 # landscape\nsize = nice_len * nice_in_page\n# print(size)\n\n# baselineskip is 12pt = 12*65536sp\nbaselineskip = 12 * 65536\n\n\nsymbols: list[SymbolInfo] = []\nfor letter in tqdm(letters):\n symbols.append(SymbolInfo(letter, *get_info(letter)))\nmin_color = min([s.colormap.min() for s in symbols])\nmax_width = max([s.width for s in symbols])\n\nimage = np.array(Image.open('frog.jpg').convert('L'))\nlines = round(size / baselineskip / image.shape[1] * image.shape[0])\nprint(lines)\n\nscorer = ScoreFast(image, size, baselineskip * lines, col_min=min_color)\n\n\ndef dp(height):\n arr = ModList(max_width+1)\n arr[0] = DPNode(0, [])\n for i in tqdm(range(size), leave=False):\n if arr[i]:\n curr_score = arr[i].score\n curr_seq = arr[i].sequence\n for symbol_ind, symb in enumerate(symbols):\n w = symb.width\n if i + w <= size:\n s = scorer(i, height, w, baselineskip, symb.colormap)\n if (arr[i + w] is None\n or 
curr_score + s < arr[i + w].score):\n arr[i + w] = DPNode(curr_score + s, curr_seq + [symbol_ind])\n arr[i] = None\n return arr[size]\n\n\nanswer = ''\ncurr_height = 0\nfor line in tqdm(range(lines)):\n answer += translate(dp(curr_height).sequence, letters) + '\\n\\n'\n curr_height += baselineskip\nprint(answer)\n","repo_name":"DKozl50/LaTeX_ArT","sub_path":"latex_art/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38556779023","text":"\"\"\"\nGet artist sale to sale comparison.\n\nRun a comparison for each artist grouping similar paintings together\nbased on dimensions. The definition of similar is based on user input X\nwhen running the script, where any painting within dimensions +/- X are\nconsidered similar. The input X is in centimeters.\n\"\"\"\n\nimport re\nimport sys\n\nimport pandas as pd\n\nimport config\n\n\ndef find_dimensions(medium_dimensions: str) -> dict:\n \"\"\"Extract dimensions from raw data dimension description.\"\"\"\n if len(re.compile(r'cm').findall(medium_dimensions)) == 2:\n numbers = re.compile(r'([\\d\\.]+)\\scm').findall(medium_dimensions)\n height = numbers[0]\n width = numbers[1]\n else:\n height = re.compile(r'\\(([\\d\\.]+)').findall(medium_dimensions)[0]\n width = re.compile(r'([\\d\\.]+)\\scm').findall(medium_dimensions)[0]\n return {'height': float(height), 'width': float(width)}\n\n\ndef get_usd_price(price_realized: str) -> float:\n \"\"\"Get the USD price from the posted realized price.\"\"\"\n price = re.sub(r'[^\\d]', '', price_realized)\n if not price:\n return None\n else:\n price = float(price)\n if '£' in price_realized:\n price *= config.GBP_TO_USD_EXCHANGE_RATE\n return price\n\n\ndef data_prep() -> pd.DataFrame:\n \"\"\"Fetch and prep data.\"\"\"\n data_nov = pd.read_csv(config.NOV_2017_SALE_DATA_CSV)\n data_mar = pd.read_csv(config.MAR_2018_SALE_DATA_CSV)\n data_nov['month_year'] = '2017_11'\n data_mar['month_year'] = '2018_03'\n data = pd.concat([data_nov, data_mar], ignore_index=True)\n dimensions = pd.DataFrame(data['medium_dimensions'].apply(find_dimensions).tolist())\n data = pd.concat([data, dimensions], axis=1)\n data['price_usd'] = data['price_realized'].astype(str).apply(get_usd_price)\n return data\n\n\ndef find_similar_objects(data: pd.DataFrame, lot: pd.Series,\n similarity_threshold: float) -> pd.DataFrame:\n \"\"\"\n Find similar objects within data from given lot.\n \n Similar is defined by the specified similarity threshold, in which\n all similar objects are within +/- the similarity threshold.\n \"\"\"\n similar_objects = data[\n (data.artist_description == lot.artist_description)\n & (\n data.width.between(\n lot.width - similarity_threshold,\n lot.width + similarity_threshold, \n inclusive=True\n )\n )\n & (\n data.height.between(\n lot.height - similarity_threshold,\n lot.height + similarity_threshold,\n inclusive=True\n )\n )\n ]\n return similar_objects\n\n\ndef get_report(data: pd.DataFrame, similarity_threshold: float) -> pd.DataFrame:\n \"\"\"Get report from provided data and similarity threshold.\"\"\"\n lot_groupings = []\n for _, lot in data.iterrows():\n similar_objects = find_similar_objects(data, lot, similarity_threshold)\n avg_price_2017 = similar_objects[\n similar_objects.month_year == '2017_11'\n ]['price_usd'].mean()\n avg_price_2018 = similar_objects[\n similar_objects.month_year == '2018_03'\n ]['price_usd'].mean()\n if avg_price_2017 >= 0 and 
avg_price_2018 >= 0:\n grouping_id = similar_objects.sort_values('lot_number')['lot_number'].sum()\n metadata = similar_objects[['title', 'price_usd', 'height', 'width']].to_dict()\n grouping = {\n 'artist': lot.artist_description,\n 'grouping_id': grouping_id,\n 'avg_price_2017_11': avg_price_2017,\n 'avg_price_2018_03': avg_price_2018,\n 'price_diff': (avg_price_2018 - avg_price_2017) / avg_price_2017,\n 'metadata': metadata,\n }\n lot_groupings.append(grouping)\n else:\n pass\n return lot_groupings\n\nif __name__ == '__main__':\n SIMILARITY_THRESHOLD = float(sys.argv[1])\n DATA = data_prep()\n LOT_GROUPINGS = get_report(DATA, SIMILARITY_THRESHOLD)\n LOT_GROUPINGS = pd.DataFrame(LOT_GROUPINGS).drop_duplicates('grouping_id')\n LOT_GROUPINGS.to_csv(config.REPORT_CSV)\n","repo_name":"syargeau/christies-data-extraction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31766245241","text":"\nimport configparser\nfrom datetime import datetime, timezone, timedelta\n\nfrom models import AniDBTitle\n\nconfig_parser = configparser.ConfigParser(interpolation=None)\n\n\nclass Cache:\n \"\"\" Represents a cache for anime data \"\"\"\n __instance = None\n refresh_days = 30\n \n def __new__(cls, db_connection=None):\n if Cache.__instance is None:\n Cache.__instance = object.__new__(cls)\n Cache.__instance.db_connection = db_connection\n \n return Cache.__instance\n \n def __init__(self, db_connection=None):\n self.db_connection = db_connection\n self.cache_con = db_connection['cache']\n\n def find(self, anime: AniDBTitle) -> AniDBTitle:\n \"\"\" Checks cache to see if data already existing. If last_updated is later than configured refresh time (30 days default)\n then None will be returned \"\"\"\n existing_record = self.cache_con.find_one({'title': anime.title})\n\n if existing_record is not None and existing_record != {} and not Cache.is_expired(existing_record['last_updated']):\n return AniDBTitle.fromJSON(existing_record)\n \n print('{} is not in cache'.format(anime.title))\n return None\n \n def update(self, anime: AniDBTitle) -> AniDBTitle:\n \"\"\" For a given anime, insert or update DB entry \"\"\"\n found_anime = self.find(anime)\n if found_anime is None:\n result = self.cache_con.insert_one(Cache.create_cache_object(anime))\n anime._id = result.inserted_id\n else:\n self.cache_con.update_one({'_id': found_anime.get_db_id()}, {'$set': Cache.create_cache_object(anime)})\n anime._id = found_anime.get_db_id()\n\n return self.find(anime)\n \n @staticmethod\n def create_cache_object(anime):\n return {\n 'title': anime.title,\n 'anidb_id': anime.anidb_id,\n 'tags': [a.toJSON() for a in anime.tags],\n 'last_updated': datetime.now(timezone.utc)\n }\n\n @staticmethod\n def is_expired(last_updated):\n prev_day = last_updated\n now = datetime.utcnow()\n\n if isinstance(last_updated, str):\n prev_day = datetime.utcfromtimestamp(int(last_updated))\n\n if now >= prev_day + timedelta(days=30):\n return True\n\n return False\n \n","repo_name":"majora2007/plex-anime-tags","sub_path":"cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"12645617715","text":"from astropy import units as u\nimport logging\n\nimport pysynphot\nfrom scipy.interpolate import InterpolatedUnivariateSpline as spline\nfrom scipy.optimize import brute\nimport numpy 
as np\n\nfrom kglib.spectral_type import Mamajek_Table\n\n\nclass CompanionFitter(object):\n def __init__(self, filt, T_low=3500, T_high=12000, dT=10, feh=0.0):\n \"\"\"\n Initialize a CompanionFitter instance. It will pre-tabulate\n synthetic photometry for main-sequence stars with Temperatures\n ranging from T_low to T_high, in steps of dT K. All the\n models will have [Fe/H] = feh. Finally, we will interpolate\n the relationship between temperature and magnitude so that\n additional photometry points are made quickly.\n\n Parameters:\n ===========\n - filt: A pysynphot bandpass encoding the filter information.\n\n - T_low, T_high, dT: floats\n Parameters describing the temperature grid\n to interpolate\n\n -feh: float\n The metallicity [Fe/H] to use for the models\n \"\"\"\n # Use the Mamajek table to get main-sequence relationships\n MT = Mamajek_Table.MamajekTable()\n MT.mam_df['radius'] = 10**(0.5*MT.mam_df.logL - 2.0*MT.mam_df.logT + 2.0*3.762)\n MT.mam_df['logg'] = 4.44 + np.log10(MT.mam_df.Msun) - 2.0*np.log10(MT.mam_df.radius)\n teff2radius = MT.get_interpolator('Teff', 'radius')\n teff2logg = MT.get_interpolator('Teff', 'logg')\n\n # Pre-calculate the magnitude at each temperature\n self.temperature = np.arange(T_low, T_high, dT)\n self.magnitude = np.zeros(self.temperature.size)\n for i, T in enumerate(self.temperature):\n logging.info('i = {}/{}: T = {:.1f}'.format(i+1, self.temperature.size, T))\n logg = teff2logg(T)\n R = teff2radius(T)\n spec = pysynphot.Icat('ck04models', T, feh, logg) * R**2\n obs = pysynphot.Observation(spec, filt)\n self.magnitude[i] = obs.effstim('abmag')\n\n # Interpolate the T-mag curve\n self.interpolator = spline(self.temperature, self.magnitude)\n\n\n def fit(self, T_prim, delta_mag, delta_mag_error, T_range=(3500, 9000)):\n \"\"\"\n Fit for the companion temperature given a primary temperature and delta-magnitude measurement\n\n Parameters:\n ===========\n - T_prim: float\n The primary star temperature (in Kelvin)\n\n - delta_mag: float\n The magnitude difference between the primary and companion\n\n - delta_mag_error: float\n Uncertainty in the magnitude difference\n\n - T_range: tuple of size 2\n The lower and upper bounds on the companion temperature.\n \"\"\"\n\n def lnlike(T2, T1, dm, dm_err):\n dm_synth = self.__call__(T2) - self.__call__(T1)\n logging.debug('T2 = {}: dm = {}'.format(T2, dm_synth))\n return 0.5 * (dm - dm_synth)**2 / dm_err**2\n\n T_sec = brute(lnlike, [T_range], args=(T_prim, delta_mag, delta_mag_error))\n return T_sec\n\n\n\n def __call__(self, T):\n \"\"\"\n Evaluate the spline at the given temperature, returning the interpolated magnitude\n \"\"\"\n return self.interpolator(T)\n\n @classmethod\n def make_box_filter(cls, center, width):\n \"\"\"\n Make a box filter with the given parameters. 
Both center and width should either be in angstroms, \n or be astropy quantities.\n \"\"\"\n if not isinstance(center, u.quantity.Quantity):\n center *= u.angstrom\n if not isinstance(width, u.quantity.Quantity):\n width *= u.angstrom\n\n return pysynphot.Box(center.to(u.angstrom).value, width.to(u.angstrom).value)\n\n\n\n\n","repo_name":"kgullikson88/gullikson-scripts","sub_path":"kglib/utils/synthetic_photometry.py","file_name":"synthetic_photometry.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"29820285959","text":"### settings ###\nimport socket, os\nfrom conf.project import PROJECT_ID\n\ndef contains(str, substr):\n if str.find(substr) != -1:\n return True\n else:\n return False\n\nif contains(socket.gethostname(), 'webfaction'):\n LIVEHOST = True\nelse:\n LIVEHOST = False\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\n\n## Global settings ###\nROOT_URLCONF = 'urls'\nTEMPLATE_DIRS = (\n os.path.join(PROJECT_ROOT, 'templates/'), \n)\nSECRET_KEY = ''\nINSTALLED_APPS = (\n 'django.contrib.staticfiles',\n #'fabric',\n 'apps.new_secret', \n)\n\n## Settings used when running live on WebFaction ##\nif LIVEHOST:\n DEBUG = False\n STATIC_ROOT = '/home/edhedges/webapps/static/PROJECT_ID/'\n STATIC_URL = 'http://www.edhedges.com/static/PROJECT_ID/'\n\n## Settings used locally for development ##\nif not LIVEHOST:\n DEBUG = True\n STATIC_ROOT = '/static/'\n STATIC_URL = '/static/'\n STATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT, 'static/'),\n )","repo_name":"edhedges/edjmicro","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20803239088","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom userinfo import views\n\nurlpatterns = [\n url(r'login/$', views.signin, name='login'),\n url(r'loginin/', views.login_, name='login_in'),\n url(r'register/$', views.register, name='register'),\n url(r'registerin/', views.register_, name='register_in'),\n url(r'logout/$', views.logout_, name='logout'),\n url(r'buyinfo/$', views.buyinfo, name='buyinfo'),\n url(r'infomes/$', views.infomes, name='infomes'),\n url(r'infomesin/', views.infomes_, name='infomes_in'),\n url(r'service/$', views.service, name='service'),\n]","repo_name":"suntaopython/cart","sub_path":"Usedcar_all/userinfo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71807577000","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom ansible.module_utils.six import iteritems\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nimport atexit\nimport ssl\nimport traceback\n\ntry:\n import requests\n from pyVim import connect\n from pyVmomi import vim, vmodl\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\nclass TaskError(Exception):\n pass\n\ndef get_connection_argument_spec():\n return dict(\n hostname=dict(required=True),\n port=dict(default='443', required=False),\n username=dict(aliases=['user', 'uname'], required=True),\n password=dict(aliases=['pass', 'pwd'], required=True, no_log=True),\n validate_certs=dict(type='bool', required=False, default=False),\n )\n\ndef get_connection_info(module):\n hostname = module.params['hostname']\n port = module.params['port']\n username = 
module.params['username']\n password = module.params['password']\n validate_certs = module.params['validate_certs']\n return hostname, port, username, password, validate_certs\n\ndef get_service_instance(module):\n hostname, port, username, password, validate_certs = get_connection_info(module)\n\n if validate_certs and not hasattr(ssl, 'SSLContext'):\n module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9.'\n 'Either update python or use validate_certs=false')\n try:\n if validate_certs:\n service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password)\n else:\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n context.verify_mode = ssl.CERT_NONE\n service_instance = connect.SmartConnect(host=hostname, user=username,\n pwd=password, sslContext=context)\n except Exception as e:\n module.fail_json(msg=e.message, exception=traceback.format_exc())\n finally:\n if not service_instance:\n module.fail_json(msg='could not connect to \"{0}\"'.format(hostname))\n atexit.register(connect.Disconnect, service_instance)\n return service_instance\n\ndef collect_objects_properties(si, type, props_list=None, include_object=True):\n \"\"\"\n Collect properties for managed objects from a view ref\n\n Args:\n si (ServiceInstance): ServiceInstance connection\n object_type pyVmomi.vim.*): Type of managed object\n props_list list): List of properties to retrieve\n include_object (bool): If True include the managed objects\n\n Returns:\n A list of properties for the managed objects\n \"\"\"\n\n # Create object specification to define the starting point of\n # inventory navigation\n root_folder = si.content.rootFolder\n view_container = si.content.viewManager.CreateContainerView(\n container=root_folder, type=[type], recursive=True)\n object_spec = vmodl.query.PropertyCollector.ObjectSpec()\n object_spec.obj = view_container\n object_spec.skip = True\n\n # Create a traversal specification to identify the path for collection\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = 'traverseEntities'\n traversal_spec.path = 'view'\n traversal_spec.skip = False\n traversal_spec.type = view_container.__class__\n object_spec.selectSet = [traversal_spec]\n\n # Identify the properties to the retrieved\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.type = type\n\n if not props_list:\n property_spec.all = True\n\n property_spec.pathSet = props_list\n\n # Add the object and property specification to the\n # property filter specification\n filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n filter_spec.objectSet = [object_spec]\n filter_spec.propSet = [property_spec]\n\n # Retrieve properties\n collector = si.content.propertyCollector\n managed_objects = collector.RetrieveContents([filter_spec])\n\n data = []\n for object in managed_objects:\n properties = {}\n for property in object.propSet:\n properties[property.name] = property.val\n\n if include_object:\n properties['obj'] = object.obj\n\n data.append(properties)\n return data\n\ndef get_entity(si, name, type=vim.VirtualMachine):\n entity_data = collect_objects_properties(si, type=type, props_list=['name'])\n entity = next((entity for entity in entity_data if entity['name'].lower() == name.lower()), None)\n if entity is None: return None\n return entity['obj']\n\ndef get_entity_by_name_and_type(si, name, type):\n properties = ['name']\n type = type.lower()\n\n if type == 'host':\n vim_type = vim.HostSystem\n elif type == 'vm':\n vim_type = 
vim.VirtualMachine\n elif type == 'datastore':\n vim_type = vim.Datastore\n elif type == 'network':\n vim_type = vim.Network\n else:\n vim_type = vim.VirtualMachine\n\n entity_data = collect_objects_properties(si, type=vim_type, props_list=properties)\n entity = next((entity for entity in entity_data if entity['name'] == name), None)\n\n if entity is None:\n return entity\n\n return entity['obj']\n\ndef get_folder_type(type):\n type = type.lower()\n if type == 'host':\n return 'ComputeResource'\n if type == 'vm':\n return 'VirtualMachine'\n if type == 'datastore':\n return 'Datastore'\n if type == 'network':\n return 'Network'\n return None\n\ndef are_folder_contain_type(folder, type):\n result = next((folder_type for folder_type in folder.childType if folder_type.lower() == type.lower()), None)\n if result is None:\n return False\n else:\n return True\n\ndef get_folder_by_name_and_type(si, name, type):\n folder_data = collect_objects_properties(si, type=vim.Folder, props_list=['name'])\n filtered_folders = [folder for folder in folder_data if folder['name'].lower() == name.lower()]\n\n if len(filtered_folders) == 0:\n return None\n\n if type is None:\n return filtered_folders[0]['obj']\n\n folder_type = get_folder_type(type)\n folder = next((folder for folder in filtered_folders if are_folder_contain_type(folder['obj'], folder_type)), None)\n if folder is None: return folder\n return folder['obj']\n\ndef get_all_snapshots_info(snapshot_tree):\n snapshots = []\n\n if snapshot_tree is None:\n return snapshots\n\n for snapshot in snapshot_tree:\n snapshots.append(snapshot)\n child_snapshot = snapshot.childSnapshotList\n snapshots.extend(get_all_snapshots_info(child_snapshot))\n\n return snapshots\n\ndef get_snaphot_from_vm_by_name(vm, name):\n snapshots = get_all_snapshots_info(vm.snapshot.rootSnapshotList)\n snapshot = next((snap for snap in snapshots if snap is not None and snap.name.lower() == name.lower()), None)\n\n if snapshot is None:\n return snapshot\n\n return snapshot.snapshot","repo_name":"Akasurde/vsphere_module","sub_path":"library/vsphere_utils.py","file_name":"vsphere_utils.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3458597831","text":"# -*- coding: utf-8 -*-\n# @Time : 18/6/25 下午 9:12\n# @Author : Ji\n# @File : handover.py\n# @Software: PyCharm\n\nfrom LTE import *\nimport numpy as np\nimport random\nimport math\nimport matplotlib.pyplot as plt\n\nNUM_STATIONS = 15 # number of cellular stations\n\n# possible of noise and signal strength and computation of SNR obtained from information page below\n# http://www.speedguide.net/faq/how-to-read-rssisignal-and-snrnoise-ratings-440\n\n\nSRRI_RANGE = range(-50, 1) # range of average signal stength in dB\nNOISE_RANGE = range(-120, -80) # range of noise level in dB\n\nLOCATION_X = range(0, 500) # 0 to 500 possible x coordinates\nLOCATION_Y = range(0, 500) # 0 to 500 possible x coordinates\n\nSPEED_RANGE = range(0, 16)\nSTATIONS = []\n\nPath_loss_exponent =1.6\npower = 23\n\ntime_simult = 0\n# SimultRunTime = 3000\nHANDOVER_TIMES = 0\nPINGPANG_TIMES = 0\n\nrandom.seed(2)\n\n\ndef distance(location1,location2):\n dis = math.sqrt((location1.coordinate[0]-location2.coordinate[0])**2 +\n (location1.coordinate[1]-location2.coordinate[1])**2)\n return dis\n\ndef calcultRSSI(d,power):\n temp = math.log(d,10) * (10**Path_loss_exponent) + power\n return (temp)\n\n# random locations, signal strength and noise. 
SNR is the difference between the two\n# def generateStations():\nfor _ in range(NUM_STATIONS):\n pos = Location(random.choice(LOCATION_X), random.choice(LOCATION_Y))\n srri = random.choice(SRRI_RANGE)\n noise = random.choice(NOISE_RANGE)\n STATIONS.append(eNB(_, pos, srri, noise))\n\nlocationset_x = []\nlocationset_y = []\nfor bs in STATIONS:\n locationset_x.append(bs.location[0])\n locationset_y.append(bs.location[1])\n print(bs)\n\n# plt.plot(locationset_x,locationset_y, marker = '*')\n# plt.show()\n\ndef NeedHandover(ue):\n result = -1\n currentRssi = 0\n for eNB in STATIONS:\n if eNB.node_id == ue.BS_id:\n d = distance(eNB.location, ue.location)\n currentRssi = calcultRSSI(d,power)\n # print(' CurrentRssi:'+str(currentRssi))\n # print(' CurrentDistance:'+str(d))\n for eNB in STATIONS:\n if eNB.node_id != ue.BS_id:\n d = distance(eNB.location,ue.location)\n # print(' distance: ',end='')\n # print(d)\n eNBRSSI = calcultRSSI(d,power)\n # print('bs rssi:' + str(eNBRSSI))\n if eNBRSSI < currentRssi:\n result = eNB.node_id\n currentRssi = eNBRSSI\n return result\n\n\n# computing inverse distances from one stations to all the other ones (used for transition probability matrix)\n# closer distances get higher values :)\ndef run(SimultRunTime,predict_rate=1):\n time_simult = 0\n HANDOVER_TIMES =0\n hdtimesset, simltset, pptset, pre_pptset, timeset = [], [], [], [], []\n # location_ue = Location(random.choice(LOCATION_X), random.choice(LOCATION_Y))\n location_ue = Location(250, 250)\n speed = random.choice(SPEED_RANGE)\n final_location = Location(location_ue[0] + math.sqrt(2) * random.randint(-1, 1) * speed * 1000 / 3600,\n location_ue[1] + math.sqrt(2) * random.randint(-1, 1) * speed * 1000 / 3600)\n\n ue = UE(location_ue, final_location, speed, BS_id=0) # creating the user\n print('UE INFO: ', ue)\n while time_simult < SimultRunTime:\n print('UE INFO: ', ue)\n ue.move_in_second()\n time_simult = time_simult + 1\n need_id = NeedHandover(ue)\n if need_id >= 0:\n HANDOVER_TIMES += 1\n ue.handover(need_id)\n else:\n ue.handover(ue.BS_id)\n if time_simult%500 ==0:\n hdtimesset.append(HANDOVER_TIMES)\n simltset.append(time_simult)\n pptset.append(ue.pingpang_times)\n pre_pptset.append(ue.pingpang_times*predict_rate)\n timeset.append(time_simult)\n\n print('handover times:' + str(HANDOVER_TIMES))\n print('simulation time:' + str(time_simult))\n print('pingpang handover times: ' + str(ue.pingpang_times))\n\n # return HANDOVER_TIMES,time_simult,ue.pingpang_times,ue.pingpang_times*predict_rate\n return hdtimesset, simltset, pptset, pre_pptset, timeset\nif __name__ == '__main__':\n # location_ue = Location(random.choice(LOCATION_X), random.choice(LOCATION_Y))\n location_ue = Location(250,250)\n speed = random.choice(SPEED_RANGE)\n final_location = Location(location_ue[0] + math.sqrt(2)*random.randint(-1,1)*speed*1000/3600,\n location_ue[1] + math.sqrt(2)*random.randint(-1,1)*speed*1000/3600)\n\n ue = UE(location_ue, final_location, speed,BS_id=0) # creating the user\n print('UE INFO: ', ue)\n while time_simult < 1000:\n print('UE INFO: ', ue)\n ue.move_in_second()\n time_simult = time_simult+1\n need_id = NeedHandover(ue)\n if need_id >= 0:\n HANDOVER_TIMES += 1\n ue.handover(need_id)\n else:\n ue.handover(ue.BS_id)\n\n\n print('handover times:'+str(HANDOVER_TIMES))\n print('simulation time:'+str(time_simult))\n print('pingpang handover times: '+ 
str(ue.pingpang_times))\n","repo_name":"Wings-Ji/Handover_simu","sub_path":"handover.py","file_name":"handover.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"9477303438","text":"from app.dao.model.movies import Movie\nfrom app.dao.model.directors import Director\nfrom app.dao.model.genres import Genre\n\n\nclass MovieDAO:\n def __init__(self, session):\n self.session = session\n\n def get_movies(self, mid=None, **kwargs):\n query = self.session.query(Movie)\n if mid:\n return query.get(mid)\n if kwargs:\n for key, value in kwargs.items():\n query = query.filter(eval(f\"Movie.{key}\") == int(value))\n return query\n\n def create(self, data):\n movie = Movie(**data)\n self.session.add(movie)\n self.session.commit()\n return movie\n\n def update(self, movie):\n self.session.add(movie)\n self.session.commit()\n\n def delete(self, movie):\n self.session.delete(movie)\n self.session.commit()\n","repo_name":"Ivan-2022/Homework18","sub_path":"app/dao/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37712509074","text":"from time import sleep\nfrom py_imessage import imessage\n\nnumber = \"4086678219\"\nlyrics = \"lyrics.txt\"\n\n\n#get words from lyrics text\ndef get_lyrics():\n with open(lyrics) as file:\n data = [line.strip() for line in file]\n string = \" \".join(data)\n words = string.split()\n return words\n\n#sending each word\ndef send_messages(messages, phone_num):\n for message in messages:\n send_message(message, phone_num)\n # print(message)\n sleep(.2)\n\n#sending message in imessage\ndef send_message(message, phone_num):\n imessage.send(phone_num, message)\n\n#main function\ndef lyrics_prank(phone_number, lyrics):\n words_list = get_lyrics()\n send_messages(words_list, phone_number)\n\n\nlyrics_prank(number, get_lyrics())\n\n","repo_name":"aibarrola/LyricsPrank","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5941494732","text":"import copy\nimport urllib.request\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nimport cv2\nfrom skimage import transform\n\nimport torch\nimport torchvision\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n\n\ndef prepare_input(img, input_height, input_width):\n img = cv2.imread(img)\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)\n aspect = img.shape[1] / float(img.shape[0])\n if (aspect > 1):\n res = int(aspect * input_height)\n img_res = transform.resize(img_rgb, (input_width, res))\n if (aspect < 1):\n res = int(input_width / aspect)\n img_res = transform.resize(img_rgb, (res, input_height))\n if (aspect == 1):\n img_res = transform.resize(img_rgb, (input_width, input_height))\n img_res /= 255.0\n img_res = torchvision.transforms.ToTensor()(img_res)\n return img_res\n\n\ndef plot_img_bbox(img, target, num_detection, treshholds=None, inst_classes=[]):\n # plot the image and bboxes\n # Bounding boxes are defined as follows: x-min y-min width height\n dir_jpg = 'static/Results/' + str(num_detection) + '_picture_with_det' + '.jpg'\n fig, a = plt.subplots(1, 1)\n fig.set_size_inches(10, 10)\n a.imshow(img, cmap='gray')\n for number, box in enumerate(target['boxes']):\n if 
target['scores'][number] > treshholds:\n x, y, width, height = box[0], box[1], box[2]-box[0], box[3]-box[1]\n rect = patches.Rectangle((x, y),\n width, height,\n linewidth=2,\n edgecolor='r',\n facecolor='none')\n a.add_patch(rect)\n a.text(x, y,\n (round(target['scores'][number].item() * 100, 2), inst_classes[target['labels'][number].item()]),\n bbox=dict(facecolor='white', alpha=0.5))\n\n fig.savefig(dir_jpg)\n\n\ndef apply_nms(orig_prediction, iou_thresh=0.3):\n # torchvision returns the indices of the bboxes to keep\n keep = torchvision.ops.nms(orig_prediction['boxes'], orig_prediction['scores'], iou_thresh)\n\n final_prediction = copy.deepcopy(orig_prediction)\n final_prediction['boxes'] = final_prediction['boxes'][keep]\n final_prediction['scores'] = final_prediction['scores'][keep]\n final_prediction['labels'] = final_prediction['labels'][keep]\n\n return final_prediction\n\n\ndef torch_to_pil(img):\n return torchvision.transforms.ToPILImage()(img).convert('RGB')\n\n\ndef get_object_detection_model(num_classes):\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn()\n\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n return model\n\n\ndef init_model():\n num_classes = 4\n\n model = get_object_detection_model(num_classes)\n weight = 'static/Others/pretrained_weights.pth'\n model.load_state_dict(torch.load(weight, map_location=torch.device('cpu')))\n\n return model\n\n\ndef download_img(url, num_detection):\n dir_img = 'static/Results/' + str(num_detection) + '_picture' + '.jpg'\n img = urllib.request.urlopen(url).read()\n with open(dir_img, \"wb\") as file:\n file.write(img)\n return dir_img\n\n\ndef make_detection(img, num_detection):\n fruit_classes = ['background', 'apple', 'banana', 'orange']\n\n model = init_model()\n model.eval()\n with torch.no_grad():\n prediction = model([img])[0]\n\n nms_prediction = apply_nms(prediction, iou_thresh=0.3)\n plot_img_bbox(torch_to_pil(img), nms_prediction, num_detection, treshholds=0.5, inst_classes=fruit_classes)\n\n\ndef make_result(url, num_detection):\n img = download_img(url, num_detection)\n img = prepare_input(img, 480, 480)\n make_detection(img, num_detection)","repo_name":"Sivchan/fruit_recognitions_pet_project","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23839068101","text":"import serial\nimport serial.tools.list_ports as port_list\nimport time\n\nPort = 'COM5'\n\nprint('List of available ports:')\nports = list(port_list.comports())\nfor p in ports:\n print(p)\nprint('-----------------------')\n# a=0;\n# for i in range(10):\n# in_hex = input(\"Enter a hex value(0 -> f): \");\n# a=a+1;\n# print(a)\n\nser = serial.Serial(Port, baudrate=9600, timeout=1, bytesize=8, stopbits=serial.STOPBITS_ONE);\nprint('Connecting to ' + ser.name) # check which port was really used\n\nfor i in range(100):\n in_hex = input(\"Enter a hex value(0 -> f): \")\n ser.write(bytes(in_hex, encoding='utf8'));\n x = ser.read(4)\n # a = ser.read() \n # b = ser.read() \n # c = ser.read() \n # d = ser.read() \n print('Input: ' + str(int(in_hex,16)) + ' ==> CRC32: ' + str(hex(int.from_bytes(x, byteorder='little'))))\n # print(str(int.from_bytes(a, byteorder='little')))\n # print(str(int.from_bytes(b, byteorder='little')))\n # print(str(int.from_bytes(c, byteorder='little')))\n # 
print(str(int.from_bytes(d, byteorder='little')))","repo_name":"arsalanfiroozi/uProcessor","sub_path":"Project/Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43326689709","text":"from pyrogram import Client,filters,enums\nfrom pyrogram.handlers import MessageHandler\nimport asyncio\n\n\napi_id = 13503794\napi_hash = \"7c5edb66000bcb12d914019f87bffed4\"\nbot_token = \"5600997688:AAG_8tBfLx4XOXorGSaXBNt521NKMI2l_nI\"\n\n\nadmin = [5329205911,5559099357]\n\nbot = Client(\n \"my_bot\",\n api_id=api_id, api_hash=api_hash,\n bot_token=bot_token\n)\n\n\n@bot.on_message(filters.command('start'))\ndef start(bot,msg):\n bot.send_message(msg.chat.id,\"Hey ! \")\n\n\n\n\n@bot.on_message(filters.video | filters.document )\nasync def start(bot,msg):\n #await asyncio.sleep(1)\n #await bot.copy_message(-1001619271851,msg.chat.id,msg.id)\n await asyncio.sleep(3600)\n #await bot.copy_message(-1001619887800,msg.chat.id,msg.id)\n await bot.delete_messages(msg.chat.id,msg.id)\n\n@bot.on_message(filters.photo )\nasync def start(bot,msg):\n #await asyncio.sleep(1)\n #await bot.copy_message(-1001619271851,msg.chat.id,msg.id)\n #await asyncio.sleep(15)\n #await bot.copy_message(-1001512328886,msg.chat.id,msg.id)\n await asyncio.sleep(1800)\n await bot.delete_messages(msg.chat.id,msg.id)\n \n\n@bot.on_message(filters.animation)\nasync def del_filt(bot,msg):\n await asyncio.sleep(30)\n await bot.delete_messages(msg.chat.id,msg.id)\n\n\n\n@bot.on_message( filters.web_page)\nasync def service_msg(bot,msg):\n await asyncio.sleep(1)\n await bot.delete_messages(msg.chat.id,msg.id)\n \n \n\n\n \n \n\n\nprint(\"bot started\")\nbot.run()\n","repo_name":"mayavigg/bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9427525083","text":"import csv\nimport arcpy\n\n#albers=arcpy.SpatialReference(3005)\nalbers=arcpy.SpatialReference(26909) # we're not using Albers anymore\nutm=arcpy.SpatialReference(26909)\n\ngdb='G:/Projects/Various_Clients/Galore Creek/Mapping_Data.gdb'\narcpy.env.workspace = gdb\neditor=arcpy.da.Editor(gdb)\neditor.startEditing()\neditor.startOperation()\n\n##\n# Get bridges from templates layer\n\ncursor=arcpy.da.SearchCursor(\"FEL2A_LLine/RoadTemplate\",\n ['SHAPE@','RoadName','RoadCode'],\n u\"{} like 'BR%' and {} like '2021-005J%'\".format(\n arcpy.AddFieldDelimiters(\"FEL2A_LLine/RoadTemplate\",'Template'),\n arcpy.AddFieldDelimiters(\"FEL2A_LLine/RoadTemplate\",'RoadCode')))\n\nresults=list(cursor)\n\ncursor=arcpy.da.InsertCursor('Bridges',['RoadName','RoadCode','LOA','SHAPE@'])\n\ntry:\n for row in results:\n start=row[0].firstPoint\n end=row[0].lastPoint\n midpoint=arcpy.PointGeometry(arcpy.Point((start.X + end.X)/2,(start.Y + end.Y)/2),albers)\n\n attr=list(row[1:])\n for i in range(0,len(attr)):\n if attr[i] == '':\n attr[i] = None\n attr.append(round(row[0].length,1))\n attr.append(midpoint)\n cursor.insertRow(attr)\nexcept:\n editor.stopOperation()\n editor.stopEditing(False)\n raise\n\neditor.stopOperation()\neditor.stopEditing(True)\n","repo_name":"piceaglauca/arcpy-scripts","sub_path":"InsertBridgesFromTemplates.py","file_name":"InsertBridgesFromTemplates.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} 
+{"seq_id":"32485927913","text":"import yfinance as yf\n# import FinanceDataReader as fdr\nimport pandas as pd\nimport glob\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.optimizers import Adam\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nimport datetime\nimport pickle\nimport numpy as np\nfrom tensorflow.keras.callbacks import EarlyStopping # early_stopping 걸기\n# from matplotlib.backends.backend_pdf import PdfPages\nearly_stopping=EarlyStopping(monitor='val_loss',patience=5) # early_stopping 걸기\npd.set_option('display.max_columns', None) # 열이 전부다 나오게\n\n# currencies_lists = [('EURUSD=X', 'EUR-USD'), ('JPY=X', 'USD-JPY'), ('GBPUSD=X', 'GBP-USD'), ('AUDUSD=X', 'AUD-USD'), ('NZDUSD=X', 'NZD-USD'),\n# ('EURJPY=X', 'EUR-JPY'), ('GBPJPY=X', 'GBP-JPY'), ('EURGBP=X', 'EUR-GBP'), ('EURCAD=X', 'EUR-CAD'), ('EURSEK=X', 'EUR-SEK'),\n# ('EURCHF=X', 'EUR-CHF'), ('EURHUF=X', 'EUR-HUF'), ('CNY=X', 'USD-CNY'), ('HKD=X', 'USD-HKD'), ('SGD=X', 'USD-SGD'),\n# ('INR=X', 'USD-INR'), ('MXN=X', 'USD-MXN'), ('PHP=X', 'USD-PHP'), ('IDR=X', 'USD-IDR'), ('THB=X', 'USD-THB'),\n# ('MYR=X', 'USD-MYR'), ('ZAR=X', 'USD-ZAR'), ('RUB=X', 'USD-RUB')]\n\ncurrencies_lists = [('AUDUSD=X', 'AUD-USD'), ('CNY=X', 'USD-CNY'), ('EURCAD=X', 'EUR-CAD'), ('EURCHF=X', 'EUR-CHF'), ('EURGBP=X', 'EUR-GBP'),\n ('EURHUF=X', 'EUR-HUF'), ('EURJPY=X', 'EUR-JPY'), ('EURSEK=X', 'EUR-SEK'), ('EURUSD=X', 'EUR-USD'), ('GBPJPY=X', 'GBP-JPY'),\n ('GBPUSD=X', 'GBP-USD'), ('HKD=X', 'USD-HKD'), ('IDR=X', 'USD-IDR'), ('INR=X', 'USD-INR'), ('JPY=X', 'USD-JPY'),\n ('MXN=X', 'USD-MXN'), ('MYR=X', 'USD-MYR'), ('NZDUSD=X', 'NZD-USD'), ('PHP=X', 'USD-PHP'), ('RUB=X', 'USD-RUB'),\n ('SGD=X', 'USD-SGD'), ('THB=X', 'USD-THB'), ('ZAR=X', 'USD-ZAR')]\ncurrencies_lists_paths = glob.glob('./preprocessing_currencies/currencies_list/*.csv')\n\n# for i in range (24):\n# print(currencies_lists[i])\n# print(currencies_lists_paths[i])\n# print('')\n\n# 인덱스와 컬럼만 지정한 빈 DF 만들기\n# 30개 자산클래스의 이름\nclass_name = []\n# class_name.append(currencies_lists[22][1])\n# print(class_name)\nfor i in range(23):\n class_name.append(currencies_lists[i][1])\n\nmse = ['High', 'Low', 'Adj Close', 'Change', 'Average'] # 인덱스\ndf_loss = pd.DataFrame(columns=class_name) # 30개 자산클래스 column\ndf_loss = pd.DataFrame({'mse':mse}) # 'mse' 5개 값을 가진 column을 만듬\n\nfor ticker, name in currencies_lists: # 30개 클래스 이름이 모두 columns로 들어옴\n df_loss[name] = np.nan # nan 값으로 채워서 빈 데이터프레임 만들기\ndf_loss.set_index('mse', inplace=True) # mse 안 5개 값을 인덱스로 나열해줌(추후 transpose(T)해서 인덱스-컬럼 바꿀것임).\n\n\nplt.figure(figsize=(8, 18))\nfor num, currencies_lists_path in enumerate(currencies_lists_paths):\n df = pd.read_csv(currencies_lists_path, index_col=0)\n df_lists = [('df_high','High'), ('df_low','Low'), ('df_close','Adj Close'), ('df_change','Change')] # ['df_high',' df_low', 'df_close', 'df_change']\n plot_num = 0\n for df_each, colname in df_lists:\n plot_num += 1 # 1, 2, 3, 4\n df_each = df[[colname]]\n last_60_df = df_each[-60:] # 마지막 30개만 따로 빼놓기(벡테스팅용)\n print(last_60_df.tail())\n df_each = df_each[:-30] # 마지막 30개 빼고 모델링\n print(type(df_each)) # DataFrame\n last_60_df.to_csv('./updated/{}_{}_updated.csv'.format(currencies_lists[num][1], colname))\n minmaxscaler = MinMaxScaler()\n scaled_data = minmaxscaler.fit_transform(df_each) # 스케일링해주기\n with open('./minmaxscaler/{}_{}_minmaxscaler.pickle'.format(currencies_lists[num][1], colname), 'wb') as f:\n pickle.dump(minmaxscaler, 
f)\n sequence_X = []\n sequence_Y = []\n for i in range(len(scaled_data) - 30):\n _x = scaled_data[i:i + 30] # 총 30개\n _y = scaled_data[i + 30] # 31번째를 예측\n sequence_X.append(_x)\n sequence_Y.append(_y)\n sequence_X = np.array(sequence_X)\n sequence_Y = np.array(sequence_Y)\n X_train, X_test, Y_train, Y_test = train_test_split(sequence_X, sequence_Y, test_size=0.2)\n xy = X_train, X_test, Y_train, Y_test\n np.save('./train_test_split/{}_{}_train_test.npy'.format(currencies_lists[num][1], colname), xy) # 저장하고\n\n model = Sequential()\n model.add(LSTM(512, input_shape=(30, 1), activation='tanh', return_sequences=1))\n model.add(Flatten())\n model.add(Dropout(0.2))\n model.add(Dense(128))\n model.add(Dropout(0.2))\n model.add(Dense(1))\n model.compile(loss='mse', optimizer='adam')\n fit_hist = model.fit(X_train, Y_train, epochs=100, callbacks=[early_stopping], shuffle=False, validation_data=(X_test, Y_test))\n\n # 플롯 차트 #\n plt.plot(fit_hist.history['loss'][-30:], label='loss')\n plt.plot(fit_hist.history['val_loss'][-30:], label='val_loss')\n mse = fit_hist.history['val_loss'][-1]\n print('val_loss값은?? :', mse)\n plt.subplot(4, 1, plot_num)\n plt.title(currencies_lists[num][1])\n plt.ylabel(colname)\n plt.legend()\n plt.tight_layout()\n plt.grid(True)\n\n # 위에서 만든 DataFrame에 Key값들 채우기.\n if colname == 'Change':\n df_loss.loc[colname][currencies_lists[num][1]] = mse\n df_loss.loc['Average'][currencies_lists[num][1]] = (df_loss.loc['High'][currencies_lists[num][1]] + df_loss.loc['Low'][currencies_lists[num][1]]\n + df_loss.loc['Adj Close'][currencies_lists[num][1]] + df_loss.loc['Change'][currencies_lists[num][1]]) / 4\n else:\n df_loss.loc[colname][currencies_lists[num][1]] = mse # '클래스이름'행 - 열에 mse 값이 들어가게 하기.\n\n model.save('./models/{}_{}_model.h5'.format(currencies_lists[num][1], colname)) # 모델 저장하기\n print(currencies_lists[num][1], colname, ' 모델링및 저장 까지 완료 ')\n\n # 한 클래스당 4개의 컬럼에 대한 mse(val_loss)의 추이에 대한 그래프를 저장.\n plt.savefig('./datasets/{}_mse_plot.png'.format(currencies_lists[num][1]))\n plt.show(block=False)\n plt.pause(1) # 1초후 자동으로 창 닫음\n plt.close()\n\ndf_loss = df_loss.T # 행-열 전환 transpose.\ndf_loss.to_csv('./datasets/currencies_mse.csv', index =True)\n\n\n","repo_name":"jisoor/Final_for_quant_LSTM","sub_path":"12_currencies_remodeling.py","file_name":"12_currencies_remodeling.py","file_ext":"py","file_size_in_byte":6708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35845831995","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def maxAncestorDiff(self, root: Optional[TreeNode]) -> int:\n if not root: return 0\n\n def helper(node,currMax, currMin):\n if not node:\n return currMax - currMin\n \n currMax = max(currMax, node.val)\n currMin = min(currMin, node.val)\n\n left = helper(node.left,currMax, currMin)\n right = helper(node.right,currMax, currMin)\n\n return max(left, right)\n\n print(root.val, root.val)\n return helper(root, root.val, root.val)","repo_name":"Jaydale3221/LeetCodeProblems","sub_path":"1092-maximum-difference-between-node-and-ancestor/1092-maximum-difference-between-node-and-ancestor.py","file_name":"1092-maximum-difference-between-node-and-ancestor.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"74521565801","text":"import glob\nimport numpy as 
np\nimport pandas as pd\nfrom math import log\nfrom scipy import spatial\nimport math\n\npath = \"D:\\\\CCCCcomputer\\\\2020_2\\\\컴퓨터보안\\\\과제\\\\assignment#2\\\\opcode\\\\test3\\\\*.txt\"\nfiles = glob.glob(path)\n\ncorpus =[]\n# 파일을 통째로 str로 저장\nfor file in files:\n with open(file,\"r\") as f:\n doc = []\n for word in f:\n doc.append(word.rstrip('\\n'))\n corpus.append(' '.join(doc))\n\nvocab = list(set(w for x in corpus for w in x.split()))\nvocab.sort()\n\nN = len(corpus) # 총 문서의 수\n\ndef tf(t, d):\n return math.log(d.count(t)+1)\n\ndef idf(t):\n df = 0\n for doc in corpus:\n df += t in doc\n return log(N/(df + 1))\n\ndef tfidf(t, d):\n return tf(t,d)* idf(t)\n\n\nresult = []\nfor i in range(N):\n result.append([])\n d = corpus[i]\n for j in range(len(vocab)):\n t = vocab[j]\n\n result[-1].append(tfidf(t,d))\n\nresult = np.array(result)\ntfidf_ = pd.DataFrame(result, columns = vocab)\n# print(tfidf_)\n\n\nanalysis=[]\nmalware = [0, 2.56591, 0.155082, 0.126886, 0.39755]\nfor n in range(len(corpus)):\n opcode_list = np.split(result[n],len(result[n]))\n\n re_opcode_list =[]\n need_only = ['bsr', 'and', 'clc', 'aad', 'call']\n for opcode in need_only:\n if opcode in vocab:\n idx = vocab.index(opcode)\n re_opcode_list.append(float(opcode_list[idx]))\n else:\n re_opcode_list.append(0)\n\n # cosine similarity\n cosine_sim = 1 - spatial.distance.cosine(re_opcode_list, malware)\n\n analysis.append(cosine_sim)\n\n# 정확도 분석\nnotmal = 0\nmalware = 0\nfor i in range(len(analysis)):\n if analysis[i] > 0:\n malware += 1\n else:\n notmal += 1\n\nprint('정상파일 개수:',notmal)\nprint('악성파일 개수:',malware)","repo_name":"2018007956/HYU","sub_path":"Computer_Security/#2_opcode_analysis.py","file_name":"#2_opcode_analysis.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36208922985","text":"from itertools import *\nfrom collections import *\nfrom heapq import *\nfrom bisect import *\nfrom copy import *\nfrom array import *\nimport math\nimport sys\nsys.setrecursionlimit(1<<20)\nINF = float('inf')\n\nn,X = map(int,input().split())\ng = []\nans = 0\nfor _ in range(n):\n l = list(map(int,input().split()))\n g.append(l[1:])\n# print(g)\ndef rec(i,cur):\n global ans\n if cur>X:\n return\n if i==n:\n if cur==X:ans += 1\n return\n L = g[i]\n for j in range(len(L)):\n rec(i+1,cur*L[j])\n return\nrec(0,1)\nprint(ans)\n","repo_name":"to24toro/Atcoder","sub_path":"ABC233/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31193356019","text":"##########################################################################\n##\n## Original Code written for SBU's Big Data Analytics Project \n##\n## Student Name: Sai Bhargav Varanasi\n## Student ID: 114707860\n## Student Name: Aniket Panda\n## Student ID: 114356301\n## Student Name: Akash Sateesh\n## Student ID: 113221752\n## Student Name: Priyanka Amol Dighe\n## Student ID: 113264191\n\nfrom pyspark import SparkContext\nfrom pprint import pprint\nimport csv\nfrom collections import defaultdict\nfrom dateutil import parser\n\n# Initializing the Spark context\nsc = SparkContext(appName=\"GSOD\")\nclimateRDD = sc.textFile('gsod-county-cleaned-2017.csv', 32)\nclimateGhcndRDD = sc.textFile('ghcnd-county-2017.csv', 32)\nsc.setLogLevel(\"WARN\")\n\n# Required keys for GSOD Dataset. 
we do not need all features.\nrequired_keys = ['state', 'county', 'yearday', 'temp', 'dewp', 'slp', 'stp' , 'visib', 'wdsp', 'mxspd', 'gust', 'max', 'min', 'sndp']\n# Keys to identify the item in GSOD RDD.\nfilter_keys = ['state', 'county', 'yearday']\nstar = \"*\"\n\n\n\"\"\"\n\nProcess each line of GSOD csv data set and returns\n\nkey: (county, state, yearday)\nvalue: (1, attribute dictionary)\n\n\"\"\"\ndef process_row(row, headers):\n \n key_dict = dict()\n row_split = row.split(',')\n\n for value, key in zip(row_split, headers.value):\n if key not in required_keys:\n continue\n\n if star in value:\n value = value.replace(star, '') \n\n if key == 'county':\n value = value.lower().replace(' county', '')\n\n key_dict[key] = value\n\n map_key = (key_dict['county'], key_dict['state'], key_dict['yearday'])\n\n return (map_key, (1, key_dict))\n\n\n\"\"\"\n\ncombines all the attributes at a county.\n@returns:\n\nkey - sum of all observations at county\nvalue - combined attribute dictionary.\n\n\"\"\"\n\ndef combineAttributeAtCounty(row1, row2):\n \"\"\"\n TODO : Filter prcp units and add an in clause for it.\n \"\"\"\n dict1 = row1[1]\n dict2 = row2[1]\n\n ans_dict = dict()\n\n for key in dict1.keys():\n if key in filter_keys:\n ans_dict[key] = dict1[key]\n\n elif key == \"max\":\n ans_dict[key] = max(dict1[key], dict2[key])\n \n elif key == \"min\":\n ans_dict[key] = min(dict1[key], dict2[key])\n\n else:\n ans_dict[key] = float(dict1[key]) + float(dict2[key])\n\n return (row1[0] + row2[0], ans_dict)\n\n\"\"\"\n\nInput:\nkey - (county, state, yearday)\nvalue - (sum of all observations at a county, aggregated attribute dictionary)\n\nOutput:\nkey: (county, state, yearday)\nvalue: (daily averaged attribute dict)\n\n\"\"\"\n\ndef reduceAtCountyDaily(row):\n dict = row[1][1]\n\n ans_dict = defaultdict(float)\n\n for key in dict.keys():\n if key in filter_keys:\n ans_dict[key] = dict[key]\n elif key == \"max\" or key == \"min\":\n ans_dict[key] = dict[key]\n else:\n ans_dict[key] = float(dict[key])/row[1][0]\n \n return (row[0], ans_dict)\n\n\"\"\"\n\nNormalizing all the attribute values.\n\nInput: key: (county, state, yearday)\nvalue: (daily average of all attributes dict)\n\nOutput:\nkey: (county, state, yearday)\nvalue: (normalized attribute dictionary)\n\n\"\"\"\n\ndef meanCenterAtCounty(row):\n dict_list = row[1]\n dict_size = len(row[1])\n agg_dict = defaultdict(float)\n\n agg_dict_result = defaultdict(list)\n\n agg_normalized_dict = defaultdict(float)\n\n for dict in dict_list:\n for key in dict.keys():\n if key in filter_keys:\n continue\n else:\n agg_dict[key] += float(dict[key])\n agg_dict_result[key].append(float(dict[key]))\n \n for k,v in agg_dict_result.items():\n\n minV = min(v)\n maxV = max(v)\n agg_normalized_dict[k] = maxV - minV\n \n # agg_normalized_dict = {k: np.linalg.norm(v) for k,v in agg_dict_result.items()}\n \n agg_dict = {k : v / dict_size for k, v in agg_dict.items()}\n meancentered_dict = {}\n for dict in dict_list:\n d = defaultdict(float)\n date = dict['yearday']\n for key in dict.keys():\n if key in filter_keys:\n d[key] = dict[key]\n else:\n if agg_normalized_dict[key] != 0:\n d[\"gsod_\"+key] = round(( (float(dict[key]) ) )/agg_normalized_dict[key] *8)/2\n \n meancentered_dict[date] = d\n \n return (row[0], meancentered_dict)\n\n\"\"\"\n\nEmits the key value pairs at a county\n\n@returns\n\nkey: (county, state, attribute)\nvalue: ( [date, average attribute value for a day], (count of observation, sum of all attribute values seen son far) )\n\n\"\"\"\n\ndef 
emitCountyKeys(row):\n date, attribute, county, state = row[0]\n count, s = row[1]\n\n avgForDay = round((s / count)*2)/2\n\n keyTuple = (county, state, attribute)\n valueTuple = ( [(date, avgForDay)] , (count, s) )\n\n return (keyTuple, valueTuple)\n\n\"\"\"\n\nEmits the normalized value of attribute at a county level for a given day.\n\n@returns\nkey: (county, state, date)\nvalue: (attribute, normalized attribute value)\n\n\"\"\"\n\ndef emitMeanCenteredValues(row):\n county, state, attribute = row[0]\n listDatesValues = row[1][0]\n count,s = row[1][1]\n\n avgForAttribute = round((s / count)*2)/2\n\n finalEmitList = []\n\n valueList = []\n for dateValuePair in listDatesValues:\n date, value = dateValuePair\n valueList.append(value)\n\n valueNormalized = max(valueList) - min(valueList)\n\n if valueNormalized == 0:\n valueNormalized = 1\n\n for dateValuePair, normalizedValue in zip(listDatesValues, valueList):\n keyTuple = (county, state, date)\n\n valueTuple = (attribute, normalizedValue/valueNormalized)\n emitTuple = (keyTuple, [valueTuple])\n \n finalEmitList.append(emitTuple)\n\n\n return finalEmitList\n\n\"\"\"\n\nEmits the key value pairs where,\nkey: (county, state)\nvalue: dictionary where key is date and value is dictionary of key being attribute and value being attribute value.\n\n\"\"\"\n\ndef emitCountyState(row):\n\n county, state, date = row[0]\n attributeMeanCenteredValues = row[1]\n\n res = {}\n for amv in attributeMeanCenteredValues:\n attribute, value = amv[0], amv[1]\n \n if date not in res:\n res[date] = {}\n \n res[date][attribute] = value\n\n return ((county,state), res)\n\n\"\"\"\n\nMerge the two dictionaries.\n\n\"\"\"\n\ndef mergeDictionaries(x,y):\n\n if not x:\n return y \n \n if not y:\n return x \n \n x.update(y)\n\n return x\n\n\"\"\"\n\nProcessing each line of the GHCND csv file for processing.\nReturns: Key: (state, county, yearday, attribute) , Value: (1, attribute_value)\n\n\"\"\"\n\ndef processLine(line, headerList):\n \n columns = list(csv.reader([line], delimiter=','))[0]\n\n keyTuple = tuple()\n valueTuple = tuple()\n\n for value, key in zip(columns, headerList.value):\n\n if key in ['state', 'county', 'yearday','attribute']:\n \n if key == 'county':\n value = value.lower().replace(' county', '')\n \n keyTuple += tuple([value])\n \n elif key == 'value':\n valueTuple = int(value)\n\n if len(keyTuple) < 4:\n print(keyTuple)\n\n return ( keyTuple, (1,valueTuple) )\n\n\"\"\"\nRemoves features: yearday, state and county from the RDD after the processing is done.\n\n\"\"\"\n\ndef removeKeysAttributesinAttributeDict(row):\n\n keyTuple = row[0]\n valueTuple = row[1]\n\n for v in valueTuple.values():\n\n if 'yearday' in v:\n del v['yearday']\n\n if 'state' in v:\n del v['state']\n \n if 'county' in v:\n del v['county']\n \n return (keyTuple, valueTuple)\n\n\"\"\"\n\nMerge the two dictionaries whose value is also the dictionary.\n\n\"\"\"\n\ndef mergeDictionaries_1(dict1, dict2):\n ans_dict = {}\n\n for key in dict1.keys():\n v1 = dict1[key]\n v2 = (dict2[key] if key in dict2 else None)\n\n ans_dict[key] = mergeDictionaries(v1, v2)\n \n return ans_dict\n\n\n\"\"\"\n key, value: key = (county, state) value = dict(mean_centered_attribute values for that county)\n\"\"\"\n\n# GSOD Dataset processing\nheaders = climateRDD.first()\nheaderList = headers.split(\",\")\nheaderList = sc.broadcast(headerList)\n\nclimateRDD = climateRDD.filter(lambda x : x!=headers)\n\nclimateRDD = climateRDD.map(lambda x : process_row(x, headerList))\\\n .reduceByKey(lambda x, y : 
combineAttributeAtCounty(x, y))\\\n .map(reduceAtCountyDaily)\\\n .map(lambda x : ((x[0][0], x[0][1]), [x[1]]))\\\n .reduceByKey(lambda x, y : x + y)\\\n .map(meanCenterAtCounty)\n\n\"\"\"\n key, value: key = (county, state) value = dict(mean_centered_attribute values for that county)\n\"\"\"\n\n# GHCND Data set processing.\nheaders = climateGhcndRDD.first()\nheaderList = headers.split(\",\")\nheaderList = sc.broadcast(headerList)\n\n# Required keys for RDD keys.\nkeys = ['state', 'county', 'yearday','attribute']\nvalues = ['value']\nkeyOrdinals = []\nvalueOrdinals = []\ncountyOrdinal = 0\n\n# Filtering keys/attributes that are required.\nfilterKeys = ['PRCP','SNOW','SNWD','TMAX','TMIN']\n\nfor i in range(len(headerList.value)):\n\n if headerList.value[i] in keys:\n keyOrdinals.append(i)\n \n if headerList.value[i] == 'county':\n countyOrdinal = i\n\n if headerList.value[i] in values:\n valueOrdinals.append(i)\n\nclimateGhcndRDD = climateGhcndRDD.filter(lambda line: line != headers)\\\n .filter(lambda line: len ( list(csv.reader([line], delimiter=','))[0][keyOrdinals[1]] ) > 0) \n\nclimateGhcndRDD = climateGhcndRDD.map(lambda line: processLine(line, headerList))\\\n .filter(lambda x: x[0][1] in ['PRCP','SNOW','SNWD','TMAX','TMIN'] and len(x[0][2]) > 0) \n\nclimateGhcndRDD = climateGhcndRDD.reduceByKey(lambda a,b: (a[0]+b[0], a[1]+b[1]))\\\n .map(emitCountyKeys)\\\n .reduceByKey(lambda x,y: ( x[0]+y[0] , ( x[1][0]+y[1][0], x[1][1]+y[1][1] ) ) )\\\n .flatMap(emitMeanCenteredValues)\\\n .reduceByKey(lambda x,y: x+y)\\\n .map(emitCountyState)\\\n .reduceByKey(lambda x,y: mergeDictionaries(x,y))\n\nfinalRdd = climateRDD.union(climateGhcndRDD).reduceByKey(lambda x, y : mergeDictionaries_1(x, y))\n\nfinalRdd = finalRdd.map(lambda line: removeKeysAttributesinAttributeDict(line) )\n\n#DISASTER RDD methods\n#For disaster data - process each row of disaster and \ndef process_disaster_file(row, headers, required_cols):\n \n key_dict = dict()\n row_split = list(csv.reader([row], delimiter=','))[0]\n\n for value, key in zip(row_split, headers.value):\n # print(key,value)\n if key not in required_cols.value:\n continue\n\n if key == 'designated_area':\n value =value.replace(' (County)', '').lower()\n \n if 'date' in key:\n value = value.split('T')[0]\n value = value.replace('-','')\n\n key_dict[key] = value\n\n map_key = (key_dict['designated_area'], key_dict['state'], key_dict['fy_declared'])\n \n return (map_key, key_dict)\n\n#disaster RDD method - emit only county and distater + date range\ndef emitCountyDisasterRange(row):\n county, state, _ = row[0]\n detailsDict = row[1]\n\n startDate = detailsDict['incident_begin_date']\n endDate = detailsDict['incident_end_date']\n reportedDate = detailsDict['declaration_date']\n\n #if either start or end date is not reported\n\n if startDate == 'NA' or startDate == '':\n startDate = reportedDate\n if endDate == 'NA' or endDate == '':\n endDate = reportedDate\n \n detailsDict['incident_begin_date'] = parser.parse(startDate)\n detailsDict['incident_end_date'] = parser.parse(endDate)\n detailsDict['declaration_date'] = parser.parse(reportedDate)\n\n disasterType = detailsDict['incident_type']\n\n keyTuple = (county, state)\n valueTuple = (disasterType, detailsDict)\n\n return (keyTuple, [valueTuple])\n\n\n#DISASTER RDD manipulation\ndisaterRDD = sc.textFile('us_disaster_declarations.csv', 32)\n\nheaders = disaterRDD.first()\ndisaterRDD = disaterRDD.zipWithIndex().filter(lambda row_index: row_index[1] > 0).keys()\n\nheaders = headers.split(',')\nheaders = 
sc.broadcast(headers)\n\nrequired_cols = sc.broadcast(['state', 'declaration_date', 'incident_type', 'fy_declared', \\\n 'incident_begin_date', 'incident_end_date', 'place_code', 'designated_area'])\n\ndisaterRDD = disaterRDD.map(lambda row :process_disaster_file(row, headers, required_cols))\\\n .filter(lambda row: row[0][2] == '2017')\\\n .map(emitCountyDisasterRange)\\\n .reduceByKey(lambda x,y: x+y)\n\n# key, value: key = (county, state) value = dict(mean_centered_attribute values for that county)\ndef emitMap(row):\n\n county, state = row[0]\n dateAttributeDict = row[1]\n\n resultDict = {}\n\n for k,v in dateAttributeDict.items():\n\n # k -> date\n # v -> attribute dict\n parsedDate = parser.parse(k)\n resultDict[parsedDate] = v\n\n \n return ( ( county, state ), list(resultDict.items()))\n\n\n#climatedisasterRDD method - emit key-value pairs with \ndef emitCountyAttrDisasterPairs(row):\n \n finalEmitList = []\n\n county, state = row[0]\n climateList = row[1][0]\n disasterList = row[1][1]\n\n # (date, attributeDict) - for climate attributeDict = value for temp etc\n # (disasterType, detailsDict) - for disaster detailsDict = details like startdate,etc\n for date, attributeDict in climateList:\n \n # flag to check if any disaster was matched\n disasterFlag = 0\n\n for disasterType, detailsDict in disasterList:\n \n startDate = detailsDict['incident_begin_date']\n endDate = detailsDict['incident_end_date']\n\n if startDate <= date <= endDate:\n disasterFlag = 1\n keyTuple = (county, state)\n valueTuple = (date, attributeDict, True, disasterType)\n emitPair = (keyTuple, [valueTuple])\n\n finalEmitList.append(emitPair)\n \n #meaning not mapped to any disaster - good right ? :)\n if disasterFlag == 0:\n keyTuple = (county, state)\n valueTuple = (date, attributeDict, False, '')\n emitPair = (keyTuple, [valueTuple])\n\n finalEmitList.append(emitPair)\n \n return finalEmitList\n\n\n#CLIMATE-DISASTER-RDD manipulations\n#merging climate and disaster RDD\n\nfinalRdd = finalRdd.map(emitMap)\n\nclimateDisasterRDD = finalRdd.join(disaterRDD)\n\nclimateDisasterRDD = climateDisasterRDD.flatMap(emitCountyAttrDisasterPairs)\\\n .reduceByKey(lambda a,b: a+b)\n\nclimateDisasterRDD.saveAsPickleFile('climateDisasterRDD')\n\n# climateDisasterRDD = sc.pickleFile('climateDisasterRDD')\n\n# FINAL OUTPUT - key(county, state) value(date, attributeDict, disasterY/N, disasterType)\n\n## SIMILARITY SEARCH\n#function to create list of attributes of 7 days\n# value(date, attributeDict, disasterY/N, disasterType)\ndef helperForShingles(dateRange):\n weeklyAttributeDict = dict()\n\n startDate = endDate = None\n startFlag = 0\n disasterBool, disasterType = False, ''\n\n for idx in range(0, len(dateRange)):\n if dateRange[idx] is None:\n continue\n\n date, attributeDict, idxDisasterBool, idxDisasterType = dateRange[idx]\n disasterBool = idxDisasterBool or disasterBool\n if idxDisasterType != '':\n disasterType = idxDisasterType\n\n if not startFlag:\n startDate = date\n startFlag = 1\n\n endDate = date\n\n for k,v in attributeDict.items():\n #1_Tmax, 2_Tmax, etc\n newKey = str(idx+1)+'_'+k\n weeklyAttributeDict[newKey] = v\n \n return (startDate, endDate, weeklyAttributeDict, disasterBool, disasterType)\n\n\n#function takes list of date\ndef emitWeeklyShingles(row):\n finalEmitList = []\n\n county, state = row[0]\n listOfDates = row[1]\n listOfDates.sort(key= lambda x: x[0])\n\n # making list of 366 days and filling entries based on dayofYear found\n dateRange = [None]*366\n first = float('inf')\n last = float('-inf')\n 
for dateItem in listOfDates:\n date = dateItem[0]\n dayOfYear = date.timetuple().tm_yday\n dateRange[dayOfYear] = dateItem\n\n if dayOfYear < first:\n first = dayOfYear\n if dayOfYear > last:\n last = dayOfYear\n \n # making weekly shingle strings\n if last - first <= 7:\n startDate, endDate, weeklyAttributeDict,\\\n _ , disasterType = helperForShingles(dateRange[first:last])\n\n keyTuple = (county, state, startDate, endDate, disasterType)\n finalEmitList.append((keyTuple, [weeklyAttributeDict]))\n \n else:\n for idx in range(first, last, 7):\n weekData = dateRange[idx: idx + 7]\n startDate, endDate, weeklyAttributeDict,\\\n _ , disasterType = helperForShingles(weekData)\n\n keyTuple = (county, state, startDate, endDate, disasterType)\n finalEmitList.append((keyTuple, [weeklyAttributeDict]))\n \n return finalEmitList\n\nshingleRDD = climateDisasterRDD.flatMap(emitWeeklyShingles)","repo_name":"Stonybrookcse545/cse545project","sub_path":"reducer_normalized.py","file_name":"reducer_normalized.py","file_ext":"py","file_size_in_byte":17346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9329859160","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 14 10:22:47 2022\r\n\r\n@author: dcsem\r\n\"\"\"\r\n \r\ndef seq_search(items,val):\r\n \r\n \"\"\"Implementation of a Sequential Search algorithm, that takes linear time\r\n O(n) in the worst case.\"\"\"\r\n \r\n found = False\r\n count = 0\r\n \r\n while count < len(items) and not found:\r\n if items[count] == val:\r\n found = True\r\n else:\r\n count +=1\r\n \r\n return found\r\n\r\n\r\n","repo_name":"Daniel-Sem/Sorting-and-Searching","sub_path":"Sequential_Search.py","file_name":"Sequential_Search.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31789817787","text":"# -*- encoding: utf-8 -*-\n\nfrom odoo import models,fields, api\n\n\nclass ResPartner(models.Model):\n _inherit = \"res.partner\"\n\n def _paiements_count(self):\n for partner in self:\n count_cheque_client = len(partner.cheque_client_ids)\n count_effet_client = len(partner.effet_client_ids)\n count_ov_client = len(partner.ov_client_ids)\n count_cb_client = len(partner.cb_client_ids)\n count_cash_client = len(partner.cash_client_ids)\n partner.count_cheque_client = count_cheque_client\n partner.count_effet_client = count_effet_client\n partner.count_ov_client = count_ov_client\n partner.count_cb_client = count_cb_client\n partner.count_cash_client = count_cash_client\n count_pec_client = len(partner.pec_client_ids)\n partner.count_pec_client = count_pec_client\n\n count_pec_client = fields.Integer(compute='_paiements_count', string=u'Nbre des PEC')\n pec_client_ids = fields.One2many('paiement.pec.client', 'client', string=u'Prises en charges', readonly=True)\n","repo_name":"kasback/mebras15","sub_path":"account_tres_pec/models/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7655148646","text":"from scorers.scorer import Scorer\nimport numpy as np\nimport re\nimport nltk\nimport string\n\n\ndef count_chinese_sentences(text):\n pattern = r'[^。!?;\\n]+[。!?;\\n]'\n sentences = re.findall(pattern, text)\n return len(sentences)\n\n\ndef count_valid_words(text):\n valid_characters = string.ascii_letters + string.digits \n words = text.split() \n\n valid_word_count = 0\n for 
word in words:\n cleaned_word = ''.join(char for char in word if char in valid_characters)\n if cleaned_word: \n valid_word_count += 1\n\n return valid_word_count\n\n\nclass Count_Limit_Scorer(Scorer):\n def __init__(self):\n super(Scorer, self).__init__()\n\n def get_final_score(self, ans, histories, count_limits, lang):\n '''\n Input:\n - ans (str): The answer to be scored.\n - histories (list): A list of historical answers.\n - count_limits (list of dictionaries): Criteria for scoring the answer regarding *Count Limit*\n - lang (str): The language of the answer (e.g., 'ch' for Chinese, 'eg' for English).\n\n Output:\n - final_score (float): The final score for the answer.\n\n Function:\n Calculate the final score for the answer based on specified criteria.\n '''\n word_count_score = np.nan\n sample_count_score = np.nan\n sentence_count_score = np.nan\n revise_score = np.nan\n\n if not isinstance(ans, str):\n word_count_score = 0\n sample_count_score = 0\n sentence_count_score = 0\n revise_score = 0\n else:\n word_count_score = self.word_count_score(ans, count_limits, lang)\n sample_count_score = self.sample_count_score(ans, count_limits, lang)\n sentence_count_score = self.sentence_count_score(ans, count_limits, lang)\n if len(histories) > 0:\n if not isinstance(histories[-1][1], str):\n revise_score = 0\n else:\n revise_score = self.revise_score((histories[-1][1], ans), count_limits, lang)\n\n return np.nanmean([word_count_score, sample_count_score, sentence_count_score, revise_score])\n\n def word_count_score(self, ans, criteria, language):\n mode = ''\n limit = ''\n for criterion in criteria:\n if criterion['criterion'] in [\"word-max\", \"word-min\", \"word-exact\", \"word-min-max\"]:\n mode = criterion['criterion']\n limit = criterion['limit']\n break\n elif criterion['criterion'] == 'NULL':\n return np.nan\n\n if mode == '':\n return np.nan\n\n le = 0\n score = 1\n if language == 'ch':\n le = len(re.findall('[\\u4e00-\\u9fa5]', ans))\n elif language == 'eg':\n le = count_valid_words(ans)\n\n if mode == \"word-max\":\n if le > limit:\n score = max(1 - abs(limit - le) / le, 0)\n else:\n score = 1\n if mode == \"word-min\":\n if le < limit:\n score = max( le / limit, 0)\n else:\n score = 1\n return score\n\n def sample_count_score(self, ans, criteria, language):\n mode = ''\n limit = ''\n \n score_list = []\n for criterion in criteria:\n # markdown table can be categoried into sample-number\n if criterion['criterion'] in [\"sample-table\", \"sample-min\", \"sample-number\"]:\n mode = criterion['criterion']\n limit = criterion['limit']\n elif criterion['criterion'] == 'NULL':\n return np.nan\n\n if mode == '':\n return np.nan\n\n if mode == 'sample-min':\n score = 0\n for ll in limit:\n if ans.count(ll[0]) >= ll[1]:\n score += 1\n score = score / len(limit)\n elif mode == 'sample-table':\n score = 0\n try:\n be = ans.index('|')\n en = ans.rfind('|')\n ans = ans[be:en+1]\n if ans.count('\\n') == limit + 1:\n score = 1\n except ValueError:\n score = 0\n elif mode == \"sample-number\": \n score = 0\n result = re.findall(r'\\d+', ans)\n result = list(set(result))\n tmp_score = 0\n for r in result:\n if int(r) == limit:\n tmp_score = 1\n if int(r) > limit:\n tmp_score = 0\n break\n score = tmp_score\n \n score_list.append(score)\n\n return np.max(score_list)\n\n def sentence_count_score(self, ans, criteria, language):\n mode = ''\n limit = ''\n for criterion in criteria:\n if criterion['criterion'] in [\"sentence-max\", \"sentence-min\", \"sentence-min-max\", \"sentence-exact\"]:\n mode = 
criterion['criterion']\n limit = criterion['limit']\n break\n elif criterion['criterion'] == 'NULL':\n return np.nan\n\n if mode == '':\n return np.nan\n\n mode = mode\n score = 0\n le = 0\n if language == 'ch':\n le = count_chinese_sentences(ans)\n elif language == 'eg':\n le = len(nltk.sent_tokenize(ans))\n\n if mode == \"sentence-max\":\n if le > limit:\n score = max(1 - abs(limit - le) / le, 0)\n else:\n score = 1\n score = np.nan\n if mode == \"sentence-min\":\n if le < limit:\n score = max(le / limit, 0)\n else:\n score = 1\n if mode == \"sentence-min-max\" and limit[0] <= le <= limit[1]:\n score = 1\n if mode == \"sentence-exact\" and le == limit:\n score = 1\n\n return score\n\n def revise_score(self, ans_pairs, criteria, language):\n mode = ''\n limit = ''\n for criterion in criteria:\n if criterion['criterion'] in [\"revise\"]:\n mode = criterion['criterion']\n limit = criterion['limit']\n break\n elif criterion['criterion'] == 'NULL':\n return np.nan\n\n if mode == '':\n return np.nan\n\n assert (limit in [\"longer\", \"shorter\"])\n\n prev_ans, now_ans = ans_pairs\n score = 0\n if limit == \"longer\" and len(now_ans) >= len(prev_ans):\n score = 1\n if limit == \"shorter\" and len(now_ans) <= len(prev_ans):\n score = 1\n\n return score","repo_name":"Abbey4799/CELLO","sub_path":"code/scorers/count_limit.py","file_name":"count_limit.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"7831566916","text":"import tweepy\nimport requests\nimport sys\nimport random\nimport MyAPI\nimport traceback\nimport re\nimport pathlib\n\nauth = tweepy.OAuthHandler(MyAPI.consumer_key_send, MyAPI.consumer_secret_send)\nauth.set_access_token(MyAPI.access_token_send, MyAPI.access_secret_send)\napi = tweepy.API(auth)\n\n\ndef find_level(tweet):\n # unable_to_choice = ['10.1', '10.2', '10.4', '14.2', '14.3', '14.4', '14.5', '14.6', '14.7', '14.8', '14.9']\n unable_to_choice = ['10.1', '10.2', '10.4', '15.7', '15.8', '15.9']\n levels = []\n tweet = tweet.replace('+', '+')\n for level in re.findall(\"1[0-5]\\+|1[0-5]\\.[0-9]|1[0-5]\", tweet):\n if level not in unable_to_choice:\n levels.append(level)\n if len(levels) == 0:\n return ['random']\n return levels\n\ndef choice_song(levels):\n # unable_to_choice = ['10.1', '10.2', '10.4', '14.2', '14.3', '14.4', '14.5', '14.6', '14.7', '14.8', '14.9']\n unable_to_choice = ['10.1', '10.2', '10.4', '15.7', '15.8', '15.9']\n for i in range(len(levels)):\n if levels[i] == '10':\n level_a = ['10.0', '10.3', '10.5', '10.6']\n levels[i] = random.choice(level_a)\n elif levels[i] == '10+':\n level_b = ['10.7', '10.8', '10.9']\n levels[i] = random.choice(level_b)\n # elif levels[i] == '14':\n # level_c = ['14.0', '14.1']\n # levels[i] = random.choice(level_c)\n # elif levels[i] == '14+':\n # levels[i] = random.choice([str(i / 10) for i in range(100, 142) if str(i / 10) not in unable_to_choice])\n elif levels[i] == '15+':\n levels[i] = random.choice([str(i / 10) for i in range(100, 156) if str(i / 10) not in unable_to_choice])\n choice_is_random = False\n if levels[0] == 'random':\n levels = levels.remove('random')\n levels = [str(i / 10) for i in range(100, 156) if str(i / 10) not in unable_to_choice]\n choice_is_random = True\n choiced_songs = []\n for i in range(len(levels)):\n path = \"data/\"\n if \"+\" in levels[i]:\n path += levels[i] + \"/\"\n r = [7, 8, 9]\n path += levels[i][:-1] + \".\" + str(random.choice(r)) + \"/\"\n elif '.' 
in levels[i]:\n if int(levels[i][-1]) >= 7:\n path += levels[i][:-2] + \"+/\" + levels[i] + \"/\"\n else:\n path += levels[i][:-2] + \"/\" + levels[i] + \"/\"\n else:\n path += levels[i] + \"/\"\n r = [0, 1, 2, 3, 4, 5, 6]\n path += levels[i] + \".\" + str(random.choice(r)) + \"/\"\n print(path)\n songs = []\n for song in pathlib.Path(path).glob(\"*.png\"):\n songs.append({\"file_path\": song, \"file_name\": str(\n song)[:-4].replace(path, ''), \"level_path\": path})\n choiced_songs.append(random.choice(songs))\n if not choice_is_random:\n return choiced_songs\n else:\n all_songs = []\n for path in choiced_songs:\n for song in pathlib.Path(path[\"level_path\"]).glob(\"*.png\"):\n all_songs.append({\"file_path\": song, \"file_name\": str(song)[:-4].replace(path[\"level_path\"], ''), \"level_path\": path['level_path']})\n return [random.choice(all_songs)]\n\ndef generating_challenge():\n notes_decision = (\"JUSTICE\", \"ATTACK\", \"MISS\")\n Riquest_JUSTICE = (1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150)\n Riquest_ATTACK_or_MISS = (1, 5, 10, 20, 30)\n \n choiced = random.choice(notes_decision)\n if choiced == notes_decision[0]:\n return [choiced, random.choice(Riquest_JUSTICE)]\n else:\n return [choiced, random.choice(Riquest_ATTACK_or_MISS)]\n\n# def serch_genre(text):\n# genre_list = []\n# ジャンルの指定をつくる\n\ndef tweet(tw_text, tw_user_name, tw_id, tw_author_screen_name, tw_retweeted):\n level = find_level(tw_text)\n choiced_song = choice_song(level)\n #条件を生成\n challenge_request = generating_challenge()\n print(\"デフォ\")\n try:\n api.create_favorite(tw_id)\n except:\n pass\n tweet_url = \"https://twitter.com/\" + tw_author_screen_name + \"/status/\" + str(tw_id)\n tweet_buf = \"僕の選んだ課題曲は\" + \"「\" + choiced_song[0][\"file_name\"] + \"」\" + \"!!\\n\" + challenge_request[0] + \" \" + str(challenge_request[1]) + \"以下を目指そう!!\\n\"+ tweet_url\n try:\n api.update_with_media(filename=str(choiced_song[0][\"file_path\"]), status=tweet_buf)\n except:\n traceback.print_exc()\n \n","repo_name":"yuina-blend/chuni-penguin","sub_path":"default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"33067721044","text":"import sys\n\nimport dateutil.parser\n\n\nclass ModelBase(object):\n \"\"\"Super class for all models. 
Provides basic serialization.\"\"\"\n\n __dump_attributes__ = []\n\n # Borrowed from Armin Ronacher\n if sys.version_info > (3, 0):\n __str__ = lambda x: x.__unicode__() # noqa\n else:\n __str__ = lambda x: unicode(x).encode('utf-8') # noqa\n\n @classmethod\n def from_dict(cls, session, data_dict):\n \"\"\"\n Creating an instance of the specific type from the data passed\n in the dictionary `data_dict`.\n \"\"\"\n return cls(session, **data_dict)\n\n def __init__(self, session, **kwargs):\n self.session = session\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def dump(self):\n \"\"\"Serialize the ModelBase object to a dictionary.\"\"\"\n result = {}\n for attribute in self.__dump_attributes__:\n value = getattr(self, attribute)\n if value is not None:\n result[attribute] = value\n return result\n\n\nclass Account(ModelBase):\n \"\"\"Object representing one bank account of the user, independent of the exact account type.\n\n Attributes:\n account_id: internal figo connect account id\n balance: account balance\n bank_id: internal figo connect bank id\n name: account name\n owner: account owner\n auto_sync: boolean value that indicates whether the account is automatically synchronized\n account_number: account number\n bank_code: bank code\n currency: three character currency code\n iban: iban code\n bic: bic code\n type: account type, one of (Giro account, Savings account, Credit card, Loan account,\n PayPal, Cash book, Unknown)\n supported_tan_schemes: List of supported tan schemes\n preferred_tan_scheme: id of preferred tan scheme\n icon: account icon URL\n additional_icons: dictionary that maps resolutions to icon URLs\n status: synchronization status object\n \"\"\"\n\n __dump_attributes__ = [\"name\", \"owner\", \"auto_sync\"]\n\n account_id = None\n balance = None\n bank_id = None\n name = None\n owner = None\n auto_sync = None\n account_number = None\n bank_code = None\n bank_name = None\n currency = None\n iban = None\n bic = None\n type = None\n supported_tan_schemes = None\n preferred_tan_scheme = None\n icon = None\n additional_icons = None\n status = None\n\n @property\n def bank(self):\n \"\"\"The corresponding BankContact object for this account.\"\"\"\n return self.session.get_bank(self.bank_id)\n\n @property\n def payments(self):\n \"\"\"An array of `Payment` objects, one for each transaction on the account.\"\"\"\n return self.session.get_payments(self.account_id)\n\n def get_payment(self, payment_id):\n \"\"\"Retrieve a specific payment.\n\n Args:\n payment_id: id of the payment to be retrieved\n\n Returns:\n A Payment object representing the payment to be retrieved\n \"\"\"\n return self.session.get_payments(self.account_id, payment_id)\n\n @property\n def transactions(self):\n \"\"\"An array of `Transaction` objects, one for each transaction on the account.\"\"\"\n return self.session.get_transactions(self.account_id)\n\n def get_transactions(self, since=None, count=1000, offset=0, include_pending=False):\n \"\"\"Get an array of `Transaction` objects, one for each transaction of the user.\n\n Args:\n since: This parameter can either be a transaction ID or a date.\n count: Limit the number of returned transactions\n offset Offset into the result set to determine the first transaction returned\n (useful in combination with count)\n include_pending: boolean, indicates whether pending transactions should be included\n in the response; pending transactions are always included as a\n complete set, regardless of the `since` parameter.\n\n Returns:\n A list 
of Transaction objects\n \"\"\"\n return self.session.get_transactions(self.account_id, since, count, offset, include_pending)\n\n def get_transaction(self, transaction_id):\n \"\"\"Retrieve a specific transaction.\n\n Args:\n transaction_id: id of the transaction to be retrieved\n\n Returns:\n A Transaction object representing the transaction to be retrieved\n \"\"\"\n return self.session.get_transaction(self.account_id, transaction_id)\n\n @property\n def securities(self):\n \"\"\"An array of `Securities` objects, one for each security on the account.\"\"\"\n return self.session.get_securities(self.account_id)\n\n def get_securities(self, since=None, count=1000, offset=0, accounts=None):\n \"\"\"Get an array of Security objects, one for each security of the user.\n\n Args:\n account_id: ID of the account for which to list the securities\n since: This parameter can either be a transaction ID or a date.\n count: Limit the number of returned transactions\n offset: Offset into the result set to determine the first security returned\n (useful in combination with count)\n accounts: list of accounts. If retrieving the securities for all accounts, filter\n the securities to be only from these accounts.\n\n Returns:\n A list of Security objects\n \"\"\"\n return self.session.get_securities(self.account_id, since, count, offset, accounts)\n\n def get_security(self, security_id):\n \"\"\"Retrieve a specific security.\n\n Args:\n account_id: id of the account on which the security belongs\n security_id: id of the security to be retrieved\n\n Returns:\n A Security object representing the transaction to be retrieved\n \"\"\"\n return self.session.get_security(self.account_id, security_id)\n\n def __unicode__(self):\n return u\"Account: %s (%s at %s)\" % (self.name, self.account_number, self.bank_name)\n\n def __init__(self, session, **kwargs):\n super(Account, self).__init__(session, **kwargs)\n if self.status:\n self.status = SynchronizationStatus.from_dict(self.session, self.status)\n if self.balance:\n self.balance = AccountBalance.from_dict(self.session, self.balance)\n\n\nclass BankContact(ModelBase):\n \"\"\"Object representing a BankContact.\n\n Attributes:\n bank_id: figo internal bank id\n sepa_creditor_id: SEPA direct debit creditor id\n save_pin: boolean, indicates whether user has chosen to save PIN\n \"\"\"\n\n __dump_attributes__ = [\"sepa_creditor_id\"]\n\n bank_id = None\n sepa_creditor_id = None\n save_pin = None\n\n def __unicode__(self):\n return u\"BankContact: %s \" % self.bank_id\n\n\nclass AccountBalance(ModelBase):\n \"\"\"Object representing the balance of a certain bank account of the user.\n\n Attributes:\n balance: acccount balance or None if the balance is not yet known\n balance_date: bank server timestamp of balance or None if the balance is not yet known.\n credit_line: credit line\n monthly_spending_limit: user-defined spending limit\n status: synchronization status object\n \"\"\"\n\n __dump_attributes__ = [\"credit_line\", \"monthly_spending_limit\"]\n\n balance = None\n balance_date = None\n credit_line = None\n monthly_spending_limit = None\n status = None\n\n def __unicode__(self):\n return u\"Balance: %d at %s\" % (self.balance, str(self.balance_date))\n\n def __init__(self, session, **kwargs):\n super(AccountBalance, self).__init__(session, **kwargs)\n if self.status:\n self.status = SynchronizationStatus.from_dict(self.session, self.status)\n\n if self.balance_date:\n self.balance_date = dateutil.parser.parse(self.balance_date)\n\n\nclass Payment(ModelBase):\n 
\"\"\"Object representing a Payment.\n\n When creating a new Payment for submitment to the Figo API all necessary\n fields have to be set on the Payment object.\n\n Attributes:\n payment_id: internal figo payment id\n account_id: internal figo account id\n type: payment type, one of (Transfer, Direct Debit, SEPA transfer, SEPA direct debit)\n name: name of creditor or debtor\n account_number: account number of creditor or debtor\n bank_code: bank code of creditor or debtor\n bank_code: bank name of creditor or debtor\n amount: order amount\n purpose: purpose text\n bank_icon: icon of creditor or debtor bank\n bank_additional_icons: dictionary that maps resolutions to icon URLs\n amount: order amount\n currency: three character currency code\n purpose: purpose text\n submission_timestamp: submission timestamp\n creation_timestamp: internal creation timestamp\n modification_timestamp: internal creation timestamp\n traditional_id: transaction id, only set if payment has been matched to a transaction\n \"\"\"\n\n __dump_attributes__ = [\"type\", \"name\", \"account_number\", \"bank_code\",\n \"amount\", \"currency\", \"purpose\"]\n\n payment_id = None\n account_id = None\n type = None\n name = None\n account_number = None\n bank_code = None\n bank_name = None\n bank_icon = None\n bank_additional_icons = None\n amount = None\n currency = None\n purpose = None\n submission_timestamp = None\n creation_timestamp = None\n modification_timestamp = None\n transaction_id = None\n\n def __init__(self, session, **kwargs):\n super(Payment, self).__init__(session, **kwargs)\n\n if self.submission_timestamp:\n self.submission_timestamp = dateutil.parser.parse(self.submission_timestamp)\n\n if self.creation_timestamp:\n self.creation_timestamp = dateutil.parser.parse(self.creation_timestamp)\n\n if self.modification_timestamp:\n self.modification_timestamp = dateutil.parser.parse(self.modification_timestamp)\n\n def __unicode__(self):\n return u\"Payment: %s (%s at %s)\" % (self.name, self.account_number, self.bank_name)\n\n\nclass StandingOrder(ModelBase):\n \"\"\"Object representing one standing order on a certain bank account of the user.\n\n Attributes:\n standing_order_id: internal figo stanging order id\n account_id: internal figo account id\n iban: iban of creditor or debtor\n amount: order amount\n currency: three character currency code\n cents:\n name: name of originator or recipient\n purpose: purpose text\n execution_day: number of days of execution of the standing order\n first_execution_date: starting day of execution\n last_execution_date: finishing day of the execution\n interval:\n created_at: internal creation timestamp\n modified_at: internal creation timestamp\n \"\"\"\n\n __dump_attributes__ = []\n\n standing_order_id = None\n account_id = None\n iban = None\n amount = None\n currency = None\n cents = None\n name = None\n purpose = None\n execution_day = None\n first_execution_date = None\n last_execution_date = None\n interval = None\n created_at = None\n modified_at = None\n\n def __init__(self, session, **kwargs):\n super(StandingOrder, self).__init__(session, **kwargs)\n\n if self.created_at:\n self.created_at = dateutil.parser.parse(self.created_at)\n\n if self.modified_at:\n self.modified_at = dateutil.parser.parse(self.modified_at)\n\n if self.first_execution_date:\n self.first_execution_date = dateutil.parser.parse(self.first_execution_date)\n\n if self.last_execution_date:\n self.last_execution_date = dateutil.parser.parse(self.last_execution_date)\n\n def __unicode__(self):\n 
return u\"Standing Order: %s \" % (self.id)\n\nclass Transaction(ModelBase):\n \"\"\"Object representing one bank transaction on a certain bank account of the user.\n\n Attributes:\n transaction_id: internal figo transaction id\n account_id: internal figo account id\n name: name of originator or recipient\n account_number: account number of originator or recipient\n bank_code: bank code of originator or recipient\n bank_name: bank name of originator or recipient\n amount: transaction amount\n currency: three-character currency code\n booking_date: booking date\n value_date: value date\n purpose: purpose text\n type: transaction type, one of (Transfer, Standing order, Direct debit, Salary or rent,\n GeldKarte, Charges or interest)\n booking_text: booking text\n booked: boolean, indicates whether transaction is booked or pending\n categories: list of categories assigned to this transaction, ordered from general to\n specific\n creation_timestamp: create date\n modification_timestamp: modification date\n visited: boolean, indicates whether the transaction has already been marked as visited\n by the user\n bic: bic\n iban: iban\n booking_key: booking key\n creditor_id: creditor id\n mandate_reference: mandate reference\n sepa_purpose_code: sepa purpose coe\n sepa_remittance_info: sepa remittance info\n text_key_addition: text key addition\n end_to_end_reference: end to end reference\n customer_reference: customer reference\n prima_nota_number: prima nota number\n additional_info: provides more info about the transaction if available\n \"\"\"\n\n __dump_attributes__ = [\n \"transaction_id\",\n \"account_id\",\n \"name\",\n \"account_number\",\n \"bank_code\",\n \"bank_name\",\n \"amount\",\n \"currency\",\n \"booking_date\",\n \"value_date\",\n \"purpose\",\n \"type\",\n \"booking_text\",\n \"booked\",\n \"categories\",\n \"creation_timestamp\",\n \"modification_timestamp\",\n \"visited\",\n \"additional_info\",\n \"bic\",\n \"iban\",\n \"booking_key\",\n \"creditor_id\",\n \"mandate_reference\",\n \"sepa_purpose_code\",\n \"sepa_remittance_info\",\n \"text_key_addition\",\n \"end_to_end_reference\",\n \"customer_reference\",\n \"prima_nota_number\",\n ]\n\n transaction_id = None\n account_id = None\n name = None\n account_number = None\n bank_code = None\n bank_name = None\n amount = None\n currency = None\n booking_date = None\n value_date = None\n purpose = None\n type = None\n booking_text = None\n booked = None\n categories = None\n creation_timestamp = None\n modification_timestamp = None\n visited = None\n bic = None\n iban = None\n booking_key = None\n creditor_id = None\n mandate_reference = None\n sepa_purpose_code = None\n sepa_remittance_info = None\n text_key_addition = None\n end_to_end_reference = None\n customer_reference = None\n prima_nota_number = None\n additional_info = None\n\n def __init__(self, session, **kwargs):\n super(Transaction, self).__init__(session, **kwargs)\n\n if self.creation_timestamp:\n self.creation_timestamp = dateutil.parser.parse(self.creation_timestamp)\n\n if self.modification_timestamp:\n self.modification_timestamp = dateutil.parser.parse(self.modification_timestamp)\n\n if self.booking_date:\n self.booking_date = dateutil.parser.parse(self.booking_date)\n\n if self.value_date:\n self.value_date = dateutil.parser.parse(self.value_date)\n\n if self.categories:\n self.categories = [Category.from_dict(session, c) for c in self.categories]\n\n def __unicode__(self):\n return u\"Transaction: %d %s to %s at %s\" % (self.amount, self.currency,\n 
self.name, str(self.value_date))\n\n\nclass Category(ModelBase):\n \"\"\"Object representing a category for a transaction\n\n Attributes:\n id:\n parent_id:\n name:\n\n \"\"\"\n\n __dump_attributes__ = [\"id\", \"parent_id\", \"name\"]\n\n id = None\n parent_id = None\n name = None\n\n def __unicode__(self):\n return self.name\n\n\nclass Notification(ModelBase):\n \"\"\"Object representing a configured notification, e.g a webhook or email hook.\n\n Attributes:\n notification_id: internal figo notification ID from the notification registration response\n observe_key: notification key, see http://developer.figo.me/#notification_keys\n notify_uri: notification messages will be sent to this URL\n state: state similiar to sync and login process. It will passed as POST data for webhooks\n \"\"\"\n\n __dump_attributes__ = [\"observe_key\", \"notify_uri\", \"state\"]\n\n notification_id = None\n observe_key = None\n notify_uri = None\n state = None\n\n def __unicode__(self):\n return u\"Notification: %s triggering %s\" % (self.observe_key, self.notify_uri)\n\n\nclass SynchronizationStatus(ModelBase):\n \"\"\"Object representing the synchronization status of the figo servers with banks,\n payment providers or financial service providers.\n\n Attributes:\n code: internal figo status code\n message: human-readable error message\n sync_timestamp: timestamp of last synchronization\n success_timestamp: timestamp of last successful synchronization\n \"\"\"\n\n __dump_attributes__ = []\n\n code = None\n message = None\n sync_timestamp = None\n success_timestamp = None\n\n def __unicode__(self):\n return u\"Synchronization Status: %s (%s)\" % (self.code, self.message)\n\nclass Sync(ModelBase):\n \"\"\"Object representing a syncronisation for account creation.\n\n Attributes:\n id: internal figo syncronisation id\n status: Current processing state of the item.\n challenge: AuthMethodSelectChallenge (object) or EmbeddedChallenge (object) or RedirectChallenge (object) or DecoupledChallenge (object) (Challenge).\n error: Error detailing why the background operation failed.\n created_at: Time at which the sync was created\n started_at: Time at which the sync started\n ended_at: Time at which the sync ended\n \"\"\"\n __dump_attributes__ = []\n\n id = None\n status = None\n challenge = None\n error = None\n created_at = None\n started_at = None\n ended_at = None\n\n def __init__(self, session, **kwargs):\n super(Sync, self).__init__(session, **kwargs)\n if self.created_at:\n self.created_at = dateutil.parser.parse(self.created_at)\n\n if self.started_at:\n self.started_at = dateutil.parser.parse(self.started_at)\n\n if self.ended_at:\n self.ended_at = dateutil.parser.parse(self.ended_at)\n\n if self.challenge:\n self.challenge = Challenge.from_dict(self.session, self.challenge)\n\n def __unicode__(self):\n return u\"Sync: %s\" % (self.id)\n\nclass User(ModelBase):\n \"\"\"Object representing an user.\n\n Attributes:\n user_id: internal figo user id\n name: full name\n email: email address\n address: postal address\n verified_email: boolean, indicates whether the email address has been verified\n send_newsletter: boolean, incicates whether the user has signed up for the newsletter\n language: two letter code for preferred language\n premium: --\n premium_expires_on: --\n join_date: --\n\n \"\"\"\n\n __dump_attributes__ = [\"name\", \"address\", \"send_newsletter\", \"language\"]\n\n user_id = None\n name = None\n email = None\n address = None\n verified_email = None\n send_newsletter = None\n language = None\n 
premium = None\n premium_expires_on = None\n premium_subscription = None\n join_date = None\n\n def __init__(self, session, **kwargs):\n super(User, self).__init__(session, **kwargs)\n\n if self.join_date:\n self.join_date = dateutil.parser.parse(self.join_date)\n\n def __unicode__(self):\n return u\"User: %s (%s, %s)\" % (self.name, self.user_id, self.email)\n\n\nclass WebhookNotification(ModelBase):\n \"\"\"Object representing a WebhookNotification.\n\n Attributes:\n notification_id: internal figo notification ID from the notification registration response\n observe_key: notification key\n state: the state parameter from the notification registration request\n data: object or list with the data (AccountBalance or Transaction)\n \"\"\"\n\n __dump_attributes__ = []\n\n notification_id = None\n observe_key = None\n state = None\n data = None\n\n def __unicode__(self):\n return u\"WebhookNotification: %s\" % (self.notification_id)\n\n\nclass Service(ModelBase):\n \"\"\"Object representing a payment service.\n\n Attributes:\n name: human readable name of the service\n bank_code: surrogate bank code used for this service\n state: URL to a logo of the bank\n additional_icons: dictionary that maps resolutions to icon URLs\n language: the language the service description is in\n available_languages: list of other available languages\n \"\"\"\n\n __dump_attributes__ = [\"name\", \"bank_code\", \"icon\", \"additional_icons\", \"language\"]\n\n name = None\n bank_code = None\n state = None\n additional_icons = None\n language = None\n available_languages = []\n\n def __init__(self, session, **kwargs):\n super(Service, self).__init__(session, **kwargs)\n if self.language:\n self.available_languages = [l for l in self.language['available']]\n self.language = self.language['current']\n\n def __unicode__(self, *args, **kwargs):\n return u\"Service: %s\" % (self.bank_code)\n\n\nclass LoginSettings(ModelBase):\n \"\"\"Object representing login settings for a banking service.\n\n Attributes:\n bank_name: human readable bank of the bank\n supported: boolean, if set bank is supported\n icon: URL to the logo of the bank\n additional_icons: dictionary that maps resolutions to icon URLs\n credentials: list of credentials needed to connect to the bank\n auth_type: kind of authentication used by the bank\n advice: any additional advice useful to locate the required credentials\n \"\"\"\n\n __dump_attributes__ = [\"bank_name\", \"supported\", \"icon\", \"additional_icons\",\n \"credentials\", \"auth_type\", \"advice\"]\n\n bank_name = None\n supported = None\n icon = None\n additional_icons = None\n credentials = None\n auth_type = None\n advice = None\n\n def __unicode__(self, *args, **kwargs):\n return u\"LoginSettings: %s\" % (self.bank_name)\n\n\nclass Credential(ModelBase):\n \"\"\"Object representing a login credential field for a banking service.\n\n Attributes:\n label: label for text input field\n masked: boolean, if set the text input field is used for password entry and should be\n masked\n optional: boolean, if set the field is optional and may be an empty string\n \"\"\"\n\n __dump_attributes__ = [\"label\", \"masked\", \"optional\"]\n\n label = None\n masked = None\n optional = None\n\n def __unicode__(self, *args, **kwargs):\n return u\"Credential: %s\" % (self.label)\n\n\nclass TaskToken(ModelBase):\n \"\"\"Object representing a task token.\n\n Attributes:\n task_token:\n \"\"\"\n\n __dump_attributes__ = [\"task_token\"]\n\n task_token = None\n\n def __unicode__(self, *args, **kwargs):\n return 
u\"TaskToken: %s\" % (self.task_token)\n\n\nclass TaskState(ModelBase):\n \"\"\"Object representing a tasks state.\n\n Attributes:\n account_id: account id of currently processed account\n message: status message or error message for currently processed account\n is_waiting_for_pin: boolean, if set the figo server is waiting for PIN\n is_waiting_for_response: boolean, if set the figo server is waiting for a response to\n the parameter challenge\n is_erroneous: boolean, if set an error occurred\n is_ended: boolean, if set the communication with the bank has been completed\n challenge: challenge object\n error: dictionary, populated in the case of error\n\n \"\"\"\n\n __dump_attributes__ = [\"account_id\", \"message\", \"is_waiting_for_pin\",\n \"is_waiting_for_response\", \"is_erroneous\",\n \"is_ended\", \"challenge\", \"error\"]\n\n account_id = None\n message = None\n is_waiting_for_pin = None\n is_waiting_for_response = None\n is_erroneous = None\n is_ended = None\n challenge = None\n error = None\n\n def __unicode__(self, *args, **kwargs):\n return (u\"TaskState: '{self.message}' (is_erroneous: {self.is_erroneous}, \"\n \"is_ended: {self.is_ended})\".format(self=self))\n\n\nclass Challenge(ModelBase):\n \"\"\"Object representing a challenge.\n\n Attributes:\n title: challenge title\n label: response label\n format: challenge data format, one of (Text, HTML, HHD, Matrix)\n data: challenge data\n\n \"\"\"\n __dump_attributes__ = [\"title\", \"label\", \"format\"]\n\n id = None\n title = None\n label = None\n format = None\n data = None\n type = None\n\n def __unicode__(self, *args, **kwargs):\n return u\"Challenge: %s\" % (self.title)\n\n\nclass PaymentProposal(ModelBase):\n \"\"\"Object representing a payment proposal.\n\n Attributes:\n account_number: Account number or IBAN\n bank_code: bank code or BIC\n name: Name of the payment proposal\n \"\"\"\n\n __dump_attributes__ = [\"account_number\", \"bank_code\", \"name\"]\n\n account_number = None\n bank_code = None\n name = None\n\n def __unicode__(self, *args, **kwargs):\n return u\"Payment Proposal: %s\" % (self.name)\n\n\nclass Process(ModelBase):\n \"\"\"Object representing a Business Process.\n\n Attributes:\n email: The email of the existing user to use as context or the new user to create\n beforehand. In the latter case it must obey the figo username & password policy.\n password: The password of the user existing or new user. In the latter case it must obey\n the figo username & password policy.\n redirect_uri: The authorization code will be sent to this callback URL. It must match one\n of the URLs registered during application registration.\n state: Any kind of string that will be forwarded in the callback response message. It\n serves two purposes: The value is used to maintain state between this request and the\n callback, e.g. it might contain a session ID from your application. The value should\n also contain a random component, which your application checks to mitigate cross-site\n request forgery.\n steps: A list of step definitions. 
Each step definition is a dictionary with type and\n options keys, where type is the name of step type and options is another dictionary\n containing all the settings for the respective step.\n \"\"\"\n\n __dump_attributes__ = [\"email\", \"password\", \"redirect_uri\", \"state\", \"steps\"]\n\n email = None\n password = None\n redirect_uri = None\n state = None\n steps = None\n\n\nclass ProcessStep(ModelBase):\n \"\"\"Object representing a process step.\n\n Attributes:\n type: name of step type\n options: settings for respective step\n \"\"\"\n\n __dump_attributes__ = [\"type\", \"options\"]\n\n type = None\n options = None\n\n def __unicode__(self, *args, **kwargs):\n return u\"ProcessStep Type: %s\" % (self.type)\n\n\nclass ProcessOptions(ModelBase):\n \"\"\"Object representing a process option.\n\n Attributes:\n account_number:\n amount:\n bank_code:\n currency:\n name:\n purpose:\n type:\n \"\"\"\n\n __dump_attributes__ = [\"account_number\", \"amount\", \"bank_code\", \"currency\",\n \"name\", \"purpose\", \"type\"]\n\n account_number = None\n amount = None\n bank_code = None\n currency = None\n name = None\n purpose = None\n type = None\n\n\nclass ProcessToken(ModelBase):\n \"\"\"Object representing a process token.\n\n Attributes:\n process_token:\n \"\"\"\n\n __dump_attributes__ = [\"process_token\"]\n\n process_token = None\n\n def __unicode__(self, *args, **kwargs):\n return u\"Process Token: %s\" % (self.process_token)\n\n\nclass Security(ModelBase):\n \"\"\"Object representing one bank security on a certain bank account of the user.\n\n Attributes:\n security_id: internal figo connect security id\n account_id: internal figo connect account id\n name: name of originator or recipient\n isin: international securities identification number\n wkn: wertpapierkennnummer\n currency: three character currency code\n amount: monetary value in account currency\n quantity: number of securities or value\n amount_original_currency: monetary value in trading currency\n exchange_rate: exchange rate between trading and account currency\n price: current price\n price_currency: currency of current price\n purchase_price: purchase price\n purchase_price_currency: currency of purchase price\n visited: boolean that indicates whether the security has been marked as visited by the user\n trade_timestamp: trade timestamp\n creation_timestamp: internal creation timestamp\n modification_timestamp: internal modification timestamp\n\n \"\"\"\n\n __dump_attributes__ = []\n\n security_id = None\n account_id = None\n name = None\n isin = None\n wkn = None\n currency = None\n amount = None\n quantity = None\n amount_original_currency = None\n exchange_rate = None\n price = None\n price_currency = None\n purchase_price = None\n purchase_price_currency = None\n visited = None\n trade_timestamp = None\n creation_timestamp = None\n modification_timestamp = None\n\n def __init__(self, session, **kwargs):\n super(Security, self).__init__(session, **kwargs)\n\n if self.trade_timestamp:\n self.trade_timestamp = dateutil.parser.parse(self.trade_timestamp)\n\n if self.creation_timestamp:\n self.creation_timestamp = dateutil.parser.parse(self.creation_timestamp)\n\n if self.modification_timestamp:\n self.modification_timestamp = dateutil.parser.parse(self.modification_timestamp)\n\n def __unicode__(self):\n return u\"Security: %d %s to %s at %s\" % (self.amount, self.currency, self.name,\n 
self.trade_timestamp)\n","repo_name":"figo-connect/python-figo","sub_path":"figo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":30659,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"18"} +{"seq_id":"35837565623","text":"# encoding: utf-8\nfrom app.modules import base\n\nclass shop(base):\n def render(self, template_name, **kwargs):\n super(shop, self).render(\"shop/\" + template_name, **kwargs)\n\nclass ShopHandler(shop):\n\tdef get(self):\n\t\tif self.get_secure_cookie('u', None) is not None:\n\t\t\tusers = self.db.client().data\n\t\t\tself.render('index.html', users=users)\n\t\telse:\n\t\t\tself.redirect('/')\n\t\treturn\n\nclass NotFoundHandler(shop):\n def get(self):\n self.write(\"Sorry, Page not Found.. Go back\")\n\nurl_prefix = '/shop'\n\nurls = [\n ('?', ShopHandler)\n]","repo_name":"MJSJ/mykrystal.com","sub_path":"app/modules/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"37976672531","text":"#!/usr/bin/env python\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport json,logging,os,sys\n\naddress=\"127.0.0.3\"\nPORT = 7777\nclass serverhandler(BaseHTTPRequestHandler):\n\t#This handles any requests that the server receives\n\t\n\t#GET req handler\n\tdef do_GET(self):\n\t\tlogging.warning(\"**GETting started**\") \n\t\tlogging.warning(self.headers)\n\t\tself.send_response(200)\n\t\t#200 is OK\n\t\t#Response for successful HTTP requests\n\t\t#link:https://en.wikipedia.org/wiki/List_of_HTTP_status_codes\n\t\tself.send_header(\"Content-type\",\"application/json\")\n\t\tself.end_headers()\n\t\tself.wfile.write(\"GET Done\")\n\tdef do_POST(self):\n\t\tlogging.warning(\"**POSTing**\")\n\t\tself.send_response(200)\n\t\tself.request.sendall(json.dumps({'path':self.path}))\n\t\tself.send_header(\"Content-type\",\"application/json\")\n\t\tself.end_headers()\n\t\t\n\t\tself.wfile.write(\"POST Done\")\n\t\t\ndef runserver():\n\ttry:\n\t\tprint('http server is starting...')\n\t\tserver_address = (address,PORT)\n\t\thttpd = HTTPServer(server_address,\"S\")\n\t\tlogging.warning(\"**HTTP server starting**\")\n\t\tprint('http server is running...listening on port 127.0.0.3:7777' )\n\t\thttpd.serve_forever()\n\texcept KeyboardInterrupt:\n\t\thttpd.server_close()\n\nrunserver()\n\n \n","repo_name":"skshetry/pyHTTPServer","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22393677305","text":"import abc\nimport json\nimport logging\nimport os\nimport re\nfrom collections import namedtuple\n\nfrom transformers import BertTokenizerFast\n\nEntity = namedtuple('Entity', ['text', 'span', 'type'])\nRelation = namedtuple('Relation', ['subject', 'object', 'subject_span', 'object_span', 'predict'])\nExample = namedtuple('Example', ['id', 'text', 'relations', 'entities'])\n\n\nclass AbstractDatasetAdapter(abc.ABC):\n\n @abc.abstractmethod\n def adapte(self, input_file, output_file, **kwargs):\n raise NotImplementedError()\n\n\nclass NYTBertAdapter(AbstractDatasetAdapter):\n\n def __init__(self, pretrained_bert_path, add_special_tokens=False, do_lower_case=False, **kwargs):\n super().__init__()\n self.do_lower_case = do_lower_case\n self.tokenizer = BertTokenizerFast.from_pretrained(\n pretrained_bert_path, add_special_tokens=add_special_tokens, 
do_lower_case=do_lower_case)\n\n def adapte(self, input_file, output_file, **kwargs):\n with open(output_file, mode='wt', encoding='utf-8') as fout, \\\n open(input_file, mode='rt', encoding='utf-8') as fin:\n count = 0\n for line in fin:\n data = json.loads(line)\n example = self._adapte_example(data)\n # example.pop('offset', None)\n # json.dump(example, fout, ensure_ascii=False)\n # fout.write('\\n')\n # print(example)\n self._validate_example(example)\n count += 1\n if count == kwargs.get('limit', -1):\n break\n\n def _adapte_example(self, data):\n text = data['sentText']\n codes = self.tokenizer.encode_plus(text, return_offsets_mapping=True, add_special_tokens=False)\n example = {\n 'text': text,\n 'tokens': self.tokenizer.convert_ids_to_tokens(codes['input_ids']),\n 'ids': codes['input_ids'],\n 'offset': codes['offset_mapping']\n }\n self._adapte_entities(data, example)\n # TODO: finishe relations adaption\n # self._adapte_relations(data, example)\n return example\n\n def _adapte_entities(self, data, example):\n text = data['sentText']\n entity_list = []\n for e in data['entityMentions']:\n for m in re.finditer(re.escape(e['text']), text):\n char_span_start, char_span_end = m.span()[0], m.span()[1]\n # prev character is number\n if char_span_start > 0 and re.match('\\d', text[char_span_start - 1]):\n continue\n # next character is number\n if char_span_end < len(text) and re.match('\\d', text[char_span_end]):\n continue\n # get token span by char span\n token_span_start, token_span_end = self._parse_token_span(example, char_span_start, char_span_end)\n if not token_span_start or not token_span_end:\n print('invalid token span for entity: {}, regex match span: {}'.format(e, m.span()))\n continue\n entity_list.append({\n 'text': e['text'],\n 'type': e['label'],\n 'token_span': [token_span_start, token_span_end],\n 'char_span': [char_span_start, char_span_end]\n })\n example.update({\n 'entity_list': entity_list\n })\n\n def _adapte_relations(self, data, example):\n entities = {e['text']: e for e in example['entity_list']}\n relations_list = []\n for relation in data['relationMentions']:\n relations_list.append({\n 'subject': None,\n 'object': None,\n 'predict': None\n })\n\n def _parse_token_span(self, example, start, end):\n token_start, token_end = None, None\n for idx, (token, offset) in enumerate(zip(example['tokens'], example['offset'])):\n if offset[0] == start and end == offset[1]:\n return idx, idx + 1\n if offset[0] == start:\n token_start = idx\n if end == offset[1]:\n token_end = idx\n if token_start is not None and token_end is not None:\n return token_start, token_end + 1\n return token_start, token_end\n\n def _validate_example(self, example):\n tokens = example['tokens']\n text = example['text']\n for entity in example['entity_list']:\n start, end = entity['token_span']\n print()\n print('tokens subsequence: {}'.format(tokens[start:end]))\n print('entity text: {}'.format(entity['text']))\n char_start, char_end = entity['char_span']\n print('origin text: {}'.format(text[char_start:char_end]))\n","repo_name":"luozhouyang/TPLinker","sub_path":"preprocessor/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"22262919091","text":"import numpy as np \nimport os \nimport matplotlib.pyplot as plt\n\n\ndef loop_arrays(path):\n\n summary_patch = []\n summary_frag = []\n bins = []\n\n # Looping through all the arrays\n for ls_file in 
os.listdir(path):\n\n ls_path = os.path.join(path, ls_file)\n\n # Loading the array as numpy\n arr = np.load(ls_path, allow_pickle = True)\n\n # Create the boxplot for the array, teturns summary values\n cnt_patch, cnt_fragment, name = pre_proces_plot(arr, ls_file)\n\n # Saving summary values\n summary_patch.append(cnt_patch)\n summary_frag.append(cnt_fragment)\n bins.append(name)\n\n # Sort the summary before plotting\n summary_sort(summary_patch, summary_frag, bins)\n\n\n\ndef pre_proces_plot(arr, ls_file):\n\n # Creating a list of len of each item in array\n len_ls = create_len_list(arr)\n\n # Getting the unique bins\n bins = np.unique(len_ls)\n\n # Calculating the total amount of patches\n cnt_patch = sum(len_ls)\n\n # Calculating the total amount of fragments\n cnt_fragments = len(len_ls)\n\n # Getting the name of the list\n name = ls_file.split('Paths_')[1]\n name = name.split('.npy')[0]\n\n\n create_box_plot(len_ls, bins, cnt_patch, cnt_fragments, name)\n\n return cnt_patch, cnt_fragments, name \n\n\ndef create_box_plot(len_ls, bins, cnt_patch, cnt_fragments, name):\n\n # Plotting the histogram \n\n # Creating the figure and axis \n fig, axs = plt.subplots(1, 1)\n\n # Adding the hist values to the axis \n axs.hist(len_ls, bins, edgecolor='black', linewidth=1.0)\n\n # Setting the ticks for the hist\n axs.set_xticks(bins)\n\n # Setting the labels for the axis \n axs.set_xlabel('Total number of patches per fragment')\n axs.set_ylabel('Frequency')\n\n # Adding texts to the figure \n plt.figtext(0.55, 0.75, \"Cutoff at: \" + name + '%')\n plt.figtext(0.55, 0.70, \"Total patches: \" + str(cnt_patch))\n plt.figtext(0.55, 0.65, \"Total fragments: \" + str(cnt_fragments))\n plt.savefig('Images/Plots/Plot_' + name + '.png')\n plt.close()\n\n\ndef create_len_list(arr):\n len_ls = []\n\n for inst in arr:\n len_ls.append(len(inst))\n\n return len_ls\n\n\ndef summary_sort(ls1, ls2, bins):\n \n # Defining our stop value \n # The length of the list\n stop = len(bins)\n\n # For index and object in our bins\n for idx1, obj in enumerate(bins):\n\n # Index for comparison object \n idx2 = idx1 + 1 \n\n # Comparing till the stop \n while(idx2 != stop):\n \n # Object in the list to compare to \n cmp_obj = bins[idx2]\n\n # Swap if object is larget than comparison obj\n if int(obj) > int(cmp_obj):\n\n # Order the bins \n bins[idx1] = cmp_obj\n bins[idx2] = obj\n\n # Order the other two lists accordingly \n tmp1 = ls1[idx1] \n tmp2 = ls1[idx2]\n\n ls1[idx1] = tmp2\n ls1[idx2] = tmp1\n\n tmp1 = ls2[idx1] \n tmp2 = ls2[idx2]\n\n ls2[idx1] = tmp2\n ls2[idx2] = tmp1\n\n obj = cmp_obj\n\n # Increase idx2\n idx2 += 1 \n\n\n# Creating the summary \ndef create_summary(ls1, ls2, bins):\n\n # Creating the figure and axis \n fig, axs = plt.subplots(1, 1)\n\n # Adding the hist values to the axis \n axs.plot(bins, ls2, label = 'Fragments', linewidth = 1.0)\n axs.plot(bins, ls1, label = 'Patches', linewidth = 1.0)\n\n # Adding a limit and grid \n axs.set_xlim(self.trk_cutoff[0], self.trk_cutoff[-1])\n axs.set_ylim(0,)\n axs.grid()\n\n # Setting the labels for the axis \n axs.set_xlabel('Percentage of non zero value pixels')\n axs.set_ylabel('Frequency')\n axs.legend()\n\n # Adding texts to the figure \n plt.savefig('Images/Summary/' + 'summary' + '.png')\n 
plt.close()\n\n\n\n\n\n\n\n\n\n","repo_name":"BrewBrizzly/DSS","sub_path":"2_Statistics/Create_summary_and_hist_patches/create_plots.py","file_name":"create_plots.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"35254244669","text":"class Solution:\n # classic dfs,\n # return the count in the dfs instad of void, and keep updating it\n def maxAreaOfIsland(self, grid: List[List[int]]) -> int:\n def isValid(i,j):\n return i >= 0 and j >= 0 and i < len(grid) and j < len(grid[0]) and grid[i][j] == 1\n \n def dfs(i, j):\n if isValid(i, j):\n grid[i][j] = -1\n a = dfs(i, j+1)\n b = dfs(i, j-1)\n c = dfs(i-1, j)\n d = dfs(i+1, j)\n return 1+a+b+c+d\n else:\n return 0\n \n max_count = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n count = dfs(i,j)\n max_count = max(max_count, count)\n \n return max_count\n","repo_name":"pranavmswamy/leetcode","sub_path":"mathworks/maxAreaOfIsland.py","file_name":"maxAreaOfIsland.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31427934871","text":"'''\n https://projecteuler.net/problem=20\n n! means n × (n − 1) × ... × 3 × 2 × 1\n\n For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,\n and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.\n\n Find the sum of the digits in the number 100!\n\n Big numbers\n'''\n\nnumToFac = 100\n\ndef getStrFac(num):\n sum = 1\n for i in range(1, num+1):\n sum *= i\n return str(sum)\n\ndef getTotalDigits(string):\n sum = 0\n for i in string:\n sum += int(i)\n return sum\n\ndef main():\n print(getTotalDigits(getStrFac(numToFac)))\n\nif __name__ == \"__main__\":\n main()","repo_name":"KaelPearson/ProjectEulerChallenges","sub_path":"Completed/problem20.py","file_name":"problem20.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74221482280","text":"# -*- coding: utf-8 -*-\n \n\"\"\"setup.py: setuptools control.\"\"\"\n \nfrom setuptools import setup, find_packages\nimport re\n \nversion = re.search(\n '^__version__\\s*=\\s*\"(.*)\"',\n open('skduplo/__init__.py').read(),\n re.M\n ).group(1)\n \nwith open(\"README.md\", \"rb\") as f:\n long_descr = f.read().decode(\"utf-8\")\n\nsetup(\n license=\"MIT\",\n name = \"scikit-duplo\",\n packages=find_packages(exclude=[\"notebooks\", \"tests\"]),\n install_requires=[\n 'pandas','numpy','scikit-learn'\n ],\n include_package_data=True,\n version = version,\n description = \"Sci-kit learn tools for machine learning pipelines\",\n long_description = long_descr,\n long_description_content_type='text/markdown',\n author = \"John Hawkins\",\n author_email = \"johnc@getting-data-science-done.com\",\n url = \"http://getting-data-science-done.com\",\n project_urls = {\n 'Documentation': \"http://scikit-duplo.readthedocs.io\",\n 'Source': 'https://github.com/getting-data-science-done/scikit-duplo',\n 'Tracker': 'https://github.com/getting-data-science-done/scikit-duplo/issues',\n },\n )\n\n\n","repo_name":"getting-data-science-done/scikit-duplo","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11393352774","text":"from django.shortcuts import render, redirect\nfrom django.http 
import HttpResponse\nfrom .models import Conference, Journal, Faculty, Post, Event, RequestPublications, Rating\nfrom .forms import ConferenceForm, JournalForm, PostForm, EventForm, RequestPublicationsForm, RatingForm\n\n\ndef home(request):\n return render(request, 'html-pages/home.html')\n\ndef pub(request):\n pub = Conference.objects.all()\n context = {'publications': pub}\n return render(request, 'html-pages/publications.html', context)\n\ndef jpub(request):\n jpub = Journal.objects.all()\n context = {'jpublications': jpub}\n return render(request, 'html-pages/Journal-publications.html', context)\n\ndef spub(request, pk):\n pubObj = Faculty.objects.all()\n context = {'pubObj': pubObj}\n return render(request, 'html-pages/single-publications.html', context)\n\ndef sjpub(request, pk):\n jpubObj = Faculty.objects.all()\n context = {'jpubObj': jpubObj}\n return render(request, 'html-pages/single-jpublications.html', context)\n\ndef submissions(request):\n submissions = Post.objects.all()\n context = {'submissions': submissions}\n return render(request, 'html-pages/published-submissions.html', context)\n\ndef EventSubmissions(request):\n eveObj = Event.objects.all()\n context = {'eveObj': eveObj}\n return render(request, 'html-pages/published-events.html', context)\n\ndef requestingPapers(request):\n reqObj = RequestPublications.objects.all()\n context = {'reqobj': reqObj}\n return render(request, 'html-pages/profile-home.html', context)\n\ndef ratedPublications(request):\n r = Rating.objects.all()\n context = {'r': r}\n return render(request, 'html-pages/ratedPublications.html', context)\n\ndef createPublications(request):\n form = ConferenceForm()\n\n if request.method == \"POST\":\n form = ConferenceForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('conference-publications')\n \n context = {'form': form}\n return render(request, \"html-pages/html-forms.html\", context)\n\ndef createJournalPublications(request):\n form = JournalForm()\n\n if request.method == \"POST\":\n if form.is_valid():\n form.save()\n return redirect('journal-publications')\n\n context = {'form': form}\n return render(request, \"html-pages/html-forms.html\", context)\n\ndef postPublications(request):\n form = PostForm()\n\n if request.method == \"POST\":\n if form.is_valid():\n form.save()\n return redirect('published-submissions')\n\n context = {'form': form}\n return render(request, \"html-pages/html-forms.html\", context)\n\ndef publishingEvents(request):\n form = EventForm()\n\n if request.method == \"POST\":\n if form.is_valid():\n if form.save():\n return redirect('published-events')\n\n context = {'form': form}\n return render(request, \"html-pages/html-forms.html\", context)\n\ndef requestingPublications(request):\n form = RequestPublicationsForm()\n\n if request.method == \"POST\":\n if form.is_valid():\n if form.save():\n return redirect('profile-home')\n\n context = {'form': form}\n return render(request, \"html-pages/html-forms.html\", context)\n\ndef ratingPublications(request):\n form = RatingForm()\n\n if request.method == \"POST\":\n if form.is_valid():\n if form.save():\n return redirect('ratedPublications')\n\n context = {'form': form}\n return render(request, \"html-pages/html-forms.html\", context)\n\n","repo_name":"AyeshaNaaz17/DBMS-MiniProject2","sub_path":"projects2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"11621524056","text":"import numpy as np\r\nimport joblib\r\n\r\ndef computeModel(x):\r\n lst = x\r\n lst = [float(i) for i in lst]\r\n lst = np.array(lst).reshape(1,-1)\r\n model = joblib.load(open( 'Crop_final.sav', 'rb'))\r\n ans = model.predict(lst)\r\n return ans[0]\r\n","repo_name":"youssef12347/Artificial-Intelligence","sub_path":"FarmingAI/getModel.py","file_name":"getModel.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19199816165","text":"import pandas as pd\nimport pytest\n\nfrom st_weaviate_connection import WeaviateConnection\n\n\n@pytest.fixture\ndef weaviate_connection(weaviate_db):\n yield WeaviateConnection(\"test_weaviate_conn\", url=\"http://localhost:8080\")\n\n\ndef test_query(weaviate_connection):\n query = \"\"\"\n {\n Get {\n TVShow {\n title\n }\n }\n }\n \"\"\"\n df = weaviate_connection.query(query)\n assert df.shape == (5, 1)\n assert set(df[\"title\"]) == {\n \"Animaniacs\",\n \"Rugrats\",\n \"Doug\",\n \"Hey Arnold!\",\n \"The Ren & Stimpy Show\",\n }\n\n\ndef test_malformed_query(weaviate_connection):\n query = \"\"\"\n {\n Foo\n }\n \"\"\"\n with pytest.raises(Exception) as exc_info:\n weaviate_connection.query(query)\n\n assert \"The GraphQL query returned an error\" in str(exc_info.value)\n\n\ndef test_query_with_additional_properties(weaviate_connection):\n query = \"\"\"\n {\n Get {\n TVShow(limit: 3, bm25: {query: \"Rugrats\"}) {\n title\n creator\n _additional {\n score\n vector\n }\n }\n }\n }\n \"\"\"\n df = weaviate_connection.query(query)\n assert df.shape == (1, 4)\n assert set(df.columns) == {\n \"title\",\n \"creator\",\n \"_additional.score\",\n \"_additional.vector\",\n }\n assert set(df[\"title\"]) == {\"Rugrats\"}\n\n\ndef test_query_builder(weaviate_connection):\n client = weaviate_connection.client()\n animaniacs_query_vector = [0.1, 0.2, 0.3, 0.4, 0.5]\n results = (\n client.query.get(\"TVShow\", [\"title\", \"creator\"])\n .with_limit(3)\n .with_additional(\"distance\")\n .with_near_vector(\n {\n \"vector\": animaniacs_query_vector,\n }\n )\n .do()\n )\n\n df = pd.json_normalize(results[\"data\"][\"Get\"][\"TVShow\"])\n\n assert df.shape == (3, 3)\n assert set(df.columns) == {\"title\", \"creator\", \"_additional.distance\"}\n assert df.iloc[0][\"title\"] == \"Animaniacs\"\n","repo_name":"weaviate/st-weaviate-connection","sub_path":"tests/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"18"} +{"seq_id":"13371893604","text":"from config import LENGTH,SAMPLING_RATE\nfrom inference_live import Inference\nimport time\nimport numpy as np\n\nimport socket_client_live\n\nimport brainflow\nfrom brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds\nfrom brainflow.data_filter import DataFilter\n\nBoardShim.enable_dev_board_logger()\n\nboardParameters = BrainFlowInputParams()\nboardParameters.serial_port = '/dev/ttyUSB0'\n\nFILENAME = \"live_session/file\"\nTIME_DELAY = 2\n# real board\ncytonId = BoardIds.CYTON_BOARD.value \nboard = BoardShim(cytonId, boardParameters)\n\ndef main():\n pass\n\ndef read_button():\n #read digital pin 17\n digital_pin_17 = board.get_current_board_data(1)[16]\n # print(digital_pin_17)\n return digital_pin_17\n\ndef save_file(data):\n DataFilter.write_file(data,FILENAME,\"w\")\n\ndef start_session():\n 
BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'Starting Recording Session')\n board.prepare_session()\n board.start_stream()\n\ndef end_session():\n board.stop_stream()\n BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'Ending Recording Session')\n board.release_session()\n \n\ndef record_data():\n # clear buffer\n end_session()\n #start stream\n start_session()\n time.sleep(TIME_DELAY)\n \n while(read_button()==True):\n pass\n\n # save_file(data)\n # end_session()\n return board.get_board_data()\n\n\ndef loop():\n start_session()\n\n while(True):\n if(read_button()==True):\n try:\n print(\"In record section\")\n data = record_data()\n data = (np.transpose(data))[TIME_DELAY*SAMPLING_RATE:,1:9]\n data = data[:LENGTH,:]\n # print(data.shape)\n if (len(data) N):\n K = N\n c = np.zeros((K,1))\n Mconst = np.sqrt(2/K)\n for k in range(0,K):\n if (k == 0):\n beta = 1/np.sqrt(2)\n else:\n beta = 1\n kSum = [x[n]*np.cos(k*np.pi/N*(0.5+n)) for n in range(0,N)]\n c[k] = Mconst*beta*np.sum(kSum)\n return c","repo_name":"adelinocpp/Multi_Feature_speaker_comparison","sub_path":"imports/signal_process_util.py","file_name":"signal_process_util.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30459849428","text":"# -*- coding: utf-8 -*-\n\nfrom sys import argv\nfrom pprint import pprint\nimport sqlite3\nfrom tabulate import tabulate\n\n\ndef args_check(argv):\n if len(argv) == 1:\n header = 'В таблице dhcp такие записи:'\n query = 'SELECT * from dhcp'\n return(header, query)\n elif len(argv) == 3:\n header = f\"Информация об устройствах с такими параметрами: {argv[1]} {argv[2]}\\n\"\n query = f\"SELECT * from dhcp WHERE {argv[1]} = '{argv[2]}'\"\n return(header, query)\n else:\n return False\n\ndef get_data(header, query):\n connection = sqlite3.connect('dhcp_snooping.db')\n try:\n data = connection.execute(query)\n result = tabulate(data)\n connection.close()\n print(header, result, sep='\\n')\n except sqlite3.OperationalError:\n print('Данный параметр не поддерживается. '\n 'Допустимые значения параметров: mac, ip, vlan, interface, switch')\n\n\nif __name__ == '__main__':\n if args_check(argv):\n tmp = args_check(argv)\n get_data(tmp[0], tmp[1])\n else:\n print('Пожалуйста, введите два или ноль аргументов')","repo_name":"hasculdr/pyneng_2022","sub_path":"exercises/25_db/task_25_2/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5338289301","text":"import sys\n\ndef fib(x):\n f = [0,1]\n while(x > len(f)):\n new_int = f[len(f) - 1] + f[len(f) - 2]\n f.append( new_int )\n return f\n\ndef factorial(x):\n z = 1\n for y in range(1,x+1):\n z *= y\n return z\n","repo_name":"emeraldbuttons/fibonacci","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11306267177","text":"from FaultInjector.NetworkFaultInjector import NetworkFaultInjector\nfrom FaultInjector.FaultInjectorEngine import FaultInjectorEngine\n\nfrom tqdm import tqdm\nimport numpy as np\nimport itertools\n\n\nclass StuckAtFaultInjector(NetworkFaultInjector):\n\n def __init__(self, network, seed):\n super().__init__(network, seed)\n\n def generate_fault_list(self, fault_list_length=10000):\n \"\"\"\n Generate and set the fault list. 
Each entry of the fault list is a list of three elements: the first element is\n the index of the layer, the second is a tuple containing the index of the weight, the last element is the bit to\n be flipped\n :param fault_list_length: Length of the fault list\n \"\"\"\n\n target_layers = super().generate_layer_probability(fault_list_length)\n\n # Generate a dictionary containing the number of fault to generate in each layer\n layers_list, layers_count = np.unique(target_layers, return_counts=True)\n layers_dict = dict(zip(layers_list, layers_count))\n\n for layer_index, layer_count in tqdm(layers_dict.items()):\n\n weight_params = self.network.layers[layer_index].get_weights()[0].size\n bias_params = self.network.layers[layer_index].get_weights()[1].size\n\n total_layer_params = weight_params + bias_params\n weights_probability = weight_params / total_layer_params\n weight_count = int(np.ceil(weights_probability * layer_count))\n bias_count = layer_count - weight_count\n\n if len(self.network.layers[layer_index].get_weights()[0].shape) == 4:\n dim_0 = np.arange(self.network.layers[layer_index].get_weights()[0].shape[0])\n dim_1 = np.arange(self.network.layers[layer_index].get_weights()[0].shape[1])\n dim_2 = np.arange(self.network.layers[layer_index].get_weights()[0].shape[2])\n dim_3 = np.arange(self.network.layers[layer_index].get_weights()[0].shape[3])\n layer_weights_locations = tuple(itertools.product(*[dim_0, dim_1, dim_2, dim_3]))\n else:\n dim_0 = np.arange(self.network.layers[layer_index].get_weights()[0].shape[0])\n dim_1 = np.arange(self.network.layers[layer_index].get_weights()[0].shape[1])\n layer_weights_locations = tuple(itertools.product(*[dim_0, dim_1]))\n\n layer_weight_fault_locations = [tuple(location) for location in self.rng.choice(layer_weights_locations,\n size=min(len(layer_weights_locations),\n weight_count),\n replace=False)]\n\n layer_bias_fault_locations = [tuple(location) for location in self.rng.choice(np.arange(bias_params),\n size=min(bias_params,\n bias_count),\n replace=False)]\n\n layer_fault_locations = layer_weight_fault_locations + layer_bias_fault_locations\n\n bias_or_weights = list(np.full(len(layer_weight_fault_locations), 0)) + list(np.full(len(layer_bias_fault_locations), 1))\n layer = np.full(len(layer_fault_locations), layer_index)\n bits = self.rng.integers(32, size=len(layer_fault_locations))\n values = self.rng.integers(2, size=len(layer_fault_locations))\n\n layer_faults = list(zip(layer, bias_or_weights, layer_fault_locations, bits, values))\n self.fault_list += layer_faults\n pass\n\n # # For each layer selected, generate an injection index and a target bit\n # for layer_index in target_layers:\n # while True:\n # # Get the probability of a fault affecting the bias versus the weights\n # total_layer_params = self.network.layers[layer_index].get_weights()[0].size +\\\n # self.network.layers[layer_index].get_weights()[1].size\n # weights_probability = self.network.layers[layer_index].get_weights()[0].size / total_layer_params\n # bias_or_weights = self.rng.choice([0, 1], p=[weights_probability, 1 - weights_probability])\n # # Where to inject the fault\n # if bias_or_weights == 0:\n # injection_index = tuple([self.rng.integers(0, i) for i in self.layer_shape[layer_index]])\n # else:\n # injection_index = tuple([self.rng.integers(self.network.layers[layer_index].get_weights()[1].size)])\n # # Value to inject\n # stuck_at_value = self.rng.integers(0, 1, endpoint=True)\n # # Compose the fault details in a list\n # fault_list_element = 
[layer_index, bias_or_weights, injection_index, self.rng.integers(0, 32), stuck_at_value]\n # if fault_list_element not in self.fault_list:\n # break\n # self.fault_list.append(fault_list_element)\n\n def inject_incremental_fault(self, increment_number):\n \"\"\"\n Inject new faults on top of those already present in the network. Fault injections are done layer by layer (i.e.\n we cycle trough all the layer and for each one of them we inject the corresponding fault from the incremental\n fault list). The update of the network weights is done once per layer.\n :param increment_number: The number of faults to inject on top of those already present\n \"\"\"\n def fault_injection(weights, bias, fault_list, layer_count):\n for i in np.arange(0, layer_count):\n # Get whether to inject a bias or a weight\n bias_or_weights = fault_list[i][1]\n # Get the index of the weight to inject\n injection_index = fault_list[i][2]\n # Get which bit to change\n injection_position = fault_list[i][3]\n # Get target value\n stuck_at_value = fault_list[i][4]\n # Perform the fault injection\n if bias_or_weights == 0: # Inject into the weights\n weights[injection_index] = FaultInjectorEngine.float32_stuck_at(float_number=weights[injection_index],\n position=injection_position,\n stuck_at_value=stuck_at_value)\n else: # Inject into Biases\n bias[injection_index] = FaultInjectorEngine.float32_stuck_at(float_number=bias[injection_index],\n position=injection_position,\n stuck_at_value=stuck_at_value)\n\n super().inject_incremental_fault_with_function(increment_number, fault_injection)\n\n def inject_up_to(self, target_number):\n \"\"\"\n Inject as many fault as need in order to reach the target number of faults in the network\n :param target_number: Target number of fault to have in the network\n \"\"\"\n\n increment = super().compute_increment(target_number)\n\n self.inject_incremental_fault(increment)\n\n # temp - Used for debugging\n def TEMP_load_fault_list(self, fault_list_location):\n\n with open(fault_list_location) as input_file:\n fault_list = input_file.read().splitlines()\n\n for fault in fault_list:\n fault_details = [int(x) for x in fault.split(' ')]\n self.fault_list.append([\n fault_details[0],\n [fault_details[6], fault_details[5], fault_details[3], fault_details[4]],\n fault_details[2],\n fault_details[1]])\n pass\n","repo_name":"GabrieleGavarini/FaultInjector","sub_path":"FaultInjector/StuckAtFaultInjector.py","file_name":"StuckAtFaultInjector.py","file_ext":"py","file_size_in_byte":8238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"72655501543","text":"import copy\nimport json\nimport logging\nimport os\n\nimport tweepy\nfrom confluent_kafka import Consumer, Producer\nimport sys\n\nfrom dotenv import load_dotenv\nfrom geopy import Nominatim\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\nfrom dendritic_cell_algorithm.signal_generator import remove_user_mentions, remove_urls, Signals\n\n\ndef delivery_report(err, msg):\n \"\"\" Called once for each message produced to indicate delivery result.\n Triggered by poll() or flush(). 
\"\"\"\n if err is not None:\n print('SignalGenerator: Message delivery failed: {}'.format(err))\n sys.stdout.flush()\n else:\n print('SignalGenerator: Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))\n sys.stdout.flush()\n\n\ndef startSignalGenerator(consumer_servers, consumer_group_id, consumer_offset, consumer_topic, producer_servers,\n producer_topic,\n consumer_key=None, consumer_secret=None, access_token=None,\n access_token_secret=None, bearer=None):\n c = Consumer({\n 'bootstrap.servers': consumer_servers,\n 'group.id': consumer_group_id,\n 'auto.offset.reset': consumer_offset\n })\n\n producer = Producer({'bootstrap.servers': producer_servers})\n server_topics = c.list_topics().topics\n\n if consumer_topic in server_topics:\n c.subscribe([consumer_topic])\n else:\n producer.produce(consumer_topic, key=\"INFO\", value=(\"Create topic \" + consumer_topic), callback=delivery_report)\n producer.flush()\n c.subscribe([consumer_topic])\n\n while True:\n msg = c.poll(1.0)\n\n if msg is None:\n continue\n\n if msg.error():\n print(\"SignalGenerator: Consumer error: {}\".format(msg.error()))\n sys.stdout.flush()\n continue\n\n if msg.key().decode('utf-8') == \"INFO\":\n if msg.value().decode('utf-8') == \"END\":\n producer.flush()\n producer.produce(producer_topic, key=\"INFO\", value=\"END\", callback=delivery_report)\n producer.flush()\n break\n else:\n continue\n print('SignalGenerator: Received message: {0} | {1}'.format(msg.value().decode('utf-8')[:50],\n msg.key().decode('utf-8')))\n\n tweet = json.loads(msg.value())\n tweet[\"created_at\"] = tweet[\"created_at\"].replace(\" +0000\", \"\")\n\n if \"truncated\" in tweet and \"retweeted_status\" in tweet:\n if tweet[\"truncated\"]:\n tweet[\"text\"] = tweet[\"retweeted_status\"][\"full_text\"]\n tweet[\"full_text\"] = tweet[\"retweeted_status\"][\"full_text\"]\n\n if not (\"full_text\" in tweet):\n tweet[\"full_text\"] = tweet[\"text\"]\n\n ##############################################################################\n\n if bearer is not None:\n auth = tweepy.OAuth2BearerHandler(bearer)\n else:\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth, retry_count=3, timeout=100000, wait_on_rate_limit=True)\n\n userObj = {}\n user = api.get_user(screen_name=tweet[\"user\"][\"screen_name\"])._json\n user.pop('status', None)\n userObj[\"user\"] = user\n userObj[\"found_tweet\"] = tweet\n\n geo_locator = Nominatim(user_agent=\"findBots\")\n if userObj[\"user\"][\"location\"]:\n try:\n location = geo_locator.geocode(userObj[\"user\"][\"location\"])\n logging.info(location)\n if location is None:\n logging.info(\"try to add loc\")\n userObj[\"coordinates\"] = \"\"\n except Exception as e:\n continue\n if location:\n logging.info(\"try to add loc\")\n userObj[\"coordinates\"] = [location.latitude, location.longitude]\n\n logging.info(\"sentiment!\")\n analyzer = SentimentIntensityAnalyzer()\n tweet_modified = remove_user_mentions(remove_urls(copy.deepcopy(userObj[\"found_tweet\"])))\n sentence = tweet_modified[\"full_text\"]\n sentiment = analyzer.polarity_scores(sentence)\n logging.info(sentence)\n logging.info(sentiment['compound'])\n if sentiment['compound'] >= 0.2:\n logging.info(\"Positive\")\n userObj[\"found_tweet\"][\"sentiment\"] = \"positive\"\n\n elif sentiment['compound'] <= - 0.2:\n logging.info(\"Negative\")\n userObj[\"found_tweet\"][\"sentiment\"] = \"negative\"\n\n else:\n logging.info(\"Neutral\")\n 
userObj[\"found_tweet\"][\"sentiment\"] = \"neutral\"\n\n userObj[\"tweets\"] = []\n for fulltweet in api.user_timeline(screen_name=tweet[\"user\"][\"screen_name\"],\n # max 200 tweets\n count=20,\n include_rts=False,\n # Necessary to keep full_text\n tweet_mode='extended'\n ):\n tw = fulltweet._json\n\n logging.info(\"sentiment!\")\n analyzer = SentimentIntensityAnalyzer()\n tweet_modified = remove_user_mentions(remove_urls(copy.deepcopy(tw)))\n sentence = tweet_modified[\"full_text\"]\n sentiment = analyzer.polarity_scores(sentence)\n logging.info(sentence)\n logging.info(sentiment['compound'])\n if sentiment['compound'] >= 0.1:\n logging.info(\"Positive\")\n tw[\"sentiment\"] = \"positive\"\n\n elif sentiment['compound'] <= - 0.2:\n logging.info(\"Negative\")\n tw[\"sentiment\"] = \"negative\"\n\n else:\n logging.info(\"Neutral\")\n tw[\"sentiment\"] = \"positive\"\n tw.pop('user', None)\n userObj[\"tweets\"].append(tw)\n\n new_signals = Signals()\n # friends_count, followers_count, verified, default_profile, default_profile_image, created_at, name,\n # screen_name, description, tweets\n new_signals.generate_signals(user[\"friends_count\"], user[\"statuses_count\"], user[\"followers_count\"],\n user[\"verified\"],\n user[\"default_profile\"],\n user[\"default_profile_image\"], user[\"created_at\"], user[\"name\"],\n user[\"screen_name\"],\n user[\"description\"],\n userObj[\"tweets\"])\n\n logging.info(new_signals.get_parameters())\n userObj[\"signals\"] = new_signals.get_parameters()\n\n ##############################################################################\n\n producer.produce(producer_topic, key=msg.key(), value=json.dumps(userObj), callback=delivery_report)\n producer.flush()\n print(\"SignalGenerator: Send \" + str(json.dumps(userObj))[:50])\n\n c.close()\n","repo_name":"rwth-acis/bot-detector","sub_path":"python_kafka/SignalGenerator.py","file_name":"SignalGenerator.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"} +{"seq_id":"14187050115","text":"import csv\n\ndef main():\n path = \"/Users/ArseneLupin/Documents/edy/experiment_new_cascade/scaled/subject_32.csv\"\n dest_path = \"/Users/ArseneLupin/Documents/edy/experiment_new_cascade/new_scaled/subject_32.csv\"\n\n temp_array=[]\n with open(path) as object_file:\n for line in object_file:\n raw_data = line.split()\n\n del raw_data[len(raw_data)-2]\n temp_array.append(raw_data)\n\n out_file = open(dest_path, \"w\")\n csv_writer = csv.writer(out_file, delimiter='\\t')\n\n for i in temp_array:\n csv_writer.writerow(i)\n\n out_file.close\n\nif __name__ == '__main__':\n main()\n","repo_name":"edysuardiyana/Staged-CC","sub_path":"file_changed.py","file_name":"file_changed.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"69930618343","text":"# pylint: disable=wildcard-import, unused-wildcard-import\nfrom .scraping import *\n\n\ndef rarbgScraper(soup: Soup, url: ParseResult, **_kwargs) -> List[Thing]:\n def path_to_key(path: str) -> str:\n return extract_path(path, 2, 1)\n\n def torrentPage():\n title = soup.select_one(\"td.block h1.black\")\n if title is None:\n raise ParseException(\"couldn't find the name\")\n title = scrape_text(title)\n key = path_to_key(url.path)\n\n return [Thing(name=title, key=key)]\n\n def searchPage():\n if soup.select_one(\"table.lista2t\") is None:\n raise ParseException(\"the HTML is not as 
expected\")\n\n results = []\n for tr in soup.find_all(\"tr\", class_=\"lista2\"):\n tds = list(tr.find_all(\"td\", class_=\"lista\", recursive=False))\n if len(tds) != 8:\n raise ParseException(\"unexpected amount of td-tags\")\n\n main_a = tds[1].a\n if main_a is None:\n raise ParseException(\"an a-tag was not found\")\n\n name = scrape_text(main_a)\n\n if \"href\" not in main_a.attrs:\n raise ParseException(\"the a-tag is missing href\")\n torrent_url = main_a[\"href\"]\n key = path_to_key(torrent_url)\n\n results.append(\n Thing(name=name, key=key, jsmark=change_color_on(main_a, \"black\"))\n )\n\n return results\n\n if url.path.startswith(\"/torrent/\"):\n return torrentPage()\n elif url.path.startswith(\"/torrents.php\"):\n return searchPage()\n else:\n raise UserError(\"wrong page\")\n","repo_name":"erikvader/dotfiles","sub_path":"rememberer/.pythonlibs/rememberer/scrapers/rarbg.py","file_name":"rarbg.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"28452924424","text":"import json\nimport pathlib\nimport argparse\nimport os\nimport subprocess\nimport shutil\n\nfrom pycparser import CParser\n\nfrom casting import get_type, update_types, add_type, parse_arguments\nfrom pause import make_pausable\nfrom globals_consts import NAMESPACE, cname, return_types, arguments\nfrom expression import generate_expression\nfrom function_trees import generate_trees\nfrom goto import goto_postprocess\nfrom optimizations import remove_zerolines, inline_oneline, remove_unused\nfrom temps import get_temp, stringss, get_position, register_space, clear_used, gtemps, new_cid\nfrom locals import set_rtype, create_local, reset_locals\n\nHEAP_SIZE = 2 ** 20\n\n\ndef main():\n argparser = argparse.ArgumentParser(description='Compile C to a Minecraft datapack')\n argparser.add_argument('file', metavar='Cfile', type=pathlib.Path, help='Path to the C file')\n argparser.add_argument('target', metavar='target', type=pathlib.Path, help='Location to write functions in')\n argparser.add_argument('--preprop', dest='preprop', action='store_const', const=True, default=False,\n help='Don\\'t run the C preprocessor on the file')\n argparser.add_argument('--file-input', help='Take constant input from file', dest='finput', metavar='file',\n default=False, type=pathlib.Path)\n program_args = argparser.parse_args()\n if program_args.preprop:\n preprocessed = open(program_args.file).read()\n else:\n preprocessed = subprocess.check_output(\n ['gcc', '-nostdinc', '-E', program_args.file, f'-I{os.path.dirname(os.path.abspath(__file__))}/libc']).decode()\n print(program_args.target)\n if program_args.finput:\n inp = list(reversed(program_args.finput.open('rb').read()))\n try:\n shutil.rmtree(program_args.target / 'functions')\n except IOError:\n pass\n os.mkdir(program_args.target / 'functions')\n os.mkdir(program_args.target / 'functions' / 'tree')\n copy_stdlib(program_args.target / 'functions')\n os.chdir(program_args.target / 'functions')\n parser = CParser()\n parsed = parser.parse(preprocessed)\n obj_name = {}\n vtypes = {}\n vars = {}\n methods = {}\n for global_definition in parsed.ext:\n if cname(global_definition) == 'Typedef':\n add_type(global_definition.name, get_type(global_definition.type, obj_name, vtypes))\n elif cname(global_definition) == 'Decl' and cname(global_definition.type) != 'FuncDecl':\n vars[global_definition.name] = global_definition\n global varaddr\n elif cname(global_definition) == 'Decl' and 
cname(global_definition.type) == 'FuncDecl':\n return_types[global_definition.name] = get_type(global_definition.type.type)\n arguments[global_definition.name] = parse_arguments(global_definition.type.args)\n elif cname(global_definition) == 'FuncDef':\n return_types[global_definition.decl.name] = get_type(global_definition.decl.type.type)\n arguments[global_definition.decl.name] = parse_arguments(global_definition.decl.type.args)\n # print(functions)\n methods[global_definition.decl.name] = global_definition\n else:\n print(cname(global_definition))\n update_types()\n file = open('main.mcfunction', 'w')\n file.write(generate_head(vars, obj_name, vtypes))\n for method in methods:\n f = open(f'method_{method.lower()}.mcfunction', 'w')\n set_rtype(return_types[method])\n args = methods[method].decl.type.args\n if args is None:\n args = []\n clear_used()\n reset_locals()\n j = 0\n for arg in args:\n if cname(arg) == 'EllipsisParam':\n j += 10\n for i in range(10): create_local()\n break\n var_type = arg.type\n vtypes[arg.name] = get_type(var_type)\n obj_name[arg.name] = [create_local() for i in range(get_type(arg.type).size)]\n j += vtypes[arg.name].size\n new_cid()\n f.write(generate_expression([], methods[method].body, vtypes, obj_name, False, True)[0])\n f.write(f'scoreboard players set $returned {NAMESPACE} 0')\n f.close()\n generate_trees()\n if program_args.finput:\n file.write(f'data modify storage {NAMESPACE}:main input set value {inp}\\n')\n file.write(f'data modify storage {NAMESPACE}:main rec set value []\\n')\n file.write(f'data modify storage {NAMESPACE}:main ibuffer set value []\\n')\n\n for strg in stringss:\n for i, c in enumerate(stringss[strg]):\n # TODO: change it to direct access to the heap, as index is known in compile time.\n file.write(f'scoreboard players set $index {NAMESPACE} {strg + i}\\n')\n file.write(f'scoreboard players set $value {NAMESPACE} {c}\\n')\n file.write(f'function {NAMESPACE}:set_heap\\n')\n file.write(f'function {NAMESPACE}:method_main\\n')\n file.close()\n goto_postprocess()\n remove_zerolines()\n inline_oneline()\n make_pausable({'method_getc', 'method__get_setjmp'})\n remove_unused()\n\n\ndef copy_stdlib(loc):\n stdlib_address = '/'.join(os.path.split(__file__)[:-1] + (\"stdlib\",))\n for file in os.listdir(stdlib_address):\n f1 = open(stdlib_address + '/' + file, 'r')\n f2 = open(loc / file, 'w')\n c = f1.read()\n\n f2.write(c.replace('namespace', NAMESPACE))\n f1.close()\n f2.close()\n\n\ndef init_heap(size):\n a = int(size ** 0.5)\n b = size // a\n assert a * b == size, f'{size} isn\\'t a prefect square' # otherwise we need to do factorization and it might\n # result in sub-optimal results\n thing = [{'value': [0] * b, 'selected': 0}] * a\n thing2 = [{'value': [0] * 1024, 'selected': 0, 'used': 0, 'index': i} for i in range(1024)]\n return f'data modify storage {NAMESPACE}:main heap set value {json.dumps(thing)}\\n' \\\n f'data modify storage {NAMESPACE}:main alloc set value {json.dumps(thing2)}\\n'\n\n\ndef generate_head(vars, store, vtypes):\n code = ''\n code += f'gamerule maxCommandChainLength 200000\\n'\n code += f'scoreboard objectives add {NAMESPACE} dummy\\n' \\\n f'scoreboard players set $-1 {NAMESPACE} -1\\n'\n code += f'scoreboard players set $65536 {NAMESPACE} 65536\\n'\n code += f'scoreboard players set $256 {NAMESPACE} 256\\n'\n code += f'scoreboard players set $128 {NAMESPACE} 128\\n'\n code += f'scoreboard players set $64 {NAMESPACE} 64\\n'\n code += f'data modify storage {NAMESPACE}:main temps set value []\\n'\n code += f'data 
modify storage {NAMESPACE}:main setjmp set value []\\n'\n code += f'scoreboard players set $32 {NAMESPACE} 32\\n'\n code += f'scoreboard players set $2 {NAMESPACE} 2\\n'\n code += f'scoreboard players set $3 {NAMESPACE} 3\\n'\n code += f'scoreboard players set $7 {NAMESPACE} 7\\n'\n code += f'scoreboard players set $5 {NAMESPACE} 5\\n'\n code += f'scoreboard players set $6 {NAMESPACE} 6\\n'\n code += f'scoreboard players set $4 {NAMESPACE} 4\\n'\n code += f'scoreboard players set $8 {NAMESPACE} 8\\n'\n code += f'scoreboard players set $stackSize {NAMESPACE} 0\\n' # NOTE: multiplied by 1024\n code += f'data modify storage {NAMESPACE}:main lstack set value []\\n'\n code += f'scoreboard players set $1 {NAMESPACE} 1\\n'\n code += f'scoreboard players set $1024 {NAMESPACE} 1024\\n'\n code += f'scoreboard players set $1073741824 {NAMESPACE} 1073741824\\n'\n code += f'scoreboard players set $16777216 {NAMESPACE} 16777216\\n'\n code += f'scoreboard players set $8388608 {NAMESPACE} 8388608\\n'\n code += f'scoreboard players set $-inf {NAMESPACE} -2147483648\\n'\n code += f'scoreboard players set $returned {NAMESPACE} 0\\n'\n code += f'scoreboard players set $broken {NAMESPACE} 0\\n'\n code += f'scoreboard players set $lasta {NAMESPACE} -1\\n'\n code += f'scoreboard players set $lastb {NAMESPACE} -1\\n'\n code += f'scoreboard players set $setjmpctr {NAMESPACE} -1\\n'\n code += f'scoreboard players set $1048576 {NAMESPACE} 1048576\\n'\n code += f'scoreboard players set $61681 {NAMESPACE} 61681\\n'\n code += f'scoreboard players set $33554432 {NAMESPACE} 33554432\\n'\n code += f'scoreboard players set $16777216 {NAMESPACE} 16777216\\n'\n code += f'scoreboard players set $134217728 {NAMESPACE} 134217728\\n'\n code += f'scoreboard players set $16 {NAMESPACE} 16\\n'\n code += f'scoreboard players set $2097152 {NAMESPACE} 2097152\\n'\n code += f'scoreboard players set $2048 {NAMESPACE} 2048\\n'\n code += f'scoreboard players set $4096 {NAMESPACE} 4096\\n'\n code += init_heap(HEAP_SIZE)\n for var_name in vars:\n var = vars[var_name]\n var_type = var.type\n vtypes[var_name] = get_type(var_type)\n store[var_name] = [get_temp() for _ in range(vtypes[var_name].size)]\n gtemps.extend(store[var_name])\n if cname(var_type) == 'ArrayDecl':\n size = int(var_type.dim.value)\n code += f'scoreboard players set {store[var_name][0]} {NAMESPACE} {get_position()}\\n'\n register_space(size * vtypes[var_name].ptr.size)\n else:\n for varrr in store[var_name]:\n code += f'scoreboard players set {varrr} {NAMESPACE} 0\\n'\n for var_name in vars:\n var = vars[var_name]\n if var.init is not None:\n code += generate_expression(store[var_name], var.init, vtypes, store, True, False)[0]\n return code\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Command-Master/MCCC","sub_path":"src/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"36"} +{"seq_id":"36714557267","text":"import pandas as pd\nfrom dash import html, dcc, Output, Input\nfrom dash.development.base_component import Component\n\nfrom datafiles.views.view import View, DictView, DfView\nfrom dataviz.irenderer import IDataStudyRenderer\nimport plotly.graph_objects as go\n\nfrom dataviz.src.components.iplot import IPlot\n\n\nclass PieChart(IPlot):\n _name = \"pie chart\"\n\n @classmethod\n def name(cls) -> str:\n return cls._name\n\n # TODO: Make sure this is robust\n pulled = {}\n\n @staticmethod\n def new(plot_id: str, renderer: 
IDataStudyRenderer, source: View, *args,\n **kwargs) -> Component:\n plot_name = kwargs.get(\"plot_name\", source.name)\n if isinstance(source, DfView):\n data = source.data\n labels = kwargs.pop(\"names\")\n values = kwargs.pop(\"values\")\n if labels is None or values is None:\n labels, values = check_columns(data)\n labels = data[labels]\n values = data[values]\n elif isinstance(source, DictView):\n labels = [str(key) for key in source.data.keys()]\n values = [str(val) for val in source.data.values()]\n\n # We assumed that the keys were the strings (categories) and the\n # values were the numeric, but it could be the opposite,\n # so we check where the numeric values are.\n # We assumed a dict like {\"cat1\": 1, \"cat2\": 2}\n # We check for a dict like {1: \"cat1\", 2: \"cat2\"}\n\n if (all([is_number(label) for label in labels]) and\n not all([is_number(value) for value in values])):\n labels, values = values, labels\n values = [float(val) for val in values]\n else:\n raise NotImplementedError()\n\n plot = html.Div(\n className=\"plot\",\n children=[\n html.Div(\n children=IPlot.get_header(plot_id, plot_name)\n ),\n dcc.Graph(id=f\"{source.name}_{plot_id}_graph\"),\n ]\n )\n\n @renderer.app.callback(\n Output(f\"{source.name}_{plot_id}_graph\", \"figure\"),\n [Input(f\"{source.name}_{plot_id}_graph\", \"clickData\")]\n )\n def update_graph(clickData):\n pulled = PieChart.pulled.get(source.name)\n\n layout = kwargs.pop(\"layout\", {})\n layout.update(\n template='plotly_dark',\n plot_bgcolor='rgba(0, 0, 0, 0)',\n paper_bgcolor='rgba(0, 0, 0, 0)',\n )\n pull = [0] * len(labels)\n if clickData is not None:\n if pulled == clickData[\"points\"][0][\"pointNumber\"]:\n pulled = None\n else:\n pulled = clickData[\"points\"][0][\"pointNumber\"]\n value = clickData[\"points\"][0][\"value\"]\n print(value)\n pull[int(pulled)] = ((0.1 + ((1 - value) * 0.3))\n / sum(values))\n PieChart.pulled[source.name] = pulled\n\n fig = go.Figure(\n data=[go.Pie(\n labels=labels,\n values=values,\n pull=pull\n )], layout=layout\n )\n\n return fig\n\n return plot\n\n @staticmethod\n def config_panel(selected_view: View) -> list[Component]:\n if isinstance(selected_view, DictView):\n return []\n elif isinstance(selected_view, DfView):\n return [\n IPlot.html_dropdown(\"Names\", 0, selected_view.data.columns),\n IPlot.html_dropdown(\"Values\", 1, selected_view.data.columns)\n ]\n\n @staticmethod\n def are_plot_args_valid(plot_args: list, selected_view: View) -> bool:\n if isinstance(selected_view, DictView):\n return True\n elif isinstance(selected_view, DfView):\n return all(plot_args) and plot_args\n raise NotImplementedError()\n\n @staticmethod\n def from_config(plot_id: str, renderer: IDataStudyRenderer, plot_args: list,\n selected_view: View):\n if isinstance(selected_view, DictView):\n return PieChart.new(plot_id, renderer, selected_view)\n elif isinstance(selected_view, DfView):\n return PieChart.new(plot_id, renderer, selected_view,\n names=plot_args[0], values=plot_args[1])\n\n\ndef check_columns(df):\n string_col = None\n scalar_col = None\n\n if len(df.columns) != 2:\n raise ValueError(\n \"Ambiguous data, got a dataframe that does not have two columns.\"\n \"Use another view or pass labels and values arguments.\"\n )\n\n # Iterate over columns\n for column in df.columns:\n if df[column].dtype == object:\n if string_col is None:\n string_col = column\n else:\n raise ValueError(\"Multiple string columns found.\")\n elif pd.api.types.is_numeric_dtype(df[column]):\n if scalar_col is None:\n scalar_col = 
column\n else:\n raise ValueError(\"Multiple scalar columns found.\")\n\n if string_col is None or scalar_col is None:\n raise ValueError(\"Both string and scalar columns are not present.\")\n\n return string_col, scalar_col\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n","repo_name":"adangreputationsquad/theriver","sub_path":"dataviz/src/components/pie_charts.py","file_name":"pie_charts.py","file_ext":"py","file_size_in_byte":5478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"7120057062","text":"# -*- coding: utf-8 -*-\n\n# ***************************************************\n# * File : restapi.py\n# * Author : Zhefeng Wang\n# * Email : wangzhefengr@163.com\n# * Date : 2023-04-27\n# * Version : 0.1.042722\n# * Description : description\n# * Link : https://github.com/ultralytics/yolov5/tree/master/utils/flask_rest_api\n# * Requirement : 相关模块版本需求(例如: numpy >= 2.1.0)\n# ***************************************************\n\n# python libraries\nimport io\nimport argparse\n\nimport torch\nfrom flask import Flask, request\nfrom PIL import Image\n\n# global variable\nLOGGING_LABEL = __file__.split('/')[-1][:-3]\nDETECTION_URL = \"/v1/object-detection/\"\n\napp = Flask(__name__)\nmodels = {}\n\n@app.route(DETECTION_URL, method = [\"POST\"])\ndef predict(model):\n if request.method != \"POST\":\n return\n \n if request.files.get(\"image\"):\n # method 1\n # with request.files[\"image\"] as f:\n # im = Image.open(io.BytesIO(f.read()))\n # method 2\n im_file = request.files[\"image\"]\n im_bytes = im_file.read()\n im = Image.open(io.BytesIO(im_bytes))\n # inference\n if model in models:\n results = models[model](im, size = 640) # reduce size=320 for faster inference\n return results.pandas().xyxy[0].to_json(orient = \"records\")\n\n\n\n\n# 测试代码 main 函数\ndef main():\n # arg parse\n parser = argparse.ArgumentParser(description = \"Flask API exposing YOLOv5 model\")\n parser.add_argument(\"--port\", default = 5000, type = int, help = \"port number\")\n parser.add_argument(\"--model\", nargs = \"+\", default = [\"yolov5s\"], help = \"model(s) to run, i.e. 
--model yolov5n yolov5s\")\n opt = parser.parse_args()\n # model load\n for m in opt.model:\n models[m] = torch.hub.load(\n \"ultralytics/yolov5\", \n m, \n force_reload = True, \n skip_validation = True\n )\n # app run \n app.run(host = \"0.0.0.0\", port = opt.port) # debug=True causes Restarting with stat\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wangzhefeng/yolo","sub_path":"experiment/yolo5/flask_app/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"3737357854","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.template import loader\nfrom .models import Our_Service\nfrom .models import Dental_Procedure\nfrom .models import Blog_Post\nfrom .models import Testimonial\n\n\ndef home(request):\n\tservice_list = Dental_Procedure.objects.all()\n\ttestimonial_list = Testimonial.objects.all()\n\tblog_list = Blog_Post.objects.all()\n\tcontext = {'service_list':service_list,'testimonial_list':testimonial_list,'blog_list':blog_list}\n\treturn render(request, 'home.html', context)\n\n\n\t\n\n\t\n\ndef contact(request):\n\tif request.method == \"POST\":\n\t\tmessage_name = request.POST.get('message-name')\n\t\tmessage_email = request.POST.get('message-email')\n\t\tmessage = request.POST.get('message')\n\t\n\n\n\t\t\n\n\t\t#send email\n\t\tsend_mail(\n\t\t'Message from ' + message_email, #subject\n\t\tmessage, #message\n\t\tmessage_email, # from email\n\t\t['lacson.joandale@dfcamclp.edu.ph'], # to email\n\t\tfail_silently=False\n\t\t)\n\t\treturn render(request, 'contact.html', {'message_name': message_name})\n\n\n\telse:\n\t\t\n\t\treturn render(request, 'contact.html', )\n\n\ndef about(request):\n\n\ttestimonial_list = Testimonial.objects.all()\n\treturn render(request, 'about.html', {'testimonial_list': testimonial_list})\n\t\n\n\n\ndef pricing(request):\n\tservice_list = Dental_Procedure.objects.all()\n\treturn render(request, 'pricing.html', {'service_list': service_list})\n\n\ndef service(request):\n\ttestimonial_list = Testimonial.objects.all()\n\tblog_list = Blog_Post.objects.all()\n\tcontext = {'testimonial_list':testimonial_list,'blog_list':blog_list}\n\treturn render(request, 'service.html', context)\ndef appointment1(request):\n\n\tif request.method == \"POST\":\n\t\tyour_name = request.POST.get('your-name')\n\t\tyour_phone = request.POST.get('your-phone')\n\t\tyour_email = request.POST.get('your-email')\n\t\tyour_address = request.POST.get('your-address')\n\t\tyour_schedule = request.POST.get('your-schedule')\n\t\tyour_date = request.POST.get('your-date')\n\t\tyour_message = request.POST.get('your-message')\n\t\t\n\t\n\n\t\t#send email\n\t\tappointment = \"Name:\" + your_name + \"\\n\" +\" Phone: \" + your_phone + \"\\n\" +\"Email: \" + your_email + \"\\n\" +\"Address: \" + your_address + \"\\n\" +\"Schedule: \" + your_schedule + \"\\n\" +\"Date: \" + your_date + \"\\n\" +\"Message: \" + your_message\n\t\t\n\t\tsend_mail(\n\t\t'Appointment Request', #subject\n\n\t\tappointment, #message\n\t\tyour_email, # from email\n\t\t['lacson.joandale@dfcamclp.edu.ph'], # to email\n\t\tfail_silently=False\n\t\t)\n\t \t\n\t\treturn render(request, 'appointment.html', {\n\t\t\t'your_name': your_name,\n\t\t\t'your_phone': your_phone,\n\t\t\t'your_email': your_email,\n\t\t\t'your_address': your_address, \n\t\t\t'your_schedule': your_schedule,\n\t\t\t'your_date': 
your_date,\n\t\t\t'your_message': your_message\n\t\t\t})\n\n\t\n\t\t\n\n\telse:\n\t\t\n\t\treturn render(request, 'home.html', )\n\ndef appointment(request):\n\n\treturn render(request, 'appointment.html', {})\n\ndef blog(request):\n\tblog_list = Blog_Post.objects.all()\n\n\treturn render(request, 'blog.html', {'blog_list': blog_list})\n\n\t\n\n\n\n\n\t\n\n\n\n","repo_name":"latotraymart/DentistDjango","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"2364300989","text":"from seznam_mereni_1 import Ui_Dialog\nfrom PyQt5 import QtWidgets, uic,QtSql,QtCore,QtGui\nfrom PyQt5.QtSql import QSqlDatabase,QSqlQuery,QSqlQueryModel\nimport sqlite3 as sql\nimport sys\nfrom data import Databaze\n\nclass Seznam_mereni(QtWidgets.QDialog,Ui_Dialog):\n\n def __init__(self, cesta_nazvu):\n super().__init__()\n self.setupUi(self)\n self.cesta_nazvu=cesta_nazvu\n\n\n\n # # nacte nazev aktualniho projektu ze souboru \"nazev.txt\"\n # with open(cesta_nazvu) as n:\n # nazev=n.readlines()\n #\n # nazev=nazev[0]\n # cesta_inv=cesta_nazvu[::-1] #invertuje cestu\n # pozice=cesta_inv.find('/') #najde poradi lomitka\n # cesta_konecna=cesta_nazvu[0:len(cesta_nazvu)-pozice] #udela cestu adresare bez nazvu souboru\n #\n # databaze=cesta_konecna+nazev #vytvori cestu+nazev databaze\n\n\n\n #otevreni databaze\n db1 = QSqlDatabase.addDatabase(\"QSQLITE\",\"db1\")\n # db = QSqlDatabase.addDatabase(\"\")\n db1.setDatabaseName(cesta_nazvu)\n db1.open()\n\n # vytvori model databaza a nacte data\n projectModel1 = QSqlQueryModel()\n # projectModel.setQuery('select Stanovisko,Orientace, Delka,Zenitka, Smer, Kod from gps_sour',db)\n projectModel1.setQuery('select Stanovisko,Orientace,Delka,Zenitka,Smer, Kod from mereni',db1)\n self.tableView.setModel(projectModel1)\n # self.tableView.setColumnWidth(1,5)\n\n db1.close()\n del projectModel1\n del db1\n\n QSqlDatabase.removeDatabase(\"db1\")\n self.show()\n self.exec()\n\nif __name__ == \"__main__\":\n app=QtWidgets.QApplication([])\n okno=Seznam_mereni()\n okno.show()\n sys.exit(app.exec_())\n","repo_name":"ctu-yobp/2020-a","sub_path":"app/seznam_mereni_2.py","file_name":"seznam_mereni_2.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"39256774111","text":"import re\nfrom cpp_gen import MacroFunction, CppFile, Function\nfrom cpp_parser import CPPParser, DetectedMethod\nfrom mockclass_gen import MockClass, create_mock_class\n\n\ndef find_every_include(file, isfilename=True, delete_keyword=False):\n result = []\n if isfilename:\n f = open(file)\n result = re.findall(r'#include .*[\">]\\n', open(file).read())\n f.close()\n else:\n result = re.findall(r'#include .*[\">]\\n', open(file).read())\n for s in range(len(result)):\n result[s] = result[s].rstrip()\n if delete_keyword:\n result[s] = result[s].replace(\"#include \", \"\")\n return result\n\n\n# This returns a MacroFunction object\ndef make_empty_test(test_name, sub_test_name):\n test = MacroFunction(\"TEST\", test_name, sub_test_name)\n test.add_comment(\"Arrange\")\n test.add_white_space(amount=2)\n test.add_comment(\"Act\")\n test.add_white_space(amount=2)\n test.add_comment(\"Assert\")\n test.add_white_space(amount=2)\n return test\n\n\n# Returns a CppFile object, which can be used to print to console or write to a file\ndef make_full_file(filename, 
transfer_class_to_new_file=False):\n cpp = CppFile()\n file = open(filename)\n parser = CPPParser(file)\n parser.detect_methods()\n file.close()\n\n # Generate mock class\n mc = create_mock_class(parser)\n\n # Generate Tests\n for i in range(len(parser.methods)):\n cpp.add_component(make_empty_test(parser.detected_class_name, parser.methods[i].name + str(i)))\n\n\n cpp.add_include(\"iostream\")\n cpp.add_include(\"gtest/gtest.h\")\n cpp.add_include(\"\\\"gmock/gmock.h\\\"\")\n\n # Adds class to file or includes class it came from\n # leaving this functionality out for now, can cause complications that we cannot fix yet\n # if transfer_class_to_new_file:\n # cpp.add_component(parser)\n # includes = find_every_include(filename, delete_keyword=True)\n # for i in includes:\n # if (\"iostream\" not in i) and (\"gtest/gtest.h\" not in i):\n # cpp.add_include(i)\n # else:\n cpp.add_include(\"\\\"\" + filename + \"\\\"\")\n\n cpp.add_component(mc.get_class())\n\n # Generates and adds the main for running tests, could make this a separate function\n run_tests = Function(\"int\", \"main\", \"int argc\", \"char **argv\")\n run_tests.add_statement(\"testing::InitGoogleTest(&argc, argv)\")\n run_tests.add_return(\"RUN_ALL_TESTS()\")\n\n cpp.add_component(run_tests)\n\n return cpp\n\n\ndef make_empty_test_suite(filename):\n cpp = CppFile()\n file = open(filename)\n parser = CPPParser(file)\n parser.detect_methods()\n file.close()\n\n # Generate Tests\n for i in range(len(parser.methods)):\n cpp.add_component(make_empty_test(parser.detected_class_name, parser.methods[i].name + str(i)))\n\n cpp.add_include(\"iostream\")\n cpp.add_include(\"gtest/gtest.h\")\n cpp.add_include(\"\\\"\" + filename + \"\\\"\")\n\n # Generates and adds the main for running tests, could make this a separate function\n run_tests = Function(\"int\", \"main\", \"int argc\", \"char **argv\")\n run_tests.add_statement(\"testing::InitGoogleTest(&argc, argv)\")\n run_tests.add_return(\"RUN_ALL_TESTS()\")\n\n cpp.add_component(run_tests)\n\n return cpp\n","repo_name":"AFMS-Rowan-Software-Projects/gMock-Test-Framework-Project-Spring2020","sub_path":"full_file_creator.py","file_name":"full_file_creator.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"10344742820","text":"from UM.Extension import Extension\nfrom cura.CuraApplication import CuraApplication\n\nclass VersionInTitlebarPlugin(Extension):\n def __init__(self):\n super().__init__()\n\n self._application = CuraApplication.getInstance()\n\n self._print_information = None\n self._main_window = None\n\n self._version_string = self._application.getVersion()\n self._application.engineCreatedSignal.connect(self._engineCreated)\n\n self._application_name = \"\"\n\n def _engineCreated(self):\n try:\n engine = self._application._qml_engine\n except AttributeError:\n engine = self._application._engine\n self._main_window = engine.rootObjects()[0]\n\n try:\n self._application_name = self._application.getApplicationDisplayName()\n except AttributeError:\n self._application_name = self._main_window.title()\n\n self._print_information = self._application.getPrintInformation()\n self._print_information.jobNameChanged.connect(self._updateWindowTitle)\n\n self._updateWindowTitle()\n\n def _updateWindowTitle(self):\n # set window title\n self._main_window.setTitle(\"%s - %s %s\" % (self._print_information.jobName, self._application_name, 
self._version_string))\n\n","repo_name":"fieldOfView/Cura-VersionInTitlebarPlugin","sub_path":"VersionInTitlebarPlugin.py","file_name":"VersionInTitlebarPlugin.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"19842344715","text":"# coding: utf-8\nimport argparse\nimport torch\nimport math\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport time\n\nimport data_fnn as data\nimport model\n\nparser = argparse.ArgumentParser(description='PyTorch Wikitext-2 FNN Language Model')\nparser.add_argument('--data', type=str, default='./data/wikitext-2',\n help='location of the data corpus')\nparser.add_argument('--emsize', type=int, default=200,\n help='size of word embeddings')\nparser.add_argument('--hidd', type=int, default=100,\n help='number of hidden units per layer')\nparser.add_argument('--contsz', type=float, default=7,\n help='context size (8-gram -> contsz = 7)')\nparser.add_argument('--epochs', type=int, default=8,\n help='upper epoch limit')\nparser.add_argument('--batch_size', type=int, default=512, metavar='N',\n help='batch size')\nparser.add_argument('--tied', action='store_true',\n help='tie the embedding weights and output weights (when using tied, contsz must be equal to hidd')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--save', type=str, default='model.pt',\n help='path to save the final model')\nparser.add_argument('--lr', type=float, default=1e-3,\n help='initial learning rate')\n\n\nargs = parser.parse_args()\nEMBEDDING_DIM = args.emsize\nCONTEXT_SIZE = args.contsz\nBATCH_SIZE = args.batch_size\n\n# hidden units\nH = args.hidd\ntorch.manual_seed(42)\nlearn_rate = args.lr\ntied = True if args.tied else False\n\n\n###############################################################################\n# Helper Functions\n###############################################################################\ndef ngram_split(orig_corpus, dataset, n):\n # This function breaks corpus into [context, target]\n # For e.g., in trigram, the tensor returned would be [C(n-2), C(n-1), T]\n ngram = []\n data_len = len(dataset)\n eos_id = corpus.dictionary.word2idx[''] \n for i, tokenid in enumerate(dataset):\n if i+n3][0]\n print(\"Dwell time (Poisson) =\",t[time_index],\"days\")","repo_name":"stw4198/LEARN","sub_path":"veto.py","file_name":"veto.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"29779796450","text":"\"\"\"\nCreate a function that determines whether a point \nwith given coordinates (x, y) lies inside a circle of radius r with center at (0, 0), \nand displays a message based on the result.\n\"\"\"\n\ndef distance_check(x, y, r):\n point_distance = x ** 2 + y ** 2\n if point_distance <= r:\n print(\"Монетка где-то рядом\")\n else:\n print(\"Монетки в области нет\")\n\n\nprint(\"Введите координаты монетки:\")\npos_x = float(input(\"X: \"))\npos_y = float(input(\"Y: \"))\nradius = float(input(\"Введите радиус:\" ))\n\ndistance_check(pos_x, pos_y, radius)\n\n\n\n","repo_name":"AfoninSV/python_scripts","sub_path":"coin_finder.py","file_name":"coin_finder.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"39174029806","text":"from django.shortcuts import 
render, redirect\nimport hashlib\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom rest_framework.response import Response\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom django.contrib import messages\nfrom .serializers import UserSerializer\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\n\n@api_view(['POST'])\ndef login_user(request):\n username = request.data['username']\n password = request.data['password']\n user = authenticate(request, username=username, password=password)\n print(user)\n if user is not None:\n login(request, user)\n return Response(\"User logged in\", status=status.HTTP_200_OK)\n else:\n messages.success(request, \"No such user exists, please sign up.\")\n return Response(\"User doesn't exist\", status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef logout_user(request):\n logout(request)\n messages.success(request, \"You were logged out\")\n return Response(\"User logged out\", status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef get_all_users(request):\n User = get_user_model()\n users = User.objects.values()\n context = {'users': users}\n return Response(context, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef create_user(request):\n new_user = UserSerializer(data=request.data)\n if new_user.is_valid():\n user_saved = new_user.create(request.data)\n return Response('User {} created'.format(user_saved.username), status=status.HTTP_200_OK)\n else:\n return Response('User not created', status=status.HTTP_200_OK)\n\n\n@api_view(['DELETE'])\ndef delete_user(request):\n user = User.objects.get(id=request.data['id'])\n user.delete()\n return Response('User deleted')\n\n\n# @api_view(['GET'])\n# def see_user(request):\n# username = User.username\n# return Response(user)\n","repo_name":"miloshIra/Kavra","sub_path":"kavsite/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"9195709483","text":"import torch\nimport math\nimport numpy as np\n\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom typing import Union\n\nfrom vq_vae_text.modules import Quantize, GeometricCategoricalDropout, CategoricalNoise, SlicedQuantize, ChannelWiseLayerNorm, wn_conv_transpose1d, wn_conv1d, wn_linear, Attention\nfrom vq_vae_text.models.transformer import PositionalEncoding\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channel, channel, kernel_size=3, padding=1, dilation=1):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ELU(),\n nn.Conv1d(in_channel, channel, kernel_size=kernel_size, padding=padding + 2, dilation=3),\n\n nn.ELU(),\n nn.Conv1d(channel, in_channel, kernel_size=kernel_size, padding=padding),\n )\n\n def forward(self, input):\n return self.conv(input) + input\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channel, channel, res_channel, n_res_blocks=2):\n super().__init__()\n\n self.first = nn.Conv1d(in_channel, channel, kernel_size=4, stride=2, padding=1)\n\n blocks = []\n for i in range(n_res_blocks):\n blocks.append(ResBlock(channel, res_channel))\n\n self.blocks = nn.Sequential(*blocks)\n\n self.attention = Attention(channel, channel // 4, channel, n_heads=1)\n\n def forward(self, input):\n out = keys = self.first(input)\n out = self.blocks(out)\n out = out + self.attention(out, out)\n return out\n\n\nclass 
Decoder(nn.Module):\n def __init__(self, channel, out_channel, res_channel, n_res_blocks=2):\n super().__init__()\n\n blocks = []\n\n for i in range(n_res_blocks):\n blocks.append(ResBlock(channel, res_channel))\n\n self.blocks = nn.Sequential(*blocks)\n\n self.attention = Attention(channel, channel // 4, channel, n_heads=1)\n\n self.final = nn.ConvTranspose1d(channel, out_channel, kernel_size=4, stride=2, padding=1)\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n out = self.blocks(input)\n out = out + self.attention(out, out)\n out = self.final(out)\n return out\n\n\nclass TextCNNV2(nn.Module):\n def __init__(self,\n vocab_size: int,\n channel: int,\n res_channel: int,\n n_res_blocks: int,\n n_encoders: int,\n tau: float,\n pad_idx: Union[None, int],\n input_noise=0.0,\n embed_dropout=0.0,\n num_vq_embeds: int = 512,\n vq_embeds_dim: int = None,\n vq_loss_alpha=0.25,\n vq_decay=0.99,\n ignore_quant=False):\n super().__init__()\n\n self.vocab_size = vocab_size\n self.pad_idx = pad_idx\n self.tau = tau\n self.vq_loss_alpha = vq_loss_alpha\n self.ignore_quant = ignore_quant\n\n self.vq_loss = 0\n self.nll_loss = 0\n self.acc = 0\n\n self.vq_blend = 0.0\n self.blend_steps = 5000\n self.blend_step = 0\n\n self.vq_embeds_dim = vq_embeds_dim\n\n self.input_noise = CategoricalNoise(vocab_size, input_noise)\n self.embed_dropout = nn.Dropout(embed_dropout)\n\n self.embed = nn.Embedding(vocab_size, channel, padding_idx=pad_idx, max_norm=1.0)\n\n self.encoder = nn.Sequential(*[Encoder(channel, channel, res_channel, n_res_blocks)\n for i in range(n_encoders)])\n self.decoder = nn.Sequential(*[Decoder(channel, channel, res_channel, n_res_blocks)\n for i in range(n_encoders)[::-1]])\n\n self.conv_to_quant = nn.Conv1d(channel, vq_embeds_dim, kernel_size=1)\n self.quant_to_conv = nn.Conv1d(vq_embeds_dim, channel, kernel_size=1)\n\n self.quantize = Quantize(dim=vq_embeds_dim, n_embed=num_vq_embeds, decay=vq_decay)\n\n self.conv_to_logits = nn.Conv1d(channel, vocab_size, kernel_size=1)\n\n self.nll = nn.NLLLoss(reduction='none', ignore_index=self.pad_idx)\n\n def get_quantization_layers(self):\n return [self.quantize]\n\n def encode(self, input):\n out = self.embed(input)\n out = out.permute(0, 2, 1)\n\n out = self.encoder(out)\n out = self.conv_to_quant(out).permute(0, 2, 1)\n\n quant, diff, code = self.quantize(out)\n quant = quant.permute(0, 2, 1)\n\n return [quant], diff, [code]\n\n def decode(self, quants):\n quant = quants[0]\n quant = self.quant_to_conv(quant)\n out = self.decoder(quant)\n logits = self.conv_to_logits(out)\n logits = logits.permute(0, 2, 1)\n\n logits = logits / self.tau\n logp_probs = F.log_softmax(logits, dim=-1)\n\n return logp_probs\n\n def decode_code(self, codes):\n code = codes[0]\n quant = self.quantize.embed_code(code)\n quant = quant.permute(0, 2, 1)\n x = self.decode([quant])\n return x\n\n def forward(self, x):\n z, diff, ids = self.encode(x)\n logp = self.decode(z)\n\n self.blend_step += 1\n\n return logp, z, diff, ids\n\n def compute_accuracy(self, recon_probs, target, mask=None):\n if mask is None:\n mask = torch.ones_like(target)\n\n lens = mask.sum(-1).float()\n corr = ((recon_probs.argmax(-1) == target) * mask).sum(-1) / lens\n\n acc = corr.double().mean().item()\n\n return acc\n\n def loss_function(self, inputs, target):\n logp, z, diff, ids = inputs\n\n if self.pad_idx is not None:\n mask = target != self.pad_idx\n else:\n mask = torch.ones_like(target)\n lens = mask.sum(-1).float()\n\n acc = self.compute_accuracy(logp, target, 
mask=mask)\n\n bs = logp.size(0)\n\n logp = logp.view(-1, logp.size(-1))\n target = target.reshape(-1)\n\n nll_loss = self.nll(logp, target).view(bs, -1) * mask\n nll_loss = (nll_loss.sum(-1) / lens).mean()\n\n self.vq_loss = diff\n self.nll_loss = nll_loss\n self.acc = acc\n\n return self.nll_loss + self.vq_loss_alpha * self.vq_loss\n\n def latest_losses(self):\n return {\n 'nll': self.nll_loss,\n 'ppl': math.exp(self.nll_loss),\n 'bpc': self.nll_loss / math.log(2),\n 'vq': self.vq_loss,\n 'acc': self.acc,\n }\n","repo_name":"kklemon/text-gan-experiments","sub_path":"legacy/vq_vae_text/vq_vae_text/models/textcnnv2.py","file_name":"textcnnv2.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34174169757","text":"def solution(park, routes): \n answer = [0,0] \n for i in range(len(park)): \n for j in range(len(park[0])): \n if park[i][j] == 'S': \n answer = [i, j] \n\n move = { 'E': [0, 1], 'W': [0, -1], 'S': [1, 0], 'N': [-1, 0] }\n \n for route in routes:\n op, num = route.split(' ') \n num = int(num) \n nx, ny = answer \n\n step = 0 \n while step < num: \n nx += move[op][0] \n ny += move[op][1]\n\n if nx < 0 or len(park) <= nx or ny < 0 or len(park[0]) <= ny or park[nx][ny] == 'X': #x y축이 경계를 벗어났는지 확인, X(장애물)을 만났는지 확인\n break \n step += 1 #\n\n if step == num: \n answer = [nx, ny] \n\n return answer","repo_name":"zozeyx/programmers_1","sub_path":"공원 산책.py","file_name":"공원 산책.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"8668496289","text":"from ..lib import FileSystem\nfrom ..exception import CTERAException\nfrom .base_command import BaseCommand\n\n\nclass UploadTaskStatus():\n IN_PROGRESS = 0\n COMPLETE = 1\n FAIL = -1\n\n\nclass Firmware(BaseCommand):\n \"\"\" Gateway Firmware upgrade API \"\"\"\n\n def __init__(self, gateway):\n super().__init__(gateway)\n self._filesystem = FileSystem.instance()\n\n def upgrade(self, file_path, reboot=True, wait_for_reboot=True):\n \"\"\"\n Upgrade the Filer firmware with the provided file\n\n :param str file_path: Path to the local file to upload\n :param bool,optional reboot: Perform reboot after uploading the new firmware, defaults to True\n :param bool,optional wait_for_reboot: Wait for reboot to complete (if reboot is performed), defaults to True\n \"\"\"\n upload_task_info = self._upload_firmware(file_path)\n if upload_task_info.rc != 0:\n raise CTERAException(message='Failed to upload the new firmware', path=file_path)\n self._wait_for_completion(upload_task_info.taskPointer)\n if reboot:\n self._gateway.power.reboot(wait=wait_for_reboot)\n\n def _upload_firmware(self, file_path):\n file_info = self._filesystem.get_local_file_info(file_path)\n with open(file_path, 'rb') as fd:\n return self._gateway.upload(\n 'proc/firmware',\n dict(\n name='upload',\n firmware=(file_info['name'], fd, file_info['mimetype'][0])\n )\n )\n\n def _wait_for_completion(self, task_pointer):\n while True:\n task_status = self._gateway.get(task_pointer)\n is_running = task_status.status == UploadTaskStatus.IN_PROGRESS\n if not is_running:\n if task_status.status == UploadTaskStatus.COMPLETE:\n return\n raise CTERAException(\n message=f'Filer failed to receive the new firmware - {task_status.statusMessage}',\n instance=task_status\n 
)\n","repo_name":"ctera/ctera-python-sdk","sub_path":"cterasdk/edge/firmware.py","file_name":"firmware.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"36"} +{"seq_id":"11252050390","text":"from typing import Optional\nfrom PyQt6.QtCore import QModelIndex, QUrl, Qt, pyqtSignal\nfrom PyQt6.QtGui import QColor, QIcon, QPixmap\nfrom PyQt6.QtNetwork import QNetworkAccessManager, QNetworkReply, QNetworkRequest\nfrom PyQt6.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLabel, QSizePolicy, QStackedLayout, QVBoxLayout, QWidget\n\nclass DetailScreen(QFrame):\n abort: pyqtSignal = pyqtSignal()\n\n def __init__(self, parent: Optional[QWidget] = None):\n super().__init__(parent=parent)\n\n self.networkManger: QNetworkAccessManager = QNetworkAccessManager()\n #self.baseBackdropUrl: str = \"https://www.themoviedb.org/t/p/w1920_and_h800_multi_faces\"\n self.baseBackdropUrl: str = \"https://www.themoviedb.org/t/p/original\"\n self.placeHolderPixmap: QPixmap = QPixmap(1920,1080)\n self.placeHolderPixmap.fill(QColor(\"#7c859E\"))\n self.backdropLabel: QLabel = QLabel()\n self.backdropLabel.setPixmap(self.placeHolderPixmap)\n self.backdropLabel.setSizePolicy(QSizePolicy.Policy.Ignored,QSizePolicy.Policy.Ignored)\n self.backdropLabel.setObjectName(\"backdropLabel\")\n self.titleLabel: QLabel = QLabel()\n self.titleLabel.setAlignment(Qt.AlignmentFlag.AlignBottom)\n self.titleLabel.setObjectName(\"titleLabel\")\n self.overViewLabel: QLabel = QLabel()\n self.overViewLabel.setSizePolicy(QSizePolicy.Policy.Expanding,QSizePolicy.Policy.Expanding)\n self.overViewLabel.setMaximumHeight(40)\n self.overViewLabel.setWordWrap(True)\n self.overViewLabel.setObjectName(\"overviewLabel\")\n self.ratingButton: QPushButton = QPushButton()\n ratingPixmap: QPixmap = QPixmap(\"film_finder/screens/detail/assets/star.png\")\n ratingPixmap = ratingPixmap.scaled(24,24)\n self.ratingButton.setIcon(QIcon(ratingPixmap))\n self.ratingButton.setObjectName(\"ratingButton\")\n\n\n trailerButton: QPushButton = QPushButton(\"Trailer\")\n trailerButton.setObjectName(\"trailerButton\")\n trailerButton.setIcon(QIcon(\"film_finder/screens/detail/assets/play.png\"))\n trailerButton.setSizePolicy(QSizePolicy.Policy.Fixed,QSizePolicy.Policy.Fixed)\n\n frontFrame: QFrame = QFrame()\n frontFrame.setObjectName(\"frontFrame\")\n backFrame: QFrame = QFrame()\n\n frontFrameLayout: QVBoxLayout = QVBoxLayout(frontFrame)\n frontFrameLayout.addWidget(self.titleLabel)\n frontFrameLayout.addWidget(self.ratingButton)\n frontFrameLayout.addWidget(self.overViewLabel)\n frontFrameLayout.addWidget(trailerButton)\n frontFrameLayout.setContentsMargins(20,0,0,20)\n frontFrameLayout.setSpacing(20)\n frontFrameLayout.setAlignment(Qt.AlignmentFlag.AlignBottom)\n\n backFrameLayout: QHBoxLayout = QHBoxLayout(backFrame)\n backFrameLayout.addWidget(self.backdropLabel)\n backFrameLayout.setContentsMargins(0,0,0,0)\n backFrameLayout.setSpacing(0)\n\n bannerLayout: QStackedLayout = QStackedLayout(self)\n bannerLayout.addWidget(frontFrame)\n bannerLayout.addWidget(backFrame)\n bannerLayout.setStackingMode(QStackedLayout.StackingMode.StackAll)\n self.setSizePolicy(QSizePolicy.Policy.Expanding,QSizePolicy.Policy.Expanding)\n def onFilmclicked(self,index: QModelIndex):\n title: Optional[str] = index.data()\n rating: Optional[str] = index.siblingAtColumn(1).data()\n overview: Optional[str] = index.siblingAtColumn(2).data()\n backdropUrl: Optional[str] = index.siblingAtColumn(3).data()\n\n 
if title is not None:\n self.titleLabel.setText(title)\n if rating is not None:\n self.ratingButton.setText(str(rating))\n if overview is not None:\n self.overViewLabel.setText(overview)\n if backdropUrl is not None:\n self.abort.emit()\n url: str = self.baseBackdropUrl + backdropUrl\n request: QNetworkRequest = QNetworkRequest(QUrl(url))\n reply: QNetworkReply = self.networkManger.get(request)\n reply.finished.connect(self.onBackdropReplyFinished)\n self.abort.connect(reply.abort)\n\n def onBackdropReplyFinished(self):\n reply = self.sender()\n if isinstance(reply, QNetworkReply):\n if reply.error() == QNetworkReply.NetworkError.NoError:\n data = reply.readAll()\n pixmap = QPixmap()\n pixmap.loadFromData(data) #pyright: ignore\n pixmap = pixmap.scaled(1920,1080)\n self.backdropLabel.setPixmap(pixmap)\n\n\n\n","repo_name":"Savant35/filmfinder-","sub_path":"film_finder/screens/detail/detailscreen.py","file_name":"detailscreen.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"5355005493","text":"from config import Config\n\nimport numpy as np\nimport torch\n\nconfig = Config()\n\n\ndef get_all_predictions(model, loader):\n all_preds = []\n all_labels = []\n\n with torch.no_grad():\n for inputs, labels in loader:\n inputs, labels = inputs.to(config.device), labels.to(config.device)\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n all_preds.extend([int(x) for x in preds.cpu().numpy()])\n all_labels.extend([int(x) for x in labels.cpu().numpy()])\n\n return np.array(all_preds), np.array(all_labels)\n","repo_name":"Daniel-Elston/CV-model-compare","sub_path":"src/models/predict_model.py","file_name":"predict_model.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"25904138602","text":"import sys\n\n\nclass BatallasCriaturas:\n\n def __init__(self, entrenador, rivales):\n self.entrenador = entrenador\n self.rivales = rivales\n self.invicto = True\n self.victorias = 0\n\n def batalla_entrenadores(self):\n print(f\"¡Te enfrentas contra {self.rival.nombre}!\")\n print(\"Preparado...\\nListo!\\nYa!\\n\")\n j1 = 0\n j2 = 0\n # Ese 2 es luego 6\n while j1 < 6 and j2 < 6:\n self.batalla_criaturas(self.entrenador.bolsillo[j1], self.rival.bolsillo[j2])\n if self.entrenador.bolsillo[j1].hp == 0:\n j1 += 1\n print(\"Tu criatura perdió esta\\n\") \n elif self.rival.bolsillo[j2].hp == 0:\n j2 += 1\n print(\"El rival va en su criatura número\", j2)\n print(\"Tu criatura ganó esta\\n\")\n print(\"<\"*15,\">\"*15)\n if j2 == 5 and self.rival.bolsillo[j2].hp == 0:\n break\n if j1 == 6: # este tambiene debe ser un 6\n self.invicto = False\n\n def batalla_criaturas(self, criatura1, criatura2):\n while criatura1.hp > 0 or criatura2.hp > 0:\n criatura1, criatura2 = self.atacar(criatura1, criatura2)\n if criatura2.hp == 0:\n print(f\"¡{criatura2.nombre} fue derrotado!\")\n break\n print()\n criatura2, criatura1 = self.atacar(criatura2, criatura1)\n if criatura1.hp == 0:\n print(f\"¡{criatura1.nombre} fue derrotado!\")\n break\n print()\n\n def atacar(self, criatura1, criatura2):\n print(f\"{criatura1.nombre} ataca a {criatura2.nombre}\", end=\" \")\n print(f\"con su poder de tipo {criatura1.tipo}\")\n if criatura1.preferencia_combate() == \"Fisico\":\n dano = max(criatura1.atk*1.5 - criatura2.defense, 5)\n else:\n dano = max(criatura1.sp_atk*1.5 - criatura2.defense, 5)\n\n print(f\"El ataque fue de 
{dano}\")\n criatura2.recibir_ataque(dano)\n print(f\"{criatura2.nombre} tiene {criatura2.hp} de vida\")\n return criatura1, criatura2\n\n def ejecutar_simulación(self):\n try:\n print(f\"Tus criaturas son\\n{[x.nombre for x in self.entrenador.bolsillo]}\")\n except AttributeError:\n print(\"Ocurrió un error, revisa que todas las clases estén completas\")\n sys.exit()\n\n while len(self.rivales) > 0:\n self.rival = self.rivales.pop(0)\n self.batalla_entrenadores()\n if self.invicto == False:\n print(f\"Has perdido valiente entrenador {self.entrenador.nombre}\")\n print(\"¡Pero has completado la actividad!\")\n print([x.nombre for x in self.entrenador.bolsillo])\n break\n else:\n print(f\"¡Has ganado valiente entrenador!\")\n print(f\"Ahora cobrarás tu recompensa, con una criatura enemiga\\n\\n\")\n self.victorias += 1\n self.entrenador.bolsillo + self.rival.bolsillo\n print(f\"Tus criaturas ahora son\\n{[x.nombre for x in self.entrenador.bolsillo]}\")\n print(\"-\"*40)\n try:\n for x in self.entrenador.bolsillo:\n x.hp = x.hp_base\n except (AttributeError, NameError):\n print(\"Ocurrió un error, revisa que la property hp esté bien definida\")\n sys.exit()\n\n estrellas = self.entrenador.bolsillo.cantidad_criaturas_estrella()\n print(f\"Tu cantidad de criaturas estrella fue: {estrellas}\")\n print(f\"Cantidad de victorias es igual a {self.victorias}\")\n\n if self.invicto == True:\n print(\"¡Ganaste todas las peleas! ¡Eres el campeon de la Liga Sinnoh!\")\n print(\"¡Éxito programador! ¡Haz completado la actividad!\")\n","repo_name":"IIC2233/syllabus-2020-2","sub_path":"Actividades/AF01/batalla.py","file_name":"batalla.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"es","doc_type":"code","stars":7,"dataset":"github-code","pt":"36"} +{"seq_id":"75040616423","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\n\ndef getStatusIds(user,nbStatus=3,headless=True):\n\tif user[0] == \"@\":\n\t\tuser = user[1:]\n\tregex = f\"/{user}\\/status\\/\\d*/g\"\n\tjsPart1 = f\"var out = [];var str = document.getElementsByTagName('html')[0].innerHTML;var patt = {regex};\"\n\tjsToRun = jsPart1+\"while(match=patt.exec(str)){out.push(match[0]);}return out;\"\n\toptions = Options()\n\tif(headless == True):\n\t\toptions.add_argument('--headless')\n\toptions.add_argument('--disable-gpu')\n\tdriver = webdriver.Chrome(options=options)\n\tdriver.get(f\"https://twitter.com/{user}\") \n\ttime.sleep(3)\n\t#WebDriverWait(driver, 60).until(EC.element_to_be_clickable((By.XPATH, \"/html/body/div[1]/div/div/div[2]/main/div/div/div/div[1]/div/div[3]/div/div/section/div/div/div[1]/div/div/div/article\")))\n\tarrOut = logic(driver,nbStatus,jsToRun)\n\tdriver.close()\n\treturn arrOut\n\ndef logic(driver,nbStatus,jsToRun):\n\tarrOut = list()\n\twhile len(arrOut) < nbStatus:\n\t\tarrTweets = driver.execute_script(jsToRun)\n\t\tfor tweet in arrTweets:\n\t\t\tarrOut.append(int(tweet.split(\"/\")[-1]))\n\t\tarrOut = list(dict.fromkeys(arrOut))\n\t\tdriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight)\")\n\t\ttime.sleep(1)\n\tarrOut.sort(reverse=True)\n\tlength = len(arrOut)\n\tif length > nbStatus:\n\t\tarrOut = arrOut[:nbStatus]\n\treturn arrOut\n\ndef getStatusMultipleUsers(listUsers,nbStatus=3,headless=True):\n\toptions = Options()\n\tif(headless == True):\n\t\toptions.add_argument('--headless')\n\toptions.add_argument('--disable-gpu')\n\tdriver = webdriver.Chrome(options=options)\n\tarrOutMulti = {}\n\tfor user in 
listUsers:\n\t\ttry:\n\t\t\tif user[0] == \"@\":\n\t\t\t\tuser = user[1:]\n\t\t\tregex = f\"/{user}\\/status\\/\\d*/g\"\n\t\t\tjsPart1 = f\"var out = [];var str = document.getElementsByTagName('html')[0].innerHTML;var patt = {regex};\"\n\t\t\tjsToRun = jsPart1+\"while(match=patt.exec(str)){out.push(match[0]);}return out;\"\n\t\t\tdriver.get(f\"https://twitter.com/{user}\") \n\t\t\ttime.sleep(3)\n\t\t\t#WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"/html/body/div[1]/div/div/div[2]/main/div/div/div/div[1]/div/div[3]/div/div/section/div/div/div[1]/div/div/div/article\")))\n\t\t\tarrOut = logic(driver,nbStatus,jsToRun)\n\t\t\tarrOutMulti[user] = arrOut\n\t\texcept:\n\t\t\tarrOutMulti[user] = \"invalid\"\n\tdriver.close()\n\treturn arrOutMulti\n\nif __name__ == \"__main__\":\n\tprint(\"This is a module, don't run it like this !\")\n","repo_name":"BaLaurent/TwitterStatusExtractor","sub_path":"tse.py","file_name":"tse.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"5052350735","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.home, name='home'),\n\turl(r'^about/$', views.about, name='about'),\n\turl(r'^panel/$', views.panel, name='panel'),\n\turl(r'^login/$', views.mylogin, name='mylogin'),\n\turl(r'^loginout/$', views.mylogout, name='mylogout'),\n\turl(r'^panel/settings/$', views.site_settings, name='site_settings'),\n]","repo_name":"caprioara/technews-django","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"32274472858","text":"#!/opt/csw/bin/python\n# coding=utf-8\n\nimport json\nfrom urllib import urlopen, urlencode\nfrom conf import config\n\ndef getCommands():\n return [\"np\"]\n\ndef getResponseType():\n return \"MSG\"\n\ndef getInfo():\n return \"[kasutajanimi] Tagastab last.fm-s hetkel mängiva loo\"\n\n\nclass UserNotFoundError(BaseException):\n pass\n\ndef get(parameter, channel, author, folder):\n username = parameter\n if not username:\n return \"Kasutajanimi on puudu\"\n\n try:\n playingTrack = findPlayingTrack(username.strip())\n except UserNotFoundError:\n return \"Kasutajat ei leitud\"\n\n if playingTrack:\n return formatPlayingTrack(playingTrack)\n\n return \"%s ei kuula hetkel midagi\" % username\n\ndef findPlayingTrack(username):\n tracks = getRecentTracks(username)\n if not tracks:\n return None\n for track in tracks:\n attributes = track.get('@attr')\n if attributes:\n if attributes.get('nowplaying', False):\n return track\n return None\n\ndef getRecentTracks(username):\n params = {'method': 'user.getrecenttracks', 'user': username, 'api_key': config.LASTFM_KEY, 'limit': '1', 'format': 'json'}\n url = \"http://ws.audioscrobbler.com/2.0/?\" + urlencode(params)\n content = urlopen(url).read()\n data = json.loads(content)\n if \"error\" in data and data[\"error\"] == 6:\n raise UserNotFoundError\n recentTracks = data.get('recenttracks')\n if not recentTracks:\n return []\n trackEntry = recentTracks.get('track')\n # Response might be a dict or a list, make a list of it either way\n return [trackEntry] if type(trackEntry) == dict else trackEntry\n\ndef formatPlayingTrack(track):\n album = track.get('album')\n artist = track.get('artist')\n return \"%s - %s (%s)\" % (artist.get('#text').encode(\"utf-8\"), track.get('name').encode(\"utf-8\"), 
album.get('#text').encode(\"utf-8\"))\n","repo_name":"sviik/marju","sub_path":"plugin/command/np/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"30669021749","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport json\n\n\nOUTPUT_PATH=os.environ.get('CI_CODECHECK_PACK_DIR')\nif not OUTPUT_PATH:\n OUTPUT_PATH=\"./output/quality_check\"\n\nPROJECT_JSON_NAME=\"code_check_project.json\"\nPERSON_JSON_NAME=\"code_check_person.json\"\nPROJECT_JSON=OUTPUT_PATH + '/' + PROJECT_JSON_NAME\nPERSON_JSON=OUTPUT_PATH + '/' + PERSON_JSON_NAME\n\ndef _init():\n global _global_dict\n _global_dict = {}\n set_value('output_path', OUTPUT_PATH)\n set_value('project_json_name', PROJECT_JSON_NAME)\n set_value('person_json_name', PERSON_JSON_NAME)\n set_value('project_json', PROJECT_JSON)\n set_value('person_json', PERSON_JSON)\n\ndef set_value(key, value):\n _global_dict[key] = value\n\ndef get_value(key, defValue=None):\n try:\n return _global_dict[key]\n except KeyError:\n return defValue\n\n\ndef get_user_email(fil, line):\n file_dir = os.path.dirname(fil)\n file_name = os.path.basename(fil)\n # print(f'cd {file_dir}; git blame {file_name} -L {line},{line} -e')\n ans = os.popen(f'cd {file_dir}; git blame {file_name} -L {line},{line} -e')\n email = re.search(r'(?<=<).*?(?=>)', ans.readline())\n if email:\n return email.group().replace(' ', '')\n return None\n\n\ndef quality_report_append(report, key, value):\n json_data = {}\n if os.path.exists(report):\n fjson = open(report, 'r', encoding='utf8')\n json_data = json.load(fjson)\n fjson.close()\n\n json_data[key] = value\n json_str = json.dumps(json_data, indent=4, ensure_ascii=False)\n with open(report, 'w') as f:\n f.write(json_str)\n\n","repo_name":"tuya/tuyaos-development-board-t2","sub_path":"software/TuyaOS/scripts/quality_check/quality_check_base.py","file_name":"quality_check_base.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"30326240359","text":"from typing import Any, Callable\n\nfrom nicegui import ui\nfrom nicegui.element import Element\n\nfrom sempubflow.models.scholar import Scholar\n\n\nclass ScholarSuggestion(Element):\n \"\"\"\n display a Scholar\n \"\"\"\n\n def __init__(self, scholar: Scholar, on_select: Callable[[Scholar], Any]):\n super().__init__(tag=\"div\")\n self.scholar = scholar\n self._on_select_callback = on_select\n with ui.card().tight() as card:\n card.on(\"click\", self.on_select)\n with ui.card_section() as section:\n section.props(add=\"horizontal\")\n with ui.card_section():\n with ui.avatar():\n if scholar.image:\n ui.image(source=scholar.image)\n ui.separator().props(add=\"vertical\")\n with ui.card_section():\n with ui.row():\n self.scholar_label = ui.label(self.scholar.label)\n with ui.row():\n self.scholar_name = ui.label(f\"{self.scholar.given_name} {self.scholar.family_name}\")\n with ui.row():\n self._show_identifier()\n\n def on_select(self):\n \"\"\"\n Handle selection of the suggestion card\n \"\"\"\n return self._on_select_callback(self.scholar)\n\n def _show_identifier(self):\n \"\"\"\n display all identifier of the scholar\n \"\"\"\n if self.scholar.wikidata_id:\n with ui.element('div'):\n ui.avatar(\n icon=\"img:https://www.wikidata.org/static/favicon/wikidata.ico\",\n color=None,\n size=\"sm\",\n square=True\n )\n 
ui.link(text=self.scholar.wikidata_id,\n target=f\"https://www.wikidata.org/wiki/{self.scholar.wikidata_id}\", new_tab=True)\n if self.scholar.dblp_author_id:\n with ui.element('div'):\n ui.element('i').classes('ai ai-dblp')\n ui.link(text=self.scholar.dblp_author_id,\n target=f\"https://dblp.org/pid/{self.scholar.dblp_author_id}\", new_tab=True)\n if self.scholar.orcid_id:\n with ui.element('div'):\n ui.element('i').classes('ai ai-orcid')\n ui.link(text=self.scholar.orcid_id,\n target=f\"https://orcid.org/{self.scholar.orcid_id}\", new_tab=True)\n","repo_name":"WolfgangFahl/SemPubFlow","sub_path":"sempubflow/elements/suggestion.py","file_name":"suggestion.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"32975371258","text":"import requests\nimport sys\n\ntry:\n url = sys.argv[1]\n path = sys.argv[2]\n print('url: {}'.format(url))\n print('path: {}'.format(path))\n res = requests.get(url)\n with open('{}'.format(path), 'bw') as f:\n f.write(res.content)\nexcept:\n print('USAGE: python download.py [download-url] [save-path]')\n","repo_name":"zJoyceLee/Homework","sub_path":"GraduationProject/downloadApp/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"32115881286","text":"from __future__ import annotations\n\nfrom typing import Iterable, Optional\n\nimport frictionless as fl\nimport pandas as pd\nfrom dimcat.base import get_setting\nfrom dimcat.data.packages.base import Package, PackageMode\nfrom dimcat.data.resources.base import D, Resource, SomeDataframe\nfrom dimcat.data.resources.dc import DimcatResource, PieceIndex\n\n\nclass DimcatPackage(Package):\n accepted_resource_types = (DimcatResource,)\n default_mode = PackageMode.RECONCILE_SAFELY\n detects_extensions = get_setting(\"resource_descriptor_endings\")\n\n def _verify_creationist_arguments(\n self,\n **kwargs,\n ):\n \"\"\"Spoiler alert: They are spurious.\"\"\"\n if not any(kwargs.values()):\n raise ValueError(\"No arguments were passed to create a resource.\")\n if kwargs.get(\"resource\") and kwargs.get(\"df\"):\n raise ValueError(\"Pass either a resource or a dataframe, not both.\")\n\n def __init__(\n self,\n package_name: str,\n resources: Iterable[Resource] = None,\n basepath: Optional[str] = None,\n descriptor_filename: Optional[str] = None,\n auto_validate: bool = False,\n metadata: Optional[dict] = None,\n ) -> None:\n \"\"\"\n\n Args:\n metadata:\n package_name:\n Name of the package that can be used to retrieve it.\n resources:\n An iterable of :class:`Resource` objects to add to the package.\n descriptor_filename:\n Pass a JSON or YAML filename or relative filepath to override the default (``.json``).\n Following frictionless specs it should end on \".datapackage.[json|yaml]\".\n basepath:\n The absolute path on the local file system where the package descriptor and all contained resources\n are stored. The filepaths of all included :class:`DimcatResource` objects need to be relative to the\n basepath and DiMCAT does its best to ensure this.\n auto_validate:\n By default, the package is validated everytime a resource is added. 
Set to False to disable this.\n metadata:\n Custom metadata to be maintained in the package descriptor.\n \"\"\"\n super().__init__(\n package_name=package_name,\n resources=resources,\n basepath=basepath,\n descriptor_filename=descriptor_filename,\n auto_validate=auto_validate,\n metadata=metadata,\n )\n\n def create_and_add_resource(\n self,\n df: Optional[D] = None,\n resource: Optional[Resource | fl.Resource | str] = None,\n resource_name: Optional[str] = None,\n basepath: Optional[str] = None,\n auto_validate: bool = False,\n ) -> None:\n \"\"\"Adds a resource to the package. Parameters are passed to :class:`DimcatResource`.\"\"\"\n self._verify_creationist_arguments(df=df, resource=resource)\n if df is not None:\n new_resource = DimcatResource.from_dataframe(\n df=df,\n resource_name=resource_name,\n auto_validate=auto_validate,\n basepath=basepath,\n )\n self.add_resource(new_resource)\n return\n super().create_and_add_resource(\n resource=resource,\n resource_name=resource_name,\n basepath=basepath,\n auto_validate=auto_validate,\n )\n\n def get_boolean_resource_table(self) -> SomeDataframe:\n \"\"\"Returns a table with this package's piece index and one boolean column per resource,\n indicating whether the resource is available for a given piece or not.\"\"\"\n bool_masks = []\n for resource in self:\n piece_index = resource.get_piece_index()\n if len(piece_index) == 0:\n continue\n bool_masks.append(\n pd.Series(\n True,\n dtype=\"boolean\",\n index=piece_index.index,\n name=resource.resource_name,\n )\n )\n if len(bool_masks) == 0:\n return pd.DataFrame([], dtype=\"boolean\", index=PieceIndex().index)\n table = pd.concat(bool_masks, axis=1).fillna(False).sort_index()\n table.index.names = (\"corpus\", \"piece\")\n table.columns.names = (\"resource_name\",)\n return table\n\n def get_piece_index(self) -> PieceIndex:\n \"\"\"Returns the piece index corresponding to a sorted union of all included resources' indices.\"\"\"\n IDs = set()\n for resource in self:\n IDs.update(resource.get_piece_index())\n return PieceIndex.from_tuples(sorted(IDs))\n","repo_name":"DCMLab/dimcat","sub_path":"src/dimcat/data/packages/dc.py","file_name":"dc.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"36"} +{"seq_id":"8094816058","text":"#--------------------//CLIENT//-----------------------#\nimport socket, os, sys\nfrom time import sleep\ndef Main():\n print(\"CLIENT STARTED\")\n print(\"SUSIE(Script Uploading and Sharing of Information Environment)\")\n print(\"Open data project written in Python2 by -C0RVUS-\")\n host = '127.0.0.1'\n port = 5444\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n s.send(\"Still connected\")\n\n usrres = raw_input(\"1.RETRIEVE FILE\\n2.SUBMIT FILE\\n> \")\n s.send(usrres)\n\n if usrres == \"1\":\n clientret()\n elif usrres == \"2\":\n clientsub()\n else:\n print(\"ERR..\")\n print(\"RESTARTING CLIENT\")\n Main()\n\n\n\n#================================================================\n #while True:\n #output = s.recv(1024)\n #print(output)\n #else:\n # Main()\n#================================================================\n\ndef clientsub():\n recev = (c.recv(1024))\n print(recev)\n send = raw_input(\"> \")\n s.send(send)\n recev = (c.recv(1024))\n print(recev)\n\n#================================================================\ndef clientret():\n recev = (c.recv(1024))\n print(recev)\n send = raw_input(\"> \")\n s.send(send)\n recev = 
(c.recv(1024))\n print(recev)\n\n\nMain()\n","repo_name":"C0RVU5/Script-Uploading-and-Sharing-of-Information-Environment","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"42519348622","text":"\nfrom flask import Flask,request\nimport boto3\nfrom flask_cors import CORS\n\napp =Flask(__name__)\nCORS(app)\n\n@app.route('/')\ndef func():\n\n instancename=request.args.get('instancename')\n ec2 = boto3.client('ec2')\n\n \n response1 = ec2.describe_instances(\n Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': [\n str(instancename)\n ]\n }\n \n ]\n) \n\n return (response1)\n \n\n\n\napp.run(port=2003, host=\"0.0.0.0\",debug=True) ","repo_name":"Manideep1308/aws-deployment-status-apis","sub_path":"instance/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"17211877102","text":"frase = str(input('Escreva uma frase: ')).strip().upper()\npalavra = frase.split()\njunto = ''.join(palavra)\ninverso = ''\nfor c in range(len(junto) - 1, -1, -1):\n inverso += junto[c]\nprint(f'O inverso de {junto} é {inverso}')\nif inverso == junto:\n print('Temos um palíndromo!')\nelse:\n print('A frase digitada não é um palíndromo.')\n\n","repo_name":"GRSFFE/PythonExercicios","sub_path":"ex053.py","file_name":"ex053.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"10612637299","text":"\"\"\"\n=======================================================\nDecomposition of flat footprints (structuring elements)\n=======================================================\n\nMany footprints (structuring elements) can be decomposed into an equivalent\nseries of smaller structuring elements. The term \"flat\" refers to a footprints\nthat only contains values of 0 or 1 (i.e. all methods in\n``skimage.morphology.footprints``). Binary dilation operations have an\nassociative and distributive property such that often allows decomposition into\nan equivalent series of smaller footprints. Most often this is done to provide\na performance benefit.\n\nAs a concrete example, dilation with a square footprint of size (15, 15) is\nequivalent to dilation with a rectangle of size (15, 1) followed by a\ndilation with a rectangle of size (1, 15). It is also equivalent to 7\nconsecutive dilations with a shape (3, 3) square.\n\nThere are many possible decompositions and which one performs best may be\narchitecture-dependent.\n\nscikit-image currently provides two forms of automated decomposition. 
For the\ncases of ``square``, ``rectangle`` and ``cube`` footprints, there is an option\nfor a \"separable\" decomposition (size > 1 along only 1 axis at a time).\n\nFor some other symmetric convex shapes such as ``diamond``, ``octahedron`` and\n``octagon`` there is no separable decomposition, but it is possible to provide\na \"sequence\" decomposition based on a series of small footprints of shape\n``(3,) * ndim``.\n\nFor simplicity of implementation, all decompositions use only odd-sized\nfootprints with their origin located at the center of the footprint.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom skimage.morphology import (cube, diamond, octagon, octahedron, rectangle,\n square)\n\n# Generate 2D and 3D structuring elements.\nfootprint_dict = {\n \"square(11) (separable)\": (square(11, decomposition=None),\n square(11, decomposition=\"separable\")),\n \"square(11) (sequence)\": (square(11, decomposition=None),\n square(11, decomposition=\"sequence\")),\n \"rectangle(7, 11) (separable)\": (rectangle(7, 11, decomposition=None),\n rectangle(7, 11,\n decomposition=\"separable\")),\n \"rectangle(7, 11) (sequence)\": (rectangle(7, 11, decomposition=None),\n rectangle(7, 11,\n decomposition=\"sequence\")),\n \"diamond(5) (sequence)\": (diamond(5, decomposition=None),\n diamond(5, decomposition=\"sequence\")),\n \"octagon(7, 4) (sequence)\": (octagon(7, 4, decomposition=None),\n octagon(7, 4, decomposition=\"sequence\")),\n \"cube(11) (separable)\": (cube(11, decomposition=None),\n cube(11, decomposition=\"separable\")),\n \"cube(11) (sequence)\": (cube(11, decomposition=None),\n cube(11, decomposition=\"sequence\")),\n \"octahedron(7) (sequence)\": (octahedron(7, decomposition=None),\n octahedron(7, decomposition=\"sequence\")),\n}\n\n# Visualize the elements\n\n# use a similar dark blue for the 2d plots as for the 3d voxel plots\ncmap = colors.ListedColormap(['white', (0.1216, 0.4706, 0.70588)])\nfontdict = dict(fontsize=16, fontweight='bold')\nfor title, (footprint, footprint_sequence) in footprint_dict.items():\n fig = plt.figure(figsize=(12, 4))\n ndim = footprint.ndim\n num_seq = len(footprint_sequence)\n if ndim == 2:\n ax = fig.add_subplot(1, num_seq + 1, num_seq + 1)\n ax.imshow(footprint, cmap=cmap, vmin=0, vmax=1)\n else:\n ax = fig.add_subplot(1, num_seq + 1, num_seq + 1,\n projection=Axes3D.name)\n ax.voxels(footprint, cmap=cmap)\n\n ax.set_title(title.split(' (')[0], fontdict=fontdict)\n ax.set_axis_off()\n for n, (fp, num_reps) in enumerate(footprint_sequence):\n npad = [((footprint.shape[d] - fp.shape[d]) // 2, ) * 2\n for d in range(ndim)]\n fp = np.pad(fp, npad, mode='constant')\n if ndim == 2:\n ax = fig.add_subplot(1, num_seq + 1, n + 1)\n ax.imshow(fp, cmap=cmap, vmin=0, vmax=1)\n else:\n ax = fig.add_subplot(1, num_seq + 1, n + 1, projection=Axes3D.name)\n ax.voxels(fp, cmap=cmap)\n title = f\"element {n + 1} of {num_seq}\\n({num_reps} iteration\"\n title += \"s)\" if num_reps > 1 else \")\"\n ax.set_title(title, fontdict=fontdict)\n ax.set_axis_off()\n ax.set_xlabel(f'num_reps = {num_reps}')\n fig.tight_layout()\n\n # draw dividing line between seqeuence element plots and composite plot\n line_pos = num_seq / (num_seq + 1)\n line = plt.Line2D([line_pos, line_pos], [0, 1], color=\"black\")\n 
fig.add_artist(line)\n\nplt.show()\n","repo_name":"alxndrkalinin/scikit-image","sub_path":"doc/examples/numpy_operations/plot_structuring_element_decompositions.py","file_name":"plot_structuring_element_decompositions.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"} +{"seq_id":"34607125258","text":"#Couting no of sunday in the year\n'''\nYou are given the following information, but you may prefer to do some research for yourself.\n\n1 Jan 1900 was a Monday.\nThirty days has September,\nApril, June and November.\nAll the rest have thirty-one,\nSaving February alone,\nWhich has twenty-eight, rain or shine.\nAnd on leap years, twenty-nine.\nA leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.\nHow many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?'''\n\nfrom datetime import date\nfrom collections import Counter\nn=int(input())\ndays=['mon','tue','wed','thu','fri','sat','sun']\ninitial_year=1901\nfinal_year=n\nsunday_count=0\nday_left=1\nwhile initial_year<=final_year:\n\tfirst_day=days[day_left]\n\tsunday_year=0\t\n\t#for leap year\n\tif (initial_year%4==0 and initial_year%100!=0) or (initial_year%100==0 and initial_year%400==0):\n\t\tday_left+=2\n\t\tif first_day in ['mon','wed','thu']:\n\t\t\tsunday_year+=2\n\t\telif first_day=='sun':\n\t\t\tsunday_year+=3\n\t\telse:\n\t\t\tsunday_year+=1\n\telse:\n\t\tif first_day in ['mon','tue','sun']:\n\t\t\tsunday_year+=2\n\t\telif first_day=='thu':\n\t\t\tsunday_year+=3\n\t\telse:\n\t\t\tsunday_year+=1\n\t\tday_left+=1\n\tif day_left>=7:\n\t\tday_left+=-7\n\tsunday_count+=sunday_year\n\tinitial_year+=1\n\t\nprint(sunday_count)\n\n\n\ncounter = Counter()\n\nfor year in range(1901, 2001):\n for month in range(1, 13):\n day = date(year, month, 1)\n counter[day.weekday()] += 1\n\nprint (counter[6])\n\n","repo_name":"sandeep2305/Euler-Problems","sub_path":"1_50/p19_counting_sundays.py","file_name":"p19_counting_sundays.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"35785130052","text":"import sys\nimport time\nimport sqlite3\n\nlist_book = []\n\ndef get_timestamp(get_time):\n time_array = time.strptime(get_time, \"%Y-%m-%d %H:%M:%S\")\n timestamp = int(time.mktime(time_array))\n return timestamp\n\nwith open(\"file.txt\", \"r\", encoding='utf-8') as f:\n line = f.readline()\n while line:\n get_time = line[:19]\n timestamp = get_timestamp(get_time)\n\n if not (timestamp, line) in list_book:\n list_book.append((timestamp, line))\n\n line = f.readline()\n\nfor book in sorted(list_book, key=lambda info: info[0]):\n print(book[1])\n\n\nconn = sqlite3.connect(\"tia.db\")\ntry:\n cursor = conn.execute(\"SELECT timestamp from Tia\")\nexcept BaseException as e:\n if \"no such table\" in str(e):\n conn.execute('''CREATE TABLE Tia (id INT PRIMARY KEY NOT NULL,\n timestamp INT,\n tia_info TEXT);''')\n# print(type(cursor))","repo_name":"uccus/tia_spider","sub_path":"get_newest.py","file_name":"get_newest.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"38687723541","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 23 17:21:10 2020\r\n\r\n@author: PC\r\n\"\"\"\r\n\r\nfrom math import*\r\nimport sys\r\nfrom PyQt5 import uic, QtWidgets\r\nqtCreatorFile = 
\"concreplacas.ui\"\r\n\r\nUi_MainWindow,QtBaseClass = uic.loadUiType(qtCreatorFile)\r\n\r\ne=0.12\r\nr=0.20\r\nw_varilla=0.994\r\nA=0\r\nL=0\r\n\r\nclass VentanaPrincipal(QtWidgets.QMainWindow,Ui_MainWindow):\r\n def __init__(self):\r\n QtWidgets.QMainWindow.__init__(self)\r\n Ui_MainWindow.__init__(self)\r\n self.setupUi(self)\r\n \r\n #DECLARAMOS EL SIGMA\r\n self.pushButtonCalcular.clicked.connect(self.Calcular)\r\n self.pushButtonGuardar.clicked.connect(self.Guardar)\r\n \r\n def Calcular(self):\r\n \r\n e=0.12\r\n r=0.20\r\n A=0\r\n L=0\r\n L=self.textEditLongitud.toPlainText()\r\n A=self.textEditAncho.toPlainText()\r\n L=int(L)\r\n A=int(A)\r\n \r\n V=(L*A*e)\r\n self.labelVolumen.setText(str(V))\r\n \r\n \r\n cemento= (V*7)*1.05\r\n arena= (V*0.56)\r\n grava= (V*0.84)\r\n agua= (V*180)\r\n precio_cemento=(cemento*25000)\r\n precio_arena=(arena*45000)\r\n precio_grava=(grava*68000)\r\n precio_agua=(agua*1000)\r\n \r\n acero_ancho=(A/r)\r\n acero_long=(L/r)\r\n t_varillas_ancho=acero_ancho\r\n t_varillas_long=acero_long\r\n \r\n if A<=6:\r\n t_met_ancho=(t_varillas_long*6)\r\n total_pesos_a=t_varillas_long*12500\r\n else:\r\n t_met_ancho=(t_varillas_long*12)\r\n total_pesos_a=t_varillas_long*25000\r\n if L<=6:\r\n t_met_long=(t_varillas_ancho*6)\r\n total_pesos_l=t_varillas_ancho*12500\r\n else:\r\n t_met_long=(t_varillas_ancho*12)\r\n total_pesos_l=t_varillas_ancho*25000\r\n total_acero_lineal=(t_met_ancho+t_met_long)\r\n \r\n #PESO TOTAL DEL ACERO\r\n #t_w_acero=total_acero_lineal*w_varilla\r\n \r\n total_pesosvarilla=total_pesos_a+total_pesos_l\r\n \r\n #Mano de obra\r\n mo=350000\r\n m_o= V*mo\r\n \r\n #Maquinaria\r\n mezcla_dora=35000\r\n mezcladora=mezcla_dora\r\n \r\n #ACPM\r\n acpm=3900\r\n a_c_p_m= V*acpm\r\n \r\n #TOTAL A PAGAR\r\n TOTAL=precio_cemento+precio_arena+precio_grava+precio_agua+total_acero_lineal+total_pesosvarilla+m_o+mezcladora+a_c_p_m\r\n\r\n self.textEditTotal.setText(str(TOTAL))\r\n \r\n def Guardar(self):\r\n nombre=self.textEditNombre.toPlainText()\r\n Id=self.textEditId.toPlainText()\r\n L=self.textEditLongitud.toPlainText()\r\n A=self.textEditAncho.toPlainText()\r\n TOTAL=self.textEditTotal.toPlainText()\r\n \r\n \r\n \r\n #SE TRABAJA CON EL ARCHIVO\r\n archivo=open('DatosConcreplacas.txt','a')\r\n archivo.write('NOMBRE:' + '' + nombre + 'IDENTIFICACION: ' + '' + Id + '' + 'LONGITUD: ' + '' +L+ 'ANCHO: ' + '' + A + 'TOTAL A PAGAR: ' + TOTAL + '' + '\\n' )\r\n \r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = VentanaPrincipal()\r\n window.show()\r\n sys.exit(app.exec())","repo_name":"danielmedinam03/TRABAJOS-PYTHON","sub_path":"Interfaz grafica QtDesigner PLACA DE CONCRETO/proyecto.py","file_name":"proyecto.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"17862262620","text":"'''\n本节文章\nhttps://learnscript.net/zh-hant/obs-python-scripting/junior/script-properties/create-and-destroy-property-collections/ 如何建立和终结属性集\n'''\n\n# 汇入模组 obspython,datetime\nimport obspython as obs\nimport datetime\n\n# 开始时间\nstart = None\n\ndef script_properties():\n # script_properties 传回一个属性集物件\n # 呼叫 obs_properties_create 函式建立属性集\n props = obs.obs_properties_create()\n\n # 添加一个判断是否可以结束串流的按钮\n obs.obs_properties_add_button(props, 'over', '结束了?', over_clicked)\n\n # 将当前时间记录为开始时间\n global start\n start = datetime.datetime.now()\n\n return props\n\ndef over_clicked(props, prop):\n # 当按钮被点选时,判断是否可以结束串流\n\n # 计算从开始时间到现在经历的秒数\n end = 
datetime.datetime.now()\n seconds = (end - start).total_seconds()\n\n # 已经过了 1 分钟,则显示可以结束\n if seconds > 60:\n obs.script_log(obs.LOG_INFO, '可以结束了')\n else:\n obs.script_log(obs.LOG_INFO, '串流还不到 1 分钟')\n","repo_name":"codebeatme/obs-python-scripting","sub_path":"src/zh-hant/junior/create_and_destroy_pc.py","file_name":"create_and_destroy_pc.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"12960176015","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 14 16:43:26 2022\r\n\r\n@author: Triton\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 14 15:18:37 2022\r\n\r\n@author: Triton\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 31 16:52:19 2022\r\n\r\n@author: Triton\r\n\"\"\"\r\n\r\nfrom moku.instruments import FrequencyResponseAnalyzer\r\nimport numpy as np\r\nimport time\r\nimport InstrumentDriver\r\nimport json\r\nimport math\r\nclass Driver(InstrumentDriver.InstrumentWorker):\r\n \r\n # def __init__(self):\r\n # self.IP = '[fe80:0000:0000:0000:7269:79ff:feb0:0470%3]'\r\n # self.Instrument = WaveformGenerator(self.IP, force_connect = True)\r\n # self.Instrument.relinquish_ownership()\r\n\r\n def performOpen(self, options={}):\r\n '''Perform the operation of opening the instrument connection'''\r\n global Instrument\r\n Instrument = FrequencyResponseAnalyzer(self.getValue('IP'), force_connect = True)\r\n \r\n \r\n def performClose(self, bError=False, options={}):\r\n '''Perform the close instrument connection operation'''\r\n \r\n Instrument.relinquish_ownership()\r\n \r\n \r\n def performSetValue(self, quant, value, sweepRate=0.0, options={}):\r\n \r\n #Initialize the Instrument Settings\r\n \r\n quant.setValue(value)\r\n\r\n if quant.name.startswith('Output'):\r\n amp = self.getValue('Output Amplitude')\r\n off = self.getValue('Output Offset')\r\n phase = self.getValue('Output Phase')\r\n for i in range(1,3) :\r\n Instrument.set_output(i,amp, off)\r\n Instrument.set_output_phase(i,phase)\r\n \r\n elif quant.name.startswith('Sweep'):\r\n freq1 = self.getValue('Sweep Start Freq')\r\n freq2 = self.getValue('Sweep End Freq')\r\n n_pts = int(self.getValue('Sweep Num-Points'))\r\n avg_t = self.getValue('Sweep Averaging-Time')\r\n avg_cyc = int(self.getValue('Sweep Averaging-Cycles'))\r\n set_t = self.getValue('Sweep Settling-Time')\r\n set_cyc = int(self.getValue('Sweep Settling-Cycles'))\r\n swp_linear = self.getValue('Sweep Linear')\r\n Instrument.set_sweep(start_frequency=freq1, stop_frequency=freq2,\r\n num_points=n_pts , averaging_time=avg_t,\r\n settling_time=set_t, averaging_cycles=avg_cyc,\r\n settling_cycles=set_cyc, linear_scale=swp_linear)\r\n\r\n Instrument.start_sweep()\r\n \r\n elif quant.name.startswith('Input'):\r\n for i in range(1,3) :\r\n coupling = self.getValue('Input Ch' + str(i) + ' - Coupling')\r\n impedance = self.getValue('Input Ch' + str(i) + ' - Impedance')\r\n inp_range = self.getValue('Input Ch' + str(i) + ' - Range')\r\n Instrument.set_frontend(i, impedance=impedance, coupling=coupling, range=inp_range)\r\n \r\n elif quant.name.startswith('Harmonic'):\r\n Instrument.set_harmonic_multiplier(int(self.getValue('Harmonic Multiplier')))\r\n elif quant.name.startswith('Measurement'):\r\n Instrument.measurement_mode(self.getValue('Measurement Mode'))\r\n \r\n return value \r\n \r\n def performGetValue(self, quant, options={}):\r\n \r\n if quant.name.startswith('Magnitude - Ch1'):\r\n #Wait for the 
sweeping to be done before collecting the data.\r\n #time.sleep(Instrument.get_sweep()['estimated_sweep_time'])\r\n #Collect the data from the Moku\r\n data = Instrument.get_data()\r\n \r\n #Update Labber\r\n \r\n for channel in data:\r\n \r\n data[channel]['magnitude'] = [0 if x == 'nan' else x for x in data[channel]['magnitude']]\r\n data[channel]['phase'] = [0 if x == 'nan' else x for x in data[channel]['phase']]\r\n\r\n trace_magnitude = quant.getTraceDict(value = np.asarray(data[channel]['magnitude']), \r\n x = np.asarray(data[channel]['frequency']))\r\n\r\n trace_phase = quant.getTraceDict(value = np.asarray(data[channel]['phase']) , \r\n x = np.asarray(data[channel]['frequency']))\r\n \r\n self.setValue('Phase - Ch' + channel[-1], trace_phase)\r\n self.setValue('Magnitude - Ch' + channel[-1], trace_magnitude) \r\n \r\n return quant.getValue() \r\n \r\n return quant.getValue()\r\n \r\n \r\n","repo_name":"Samanyu-T/Labber_MokuPro","sub_path":"Labber_Scripts - New/Moku_Pro_FRA/Moku_Pro_FRA.py","file_name":"Moku_Pro_FRA.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"71513782120","text":"from tkinter import *\n\nroot = Tk()\nroot.geometry(\"1000x1000\")\nroot.title(\"hs\")\n\nEntry_1 = Entry(root)\nEntry_1.grid(row = 0, column = 0)\n\nEntry_2 = Entry(root)\nEntry_2.grid(row = 0, column = 1)\n\ndef plus():\n\tone_chisl = Entry_1.get()\n\ttwo_chisl = Entry_2.get()\n\tif one_chisl.isdigit():\n\t\tif two_chisl.isdigit():\n\t\t\tresult = float(one_chisl) + float(two_chisl)\n\t\t\tLabel_res['text'] = result\n\t\telse:\n\t\t\tLabel_res['text'] = 'Error'\n\telse:\n\t\tLabel_res['text'] = 'Error'\n\ndef minus():\n\tone_chisl = Entry_1.get()\n\ttwo_chisl = Entry_2.get()\n\tif one_chisl.isdigit():\n\t\tif two_chisl.isdigit():\n\t\t\tresult = float(one_chisl) - float(two_chisl)\n\t\t\tLabel_res['text'] = result\n\t\telse:\n\t\t\tLabel_res['text'] = 'Error'\n\telse:\n\t\tLabel_res['text'] = 'Error'\n\ndef multioly():\n\tone_chisl = Entry_1.get()\n\ttwo_chisl = Entry_2.get()\n\tif one_chisl.isdigit():\n\t\tif two_chisl.isdigit():\n\t\t\tresult = float(one_chisl) * float(two_chisl)\n\t\t\tLabel_res['text'] = result\n\t\telse:\n\t\t\tLabel_res['text'] = 'Error'\n\telse:\n\t\tLabel_res['text'] = 'Error'\ndef diveded():\n\tone_chisl = Entry_1.get()\n\ttwo_chisl = Entry_2.get()\n\tif(float(one_chisl) == 0):\n\t\tLabel_res['text'] = 'Error'\n\telse:\n\t\tif one_chisl.isdigit():\n\t\t\tif two_chisl.isdigit():\n\t\t\t\tresult = float(one_chisl) / float(two_chisl)\n\t\t\t\tLabel_res['text'] = result\n\t\t\telse:\n\t\t\t\tLabel_res['text'] = 'Error'\n\t\telse:\n\t\t\tLabel_res['text'] = 'Error'\n\nB1 = Button(root, text = '+', width=16, height=3, command = plus)\nB1.grid(row = 1, column = 0)\n\nB2 = Button(root, text = '-', width=16, height=3, command = minus)\nB2.grid(row = 1, column = 1)\n\nB3 = Button(root, text = '*', width=16, height=3, command = multioly)\nB3.grid(row = 2, column = 0)\n\nB4 = Button(root, text = '/', width=16, height=3, command = diveded)\nB4.grid(row = 2, column = 1)\n\nLabel_res = Label(root, text = '')\nLabel_res.grid(row = 1, column = 2)\n\nroot.mainloop()","repo_name":"INePro/Tk","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10865270809","text":"# 2021-01-06\r\n# 
https://www.hackerrank.com/challenges/sock-merchant/problem\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the sockMerchant function below.\r\ndef sockMerchant(n, ar):\r\n st = list(set(ar))\r\n \r\n k = 0\r\n for ele in st:\r\n k += ar.count(ele)//2\r\n \r\n return k\r\n \r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n n = int(input())\r\n\r\n ar = list(map(int, input().rstrip().split()))\r\n\r\n result = sockMerchant(n, ar)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()\r\n","repo_name":"circle-sphere/SolveAlgosEveryday","sub_path":"HackerRank/Sales by Match.py","file_name":"Sales by Match.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73418504040","text":"def draw_line(screen, w, x1, x2, y):\n base = w // 8 * y\n start = x1 // 8\n end = x2 // 8\n start_mask = 0xff >> x1 % 8\n end_mask = (-1 << (8 - (x2 % 8 + 1))) & 0xff\n for i in range(w // 8):\n j = base + i\n if i < start or i > end:\n screen[j] = 0\n elif i > start and i < end:\n screen[j] = 0xff\n else:\n if start == end:\n screen[j] = start_mask & end_mask\n else:\n if i == start:\n screen[j] = start_mask\n else:\n screen[j] = end_mask\n return screen\n\nw = 40\nh = 5\nscreen = [0] * (w // 8 * h)\nprint(draw_line(screen, w, 3, 5, 0))\n\n\n","repo_name":"derek-dkliu/pydsa","sub_path":"bits/draw_line.py","file_name":"draw_line.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2546362346","text":"#!/usr/bin/env python2\n\nfrom __future__ import print_function\nimport subprocess, os, re, sys\nimport json, shutil\nimport argparse\n\nimport Deuterocol1\n\nclass Paragraph(object):\n\tdef __init__(self, tmdatadir, d1dir, outdir, pdblist, force=False, bundle=4, loopless=False, min_tms=0):\n\t\tself.tmdatadir = tmdatadir\n\t\tself.d1dir = d1dir\n\t\tself.outdir = outdir\n\t\tself.force = force\n\t\tself.bundle = bundle\n\t\tself.loopless = False\n\t\tself.min_tms = min_tms\n\n\t\tself.fams1, self.fams2 = pdblist\n\t\tself.fam1 = sorted(self.fams1)[0]\n\t\tself.fam2 = sorted(self.fams2)[0]\n\n\t\tself.tcmap = {}\n\n\t\twith open('{}/termini.json'.format(self.d1dir)) as fh: self.termini = json.load(fh)\n\t\t#self.termini = {}\n\t\t#for pdb in termini:\n\t\t#\tself.termini[pdb.decode('utf-8')] = {}\n\t\t#\tfor chain in termini[pdb]:\n\t\t#\t\tself.termini[pdb.decode('utf-8')][chain.decode('utf-8')] = termini[pdb][chain]\n\n\t@staticmethod\n\tdef load_d2(d2obj, pdblist, prefix=None):\n\n\t\tselfobj = Paragraph(tmdatadir=d2obj.tmdatadir, d1dir=d2obj.d1dir, outdir=d2obj.outdir, pdblist=pdblist, force=d2obj.force, bundle=d2obj.bundle, loopless=d2obj.loopless)\n\n\t\t#selfobj.fams1, selfobj.fams2 = pdblist\n\t\t\n\t\tselfobj.outdir = '{}/{}_vs_{}'.format(selfobj.outdir, selfobj.fam1, selfobj.fam2)\n\n\t\treturn selfobj\n\n\tdef initialize_dir(self):\n\t\tif not os.path.isdir(self.outdir): os.mkdir(self.outdir)\n\n\t\t#for subdir in ('config', 'html', 'pdbs', 'sequences', 'superpositions'):\n\t\tfor subdir in ('config', 'superpositions'):\n\t\t\tif not os.path.isdir('{}/{}'.format(self.outdir, subdir)): \n\t\t\t\tos.mkdir('{}/{}'.format(self.outdir, subdir))\n\t\tif self.force or not os.path.isfile('{}/config/command_line'.format(self.outdir)):\n\t\t\twith open('{}/config/command_line'.format(self.outdir), 'w') as f: 
\n\t\t\t\tf.write(str(sys.argv))\n\n\t\tif self.force or not os.path.isfile('{}/config/align_me.json'.format(self.outdir)):\n\t\t\tDeuterocol1.info('Writing align_me.json')\n\t\t\twith open('{}/config/align_me.json'.format(self.outdir), 'w') as f: \n\t\t\t\tf.write(json.dumps(self.fams1) + '\\n')\n\t\t\t\tf.write(json.dumps(self.fams2) + '\\n')\n\n\t\tself.tcmap = self.get_tcmap()\n\t\tif self.force or not os.path.isfile('{}/config/tcmap.json'.format(self.outdir)):\n\t\t\tDeuterocol1.info('Writing tcmap.json')\n\t\t\twith open('{}/config/tcmap.json'.format(self.outdir), 'w') as f: f.write(json.dumps(self.tcmap))\n\n\t\tindices = {}\n\t\tif self.force or not os.path.isfile('{}/config/indices.json'.format(self.outdir)):\n\t\t\tDeuterocol1.info('Writing indices.json')\n\t\t\tg = open('{}/config/indices.json'.format(self.outdir), 'w')\n\t\t\twith open('{}/indices.json'.format(self.d1dir)) as f: \n\t\t\t\tindobj = json.loads(f.read())\n\t\t\t\t#for pdbid in sorted(indobj): indices[pdbid] = Deuterocol1.SpanCollection.parse_json(indobj[pdbid])\n\t\t\t\tfor pdbid in sorted(indobj): \n\t\t\t\t\tindices[pdbid] = Deuterocol1.SpanCollection()\n\t\t\t\t\tfor span in indobj[pdbid]: indices[pdbid].add(span)\n\t\t\t\t\tg.write('{}\\t{}\\n'.format(pdbid, indobj[pdbid]))\n\t\t\t\t\n\t\tif self.loopless: sourcedir = '{}/pdbs_loopless'.format(self.d1dir)\n\t\telse: sourcedir = '{}/pdbs'.format(self.d1dir)\n\n\t\t#FIXME: move this line to the appropriate location\n\t\tif not os.path.isdir('{}/../pdbs'.format(self.outdir)): os.mkdir('{}/../pdbs'.format(self.outdir))\n\t\tDeuterocol1.info('Copying PDBs')\n\n\t\tcopyme = set()\n\t\twith open('{}/config/align_me.json'.format(self.outdir)) as f:\n\t\t\tfor l in f:\n\t\t\t\tobj = json.loads(l)\n\t\t\t\tfor fam in obj:\n\t\t\t\t\tfor pdbid in obj[fam]:\n\t\t\t\t\t\tcopyme.add(pdbid[:4])\n\t\t#for fn in sorted(os.listdir('{}/pdbs'.format(self.d1dir))):\n\t\tfor fn in sorted(copyme):\n\t\t\tshutil.copy('{}/pdbs/{}.pdb'.format(self.d1dir, fn), '{}/../pdbs/{}.pdb'.format(self.outdir, fn))\n\n\t\tif self.force or not os.path.isfile('{}/config/agenda.json'.format(self.outdir)):\n\t\t\tDeuterocol1.info('Writing agenda.json')\n\t\t\tcommands = []\n\t\t\talready_warned = set()\n\t\t\tif self.bundle <= 0:\n\t\t\t\tfor fam1 in self.fams1:\n\t\t\t\t\tfor fam2 in self.fams2:\n\t\t\t\t\t\tfor pdb1 in self.fams1[fam1]:\n\t\t\t\t\t\t\tqstart, qend = self.termini[pdb1[:4]][pdb1[-1]]\n\t\t\t\t\t\t\tqlen = qend - qstart + 1\n\t\t\t\t\t\t\tqindices = [[qstart, qend]]\n\t\t\t\t\t\t\tqname = '{}_h1-1'.format(pdb1)\n\t\t\t\t\t\t\tfor pdb2 in self.fams2[fam2]:\n\t\t\t\t\t\t\t\tsstart, send = self.termini[pdb2[:4]][pdb2[-1]]\n\t\t\t\t\t\t\t\tslen = send - sstart + 1\n\t\t\t\t\t\t\t\tsindices = [[sstart, send]]\n\t\t\t\t\t\t\t\tsname = '{}_h1-1'.format(pdb2)\n\t\t\t\t\t\t\t\tcommands.append({\n\t\t\t\t\t\t\t\t\t'name':'{}_vs_{}'.format(qname, sname),\n\t\t\t\t\t\t\t\t\t'query':pdb1[:4],\n\t\t\t\t\t\t\t\t\t'subject':pdb2[:4],\n\t\t\t\t\t\t\t\t\t'qhelices':[0, 0],\n\t\t\t\t\t\t\t\t\t'shelices':[0, 0],\n\t\t\t\t\t\t\t\t\t'qindices':qindices,\n\t\t\t\t\t\t\t\t\t'sindices':sindices,\n\t\t\t\t\t\t\t\t\t'qlen':qlen,\n\t\t\t\t\t\t\t\t\t'slen':slen,\n\t\t\t\t\t\t\t\t\t'bundle':self.bundle,\n\t\t\t\t\t\t\t\t\t'qchain':pdb1[-1],\n\t\t\t\t\t\t\t\t\t'schain':pdb2[-1],\n\t\t\t\t\t\t\t\t\t'qspan':[qstart,qend],\n\t\t\t\t\t\t\t\t\t'sspan':[sstart,send],\n\t\t\t\t\t\t\t\t})\n\t\t\telse:\n\t\t\t\tfor fam1 in self.fams1:\n\t\t\t\t\tfor fam2 in self.fams2:\n\t\t\t\t\t\tfor pdb1 in 
self.fams1[fam1]:\n\t\t\t\t\t\t\ttry: indices[pdb1]\n\t\t\t\t\t\t\texcept KeyError: continue\n\n\t\t\t\t\t\t\tif len(indices[pdb1]) < self.min_tms: \n\t\t\t\t\t\t\t\tif pdb1 not in already_warned:\n\t\t\t\t\t\t\t\t\tDeuterocol1.warn('Not enough TMSs for {}: {} < {}'.format(pdb1, len(indices[pdb1]), self.min_tms))\n\t\t\t\t\t\t\t\t\talready_warned.add(pdb1)\n\t\t\t\t\t\t\t\tcontinue\n\n\n\t\t\t\t\t\t\tfor pdb2 in self.fams2[fam2]:\n\t\t\t\t\t\t\t\ttry: indices[pdb2]\n\t\t\t\t\t\t\t\texcept KeyError: continue\n\n\t\t\t\t\t\t\t\tif len(indices[pdb2]) < self.min_tms: \n\t\t\t\t\t\t\t\t\tif pdb2 not in already_warned:\n\t\t\t\t\t\t\t\t\t\tDeuterocol1.warn('Not enough TMSs for {}: {} < {}'.format(pdb2, len(indices[pdb2]), self.min_tms))\n\t\t\t\t\t\t\t\t\t\talready_warned.add(pdb2)\n\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t\t#XXX not sure if this works properly for short proteins\n\t\t\t\t\t\t\t\tif len(indices[pdb1]) <= self.bundle: endtms1 = 1\n\t\t\t\t\t\t\t\telse: endtms1 = len(indices[pdb1]) - self.bundle + 1\n\t\t\t\t\t\t\t\tfor bundle1 in range(0, endtms1):\n\t\t\t\t\t\t\t\t\tif len(indices[pdb2]) <= self.bundle: endtms2 = 1\n\t\t\t\t\t\t\t\t\telse: endtms2 = len(indices[pdb2]) - self.bundle + 1\n\t\t\t\t\t\t\t\t\tfor bundle2 in range(0, endtms2):\n\t\t\t\t\t\t\t\t\t\t#print(fam1, pdb1, '{}-{}'.format(bundle1+1, bundle1+self.bundle-1+1), pdb2, '{}-{}'.format(bundle2+1, bundle2+self.bundle-1+1), fam2)\n\t\t\t\t\t\t\t\t\t\tif len(indices[pdb1]):\n\n\t\t\t\t\t\t\t\t\t\t\tqstart = indices[pdb1][bundle1].start\n\t\t\t\t\t\t\t\t\t\t\tif bundle1 + self.bundle - 1 >= len(indices[pdb1]): qend = indices[pdb1][-1].end\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\telse: qend = indices[pdb1][bundle1 + self.bundle - 1].end\n\t\t\t\t\t\t\t\t\t\t\tqindices = indices[pdb1][bundle1:bundle1+self.bundle].to_rawlist()\n\t\t\t\t\t\t\t\t\t\t\tqlen = indices[pdb1][bundle1:bundle1+self.bundle].residue_count()\n\t\t\t\t\t\t\t\t\t\t\tqname = '{}_h{}-{}'.format(pdb1, bundle1+1, min(len(indices[pdb1]), bundle1+self.bundle-1+1))\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tqstart, qend = self.termini[pdb1[:4]][pdb1[-1]]\n\t\t\t\t\t\t\t\t\t\t\tqlen = qend - qstart + 1\n\t\t\t\t\t\t\t\t\t\t\tqindices = range(qstart, qend+1)\n\t\t\t\t\t\t\t\t\t\t\tqname = '{}_h1-1'.format(pdb1)\n\t\t\t\t\t\t\t\t\t\tif len(indices[pdb2]):\n\n\t\t\t\t\t\t\t\t\t\t\tsstart = indices[pdb2][bundle2].start\n\t\t\t\t\t\t\t\t\t\t\tif bundle2 + self.bundle - 1 >= len(indices[pdb2]): send = indices[pdb2][-1].end\n\t\t\t\t\t\t\t\t\t\t\telse: send = indices[pdb2][bundle2 + self.bundle - 1].end\n\n\t\t\t\t\t\t\t\t\t\t\tsindices = indices[pdb2][bundle2:bundle2+self.bundle].to_rawlist()\n\t\t\t\t\t\t\t\t\t\t\tslen = indices[pdb2][bundle2:bundle2+self.bundle].residue_count()\n\t\t\t\t\t\t\t\t\t\t\tsname = '{}_h{}-{}'.format(pdb2, bundle2+1, min(len(indices[pdb2]), bundle1+self.bundle-1+1))\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tsstart, send = self.termini[pdb2[:4]][pdb2[-1]]\n\t\t\t\t\t\t\t\t\t\t\tslen = send - sstart + 1\n\t\t\t\t\t\t\t\t\t\t\tsindices = range(sstart, send+1)\n\t\t\t\t\t\t\t\t\t\t\tsname = '{}_h1-1'.format(pdb2)\n\n\t\t\t\t\t\t\t\t\t\tcommands.append({'name':'{}_vs_{}'.format( \\\n\t\t\t\t\t\t\t\t\t\t\t\tqname, sname\n\t\t\t\t\t\t\t\t\t\t\t), \\\n\t\t\t\t\t\t\t\t\t\t\t'query': pdb1[:4], \\\n\t\t\t\t\t\t\t\t\t\t\t'subject': pdb2[:4], \\\n\t\t\t\t\t\t\t\t\t\t\t#'qhelices': list(range(bundle1+1, bundle1+1+self.bundle)), \\\n\t\t\t\t\t\t\t\t\t\t\t#'shelices': list(range(bundle2+1, bundle2+1+self.bundle)), 
\\\n\t\t\t\t\t\t\t\t\t\t\t'qhelices': [bundle1+1, min(len(indices[pdb1]), bundle1+self.bundle)], \\\n\t\t\t\t\t\t\t\t\t\t\t'shelices': [bundle2+1, min(len(indices[pdb2]), bundle2+self.bundle)], \\\n\t\t\t\t\t\t\t\t\t\t\t'qindices': qindices, \\\n\t\t\t\t\t\t\t\t\t\t\t'sindices': sindices, \\\n\t\t\t\t\t\t\t\t\t\t\t'qlen': qlen, \\\n\t\t\t\t\t\t\t\t\t\t\t'slen': slen, \\\n\t\t\t\t\t\t\t\t\t\t\t#'qfam': fam1, \\\n\t\t\t\t\t\t\t\t\t\t\t#'sfam': fam2, \\\n\t\t\t\t\t\t\t\t\t\t\t'bundle': self.bundle, \\\n\t\t\t\t\t\t\t\t\t\t\t#'loopless': int(self.loopless), \\\n\t\t\t\t\t\t\t\t\t\t\t'qchain': pdb1[-1], \\\n\t\t\t\t\t\t\t\t\t\t\t'schain': pdb2[-1], \\\n\t\t\t\t\t\t\t\t\t\t\t'qspan': [qstart, qend], \\\n\t\t\t\t\t\t\t\t\t\t\t'sspan': [sstart, send], \\\n\t\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t#print(commands[-1]['name'], fam1, fam2, pdb1, commands[-1]['qchain'], pdb2, commands[-1]['schain'])\n\t\t\twith open('{}/config/agenda.json'.format(self.outdir), 'w') as f: \n\t\t\t\tfor l in commands: f.write(json.dumps(l) + '\\n')\n\n\tdef get_tcmap(self):\n\t\ttcmap = {}\n\t\tpdbs = set()\n\t\ttcids = set()\n\t\t#for l in pdblist:\n\t\t#\tfor fam in l:\n\t\t#\t\ttcids.add(fam)\n\t\t#\t\tfor pdb in l[fam]: pdbs.add(pdb)\n\t\tpdblist = {self.fam1:self.fams1[self.fam1], self.fam2:self.fams2[self.fam2]}\n\t\tfor fam in pdblist: \n\t\t\ttcids.add(fam)\n\t\t\tfor pdb in pdblist[fam]: pdbs.add(pdb)\n\t\tpdbs = sorted(pdbs)\n\t\ttcids = sorted(tcids)\n\t\twith open('{}/tcmap.tsv'.format(self.tmdatadir)) as f:\n\t\t\tfor l in f:\n\t\t\t\tif not l.strip(): continue\n\t\t\t\telif l.startswith('#'): continue\n\t\t\t\telse:\n\t\t\t\t\tfor fam in tcids:\n\t\t\t\t\t\t#check if possibly relevant\n\t\t\t\t\t\tif fam in l and Deuterocol1.TCID.parse_str(l.split()[0]) in Deuterocol1.TCID.parse_str(fam):\n\t\t\t\t\t\t\t#see which PDB it is\n\t\t\t\t\t\t\tfor pdb in pdbs:\n\t\t\t\t\t\t\t\tif pdb in l: \n\t\t\t\t\t\t\t\t\ttcmap[pdb] = l.split()[0]\n\t\treturn tcmap\n\nclass Deuterocol2(object):\n\tdef __init__(self, tmdatadir, d1dir, outdir, force=False, bundle=4, loopless=False, allow_internal=False, min_tms=3):\n\t\tself.tmdatadir = tmdatadir\n\t\tself.d1dir = d1dir\n\t\tself.outdir = outdir\n\t\tself.loopless = loopless\n\t\tself.allow_internal = allow_internal\n\t\tself.min_tms = min_tms\n\n\t\tself.force = force\n\n\t\tself.bundle = bundle\n\n\tdef run(self, famlist1, famlist2):\n\t\tDeuterocol1.info('Retrieving structures...')\n\t\tif famlist2 == ['auto']:\n\t\t\tfamlist2 = []\n\t\t\twith open('{}/pdblist.json'.format(self.d1dir)) as f:\n\t\t\t\tobj = json.loads(f.read())\n\t\t\t\tfor tcid in obj:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfor fam in famlist1:\n\t\t\t\t\t\tif Deuterocol1.TCID.parse_str(tcid) in Deuterocol1.TCID.parse_str(fam): \n\t\t\t\t\t\t\tfound = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif not found: break\n\t\t\t\t\tif found: famlist2.append(tcid)\n\t\tpdblist = []\n\t\tfor famlist in (famlist1, famlist2):\n\t\t\tpdblist.append({})\n\t\t\twith open('{}/pdblist.json'.format(self.d1dir)) as f:\n\t\t\t\tpdblistobj = json.loads(f.read())\n\t\t\t\tfor tcid in pdblistobj: \n\t\t\t\t\tttcid = Deuterocol1.TCID.parse_str(tcid)\n\t\t\t\t\tfor fam in famlist:\n\t\t\t\t\t\tqtcid = Deuterocol1.TCID.parse_str(fam)\n\t\t\t\t\t\tif ttcid in qtcid:\n\t\t\t\t\t\t\ttry: pdblist[-1][fam] += pdblistobj[tcid]\n\t\t\t\t\t\t\texcept KeyError: pdblist[-1][fam] = pdblistobj[tcid]\n\t\t\t\t#for l in f:\n\t\t\t\t#\tsl = l.split('\\t')\n\t\t\t\t#\tttcid = Deuterocol1.TCID.parse_str(sl[0])\n\t\t\t\t#\tfor fam 
in famlist:\n\t\t\t\t#\t\tqtcid = Deuterocol1.TCID.parse_str(fam)\n\t\t\t\t#\t\tif ttcid in qtcid: \n\t\t\t\t#\t\t\ttry: pdblist[-1][fam] += sl[1].strip().split(',')\n\t\t\t\t#\t\t\texcept KeyError: pdblist[-1][fam] = sl[1].strip().split(',')\n\t\t\n\t\tif not os.path.isdir(self.outdir): os.mkdir(self.outdir)\n\n\t\tDeuterocol1.info('Recording intended alignments...')\n\t\tdone = []\n\t\tfor fam1 in sorted(pdblist[0]):\n\t\t\tfor fam2 in sorted(pdblist[1]):\n\t\t\t\tif (fam1, fam2) in done: continue\n\t\t\t\telse: \n\t\t\t\t\tdone.append((fam1,fam2))\n\t\t\t\t\tdone.append((fam2,fam1))\n\t\t\t\tif not self.allow_internal and fam1 == fam2: continue\n\t\t\t\tfam2pdb = [{fam1:pdblist[0][fam1]}, {fam2:pdblist[1][fam2]}]\n\t\t\t\tx = Paragraph.load_d2(d2obj=self, pdblist=fam2pdb)\n\t\t\t\tx.initialize_dir()\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('-l', type=int, default=None, help='Bundle size')\n\n\tparser.add_argument('--fams1', nargs='+', help='First list of families')\n\tparser.add_argument('--fams2', nargs='+', help='Second list of families')\n\n\tparser.add_argument('--d1dir', default='deuterocol1', help='Directory containing Deuterocol1 output')\n\n\tparser.add_argument('--loopless', action='store_true', help='Trim off loops')\n\n\tparser.add_argument('--tmdatadir', default='tmdata', help='Directory containing TM-prediction data and PDBs')\n\tparser.add_argument('--outdir', default='deuterocol2', help='Directory intended to contain Deuterocol2 output')\n\tparser.add_argument('--allow-internal', action='store_true', help='Allow self-vs-self comparisons')\n\n\targs = parser.parse_args()\n\n\tif not (args.fams1 and args.fams2): \n\t\tparser.print_usage()\n\t\texit(1)\n\n\telif args.l is None:\n\t\t#this is another thing that should be moved to deuterocol_common\n\t\tDeuterocol1.error('No bundle length specified')\n\t\tparser.print_usage()\n\t\texit(1)\n\n\tdeut = Deuterocol2(tmdatadir=args.tmdatadir, d1dir=args.d1dir, outdir=args.outdir, bundle=args.l, loopless=args.loopless, allow_internal=args.allow_internal)\n\tdeut.run(args.fams1, args.fams2)\n","repo_name":"SaierLaboratory/TCDBtools","sub_path":"bin/Deuterocol2.py","file_name":"Deuterocol2.py","file_ext":"py","file_size_in_byte":12707,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"32898280711","text":"#!/usr/bin/python3.8\n\nimport os\nimport requests\nimport argparse\nimport queue\nimport threading\nimport logging\n\n\n\nDOWNLOAD_THREADS = 6\nIO_THREADS = 2\n\ndownload_queue = queue.Queue()\nwrite_queue = queue.Queue()\n\nfiles_lock = threading.Lock()\n\ndef download_worker(download_queue, write_queue, url, chunk_size):\n finished = False\n logging.info(\"Download thread started\")\n while not finished:\n chunk_offset = download_queue.get()\n if chunk_offset is not None:\n chunk_end = chunk_offset + chunk_size - 1\n logging.debug(f\"GET: {url} - {chunk_offset}-{chunk_end}\")\n resume_headers = {}\n if chunk_size != 0:\n resume_headers = {\"Range\":f\"bytes={chunk_offset}-{chunk_end}\"}\n r = requests.get(url, stream=True, headers=resume_headers)\n chunk = r.content\n write_queue.put((chunk_offset, chunk_size, chunk))\n else:\n finished = True\n download_queue.task_done()\n\ndef io_worker(write_queue, file_size, file_chunk_size, files):\n finished = False\n while not finished:\n task = write_queue.get()\n if task is not None:\n chunk_offset, chunk_size, chunk = task\n file_start = file_chunk_size * (chunk_offset // 
file_chunk_size) # Calculate which file the received chunk belongs to\n file_offset = chunk_offset - file_start # Calculate where in the file to put the chunk\n \n filename = f\"{file_start}.dat\"\n\n logging.debug(f\"File: {filename}. {chunk_offset} -> {file_start}+{file_offset}\")\n\n if file_offset + chunk_size > file_chunk_size: # If not all of the chunk fits in current file, put rest back for later\n \n overflow = file_offset + chunk_size - file_chunk_size\n new_offset = chunk_offset + file_chunk_size - file_offset\n\n logging.debug(f\"File: {filename}. Splitting {overflow} bytes to next file at {new_offset} \")\n write_queue.put((new_offset, overflow, chunk[-overflow:]))\n chunk = chunk[:-overflow]\n chunk_size = file_chunk_size - file_offset\n with files_lock:\n with files[file_start]:\n if os.path.isfile(filename):\n with open(filename, \"r+b\") as f:\n f.seek(file_offset)\n f.write(chunk)\n else:\n with open(filename, \"wb\") as f:\n logging.debug(f\"Starting new file {filename}\")\n if file_size < file_start + file_chunk_size:\n newfile = file_size - file_start\n else: \n newfile = file_chunk_size\n f.truncate(newfile)\n f.seek(file_offset)\n f.write(chunk)\n else:\n finished = True\n write_queue.task_done()\n\n\n\ndef main(url, http_chunk, file_chunk):\n logging.basicConfig(level=logging.DEBUG, format='%(relativeCreated)6d %(threadName)s %(message)s')\n\n r = requests.head(url, allow_redirects=True)\n if not r.ok:\n logging.error(f\"Server returned {r.status_code}, aborting\")\n return\n size = int(r.headers.get(\"Content-Length\", -1))\n ranges = r.headers.get(\"Accept-Ranges\",\"none\")\n if ranges.lower() == \"none\":\n logging.error(\"Remote server doesn't support download ranges, aborting\")\n return\n \n http_chunks = [i * http_chunk for i in range( 1 + size // http_chunk)]\n\n file_chunks = {i * file_chunk : threading.Lock() for i in range( 1 + size // file_chunk)}\n \n logging.debug(f\"{size}: {http_chunks}\")\n for chunk_start in http_chunks:\n download_queue.put(chunk_start)\n\n download_workers = []\n io_workers = []\n\n for thread_no in range(DOWNLOAD_THREADS):\n t = threading.Thread(target=download_worker, kwargs={ \"download_queue\": download_queue,\n \"write_queue\": write_queue,\n \"url\": url,\n \"chunk_size\": http_chunk})\n t.start()\n download_workers.append(t)\n download_queue.put(None) \n\n for thread_no in range(IO_THREADS):\n t = threading.Thread(target=io_worker, kwargs={ \"write_queue\": write_queue,\n \"file_size\": size,\n \"file_chunk_size\": file_chunk, \n \"files\": file_chunks},\n daemon=True)\n t.start()\n io_workers.append(t)\n\n download_queue.join()\n write_queue.join()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"url\", help=\"URL to download\")\n parser.add_argument(\"http_chunk\", help=\"Size (in bytes) of download chunk\", type=int)\n parser.add_argument(\"file_chunk\", help=\"Size (in bytes) of output file chunk\", type=int)\n args = parser.parse_args()\n main(args.url, args.http_chunk, args.file_chunk)\n\n","repo_name":"MikrySoft/SimpleHTTPDownloader","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30422162535","text":"import requests\n\nurl = 'http://75.119.149.11:30000/Auth/Login'\nurlProgram = 'http://75.119.149.11:30002/Atencion/AgregarProgramacion'\n\n\ndef login(data):\n response = requests.post(url, json=data)\n if 
response.status_code == 200:\n return response\n else:\n return None\n\n\ndef programar(headers, data):\n response = requests.post(urlProgram, headers=headers, json=data)\n return response","repo_name":"u201720528/morelo-dashboard","sub_path":"services/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74513676520","text":"ip_address = 'localhost' # Enter your IP Address here\r\nproject_identifier = 'P3B' # Enter the project identifier i.e. P3A or P3B\r\n\r\n# SERVO TABLE CONFIGURATION\r\nshort_tower_angle = 315 # enter the value in degrees for the identification tower \r\ntall_tower_angle = 90 # enter the value in degrees for the classification tower\r\ndrop_tube_angle = 180 # enter the value in degrees for the drop tube. clockwise rotation from zero degrees\r\n\r\n# BIN CONFIGURATION\r\n# Configuration for the colors for the bins and the lines leading to those bins.\r\n# Note: The line leading up to the bin will be the same color as the bin \r\n\r\nbin1_offset = 0.15 # offset in meters\r\nbin1_color = [1,0,0] # e.g. [1,0,0] for red\r\nbin1_metallic = False\r\n\r\nbin2_offset = 0.15\r\nbin2_color = [0,1,0]\r\nbin2_metallic = False\r\n\r\nbin3_offset = 0.15\r\nbin3_color = [0,0,1]\r\nbin3_metallic = False\r\n\r\nbin4_offset = 0.15\r\nbin4_color = [0,1,1]\r\nbin4_metallic = False\r\n#--------------------------------------------------------------------------------\r\nimport sys\r\nsys.path.append('../')\r\nfrom Common.simulation_project_library import *\r\n\r\nhardware = False\r\nif project_identifier == 'P3A':\r\n table_configuration = [short_tower_angle,tall_tower_angle,drop_tube_angle]\r\n configuration_information = [table_configuration, None] # Configuring just the table\r\n QLabs = configure_environment(project_identifier, ip_address, hardware,configuration_information).QLabs\r\n table = servo_table(ip_address,QLabs,table_configuration,hardware)\r\n arm = qarm(project_identifier,ip_address,QLabs,hardware)\r\nelse:\r\n table_configuration = [short_tower_angle,tall_tower_angle,drop_tube_angle]\r\n bin_configuration = [[bin1_offset,bin2_offset,bin3_offset,bin4_offset],[bin1_color,bin2_color,bin3_color,bin4_color],[bin1_metallic,bin2_metallic, bin3_metallic,bin4_metallic]]\r\n configuration_information = [table_configuration, bin_configuration]\r\n QLabs = configure_environment(project_identifier, ip_address, hardware,configuration_information).QLabs\r\n table = servo_table(ip_address,QLabs,table_configuration,hardware)\r\n arm = qarm(project_identifier,ip_address,QLabs,hardware)\r\n bot = qbot(0.1,ip_address,QLabs,project_identifier,hardware)\r\n\r\n#NOTE! 
Please ensure all settings in Quanser related to shadow and reflections are turned down to minimum\r\n#Ensure FPS setting is turned up so that it is running between 50-60 FPS (We run it at 120FPS on VMWare and get ~55-60FPS\r\n#Physics in Quanser are directly tied to framerate so if it is not running at an ideal frame rate bottles may fall off or Q-Bot may leave line\r\n\r\ndef drop_off(): #function which empties the hopper\r\n bot.activate_linear_actuator()\r\n bot.rotate_hopper(10)\r\n time.sleep(1)\r\n bot.rotate_hopper(20)\r\n time.sleep(1)\r\n bot.rotate_hopper(40)\r\n time.sleep(1)\r\n bot.rotate_hopper(70)\r\n time.sleep(1)\r\n bot.rotate_hopper(90)\r\n time.sleep(1)\r\n bot.rotate_hopper(0)\r\n print(\"Dropoff completed\")\r\n\r\ndef get_to_bin(): #function which moves in front of bin after colour detection\r\n\r\n if bin_location == \"Bin01\": \r\n bot.forward_distance(0.20)\r\n time.sleep(0.5)\r\n bot.rotate(-30)\r\n time.sleep(0.5) #moves bot around curve to bin if on edge\r\n bot.forward_distance(0.1)\r\n elif bin_location == \"Bin03\":\r\n bot.forward_distance(0.25)\r\n time.sleep(0.5)\r\n bot.rotate(-15)\r\n time.sleep(0.5) #moves bot around curve to bin if on edge\r\n bot.forward_distance(0.1)\r\n elif bin_location == \"Bin04\":\r\n bot.forward_distance(0.2) #moves bot straight in front of bin \r\n else: \r\n bot.forward_distance(0.3) #moves bot straight in front of bin otherwise\r\n bot.rotate(-5)\r\n print(\"We have arrived at the bin\")\r\n \r\ndef container_position(): #loads hopper with containers by Q-Arm \r\n arm.move_arm(0.658, 0.0, 0.283)\r\n time.sleep(1)\r\n arm.control_gripper(40)\r\n time.sleep(1)\r\n arm.move_arm(0.015, -0.289, 0.679)\r\n time.sleep(1)\r\n arm.move_arm(0.018, -0.495, 0.508)\r\n time.sleep(1)\r\n arm.control_gripper(-40)\r\n time.sleep(1)\r\n arm.home()\r\n\r\ndef moveit(): #function which follows line\r\n line_location = bot.line_following_sensors() #reads the value of the current location on the line\r\n\r\n if line_location[1] == 1: #if right sensor detects line (i.e. 
on straight path)\r\n bot.set_wheel_speed([0.1, 0.1])\r\n\r\n elif line_location[1] != 1: #turns when right sensor loses line\r\n bot.set_wheel_speed([0.1, 0.2])\r\n\r\ndef dispense_container(): #randomly dispenses container and returns its properties\r\n container_ID = random.randint(1,6) #randomly picks a container identity\r\n attributes = table.dispense_container(container_ID, True)\r\n\r\n return attributes\r\n\r\ndef load_container():\r\n\r\n global bin_location, binID\r\n\r\n checker = True #controls the iterations of while loop\r\n\r\n if len(container_list) >= 1: #checks for container left on servo table\r\n print(\"something is on the table :)\")\r\n count = 1\r\n print(\"Attributes of the container are \", container_list)\r\n bin_location = container_list[0][2]\r\n total_mass = container_list[0][1]\r\n container_position()\r\n print(\"The total mass on the Q-Bot is \", total_mass)\r\n time.sleep(2)\r\n\r\n del container_list[0] #delete container attributes once moved off of table\r\n time.sleep(2)\r\n\r\n while count < 3 and checker == True: #keep dispensing containers while conditions aren't met\r\n attributes = dispense_container()\r\n container_list.append(attributes) #add leftover container attributes to list\r\n binID = attributes[2]\r\n mass = attributes[1]\r\n time.sleep(1)\r\n print(\"Attributes of the container are \", attributes)\r\n total_mass += mass\r\n \r\n\r\n if binID == bin_location and total_mass < 90: #check if container is appropriate to be added\r\n container_position()\r\n count += 1\r\n del container_list[0] #delete attributes from list\r\n print(\"The total mass on the Q-Bot is \", total_mass)\r\n \r\n\r\n else:\r\n checker = False #end loop if conditions aren't met\r\n print(\"ready to move\")\r\n\r\n \r\n \r\n\r\n else: #if there is no container on servo table\r\n attributes = dispense_container()\r\n print(\"Attributes of the container are \", attributes)\r\n time.sleep(1)\r\n bin_location = attributes[2]\r\n total_mass = attributes[1]\r\n container_position()\r\n print(\"The total mass on the Q-Bot is \", total_mass)\r\n count = 1\r\n time.sleep(1)\r\n\r\n while count < 3 and checker == True: #Meets the first constraint\r\n attributes = dispense_container()\r\n print(\"Attributes of the container are \", attributes)\r\n container_list.append(attributes)\r\n binID = attributes[2]\r\n mass = attributes[1]\r\n time.sleep(1)\r\n total_mass += mass\r\n \r\n\r\n if binID == bin_location and total_mass < 90: #meets the second and third constraint and the container is added to the hopper\r\n container_position()\r\n count += 1\r\n print(\"The total mass on the Q-Bot is \", total_mass)\r\n del container_list[0] #delete attributes from list\r\n\r\n else: \r\n checker = False\r\n print(\"Ready to move\")\r\n\r\ndef transfer_container(binID):\r\n bot.activate_line_following_sensor()\r\n bot.activate_color_sensor()\r\n colour_reading = bot.read_color_sensor()[0] #reads the current color calues\r\n\r\n if bin_location == \"Bin01\": #convert binID to RGB values\r\n binID = [1, 0, 0]\r\n\r\n elif bin_location == \"Bin02\":\r\n binID = [0, 1, 0]\r\n\r\n elif bin_location == \"Bin03\":\r\n binID = [0, 0, 1]\r\n\r\n elif bin_location == \"Bin04\":\r\n binID = [0, 1, 1]\r\n\r\n while binID != colour_reading: #move along line until colour is sensed\r\n colour_reading = bot.read_color_sensor()[0]\r\n moveit() \r\n\r\n bot.stop()\r\n bot.deactivate_line_following_sensor()\r\n bot.deactivate_color_sensor()\r\n get_to_bin()\r\n drop_off()\r\n\r\ndef get_position(): #detect 
position of Q-Bot\r\n x = bot.position()[0]\r\n y = bot.position()[1]\r\n\r\n if 1.5>x>1.4 and 0.02>y>-0.02: #range of values corresponding to home position\r\n\r\n return False\r\n\r\n else:\r\n\r\n return True\r\n \r\n \r\ndef bot_home(): \r\n bot.activate_line_following_sensor()\r\n bot.activate_color_sensor()\r\n\r\n while get_position(): #follow line while position is not home\r\n colour_reading = bot.read_color_sensor()[0] #reads the current color calues\r\n line_location = bot.line_following_sensors() #reads the value of the current location on the line\r\n moveit()\r\n\r\n if not get_position(): #stop once home\r\n bot.stop()\r\n bot.deactivate_line_following_sensor()\r\n bot.deactivate_color_sensor()\r\n print(\"Welcome back home Mr. Bot\")\r\n\r\ncontainer_list = [] #list of containers on table with attributes\r\n\r\nwhile True: \r\n load_container()\r\n transfer_container(binID)\r\n bot_dot_go_home_please()\r\n\r\n","repo_name":"cartersankoff/recyclable-sorter","sub_path":"Recyclable_Sorter_Virtual.py","file_name":"Recyclable_Sorter_Virtual.py","file_ext":"py","file_size_in_byte":9305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23607469863","text":"import pygame\r\nimport os\r\nimport random\r\npygame.init()\r\n\r\n#window size\r\nwidth = height = 700\r\n\r\n\r\nwin = pygame.display.set_mode((width,height))\r\npygame.display.set_caption(\"Space Invaders\")\r\n#images\r\n#ships\r\nblue_ship = pygame.image.load(os.path.join(\"assets\",\"pixel_ship_blue_small.png\"))\r\ngreen_ship = pygame.image.load(os.path.join(\"assets\",\"pixel_ship_green_small.png\"))\r\nred_ship = pygame.image.load(os.path.join(\"assets\",\"pixel_ship_red_small.png\"))\r\nyellow_ship = pygame.image.load(os.path.join(\"assets\",\"pixel_ship_yellow.png\"))\r\n#laser\r\nblue_laser = pygame.image.load(os.path.join(\"assets\",\"pixel_laser_blue.png\"))\r\ngreen_laser = pygame.image.load(os.path.join(\"assets\",\"pixel_laser_green.png\"))\r\nred_laser = pygame.image.load(os.path.join(\"assets\",\"pixel_laser_red.png\"))\r\nyellow_laser = pygame.image.load(os.path.join(\"assets\",\"pixel_laser_yellow.png\"))\r\n#bg\r\nbg = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\",\"background-black.png\")),(width,height))\r\n\r\ndef collide(obj1,obj2):\r\n\toffset_x = int(obj2.x - obj1.x)\r\n\toffset_y = int(obj2.y - obj1.y)\r\n\treturn obj1.mask.overlap(obj2.mask,(offset_x,offset_y)) != None\r\n\r\nclass Laser:\r\n\tdef __init__(self,x,y,img):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.img = img\r\n\t\tself.mask = pygame.mask.from_surface(self.img)\r\n\tdef move(self,vel):\r\n\t\tself.y+=vel\r\n\tdef draw(self,window):\r\n\t\twindow.blit(self.img,(self.x,self.y))\t\r\n\tdef off_screen(self,height,laser):\r\n\t\treturn (self.y > height and self.y<=0)\r\n\tdef collision(self,obj2):\r\n\t\treturn collide(self,obj2)\t\r\n\t\t\t\r\n\r\nclass Ships:\r\n\tCOOLDOWN =30\r\n\tdef __init__(self,x,y,health = 100):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.ship_img = None\r\n\t\tself.laser_img = None\r\n\t\tself.health = health\r\n\t\tself.laser = []\r\n\t\tself.cool_down_counter = 0\r\n\tdef draw(self,window):\t\r\n\t\twindow.blit(self.ship_img,(self.x,self.y))\r\n\t\tfor laser in self.laser:\r\n\t\t\tlaser.draw(window)\r\n\tdef get_width(self):\r\n\t\treturn self.ship_img.get_width()\r\n\tdef get_height(self):\r\n\t\treturn self.ship_img.get_height()\r\n\tdef cooldown(self):\r\n\t\tif self.cool_down_counter >= 
self.COOLDOWN-10:\r\n\t\t\tself.cool_down_counter = 0\r\n\t\telse: \r\n\t\t\tself.cool_down_counter+=1\t\t\r\n\tdef shoot(self):\r\n\t\tif self.cool_down_counter == 0:\r\n\t\t\tlaser = Laser(self.x,self.y,self.laser_img)\r\n\t\t\tself.laser.append(laser)\r\n\t\t\tcool_down_counter = 1\r\n\tdef move_laser(self,vel,objs):\r\n\t\tself.cooldown()\r\n\t\tfor laser in self.laser:\r\n\t\t\tlaser.move(vel)\r\n\t\t\tif laser.off_screen(height,laser):\r\n\t\t\t\tself.laser.remove(laser)\r\n\t\t\telif laser.collision(objs):\r\n\t\t\t\tobjs.health -=10 \r\n\t\t\t\tself.laser.remove(laser)\t\t\r\n\r\nclass Player(Ships):\r\n\tdef __init__(self,x,y,health = 100):\r\n\t\tsuper().__init__(x,y,health)\r\n\t\tself.ship_img = yellow_ship\r\n\t\tself.laser_img = yellow_laser\r\n\t\tself.max_health = health\r\n\t\tself.mask= pygame.mask.from_surface(self.ship_img)\r\n\tdef move_laser(self,vel,objs):\r\n\t\tself.cooldown()\r\n\t\tfor laser in self.laser:\r\n\t\t\tlaser.move(vel)\r\n\t\t\tif laser.off_screen(height,laser):\r\n\t\t\t\tself.laser.remove(laser)\r\n\t\t\telse: \r\n\t\t\t\tfor obj in objs:\r\n\t\t\t\t\tif laser.collision(obj):\r\n\t\t\t\t\t\tobjs.remove(obj)\r\n\t\t\t\t\t\tself.laser.remove(laser)\r\n\tdef healthbar(self,window):\r\n\t\tpygame.draw.rect(window,(255,0,0),(self.x,self.y+self.ship_img.get_height()+10,self.ship_img.get_width(),10))\t\t\t\t\t\r\n\t\tpygame.draw.rect(window,(0,255,0),(self.x,self.y+self.ship_img.get_height()+10,self.ship_img.get_width() * (self.health/self.max_health),10))\t\t\r\n\tdef draw(self,window):\r\n\t\tsuper().draw(window)\r\n\t\tself.healthbar(window)\t\r\n\t\t\r\nclass Enemy(Ships):\r\n\tcolor_map = {\r\n\t\t\"red\" :(red_ship,red_laser),\r\n\t\t\"blue\" : (blue_ship,blue_laser),\r\n\t\t\"green\" : (green_ship,green_laser)\r\n\t}\r\n\tdef __init__(self,x,y,color,health = 100):\r\n\t\tsuper().__init__(x,y,health)\r\n\t\tself.ship_img,self.laser_img = self.color_map[color]\r\n\t\tself.mask = pygame.mask.from_surface(self.ship_img)\r\n\tdef move(self,vel):\r\n\t\tself.y += \tvel \r\n\tdef shoot(self):\r\n\t\tif self.cool_down_counter == 0:\r\n\t\t\tlaser = Laser(self.x-20,self.y,self.laser_img)\r\n\t\t\tself.laser.append(laser)\r\n\t\t\tcool_down_counter = 1\t\r\n\r\ndef main():\r\n\tFPS = 60\r\n\trun = True\r\n\tlevel = 0\r\n\tlives = 5\r\n\tplayer_vel = 5\r\n\tenemy_vel = 1\r\n\tlaser_vel = 5\r\n\twave_length = 0\r\n\tclock = pygame.time.Clock()\r\n\tmain_font = pygame.font.SysFont(\"comicsans\",50)\r\n\tplayer = Player(width/2,height/2+height/4)\r\n\tenemies = []\r\n\tdef redrawWindow():\r\n\t\twin.blit(bg,(0,0))\r\n\t\tlevel_font = main_font.render(f\"Level {level}\",1,(255,255,255))\r\n\t\twin.blit(level_font,(10,10))\r\n\t\tlives_font = main_font.render(f\"lives {lives}\",1,(255,255,255))\r\n\t\twin.blit(lives_font,(width-lives_font.get_width()-10,10))\r\n\t\tfor enemy in enemies:\r\n\t\t\tenemy.draw(win)\r\n\t\tplayer.draw(win)\r\n\t\tpygame.display.update()\r\n\tdef lost():\r\n\t\tgame_over_font = pygame.font.SysFont(\"comicsans\",100)\r\n\t\tlost_font = game_over_font.render(\"Game Over\",1,(255,255,255))\r\n\t\twin.blit(lost_font,(width/2 - lost_font.get_width()/2,height/2 - lost_font.get_height()/2))\r\n\t\tpygame.display.update()\r\n\t\tpygame.time.delay(3000)\r\n\twhile run:\r\n\t\tclock.tick(FPS)\r\n\t\tif len(enemies) == 0:\r\n\t\t\tlevel+=1\t\t\t\r\n\t\t\twave_length+=5\r\n\t\t\tfor i in range(wave_length):\r\n\t\t\t\tenemy = 
Enemy(random.randrange(10,width-100),random.randrange(-700,0),random.choice([\"red\",\"blue\",\"green\"]))\r\n\t\t\t\tenemies.append(enemy)\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\trun = False\r\n\t\tkeys = pygame.key.get_pressed()\r\n\t\tif keys[pygame.K_a] and player.x>0:#left\r\n\t\t\tplayer.x-=player_vel\r\n\t\tif keys[pygame.K_d] and player.x + player.get_width()<width:#right\r\n\t\t\tplayer.x+=player_vel\r\n\t\tif keys[pygame.K_w] and player.y>0 :#up\r\n\t\t\tplayer.y-=player_vel\t\r\n\t\tif keys[pygame.K_s] and player.y + player.get_height()+15 < height:#down\r\n\t\t\tplayer.y+=player_vel\r\n\t\tif keys[pygame.K_SPACE]:\r\n\t\t\tplayer.shoot()\t\r\n\t\tfor enemy in enemies:\r\n\t\t\tenemy.move_laser(laser_vel,player)\r\n\t\t\tenemy.move(enemy_vel)\r\n\t\t\tif random.randrange(0,60) == 1:\t\r\n\t\t\t\tenemy.shoot()\r\n\t\t\tif enemy.y +enemy.get_height()> height:\r\n\t\t\t\tlives-=1\r\n\t\t\t\tenemies.remove(enemy)\r\n\t\t\telif collide(enemy,player):\r\n\t\t\t\tplayer.health -=30\r\n\t\t\t\tenemies.remove(enemy)\t\t\r\n\t\tplayer.move_laser(-laser_vel,enemies)\t\t\r\n\t\tredrawWindow()\r\n\t\tif lives == 0 or player.health <=0:\r\n\t\t\tlost()\r\n\t\t\tmain()\r\n\t\t\trun = False\t\r\n\t\t\t\r\ndef main_menu():\r\n\tdef draw_text_middle(text,color,size,win):\r\n\t\tfont = pygame.font.SysFont('comicsans',size)\r\n\t\tlabel = font.render(text,1,color)\r\n\t\twin.blit(label,(width /2 -(label.get_width()/2),height/2 - label.get_height()/2))\r\n\trun = True\r\n\twhile run:\r\n\t\twin.fill((0,0,0))\r\n\t\tdraw_text_middle(\"Press any key to start...\",(255,255,255),60,win)\r\n\t\tpygame.display.update()\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\trun = False\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\tmain()\r\nmain_menu()\t\t\t\t","repo_name":"18harsh/Space-Invaders-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39714885889","text":"# This Code not use html_table_parser.\n# It's just Fucking CODE!!!!\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nfrom make_csv import write_csv\n\nos.system('clear')\n\ntemp_line = ''\ntemp_location1 = ''\ntemp_location2 = ''\n\n\ndef line_have_two_name(items):\n    number = items[0].get_text()\n    name = items[1].get_text()\n    english_name = items[2].get_text()\n    hanja_name = items[3].get_text()\n    return number, name, english_name, hanja_name\n\n\ndef scrape_station(row, page_name, check_line, check_note):\n    global temp_line, temp_location1, temp_location2\n    check_page_name = page_name\n    # except_name1 : 소재지에서 구만 하나로 분리 되어 있는 것\n    # except_name2 : 소재지에서 구와 시 모두 하나로 분리 되어 있는 것 또는 시가 맨 처음인 경우 + 구가 분리\n    except_name1 = {\n        \"231\": \"서울_지하철_2호선\",\n        \"201\": \"서울_지하철_2호선\",\n        \"234-4\": \"서울_지하철_2호선\",\n        \"243\": \"서울_지하철_2호선\",\n        \"434\": \"수도권_전철_4호선\",\n        \"531\": \"수도권_전철_5호선\",\n        \"543\": \"수도권_전철_5호선\",\n        \"745\": \"서울_지하철_7호선\",\n        \"746\": \"서울_지하철_7호선\",\n        \"911\": \"서울_지하철_9호선\",\n        \"K123\": \"수도권_전철_경의·중앙선\",\n        \"K222\": \"수도권_전철_수인·분당선\",\n        \"S122\": \"서울_경전철_우이신설선\",\n        \"I126\": \"인천_도시철도_1호선\",\n        \"I217\": \"인천_도시철도_2호선\",\n        \"A04\": \"인천국제공항철도\"\n    }\n    except_name2 = {\n        \"709\": \"서울_지하철_7호선\",\n        \"710\": \"서울_지하철_7호선\",\n        \"P123\": \"경춘선\",\n        \"D07\": \"신분당선\",\n        \"K209\": \"수도권_전철_수인·분당선\",\n        \"G109\": \"김포_도시철도\",\n        \"A06\": \"인천국제공항철도\",\n        \"A01\": \"인천국제공항철도\"\n    }\n\n    items = row.find_all([\"td\", \"th\"])\n\n    if len(items) == 1:\n        temp_line = 
items[0].get_text()\n return\n\n if items[0].has_attr(\"rowspan\") and row.find(\"th\", rowspan=lambda x: x != \"2\"):\n line = items[0].get_text()\n temp_line = line\n number = items[1].get_text()\n name = items[2].get_text()\n english_name = items[3].get_text()\n hanja_name = items[4].get_text()\n elif items[0].has_attr(\"colspan\"):\n return\n elif row.find(\"th\", {\"rowspan\": \"2\"}):\n line = temp_line\n number, name, english_name, hanja_name = line_have_two_name(items)\n else:\n if check_line == True:\n line = temp_line\n else:\n line = page_name.replace(\"_\", \" \")\n\n number = items[0].get_text()\n name = items[1].get_text()\n english_name = items[2].get_text()\n hanja_name = items[3].get_text()\n\n if check_note == True:\n i = -1\n else:\n i = 0\n\n if (items[-2+i].has_attr(\"rowspan\") and row.find(\"th\", rowspan=lambda x: x != \"2\")) or (except_name2.get(number.replace(\"\\n\", \"\").replace(\" \", \"\")) == check_page_name):\n temp_location1 = items[-2+i].get_text().replace(\"\\n\", \" \")\n if items[-1+i].has_attr(\"rowspan\") and row.find(\"th\", rowspan=lambda x: x != \"2\") or (except_name2.get(number.replace(\"\\n\", \"\").replace(\" \", \"\")) == check_page_name):\n temp_location2 = items[-1+i].get_text()\n location = temp_location1 + temp_location2\n transfer = items[-5+i].get_text()\n distance = items[-4+i].get_text()\n cumulative_distance = items[-3+i].get_text()\n else:\n location = temp_location1 + temp_location2\n transfer = items[-4+i].get_text()\n distance = items[-3+i].get_text()\n cumulative_distance = items[-2+i].get_text()\n else:\n location = temp_location1\n if (items[-1+i].has_attr(\"rowspan\") and row.find(\"th\", rowspan=lambda x: x != \"2\")) or (except_name1.get(number.replace(\"\\n\", \"\").replace(\" \", \"\")) == check_page_name):\n #temp_location1 = items[-2].get_text().replace(\"\\n\", \" \")\n temp_location2 = items[-1+i].get_text()\n location = temp_location1 + temp_location2\n transfer = items[-4+i].get_text()\n distance = items[-3+i].get_text()\n cumulative_distance = items[-2+i].get_text()\n elif row.find(\"th\", {\"rowspan\": \"2\"}) and items[-1+i].has_attr(\"rowspan\"):\n temp_location2 = items[-1+i].get_text()\n location = temp_location1 + temp_location2\n transfer = items[-4+i].get_text()\n distance = items[-3+i].get_text()\n cumulative_distance = items[-2+i].get_text()\n else:\n location = temp_location1 + temp_location2\n transfer = items[-3+i].get_text()\n distance = items[-2+i].get_text()\n cumulative_distance = items[-1+i].get_text()\n\n if name.replace(\"\\n\", \"\") == \"지축\": # 3호선 지축역\n location = temp_location1 + temp_location2\n transfer = items[-3+i].get_text()\n distance = items[-2+i].get_text()\n cumulative_distance = items[-1+i].get_text()\n\n return {\n 'number': number.replace(\"\\n\", \"\").replace(\" \", \"\"),\n 'line': line.replace(\"\\n\", \"\"),\n 'name': name.replace(\"\\n\", \"\"),\n 'english_name': english_name.replace(\"\\n\", \"\"),\n 'hanja_name': hanja_name.replace(\"\\n\", \"\"),\n 'transfer': transfer.replace(\"\\n\", \"\").replace(\"●\", \"\", 1).replace(\"●\", \",\"),\n 'distance': distance.replace(\"\\n\", \"\"),\n 'cumulative_distance': cumulative_distance.replace(\"\\n\", \"\"),\n 'location': location.replace(\"\\n\", \"\")\n }\n\n\ndef get_data_seoul():\n city_name = \"seoul\"\n # seoul_subway_lines = [[지하철노선이름, 테이블 내 노선명 유무, 테이블 내 비고란 유무, 시작테이블, 끝테이블 + 1(테이블이 한 개면 None)]]\n seoul_subway_lines = [\n [\"수도권_전철_1호선\", True, False, 1, 4],\n [\"서울_지하철_2호선\", False, False, 0, 3],\n [\"수도권_전철_3호선\", 
True, False, 0, None],\n [\"수도권_전철_4호선\", True, False, 0, None],\n [\"수도권_전철_5호선\", False, False, 0, 2],\n [\"서울_지하철_6호선\", False, False, 0, None],\n [\"서울_지하철_7호선\", False, False, 0, None],\n [\"서울_지하철_8호선\", False, False, 0, None],\n [\"서울_지하철_9호선\", False, False, 0, None],\n [\"경강선\", False, True, 0, None],\n [\"수도권_전철_경의·중앙선\", True, False, 0, 3],\n [\"경춘선\", False, False, 0, None],\n [\"인천국제공항철도\", False, True, 0, None],\n [\"서해선\", False, True, 0, None],\n [\"수도권_전철_수인·분당선\", True, False, 0, None],\n [\"신분당선\", False, False, 0, None],\n [\"김포_도시철도\", False, True, 0, None],\n [\"용인_경전철\", False, False, 0, None],\n [\"서울_경전철_우이신설선\", False, False, 0, None],\n [\"의정부_경전철\", False, False, 0, None],\n [\"인천_도시철도_1호선\", False, False, 0, None],\n [\"인천_도시철도_2호선\", False, False, 0, None],\n [\"인천공항_자기부상철도\", False, False, 0, None]\n ]\n\n for seoul_subway_line in seoul_subway_lines:\n line_name = seoul_subway_line[0]\n line_data = {\"line_name\": line_name, \"station_data\": []}\n url = f\"https://ko.wikipedia.org/wiki/{line_name}\"\n request = requests.get(url)\n soup = BeautifulSoup(request.text, \"html.parser\")\n\n if seoul_subway_line[4] == None:\n data = soup.find_all(\"table\", {\"class\": \"wikitable\"})[\n seoul_subway_line[3]]\n rows = data.find_all(\"tr\")[1:]\n elif line_name == \"수도권_전철_1호선\": # 1호선 2번째 테이블 비고란 제거\n data = soup.find_all(\"table\", {\"class\": \"wikitable\"})[\n seoul_subway_line[3]:seoul_subway_line[4]]\n rows = []\n temp = []\n for i in range(len(data)):\n temp = data[i].find_all(\"tr\")[1:]\n if i == 1:\n temp = data[i].find_all(\"tr\")[1:-1]\n rows = rows + temp\n else:\n data = soup.find_all(\"table\", {\"class\": \"wikitable\"})[\n seoul_subway_line[3]:seoul_subway_line[4]]\n rows = []\n temp = []\n for d in data:\n temp = d.find_all(\"tr\")[1:]\n rows = rows + temp\n\n for row in rows:\n scraped_station = scrape_station(\n row, line_name, seoul_subway_line[1], seoul_subway_line[2])\n if scraped_station:\n line_data[\"station_data\"].append(scraped_station)\n\n #write_csv(line_data, city_name)\n print(line_data)\n","repo_name":"kariray/korea_subway_dataset","sub_path":"src/scrapper_seoul_VER_1.py","file_name":"scrapper_seoul_VER_1.py","file_ext":"py","file_size_in_byte":8860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27763100298","text":"#############################################\r\n# Copyright (c) 2018-present\r\n# written by Kai Wu on 2018-08-04\r\n#############################################\r\n\r\n\"\"\"This is implementation of AlexNet model.\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom nn_ops import conv_relu, max_pool, dense, flatten, relu\r\n\r\n\r\nclass AlexNet(object):\r\n\r\n def __init__(self, x, output_dim, keep_prob):\r\n\r\n self.scores = AlexNet.forward(x, output_dim, keep_prob)\r\n\r\n @staticmethod\r\n def forward(x, output_dim, keep_prob):\r\n layers = []\r\n\r\n with tf.variable_scope('conv1'):\r\n conv1 = conv_relu(x, [3, 3, 3, 96], 1)\r\n pool1 = max_pool(conv1, ksize=2, stride=2, padding='SAME')\r\n layers.append(pool1)\r\n\r\n with tf.variable_scope('conv2'):\r\n conv2 = conv_relu(layers[-1], [3, 3, 96, 128], 1)\r\n pool2 = max_pool(conv2, ksize=2, stride=2, padding='SAME')\r\n layers.append(pool2)\r\n\r\n with tf.variable_scope('conv3'):\r\n conv3 = conv_relu(layers[-1], [3, 3, 128, 192], 1)\r\n layers.append(conv3)\r\n\r\n with tf.variable_scope('conv4'):\r\n conv4 = conv_relu(layers[-1], [3, 3, 192, 256], 1)\r\n layers.append(conv4)\r\n\r\n with 
tf.variable_scope('conv5'):\r\n conv5 = conv_relu(layers[-1], [3, 3, 256, 512], 1)\r\n pool5 = max_pool(conv5, ksize=2, stride=2, padding='SAME')\r\n layers.append(pool5)\r\n\r\n with tf.variable_scope('fc6'):\r\n flattened = flatten(layers[-1])\r\n fc6 = dense(flattened, 4096, activation=relu)\r\n dropout6 = tf.nn.dropout(fc6, keep_prob)\r\n layers.append(dropout6)\r\n\r\n with tf.variable_scope('fc7'):\r\n fc7 = dense(layers[-1], 4096, activation=relu)\r\n dropout7 = tf.nn.dropout(fc7, keep_prob)\r\n layers.append(dropout7)\r\n\r\n with tf.variable_scope('softmax'):\r\n scores = dense(layers[-1], output_dim)\r\n layers.append(scores)\r\n\r\n return layers[-1]\r\n","repo_name":"wusanye/deep-learning-practice","sub_path":"models/AlexNet/alexnet_new.py","file_name":"alexnet_new.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"72965810279","text":"''' \nEscreva um programa que leia a velocidade de um carro. Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo que ele foi multado. A multa vai custar R$7,00 por cada Km acima do limite.\n'''\n\nspeed = float( input( 'Qual a velocidade atual do carro? ' ) )\n\nif speed > 80:\n print( 'O limite de velocidade da via é 80Km/h!\\n'\n 'Você será multado em R${:.2f}'.format( (speed - 80) * 7 ) )\n\nprint( 'Tenha um bom dia! Dirija com segurança!' )","repo_name":"GabrielProdi/Python_CursoEmVideo","sub_path":"PythonExercicios/ex029.py","file_name":"ex029.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26442842918","text":"\r\n#说明:本文件只需要在更改了words_alpha.txt文件后运行一次,其他时候不需要运行\r\n\r\nimport os\r\nimport shutil\r\nimport numpy as np\r\nfrom collections import Counter\r\nfrom graphviz import Digraph\r\nfrom multiprocessing import Pool,cpu_count\r\n\r\nfrom GameLogic import mark,GuessState,filtrate_words\r\n\r\ndef initial_words():\r\n #把不同长度的单词分开,并写到不同的文件中\r\n with open ('words_alpha.txt') as word_file:\r\n words_lst=word_file.readlines()\r\n assert len(words_lst)==len(set(words_lst))\r\n words_lst=[word.strip() for word in words_lst]\r\n min_length=len(min(words_lst,key=len))\r\n if min_length<3:\r\n min_length=3\r\n max_length=len(max(words_lst,key=len))\r\n #check there are only letters in the word\r\n for word in words_lst:\r\n for letter in word:\r\n assert letter.isalpha()\r\n if os.path.exists('WordsClassifiedByLength')==True:\r\n shutil.rmtree('WordsClassifiedByLength')\r\n os.mkdir('WordsClassifiedByLength')\r\n for i in range(min_length,max_length+1):\r\n print('--- word_length: ',i,' ---')\r\n words_lst_of_length=[word for word in words_lst if len(word)==i]\r\n if len(words_lst_of_length)!=0:\r\n with open ('WordsClassifiedByLength\\\\Length_'+str(i)+'.txt','w') as file:\r\n for word in words_lst_of_length:\r\n file.write(word+'\\n')\r\n\r\n\r\ndef load_words(word_length):\r\n with open('WordsClassifiedByLength\\\\Length_'+str(word_length)+'.txt') as word_file:\r\n words_lst = word_file.readlines()\r\n words_lst=[word.strip() for word in words_lst]\r\n return words_lst\r\n\r\n\r\ndef get_entrophy(args):\r\n idx,word_lst,L=args\r\n guessed=word_lst[idx]\r\n count_dict = Counter(tuple(mark(answer, guessed)) for answer in word_lst)\r\n probabilities = np.array(list(count_dict.values())) / L\r\n entrophy = -np.sum(probabilities * np.log2(probabilities))\r\n #打印进度条,不换行\r\n print('\\r',guessed,' {}/{}'.format(idx+1,L),end='')\r\n return 
entrophy\r\n\r\ndef find_best_tree(graph,word_lst):\r\n #找出最优决策树,原地修改graph\r\n #graph:graphviz.Digraph()\r\n L=len(word_lst)\r\n if L==1:\r\n #注意:添加的节点名称有可能是graphviz的关键字,所以要加上'_',但是label碰到关键字会自动加上引号\r\n graph.node(word_lst[0]+'_',label=word_lst[0])\r\n print('\\n'+word_lst[0]+'_')\r\n return\r\n #对于小规模不使用pool,大规模使用pool,500为分界\r\n if L<500:\r\n #find max entrophy\r\n entrophy_lst=np.zeros(L)\r\n for idx in range(L):\r\n guessed=word_lst[idx]\r\n count_dict = Counter(tuple(mark(answer, guessed)) for answer in word_lst)\r\n probabilities = np.array(list(count_dict.values())) / L\r\n entrophy_lst[idx] = -np.sum(probabilities * np.log2(probabilities))\r\n #打印进度条,不换行\r\n print('\\r',guessed,' {}/{}'.format(idx+1,L),end='')\r\n else:\r\n with Pool(cpu_count()) as p:\r\n res=p.map(get_entrophy,[(idx, word_lst, L) for idx in range(L)])\r\n entrophy_lst=np.array(res)\r\n\r\n max_idx=np.argmax(entrophy_lst)\r\n word=word_lst[max_idx]\r\n max_entrophy=entrophy_lst[max_idx]\r\n\r\n graph.node(graph.name+'_'+word,label=word+'\\n'+str(round(max_entrophy,2)))\r\n print('\\n'+graph.name+'_'+word)\r\n #对于筛选过后的new_word_lst,递归调用find_best_tree,将子图的根节点加入到graph的word节点下,这条边的label为state_lst\r\n state_lst_lst=Counter(tuple(mark(ans, word)) for ans in word_lst).keys()\r\n for state_lst in state_lst_lst:\r\n value_str=''.join([str(state.value) for state in state_lst])\r\n new_word_lst=filtrate_words(word_lst,word,state_lst)\r\n sub_graph=Digraph(name=graph.name+'_'+word+value_str)\r\n find_best_tree(sub_graph,new_word_lst)\r\n graph.subgraph(sub_graph)\r\n top_node=sub_graph.body[0].split('[')[0].strip()\r\n graph.edge(graph.name+'_'+word,top_node,label=value_str)\r\n return \r\n\r\n\r\ndef initial_decision_tree():\r\n file_lst=os.listdir('WordsClassifiedByLength')\r\n word_length_lst=sorted([int(file[7:-4]) for file in file_lst])\r\n if os.path.exists('Trees')==True:\r\n shutil.rmtree('Trees')\r\n os.mkdir('Trees')\r\n os.mkdir('Trees\\\\Graphs')\r\n os.mkdir('Trees\\\\Pdfs')\r\n \r\n for word_length in word_length_lst: \r\n words_lst=load_words(word_length)\r\n print('--- word_length: ',word_length,' ---')\r\n g=Digraph(name='_'+str(word_length)+'_')\r\n find_best_tree(g,words_lst)\r\n print('generating gv file')\r\n g.render('Trees\\\\Graphs\\\\Length'+str(word_length),view=False,format='gv')\r\n os.remove('Trees\\\\Graphs\\\\Length'+str(word_length))\r\n print('generating pdf file')\r\n g.render('Trees\\\\Pdfs\\\\Length'+str(word_length),view=False,format='pdf')\r\n os.remove('Trees\\\\Pdfs\\\\Length'+str(word_length))\r\n \r\n \r\nif __name__=='__main__':\r\n print('Initial Words')\r\n initial_words()\r\n print('Initial Decision Tree')\r\n initial_decision_tree()\r\n print('Done')","repo_name":"wrraa/wordle","sub_path":"InitialWordsAndDecisionTree.py","file_name":"InitialWordsAndDecisionTree.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72621619241","text":"import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error, r2_score\n\n\ndef train():\n\ttrain_df = pd.read_csv('/data/agelgazzar/Work/AgePrediction/3DResnet/code/csvfiles/training_error.csv')\n\tX = train_df['error'].values + train_df['age'].values\n\tX_train = np.expand_dims(X,1)\n\ty = train_df['age']\n\t# Model initialization\n\tregression_model = LinearRegression()\n\t# Fit the data(train the model)\n\tregression_model.fit(X_train, 
y)\n\t# Predict\n\ty_predicted = regression_model.predict(X_train)\n\n\t# model evaluation\n\trmse = mean_absolute_error(y, y_predicted)\n\tr2 = r2_score(y, y_predicted)\n\n\t# printing values\n\tprint('Slope:' ,regression_model.coef_)\n\tprint('Intercept:', regression_model.intercept_)\n\tprint('Training mean absoule error: ', rmse)\n\tprint('Training R2 score: ', r2)\n\tprint('-----------------------------------------------------')\n\n\tplt.scatter(X, y, s=10)\n\tplt.xlabel('Age')\n\tplt.ylabel('Predicted age')\n\n\t# predicted values\n\tplt.plot(X, y_predicted, color='r')\n\tplt.show()\n\n\n\treturn regression_model\n\ndef test(model):\n\ttrain_df = pd.read_csv('/data/agelgazzar/Work/AgePrediction/3DResnet/code/csvfiles/test_error.csv')\n\tX = train_df['error'].values + train_df['age'].values\n\tx = np.expand_dims(X,1)\t\t\n\ty = train_df['age']\n\ty_predicted = model.predict(x)\n\trmse = mean_absolute_error(y, y_predicted)\n\tr2 = r2_score(y, y_predicted)\n\tprint('Test mean absolute error: ', rmse)\n\tprint('Test R2 score: ', r2)\n\n\n\t# model evaluation\n\trmse = mean_absolute_error(y, y_predicted)\n\tr2 = r2_score(y, y_predicted)\n\n\t# printing values\n\n\n\tplt.scatter(X, y, s=10)\n\tplt.xlabel('Age')\n\tplt.ylabel('Predicted age')\n\n\t# predicted values\n\tplt.plot(X, y_predicted, color='r')\n\tplt.show()\n\n\n\nif __name__ == '__main__':\n\tmodel = train()\n\tresutls = test(model)","repo_name":"elgazzarr/3DGroupConvNet","sub_path":"code/regression_model.py","file_name":"regression_model.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"722120009","text":"from flask import Flask, request\nfrom flask_cors import CORS\n\nappDep = Flask(__name__)\nCORS(appDep)\ndepartamentos = []\n@appDep.route(\"/\")\n\ndef inicioDep():\n return \"Flask se inicio correctamente**************\"\n\n# appDep.run(debug=True, port=1500)\n\n@appDep.route(\"/departamento/\", methods=['GET','PUT','DELETE'])\ndef departamento(id):\n if len(departamentos) > id: #Consulta si el String es > al id\n if request.method == \"GET\":\n return {\n \"ok\": True,\n \"content\": departamentos[id],\n \"message\": None\n }\n elif request.method == \"PUT\":\n data = request.get_json()\n departamentos[id] = data\n return {\n \"ok\": True,\n \"content\": departamentos[id],\n \"message\": \"Se actualizo el departamento correctamente\"\n }, 201\n elif request.method == \"DELETE\":\n departamento = departamentos.pop(id)\n return {\n \"ok\": True,\n \"content\": departamento,\n \"message\": \"Se elimino el departamento correctamente\"\n }, 200\n del departamentos[id]\n return {\n \"ok\": True,\n \"content\": None,\n \"message\": \"Se elimino el departamento correctamente\"\n }, 200\n else:\n return {\n \"ok\": False,\n \"content\": None,\n \"message\": \"El departamento no Existe\"\n }\n\n@appDep.route(\"/departamento/buscar/\")\ndef buscador(palabra):\n resultado = []\n for departamento in departamentos:\n print (palabra.lower())\n if palabra.lower() in departamento['nombre'].lower():\n resultado.append(departamento)\n if resultado:\n return {\n \"ok\": True,\n \"content\": resultado,\n \"message\": None\n }\n else:\n return {\n \"ok\": False,\n \"content\": \"No hay resultados\",\n \"message\": None\n }, 404\n@appDep.route(\"/departamentos\", methods = [\"GET\",\"POST\"])\ndef manejo_departamentos():\n print(request.method)\n if request.method == \"GET\":\n if departamentos:\n return {\n \"ok\": True,\n \"content\": 
departamentos,\n \"message\": None\n }\n else:\n return {\n \"ok\": False,\n \"content\": None,\n \"message\": \"No hay departamentos\"\n }, 404\n elif request.method == \"POST\":\n data = request.get_json()\n departamentos.append(data)\n return{\n \"ok\": True,\n \"content\": None,\n \"message\": \"El departamento se agrego exitosamente\"\n }, 201\n \nappDep.run(debug=True, port=1500) \n \n \n \n ","repo_name":"hariasfrancia/TAREAS","sub_path":"SEMANA2/04_reto_sem2.py","file_name":"04_reto_sem2.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18367048583","text":"import torchvision.transforms as tfs\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport numpy as np\nimport os, random\n\nclass Exemplar_Dataset(Dataset):\n def __init__(self, max_num_exemplar):\n super().__init__()\n self.max_num_exemplar = max_num_exemplar\n self.images = []\n self.task_num = 1\n self.transforms = tfs.Compose([\n tfs.CenterCrop(240),\n tfs.ToTensor(),\n tfs.Normalize(mean=[0.64, 0.6, 0.58], std=[0.14, 0.15, 0.152])\n ])\n \n def load_exemplar(self, data_path, task_num):\n self.exemplars_per_task = int(np.ceil(self.max_num_exemplar / task_num))\n self.task_num = task_num\n img_list_dir = os.listdir(data_path+'/exemplar')\n img_list_dir.sort()\n img_list = []\n for img_dir in img_list_dir:\n img = Image.open(os.path.join(data_path, 'exemplar', img_dir))\n img = self.transforms(img.convert('RGB'))\n if 'RESIDE' in data_path:\n id = img_dir.split('/')[-1].split('.')[0].split('_')[0]\n id = id + '.jpg'\n label=Image.open(os.path.join(os.path.join(data_path, 'clear', id)))\n elif 'Rain100H' in data_path:\n label=Image.open(os.path.join(os.path.join(data_path, 'norain', img_dir)))\n \n label = tfs.CenterCrop(240)(label.convert(\"RGB\"))\n label = tfs.ToTensor()(label)\n \n img_list.append((img, label))\n # img_list.append(img)\n self.images.append(img_list)\n self._clear_more(self.exemplars_per_task)\n \n def collect_exemplar(self, train_dataset, task_num):\n self.exemplars_per_task = int(np.ceil(self.max_num_exemplar / task_num))\n self.task_num = task_num\n data_num = len(train_dataset)\n assert (data_num > self.exemplars_per_task), 'Not enough samples to store'\n select_index = random.sample(range(data_num), self.exemplars_per_task)\n self.images.append([train_dataset[idx][0] for idx in select_index])\n self._clear_more(self.exemplars_per_task)\n \n def _clear_more(self, exemplars_per_task):\n for label in range(self.task_num):\n self.images[label] = self.images[label][0:exemplars_per_task]\n \n def __getitem__(self, idx):\n label = idx // self.exemplars_per_task\n idx = idx % self.exemplars_per_task\n return self.images[label][idx]\n \n def __len__(self):\n return sum([len(self.images[label]) for label in range(self.task_num)])","repo_name":"xiaojihh/CL_all-in-one","sub_path":"data/exemplar.py","file_name":"exemplar.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74471652521","text":"from TakeInput import take_input\nimport time\nimport numpy as np\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\n#log2 everywhere\n\n\ndef mapper(vector): #TODO Modify for better binary split\n thresholds = np.zeros(len(vector[0]))\n for j in range(len(vector[0])):\n column = vector[:,j] #Approximating threshold using distributions - normal\n upper0 = column[0:1500]\n lower1 = 
column[1500:2000]\n mean0 = np.mean(upper0)\n mean1 = np.mean(lower1)\n if(mean0 > mean1):\n C = (np.quantile(upper0,0.25)+np.quantile(lower1,0.75))/2\n else:\n C = (np.quantile(upper0,0.75)+np.quantile(lower1,0.25))/2\n thresholds[j] = C\n return thresholds\n\ndef mappee(vector, thresholds):\n n_features = len(vector[0])\n\n for i in range(n_features):\n vector[:,i] = (vector[:,i] > thresholds[i]).astype(int)\n\n return vector\n\ndef predict(root,feature_val):\n predicted_val = np.zeros(400) #Check better?\n i = 0\n safe_root = root\n for features in feature_val:\n root = safe_root\n while(True):\n if(root.isLeaf == True):\n predicted_val[i] = root.leafvalue\n i += 1\n break\n root = root.children[int(features[root.para_index])]\n return predicted_val\n\nclass Node:\n def __init__(self,para_index=None):\n self.para_index = para_index\n self.isLeaf = False\n self.leafvalue = None\n self.children = [None, None]\n\n\nclass DecisionTree:\n def __init__(self,max_depth,min_samples_split):\n self.root = None\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n\n def ID3(self,depth,unused_indices,feature_vector, output_vector, parent_entropy):\n root = Node()\n \n k = 0\n GR = np.zeros(len(unused_indices))\n m = 0\n for i in unused_indices:\n entropy = [None, None]\n count_group = [None, None]\n for j in range(2):\n ones = 0\n total = np.count_nonzero(feature_vector[:,i] == j)\n count_group[j] = total\n for w in range(len(output_vector)):\n if(feature_vector[w,i]==j and output_vector[w]==1):\n ones += 1\n zeros = total - ones\n if(zeros == 0 or ones == 0):\n entropy[j] = 0\n else:\n entropy[j] = (ones/total)*np.log2(total/ones) + (zeros/total)*np.log2(total/zeros)\n avg_entropy = np.average(entropy)\n gain = parent_entropy - avg_entropy\n\n sum = 0\n c_t = np.sum(count_group)\n for c in count_group:\n if (c==0):\n continue\n sum = sum + (c/c_t)*np.log2(c_t/c)\n split_information = sum\n\n #if(split_information == 0):\n #GR[m] = -1\n #else:\n GR[m] = gain/split_information\n m += 1\n \n temp = np.argmax(GR)\n if(type(temp)==np.ndarray):\n max_indice = temp[0]\n else:\n max_indice = temp\n k = unused_indices[max_indice]\n root.para_index = k\n unused_indices.remove(k)\n\n for i in range(2):\n child = Node()\n total = np.count_nonzero(feature_vector[:,k] == i)\n if(total < self.min_samples_split or depth == self.max_depth-1):\n child.isLeaf = True\n ones = 0\n for j in range(len(output_vector)):\n if(feature_vector[j,k]==i and output_vector[j]==1):\n ones += 1\n zeros = total - ones\n child.isLeaf = True\n child.leafvalue = 1 if (3*ones>zeros) else 0 #Check 3* change\n else:\n entropy = 0\n ones = 0\n total = np.count_nonzero(feature_vector[:,k] == i)\n for w in range(len(output_vector)):\n if(feature_vector[w,k]==i and output_vector[w]==1):\n ones += 1\n zeros = total - ones\n if(zeros == 0 or ones == 0):\n entropy = 0\n else:\n entropy = (ones/total)*np.log2(total/ones) + (zeros/total)*np.log2(total/zeros)\n child = self.ID3(depth+1,unused_indices,feature_vector,output_vector,entropy)\n root.children[i] = child\n return root\n\n'''IG'''\n\ntrain_path = \"data/train\"\nfeature_vector, output_vector = take_input(train_path,2000)\nvalidation_path = \"data/validation\"\nfeature_val, output_val = take_input(validation_path,400)\nthresholds = mapper(feature_vector)\nfeature_vector = mappee(feature_vector,thresholds)\nfeature_val = mappee(feature_val,thresholds) #Only 0s and 1s\nindices = [i for i in range(3072)]\n\nbegin = time.time()\nDT = DecisionTree(4,7)\ninitial_entropy = 
(3/4)*np.log2(4/3) + (1/4)*np.log2(4)\nroot = DT.ID3(0,indices,feature_vector,output_vector,initial_entropy)\nend = time.time()\nprint(f\"Time taken is {end - begin} seconds\")\n\npredicted_val = predict(root,feature_val)\n\n#Confusion for both train and val\nconfusion_matrix = metrics.confusion_matrix(output_val, predicted_val,labels=[0,1])\ncm_display = metrics.ConfusionMatrixDisplay(confusion_matrix = confusion_matrix,display_labels=[0,1])\ncm_display.plot()\nplt.show()\n\naccuracy = (confusion_matrix[0][0] + confusion_matrix[1][1])/len(output_val)\nprint(accuracy)\nprint((confusion_matrix[0][0]/(confusion_matrix[0][0] + confusion_matrix[1][0])))\nprint((confusion_matrix[1][1]/(confusion_matrix[1][1] + confusion_matrix[0][1])))\nprint((confusion_matrix[0][0]/(confusion_matrix[0][0] + confusion_matrix[0][1])))\nprint((confusion_matrix[1][1]/(confusion_matrix[1][1] + confusion_matrix[1][0])))","repo_name":"Saket003/Machine-Learning","sub_path":"A3 Extra/Old 3.1A/Section3.1A copy.py","file_name":"Section3.1A copy.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"25871156044","text":"import cplex\nimport sys\n\ndef sample1(filename):\n\tc = cplex.Cplex(filename)\n\n\ttry:\n\t\tc.solve()\n\texcept CplexSolverError:\n\t\tprint(\"Exception raised during solve\")\n\t\treturn\n\n\tstatus = c.solution.get_status()\n\tprint(\"Solution status = {}:{}\".format(status, c.solution.status[status]))\n\tprint(\"Objective value = {}\".format(c.solution.get_objective_value()))\n\t","repo_name":"craigmax-dev/Mixed-Integer-Linear-Programming-for-Spacecraft-Maneuvers","sub_path":"lib/python/archive/cplexTest02.py","file_name":"cplexTest02.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22548346996","text":"from flask import Flask, render_template ,request,url_for,escape,session,redirect,abort\nimport sqlite3 as sql\n# import admin\nfrom flask_bcrypt import Bcrypt\napp = Flask(__name__)\napp.secret_key = 'any random string'\nbcrypt = Bcrypt(app)\n\n\ndef Convert(tup, di):\n\tdi = dict(tup)\n\treturn di\n\n# Show PROFESSOR list\ndef show_profl():\n @app.route('/show_profs')\n def show_profs():\n return render_template('show_profs.html', lisp=session['lisp'],exist=session['exist'])\n\n\n# Display feedback FORM\ndef feedform():\n @app.route('/stform')\n def stform():\n print(session['branch'],session['semester'])\n return render_template('stform.html', branch=session['branch'],semester=session['semester'])\n\n\n\n# FEEDBACK MODULE......\ndef feedmodule():\n @app.route('/feedback', methods=['POST', 'GET'])\n def feedback():\n if request.method == 'POST':\n lecturer = request.form['lecturer']\n st_rollno = session['roll_no']\n year = session['year']\n semester = session['semester']\n branch = session['branch']\n subject = request.form['subject']\n preparation = request.form['preparedness']\n information = request.form['informative']\n explanation = request.form['explaining']\n pace = request.form['pace']\n leadership = request.form['leading']\n receptive = request.form['receptive']\n interest = request.form['interest']\n discussion = request.form['discussion']\n learning = request.form['learn']\n rapport = request.form['rapport']\n available = request.form['available']\n current = [lecturer,st_rollno,year,semester,branch,subject]\n\n con = sql.connect(\"database.db\")\n cur = con.cursor()\n\n 
cur.execute(\"select lecturer,st_rollno,year,semester,branch,subject from feedback where st_rollno=?\",[session['roll_no']])\n alrdyexist=cur.fetchall()\n\n for i in range(0,len(alrdyexist)):\n if list(alrdyexist[i]) == current:\n msgx=\"This feedback is already registered\"\n con.close()\n\n return render_template('show_profs.html', lisp=session['lisp'], msgx=msgx, exist=session['exist'])\n\n else:\n cur.execute(\"INSERT INTO feedback (lecturer,st_rollno,year,semester,branch,subject,preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available)VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",(lecturer,st_rollno,year,semester,branch,subject,preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available,))\n con.commit()\n cur.execute(\"select distinct lecturer,subject from feedback where st_rollno=?\", [session['roll_no']])\n global lis1,lis2,lis3\n lis2 = cur.fetchall()\n lis3 = [x for x in lis1 if x not in lis2]\n dictionary = {}\n session['lisp'] = Convert(lis3, dictionary)\n session['exist'] = Convert(lis2, dictionary)\n\n con.close()\n return redirect(url_for('show_profs'))\n\n\n#view RESPONSE\n# def viewres():\n# @app.route('/response', methods=['POST', 'GET'])\ndef response():\n if request.method == 'POST':\n restech = request.form['restech']\n ressub = request.form['ressub']\n print(restech)\n print(ressub)\n con = sql.connect(\"database.db\")\n cur = con.cursor()\n cur.execute(\"select preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available from feedback where st_rollno=? and lecturer=? and subject=?\",(session['roll_no'],restech,ressub))\n out=cur.fetchall()\n res = [item for t in out for item in t]\n print(res)\n return render_template('response.html', res=res,branch=session['branch'],semester=session['semester'],restech=restech,ressub=ressub)\n\n else:\n print('this worked')\n","repo_name":"Anand09Pandey/SFIP-Student-Faculty-Interaction-Portal","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"34049272451","text":"# In this section we define function palindrome\ndef palindrome(lis):\n # This is list..\n lis = [12,9,61,5,14]\n x = 0\n # In this we use for loop..\n for i in lis:\n\n t = i\n rev = 0\n # In this we use while loop..\n while t < 0:\n rev = rev * 10 + t % 10\n t = t/10\n\n if rev == i:\n print(i)\n\n c = c + 1\n # return true or false\n return True\n return False\n\n \n# This is if condition..\n if(i>0 and palindrome(lis)):\n print(\"true\")\n else:\n print(\"false\")\n \n \n","repo_name":"shakti001/Python-Codes","sub_path":"day3/palidorrmic.py","file_name":"palidorrmic.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71427358760","text":"from IPython.display import HTML\nHTML('''\nPara esconder/mostrar os erros de output do notebook, clique aqui.''')\nimport pandas as pd\nimport numpy as np\nimport sklearn\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import fbeta_score, make_scorer, roc_curve, auc\nimport os\nprint(os.listdir('../input/pmr-3508-tarefa-2'))\ntest_d = 
pd.read_csv('../input/testefeat/test_features.csv',index_col='Id')\ntrain_d = pd.read_csv('../input/train-data/train_data.csv', index_col='Id')\ntrain_d.head()\ntrain_d.info()\ntest_d.info()\nax = train_d['ham'].value_counts(normalize=True).plot(kind='bar')\nplt.xlabel('Classes'); plt.ylabel('Proporção');\nplt.title('Distribuição de Classe dos dados na base treino');\n#Função de avaliação entre classificadores para um determinado dataset\ndef ClassifierScores(data, labels, scorer = None, n_neighbors = 5):\n\n bnb = BernoulliNB()\n scores = cross_val_score(bnb, data, labels, cv=10, scoring = scorer)\n print('Bernoulli NB')\n print(f'Pontuação (10-fold CV) = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\n if scorer != None:\n scores = cross_val_score(bnb, data, labels, cv=10)\n print(f'Acurácia = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\n print()\n\n#MultinomialNB() não pode receber valores negativos\n if np.sum((data < 0).values.ravel()) > 0:\n print('Dados possuem valores negativos, pulando Multinomial NB')\n print()\n else:\n mnb = MultinomialNB()\n scores = cross_val_score(mnb, data, labels, cv=10, scoring = scorer)\n print('Multinomial NB')\n print(f'Pontuação (10-fold CV) = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\n scores = cross_val_score(mnb, data, labels, cv=10)\n print(f'Acurácia = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\n print()\n \n gnb = GaussianNB()\n fbeta = make_scorer(fbeta_score,beta = 3)\n scores = cross_val_score(gnb, data, labels, cv=10, scoring = scorer)\n print('Gaussian NB')\n print(f'Pontuação (10-fold CV) = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\n if scorer != None:\n scores = cross_val_score(gnb, data, labels, cv=10)\n print(f'Acurácia = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\n print()\n \n knn = KNeighborsClassifier(n_neighbors = n_neighbors, n_jobs = -1)\n scores = cross_val_score(knn, data, labels, cv=10, scoring = scorer)\n print(f'{n_neighbors}NN')\n print(f'Pontuação (10-fold CV) = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\n if scorer != None:\n scores = cross_val_score(knn, data, labels, cv=10)\n print(f'Acurácia = {round(scores.mean(), 4)}, com Desvio Padrão = {round(scores.std(), 4)}')\nbaseline = pd.DataFrame(np.zeros(train_d.drop('ham',axis=1).shape))\nfbeta = make_scorer(fbeta_score, greater_is_better=True, beta = 3)\nClassifierScores(baseline,\n train_d.loc[:,'ham'],\n fbeta)\nClassifierScores(train_d.drop('ham', axis = 1),\n train_d.loc[:,'ham'],\n fbeta)\ntest_d['ham'] = np.nan\ndata = train_d.append(test_d, sort=False)\nbin_train1 = pd.DataFrame()\nfeature = []\nbin_train1['ham'] = data['ham']\nfor c in cols:\n for i, row in data.iterrows():\n if row[c] > 0:\n feature.append(1)\n else:\n feature.append(0)\n bin_train1[c] = feature\n feature.clear()\n\nbin_train1.dropna(inplace = True)\nbin_train1.head()\nbin_train1.drop(labels = ['capital_run_length_average','capital_run_length_longest','capital_run_length_total'], axis= 1, inplace = True)\nClassifierScores(bin_train1.drop('ham', axis = 1),\n bin_train1.loc[:,'ham'],\n fbeta,\n 11)\nspam = train_d[train_d['ham'] == 0]\nmail = train_d[train_d['ham'] == 1]\ncols = list(data.columns)\ncols.pop()\n\nspam_means = {col:spam[col].mean() for col in cols}\nspam_stds = {col:spam[col].std() for col in cols}\nmail_means = {col:mail[col].mean() for col in cols}\nmail_stds = 
{col:mail[col].std() for col in cols}\nmeans = {col:train_d[col].mean() for col in cols}\nstds = {col:train_d[col].mean() for col in cols}\nplt.figure(figsize = (20, 8))\nax = plt.subplot(111)\nx = np.arange(len(cols))\nbar_mail_means = ax.bar(x-0.2, list(mail_means.values()),width=0.2,color='b',align='center')\nbar_all_means = ax.bar(x, list(means.values()),width=0.2,color='g',align='center')\nbar_spam_means = ax.bar(x+0.2, list(spam_means.values()),width=0.2,color='r',align='center')\nax.set_xticks(x+0.2)\nax.set_xticklabels(cols)\nax.legend((bar_mail_means, bar_spam_means, bar_all_means), ('Média para ham', 'Média para spam', 'Média dos dados de treino'))\nplt.xticks(rotation=90)\n\nplt.xlabel('Coluna'); plt.ylabel('Valor médio');\nplt.title('Comparação entre os valores médios das colunas para cada classe')\nplt.show()\nplt.figure(figsize = (20, 8))\nax = plt.subplot(111)\nx = np.arange(len(cols)-3)\nbar_mail_means = ax.bar(x-0.2, list(mail_means.values())[:-3],width=0.2,color='b',align='center')\nbar_all_means = ax.bar(x, list(means.values())[:-3],width=0.2,color='g',align='center')\nbar_spam_means = ax.bar(x+0.2, list(spam_means.values())[:-3],width=0.2,color='r',align='center')\nax.set_xticks(x+0.2)\nax.set_xticklabels(cols)\nax.legend((bar_mail_means, bar_spam_means, bar_all_means), ('Média para ham', 'Média para spam', 'Média dos dados de treino'))\nplt.xticks(rotation=90)\n\nplt.xlabel('Coluna'); plt.ylabel('Valor médio');\nplt.title('Comparação entre os valores médios das colunas para cada classe')\nplt.show()\nplt.figure(figsize = (20, 8))\nax = plt.subplot(111)\nx = np.arange(len(cols)-3)\nbar_mail_stds = ax.bar(x-0.2, list(mail_stds.values())[:-3],width=0.2,color='b',align='center')\nbar_all_stds = ax.bar(x, list(stds.values())[:-3],width=0.2,color='g',align='center')\nbar_spam_stds = ax.bar(x+0.2, list(spam_stds.values())[:-3],width=0.2,color='r',align='center')\nax.set_xticks(x+0.2)\nax.set_xticklabels(cols)\nax.legend((bar_mail_stds, bar_spam_stds, bar_all_stds), ('Desvio Padrão para ham', 'Desvio Padrão para spam', 'Desvio Padrão dos dados de treino'))\nplt.xticks(rotation=90)\n\nplt.xlabel('Coluna'); plt.ylabel('Desvio Padrão');\nplt.title('Comparação entre os valores médios das colunas para cada classe');\nplt.show()\nfit_norm = data[cols]\n\nfor c in cols:\n fit_norm.loc[:,c] = fit_norm.loc[:,c].subtract(means[c]).divide(stds[c])\n \nfit_norm['ham'] = data['ham']\n\nfit_norm.head()\ntrain_norm = fit_norm.dropna()\nClassifierScores(train_norm.drop('ham', axis = 1),\n train_norm.loc[:,'ham'],\n fbeta,\n 11)\nlim_spam = {col:(spam_means[col]-means[col])/stds[col] for col in cols}\nlim_mail = {col:(mail_means[col]-means[col])/stds[col] for col in cols}\n\nbin_train3 = pd.DataFrame()\nfeature = []\nbin_train3['ham'] = data['ham']\nfor c in cols:\n for i, row in fit_norm.iterrows():\n if spam_means[c] > mail_means[c]:\n if row[c] > lim_mail[c]:\n feature.append(1)\n else:\n feature.append(0)\n else:\n if row[c] > lim_spam[c]:\n feature.append(1)\n else:\n feature.append(0)\n \n bin_train3[c] = feature\n feature.clear()\n\nClassifierScores(bin_train3.dropna().drop('ham', axis = 1),\n bin_train3.dropna().loc[:,'ham'],\n fbeta,\n 11)\nlim_spam_std = {col:spam_stds[col]/stds[col] for col in cols}\nlim_mail_std = {col:mail_stds[col]/stds[col] for col in cols}\n\nbin_train4 = pd.DataFrame()\nfeature = []\nbin_train4['ham'] = data['ham']\nfor c in cols:\n for i, row in fit_norm.iterrows():\n if spam_means[c] > lim_spam[c]:\n if row[c] > lim_spam[c]-0.2*lim_spam_std[c]:\n 
feature.append(1)\n else:\n feature.append(0)\n else:\n if abs(row[c]) > lim_mail[c]-0.2*lim_mail_std[c]:\n feature.append(1)\n else:\n feature.append(0)\n \n bin_train4[c] = feature\n feature.clear()\n\nbin_train4.head()\nClassifierScores(bin_train4.dropna().drop('ham', axis = 1),\n bin_train4.dropna().loc[:,'ham'],\n fbeta,\n 11)\nwords = list(data.columns)[:-10]\nword_table = pd.DataFrame()\nword_table['ham'] = train_d['ham']\n\ncount = []\nfor c in words:\n for i, row in train_d.iterrows():\n if row[c] > 0:\n count.append(1)\n else:\n count.append(0)\n word_table[c] = count\n count.clear()\n \nword_table.head()\nspamicity = [word_table.loc[word_table['ham']==0].loc[:,word].sum()/word_table.loc[:,word].sum() for word in word_table.columns]\nspamicity = pd.Series(spamicity,word_table.columns)\nspamicity.drop('ham',inplace = True)\ndrop = list(spamicity.loc[abs(spamicity - 0.5) < 0.3].keys())\ndisplay(drop)\nprint(f'{len(drop)} a remover')\ncols = list(data.drop(drop,axis=1).drop('ham',axis=1).columns)\nbin_train6 = pd.DataFrame()\nfeature = []\nbin_train6['ham'] = data['ham']\nfor c in cols:\n for i, row in fit_norm.iterrows():\n if row[c] > 0:\n feature.append(1)\n else:\n feature.append(0)\n \n bin_train6[c] = feature\n feature.clear()\n\nClassifierScores(bin_train6.dropna().drop('ham', axis = 1),\n bin_train6.dropna().loc[:,'ham'],\n fbeta,\n 11)\nbin_train7 = pd.DataFrame()\nfeature = []\nbin_train7['ham'] = data['ham']\nfor c in cols:\n for i, row in fit_norm.iterrows():\n if spam_means[c] > means[c]:\n if row[c] > lim_spam[c]:\n feature.append(2)\n elif row[c] > 0:\n feature.append(1)\n else:\n feature.append(0)\n else:\n if row[c] > lim_mail[c]:\n feature.append(2)\n elif row[c] > 0:\n feature.append(1)\n else:\n feature.append(0)\n bin_train7[c] = feature\n feature.clear()\n\nClassifierScores(bin_train7.dropna().drop('ham', axis = 1),\n bin_train7.dropna().loc[:,'ham'],\n fbeta,\n 11)\nbin_train8 = pd.DataFrame()\nfeature = []\nbin_train8['ham'] = data['ham']\nfor c in cols:\n for i, row in fit_norm.iterrows():\n if spam_means[c] > means[c]:\n if row[c] > lim_spam[c]-0.25*lim_spam_std[c]:\n feature.append(2)\n elif row[c] > 0:\n feature.append(1)\n else:\n feature.append(0)\n else:\n if row[c] > lim_mail[c] - 0.25*lim_mail_std[c]:\n feature.append(2)\n elif row[c] > 0:\n feature.append(1)\n else:\n feature.append(0)\n bin_train8[c] = feature\n feature.clear()\n\nClassifierScores(bin_train8.dropna().drop('ham',axis=1),\n bin_train8.dropna().loc[:,'ham'],\n fbeta,\n 11)\nX = bin_train8.dropna().drop('ham', axis = 1)\ny = bin_train8.dropna().loc[:,'ham']\nscores_array = []\nfor n in range(1,100):\n knn = KNeighborsClassifier(n_neighbors=n)\n scores = cross_val_score(knn,\n X,\n y,\n cv=10,\n scoring = fbeta)\n scores_array.append(scores.mean())\n \nplt.plot(range(1,100),scores_array, 'ro')\nplt.xlabel('Fbeta'); plt.ylabel('Número de Vizinhos');\nplt.title('Escolha de número de vizinhos para treinamento do KNN');\nn = np.argmax(scores_array)+1\nprint(f'Pontuação Máxima:{round(max(scores_array),4)}')\nprint(f'Número de Vizinhos:{n}')\nscores_array = []\npriori = np.linspace(0.001,0.999,1000)\nfor p in priori:\n bnb = BernoulliNB(class_prior=[p,1-p])\n scores = cross_val_score(bnb,\n X,\n y,\n cv=10,\n scoring = fbeta)\n scores_array.append(scores.mean())\n \nplt.plot(priori,scores_array, 'ro')\nplt.xlabel('Fbeta'); plt.ylabel('Porcentagem a priori de Spam');\nplt.title('Escolha porcentagens a priori para Bernoulli Naive-Bayes');\npb = priori[np.argmax(scores_array)]\nprint(f'Pontuação 
Máxima:{round(max(scores_array),4)}')\nprint(f'Parâmetro Probabilidade:{round(pb,4)}')\nscores_array = []\npriori = np.linspace(0.001,0.999,1000)\nfor p in priori:\n mnb = MultinomialNB(class_prior=[p,1-p])\n scores = cross_val_score(mnb,\n X,\n y,\n cv=10,\n scoring = fbeta)\n scores_array.append(scores.mean())\n \nplt.plot(priori,scores_array, 'ro')\nplt.xlabel('Fbeta'); plt.ylabel('Porcentagem a priori de Spam');\nplt.title('Escolha porcentagens a priori para Multinomial Naive-Bayes');\npm = priori[np.argmax(scores_array)]\nprint(f'Pontuação Máxima:{round(max(scores_array),4)}')\nprint(f'Parâmetro Probabilidade:{round(pm,4)}')\nbnb = BernoulliNB(class_prior=[pb,1-pb])\n\nbnb.fit(X,y)\n\ntest_data = bin_train8.loc[bin_train8['ham'].isnull()].drop('ham',axis=1)\n\ntestPred = bnb.predict(test_data)\narq = open (\"prediction_bnb.csv\", \"w\")\narq.write(\"Id,ham\\n\")\nfor i, j in zip(test_data.index, testPred):\n arq.write(str(i)+ \",\" + str(int(j))+\"\\n\")\narq.close()\nclassifier = MultinomialNB(class_prior=[pm,1-pm])\n\nmnb.fit(X,y)\n\ntest_data = bin_train8.loc[bin_train8['ham'].isnull()].drop('ham',axis=1)\n\ntestPred = mnb.predict(test_data)\narq = open (\"prediction_mnb.csv\", \"w\")\narq.write(\"Id,ham\\n\")\nfor i, j in zip(test_data.index, testPred):\n arq.write(str(i)+ \",\" + str(int(j))+\"\\n\")\narq.close()\nfrom scipy import interp\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import StratifiedKFold\n\n# #############################################################################\n# Classification and ROC analysis\n\n# Run classifier with cross-validation and plot ROC curves\ncv = StratifiedKFold(n_splits=7)\nclassifier = MultinomialNB(class_prior=[pm,1-pm])\n\nthresholds = []\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\nplt.figure(figsize = (8, 6))\n\ni = 0\nfor train, test in cv.split(X, y):\n probas_ = classifier.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])\n # Compute ROC curve and area the curve\n fpr, tpr, thr = roc_curve(y.iloc[test], probas_[:, 1])\n tprs.append(interp(mean_fpr, fpr, tpr))\n tprs[-1][0] = 0.0\n thresholds.append(interp(mean_fpr, fpr, thr)) \n thresholds[-1][0] = 1.0\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n plt.plot(fpr, tpr, lw=1, alpha=0.3,\n label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))\n\n i += 1\nplt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Acaso', alpha=.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_thresholds = np.mean(thresholds, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nplt.plot(mean_fpr, mean_tpr, color='b',\n label=r'Curva ROC média (AUC = %0.2f$\\pm$ %0.2f)' % (mean_auc,std_auc),\n lw=2, alpha=.8)\n\n\nplt.xlim([-0.05, 1.05])\nplt.ylim([-0.05, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Curva ROC para o melhor classificador (MNB)')\nplt.legend(loc=\"lower right\")\nplt.show()\npont = 0\nfor i in range(len(mean_fpr)):\n pont_p = 10*mean_tpr[i]/(10*mean_tpr[i]+9*(1-mean_tpr[i])+mean_fpr[i])\n if pont_p > pont:\n pont = pont_p\n maxindex = i\nprint(f'Segundo a curva, o limiar ideal para maximizar fbeta é {round(mean_thresholds[maxindex], 4)}.')\nprint(f'Para este limiar, a taxa de falsos positivos esperada é {round(mean_fpr[maxindex], 4)}.\\nA taxa de verdadeiros positivos é {round(mean_tpr[maxindex], 
4)}.')\n","repo_name":"aorursy/new-nb-3","sub_path":"indiagolf99_pmr3508-2018-2341c86e07.py","file_name":"indiagolf99_pmr3508-2018-2341c86e07.py","file_ext":"py","file_size_in_byte":16372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72782796840","text":"from django.db import models\n\nfrom b2c.general.interfaces import SMSCodeReceiver\nfrom b2c.users.models.merchant import Merchant\nfrom b2c.users.models.user import User\n\n\nclass MerchantUser(User, SMSCodeReceiver):\n \"\"\"\n Merchant User\n \"\"\"\n merchant = models.ForeignKey(\n Merchant,\n related_name='merchant',\n on_delete=models.CASCADE,\n verbose_name='所属医院',\n )\n\n is_merchant_admin = models.BooleanField(\n default=False,\n verbose_name='是否是商户管理员'\n )\n\n notes = models.CharField(\n null=True,\n blank=True,\n max_length=50,\n verbose_name=\"备注\")\n\n def __str__(self):\n return self.username\n\n @property\n def sms_code_receiver_mobile_number(self):\n return self.mobile_number\n\n class Meta:\n verbose_name_plural = verbose_name = \"医院用户\"\n # codename , desc\n permissions = (\n (\"mb_product_categories\", \"项目分类\"),\n (\"mb_all_products_list\", \"项目管理\"),\n (\"mb_products_recommended\", \"推荐项目\"),\n (\"mb_set_product_notice\", \"购买须知\"),\n (\"mb_order_list\", \"订单列表\",),\n # (\"mb_order_retrieve\", \"订单详情\"),\n (\"mb_coupons_operation\", \"优惠券\"),\n (\"mb_sales_operation\", \"秒杀\"),\n (\"mb_groupon_operation\", \"拼团\"),\n (\"mb_assistance_operation\", \"砍价\"),\n (\"mb_order_stat\", \"销售统计\"),\n (\"mb_daily_user_stat\", \"用户统计\"),\n (\"mb_activity_data_stat\", \"活动统计\"),\n (\"mb_finance_stat\", \"财务统计\"),\n (\"mb_reservation_stat\", \"服务统计\"),\n (\"mb_index_page_banner\", \"首页banner\"),\n # (\"mb_merchant_certificate\", \"企业认证\"),\n # (\"mb_set_sms_sign\", \"短信签名\"),\n (\"mb_merchant_user_manage\", \"人员管理\"),\n (\"mb_merchant_index_page\", \"商户主页\"),\n (\"mb_merchant_card\", \"医院品牌\"),\n (\"mb_binding_wx_applet\", \"小程序绑定\"),\n (\"mb_wx_pay_auth\", \"微信支付授权\"),\n (\"mb_merchant_user_set_group\", \"角色管理\")\n )\n","repo_name":"PyZyyong/users","sub_path":"users/models/merchant_user.py","file_name":"merchant_user.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22331721394","text":"\n\nimport runtime_path # isort:skip\n\nimport numpy as np\n\nimport pytest\nfrom oreoweb.core.layer import Conv2D\nfrom oreoweb.core.layer import Dense\nfrom oreoweb.core.layer import Flatten\nfrom oreoweb.core.layer import MaxPool2D\nfrom oreoweb.core.loss import MSE\nfrom oreoweb.core.model import Model\nfrom oreoweb.core.net import Net\nfrom oreoweb.core.optimizer import SGD\nfrom oreoweb.utils.seeder import random_seed\n\nrandom_seed(0)\n\n\n@pytest.fixture\ndef fake_dataset():\n X = np.random.normal(size=(100, 5))\n y = np.random.uniform(size=(100, 1))\n return X, y\n\n\n@pytest.fixture\ndef img_dataset():\n X = np.random.normal(size=(100, 8, 8, 1))\n y = np.random.uniform(size=(100, 1))\n return X, y\n\n\n@pytest.fixture\ndef fc_model():\n net = Net([Dense(10), Dense(1)])\n loss = MSE()\n opt = SGD()\n return Model(net, loss, opt)\n\n\n@pytest.fixture\ndef cnn_model():\n net = Net([\n Conv2D(kernel=[3, 3, 1, 2]),\n MaxPool2D(pool_size=[2, 2], stride=[2, 2]),\n Conv2D(kernel=[3, 3, 2, 4]),\n MaxPool2D(pool_size=[2, 2], stride=[2, 2]),\n Flatten(),\n Dense(1)\n ])\n return Model(net, loss=MSE(), optimizer=SGD())\n\n\ndef test_parameters_change(fake_dataset):\n # make sure the 
parameters does change after apply gradients\n\n # fake dataset\n X, y = fake_dataset\n # simple model\n net = Net([Dense(10), Dense(1)])\n loss = MSE()\n opt = SGD(lr=1.0)\n model = Model(net, loss, opt)\n\n # forward and backward\n pred = model.forward(X)\n loss, grads = model.backward(pred, y)\n\n # parameters change test\n params_before = model.net.params.values\n model.apply_grads(grads)\n params_after = model.net.params.values\n for p1, p2 in zip(params_before, params_after):\n assert np.all(p1 != p2)\n\n\ndef test_backprop_dense(fc_model, fake_dataset):\n # train on a single data point\n X, y = fake_dataset\n\n previous_loss = np.inf\n for step in range(50):\n pred = fc_model.forward(X)\n loss, grads = fc_model.backward(pred, y)\n fc_model.apply_grads(grads)\n # loss should decrease monotonically\n assert loss < previous_loss\n previous_loss = loss\n\n\ndef test_backprop_cnn(cnn_model, img_dataset):\n # train on a single data point\n X, y = img_dataset\n\n previous_loss = np.inf\n for step in range(50):\n pred = cnn_model.forward(X)\n loss, grads = cnn_model.backward(pred, y)\n cnn_model.apply_grads(grads)\n # loss should decrease monotonically\n assert loss < previous_loss\n previous_loss = loss\n","repo_name":"harishsg99/Oreoweb","sub_path":"oreoweb/test/test_nn_functionality.py","file_name":"test_nn_functionality.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5561109902","text":"from golfram.units import px\n\nclass Canvas:\n \"\"\"A wrapper for pygame's Surface to help with offsets and rendering\n\n Surfaces have a bounding rectangle which determines what part of the\n Surface is visible. Calls to draw at locations outside of the bounding\n rectangle could possibly be ignored.\n\n \"\"\"\n def __init__(self, surface, bounds=None):\n self.surface = surface\n self.bounds = bounds\n self.width = surface.get_width() * px\n self.height = surface.get_height() * px\n\n def add_entity(self, entity):\n if self.bounds.contains(entity.position):\n destination = (px(entity.position.x), px(entity.position.y))\n self.surface.blit(entity.texture, destination)\n\n def scroll(self, bounds=None):\n self.bounds = bounds\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","repo_name":"calzoneman/Golfram-Alpha","sub_path":"golfram/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"42633568245","text":"# Azalea user-mode API library.\n#\n# Contains parts of the Azalea API that run in user mode.\n\nImport('env')\nfiles = [\n \"error_codes.cpp\",\n \"os_version.cpp\",\n\n \"processes/elf.cpp\",\n \"processes/exec_file.cpp\",\n ]\n\nobj = [ ]\nfor f in files:\n obj = obj + [env.Object(f), ]\n\nReturn (\"obj\")","repo_name":"martin-hughes/project_azalea","sub_path":"user/libs/libazalea/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"29780116098","text":"from Crypto.Util.number import getPrime\nfrom secret import FLAG\n\ndef main():\n\tm = bytes2int(FLAG)\n\tp = getPrime(64)\n\tq = getPrime(64)\n\tN = p * q\n\te = 0x10001\n\tc = pow(m, e, N)\n\n\tprint(f\"Public key: {(N, e)}\")\n\tprint(f\"Ciphertext: {c}\")\n\ndef int2bytes(n):\n\tlength = (n.bit_length() + 7) // 8\n\treturn n.to_bytes(length, 
\"big\")\n\ndef bytes2int(s):\n\treturn int.from_bytes(s, \"big\")\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"utaha1228/archsec-crypto","sub_path":"chall0/chall.py","file_name":"chall.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72297177319","text":"import os\nimport shutil\nimport subprocess\n\nfrom .get_antigen_chain import get_antigen_chain\nfrom .repair_pdb_to_complex import repair_pdb_to_complex\nfrom .change_chains_antibodies import change_chains_antibodies\nfrom .make_complex_pdb import make_complex_pdb\nfrom .renumber_chains import renumber_chains\nfrom .erase_temp_files import erase_temp_files\nfrom .make_repack_options_file import make_repack_options_file\n\nfrom stages.second import second\nfrom stages.third import third\nfrom stages.fourth import fourth\nfrom stages.fifth import fifth\n\ndef first(antibody, antigen, antigen_pdb, antigen_chain, rosetta_path):\n antibody_HC_path = os.path.join(\n os.getcwd(), \"antibodies\", f\"{antibody}_HC.pdb\")\n antibody_LC_path = os.path.join(\n os.getcwd(), \"antibodies\", f\"{antibody}_LC.pdb\")\n\n if not ((os.path.isfile(antibody_HC_path)) and (os.path.isfile(antibody_LC_path))):\n return False\n\n antigen_pdb_path = os.path.join(os.getcwd(), \"antigens\", antigen_pdb)\n if not os.path.isfile(antigen_pdb_path):\n return False\n\n antigen_split = antigen.split(\":\")\n if len(antigen_split) == 3:\n complex_folder = os.path.join(\n os.getcwd(), \"results\", f\"{antibody}-{antigen_split[2]}\")\n os.mkdir(complex_folder)\n\n shutil.copyfile(antibody_HC_path, os.path.join(\n complex_folder, f\"{antibody}_HC.pdb\"))\n shutil.copyfile(antibody_LC_path, os.path.join(\n complex_folder, f\"{antibody}_LC.pdb\"))\n shutil.copyfile(antigen_pdb_path, os.path.join(\n complex_folder, f\"{antigen_pdb}\"))\n\n result = get_antigen_chain(\n antigen_pdb=antigen_pdb, antigen_chain=antigen_chain, complex_folder=complex_folder)\n\n if result == False:\n return False\n \n row = [complex_folder, antigen_pdb, antigen_chain, \"A\",\n f\"{antibody}_HC.pdb\", \"A\", \"H\", f\"{antibody}_LC.pdb\", \"A\", \"L\"]\n\n result = repair_pdb_to_complex(row=row)\n\n if result == False:\n return False\n \n result = change_chains_antibodies(row=row)\n\n if result == False:\n return False\n\n result = make_complex_pdb(row=row, complex_name=f\"{antibody}-{antigen_split[2]}\")\n\n if result == False:\n return False\n\n result = renumber_chains(row=row, complex_name=f\"{antibody}-{antigen_split[2]}\")\n\n if result == False:\n return False\n \n erase_temp_files(row=row, complex_name=f\"{antibody}-{antigen_split[2]}\")\n\n make_repack_options_file(row=row, antibody=antibody, antigen=antigen_split[2])\n\n # STAGE TWO\n second.main(complex_folder=row[0], rosetta_path=rosetta_path)\n\n # STAGE THREE\n result = third.main(complex_folder=row[0], antibody=antibody, antigen=antigen_split[2], rosetta_path=rosetta_path)\n\n if result == False:\n return False\n \n fourth.main(complex_folder=row[0], rosetta_path=rosetta_path)\n\n result = fifth.main(complex_folder=row[0])\n\n if result == False:\n return False\n else:\n return False\n\n\n return True\n","repo_name":"ClaudioGuevara/rosetta","sub_path":"stages/first/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73687050599","text":"import django.dispatch\nfrom utils.operations import 
OperationBase\nfrom operation_parser import gobbler\nfrom moderation.models import *\nfrom django.utils.translation import ugettext_noop as _\n\n# Regular Expressions for parsing the stock code and level\nSTOCK_CODE = \"[A-z]+\"\nSTOCK_LEVEL = \"[0-9]+\"\nNOT_ALPHA_NUM = \"[^A-z0-9]*\"\n\nclass StockBase(OperationBase):\n def _error_unrecognized_chars(self, opcode):\n '''\n Return a MessageEffect that indicates a failure as a result of\n the arguments containing unrecognized characters in the argument string.\n '''\n return error(\n _(\"Error Parsing %(op_code)s Arguments\"), { 'op_code': opcode },\n _(\"Extra characters found in message.\"), {}\n )\n\nclass StockLevel(StockBase):\n \"\"\"Implements the StockLevel SMS API.\"\"\"\n\n helptext = \"For example, %(opcode)s P 2100 M 10. Reports 2100 doses of vaccine P, 10 of M, and 0 of any others.\"\n\n def _error_none_found(self, opcode):\n '''\n Return a MessageEffect that indicates a failure as a result of\n the arguments containing no recognized stock levels.\n '''\n return error(\n _(\"Error Parsing %(op_code)s Arguments\"), { 'op_code': opcode },\n _(\"No stock levels found.\"), {}\n )\n\n def _error_duplicate_stock_levels(self, opcode):\n '''\n Return a MessageEffect that indicates a failure as a result of\n the arguments containing duplicate stock levels.\n '''\n return error(\n _(\"Error Parsing %(op_code)s Arguments\"), { 'op_code': opcode },\n _(\"Found a duplicate stock code.\"), {}\n )\n\n def _ok(self, opcode, args):\n '''Return a MessageEffect that indicates success.'''\n return info(\n _(\"Parsed %(op_code)s Arguments\"), { 'op_code': opcode },\n _(\"Parsed: stock_levels is %(stock_levels)s.\"), args\n )\n\n def parse_arguments(self, opcode, arg_string, message):\n \"\"\"\n Parses stock codes and inventory levels from the provided argument\n string. Expects one or more of the format:\n \n\n Returns a 2-tuple containing a list of MessageEffects representing the\n results of the parsing, and a Python dictionary mapping 'stock_levels'\n to another dictionary which maps the parsed stock code strings to\n integer inventory levels.\n \"\"\"\n levels, remaining = gobbler.gobble_all(STOCK_CODE + NOT_ALPHA_NUM + STOCK_LEVEL, arg_string)\n\n if len(remaining) > 0:\n # there are still characters remaining, meaning there was a parsing failure\n effect = self._error_unrecognized_chars(opcode)\n return [effect], {}\n\n if levels == None:\n # did not find any stock code and level combos\n effect = self._error_none_found(opcode)\n return [effect], {}\n\n # create a dictionary: stock code -> stock level\n levels = [ gobbler.gobble(STOCK_CODE, l) for l in levels ]\n stock_levels = {}\n\n for stock_code, stock_level in levels:\n if stock_code in stock_levels:\n # there was a duplicate stock code in the message\n effect = self._error_duplicate_stock_levels(opcode)\n return [effect], {}\n\n # add to the inventory report\n stock_levels[stock_code] = int(stock_level)\n\n # parsing was successful.\n parsed_args = { 'stock_levels': stock_levels }\n effect = self._ok(opcode, parsed_args)\n return [effect], parsed_args\n\nclass StockOut(StockBase):\n \"\"\"Implements the StockOut SMS API.\"\"\"\n\n helptext = \"For example, %(opcode)s P. 
In an emergency, reports that you are out of doses of vaccine P.\"\n\n def _ok(self, opcode, args):\n '''Return a MessageEffect that indicates success.'''\n return info(\n _(\"Parsed %(op_code)s Arguments\"), { 'op_code': opcode },\n _(\"Parsed: stock_out is %(stock_out)s.\"), args\n )\n\n def _error_none_found(self, opcode):\n '''\n Return a MessageEffect that indicates a failure as a result of\n the arguments containing no recognized stock levels.\n '''\n return error(\n _(\"Error Parsing %(op_code)s Arguments\"), { 'op_code': opcode },\n _(\"No stock code found.\"), {}\n )\n\n def parse_arguments(self, opcode, arg_string, message):\n \"\"\"\n Parses a single stock code from the provided argument string.\n Returns a 2-tuple containing a list of MessageEffects representing the\n results of the parsing, and a Python dictionary mapping 'stock_out'\n to the actual stock code found by the parsing.\n \"\"\"\n\n codes, remaining = gobbler.gobble(STOCK_CODE, arg_string)\n\n if len(remaining) > 0:\n # there are still characters remaining, meaning there was a parsing failure\n effect = self._error_unrecognized_chars(opcode)\n return [effect], {}\n\n if codes == None:\n # could not parse any useful information\n effect = self._error_none_found(opcode)\n return [effect], {}\n\n # codes is a one element list containing the stock code\n stock_code = codes[0]\n\n # parsing was successful.\n parsed_args = { 'stock_out': stock_code }\n effect = self._ok(opcode, parsed_args)\n return [effect], parsed_args","repo_name":"ireynolds/sms-immunization-manager","sub_path":"stock/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25359692988","text":"class queue:\n queues = []\n global front\n front = 0\n global rear\n rear = 0\n\n def __init__(self, value):\n if value == \"queue\":\n self.enqueue()\n elif value == \"dequeue\":\n self.dequeue()\n\n def enqueue(self):\n\n value = int(input(\"Enter the value to be queued:\\n\"))\n global rear\n self.queues.append(value)\n rear += 1\n print(rear)\n print(self.queues)\n self.display()\n\n def dequeue(self):\n\n global front\n global rear\n\n if rear == 0 and front == 0:\n print(\"Queue is empty\")\n\n else:\n deleted = self.queues[front]\n del self.queues[front]\n rear = rear - 1\n print(\"dequeued {}\".format(deleted))\n self.display()\n\n def display(self):\n\n print(\"Queue \\n\")\n\n for i in range(0, len(self.queues)):\n # print(i)\n print(\"| {} |\".format(self.queues[i]), end='')\n # print(\"\\n\")\n print(\"\\n\")\n\n\nif __name__ == \"__main__\":\n ch = ''\n while ch != 'exit':\n print(\n \"--------------------------------------------------------------------------------------------------------------------\")\n # print(\"\\n\")\n ch = input(\"Enter queue to queue into queue \\nEnter dequeue to dequeue from queue \\nEnter exit to exit\\n\")\n call = queue(ch)\n # print(\"\\n\")\n print(\n \"--------------------------------------------------------------------------------------------------------------------\")\n","repo_name":"Srinivas-Ravindranath/data-structures","sub_path":"queues.py","file_name":"queues.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"56907396","text":"import torch\nimport os\nfrom torch.cuda.amp import GradScaler as GradScaler\nfrom torch.cuda.amp import autocast\nimport torch.optim as optim\nimport 
torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom utils.dataloader import ClassificationDataset, detection_collate\nfrom utils.utils import get_classes, weights_init, show_config\nfrom utils.loss import (\n Poly1CrossEntropyLoss, Poly1FocalLoss, LabelSmoothSoftmaxCE, JointLoss\n)\nfrom nets import get_model_from_name\nfrom utils.callbacks import LossHistory\nfrom nets.mobileone import reparameterize_model\nfrom utils.optimizer import Ranger\n\nconfig = {\n 'is_cuda' : True, \n 'fp16' : True, # 混合精度训练 \n 'classes_path' : './classes.txt', # 种类\n 'input_shape' : [224, 224], \n 'model_name' : 'mobileone',\n 'pretrained_weights' : False, # 是否需要预训练权重\n 'model_path' : '', # 整个模型的权重\n 'batch_size' : 16,\n 'Epochs' : 400,\n 'learning_rate' : 1e-2,\n 'optimizer_type' : 'SGD',\n 'lr_decay_type' : 'Cosine',\n 'num_worker' : 4,\n 'save_dir' : './logs', # 保存权重以及损失的文件夹\n 'save_period' : 10, # 每隔10Epochs保存一次权重\n 'loss_func_name' : 'Poly_loss', # 损失函数\n 'data_aug' : 'original'\n}\n\n# ---------------------------------------------------- #\n# model_name 可选:mobileone、ghostnetv2\n# optimizer_type 可选:SGD、Adam、Ranger\n# loss_func_name\n# 可选:Poly_loss、PolyFocal、CE、LabelSmoothSoftmaxCE\n# 若设置为是双损失函数,则'loss_func_name'设成列表形式\n# 如:'loss_func_name': [('Poly_loss', 'LabelSmoothSoftmaxCE'), (0.9, 0.1)]\n# 后面一个元组为对应损失函数的权重\n# data_aug 可选:original、randaugment\n# lr_decay_type 可选:Cosine\n# ---------------------------------------------------- #\n\n\nif __name__ == '__main__':\n \n is_cuda = config['is_cuda']\n fp16 = config['fp16']\n classes_path = config['classes_path']\n input_shape = config['input_shape']\n model_name = config['model_name']\n pretrained_weights = config['pretrained_weights']\n model_path = config['model_path']\n batch_size = config['batch_size']\n learning_rate = config['learning_rate']\n optimizer_type = config['optimizer_type']\n lr_decay_type = config['lr_decay_type']\n num_worker = config['num_worker']\n save_dir = config['save_dir']\n save_period = config['save_period']\n loss_func_name = config['loss_func_name']\n Epochs = config['Epochs']\n data_aug = config['data_aug']\n \n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n class_names = get_classes(classes_path)\n num_classes = len(class_names)\n loss_func_dict = {\n 'Poly_loss': Poly1CrossEntropyLoss(num_classes), 'PolyFocal': Poly1FocalLoss(num_classes),\n 'CE': nn.CrossEntropyLoss(),\n 'LabelSmoothSoftmaxCE': LabelSmoothSoftmaxCE()\n }\n if isinstance(loss_func_name, str):\n loss_func = loss_func_dict[loss_func_name]\n else:\n first_loss, first_loss_weight = loss_func_dict[loss_func_name[0][0]], loss_func_name[1][0]\n second_loss, second_loss_weight = loss_func_dict[loss_func_name[0][1]], loss_func_name[1][1]\n loss_func = JointLoss(first_loss, second_loss, first_loss_weight, second_loss_weight)\n if model_name in ['mobileone']:\n model = get_model_from_name[model_name](num_classes=num_classes, variant=\"s0\", pretrained=pretrained_weights, inference_mode=False)\n else:\n model = get_model_from_name[model_name](num_classes=num_classes, pretrained=pretrained_weights)\n if not pretrained_weights:\n weights_init(model)\n if model_path != \"\":\n print(f'Load weights {model_path}.')\n model_dict = model.state_dict()\n pretrained_dict = torch.load(model_path, map_location = device)\n load_key, no_load_key, temp_dict = [], [], {}\n for k, v in pretrained_dict.items():\n if k in model_dict.keys() and 
np.shape(model_dict[k]) == np.shape(v):\n temp_dict[k] = v\n load_key.append(k)\n else:\n no_load_key.append(k)\n model_dict.update(temp_dict)\n model.load_state_dict(model_dict)\n print(\"\\nSuccessful Load Key:\", str(load_key)[:500], \"……\\nSuccessful Load Key Num:\", len(load_key))\n print(\"\\nFail To Load Key:\", str(no_load_key)[:500], \"……\\nFail To Load Key num:\", len(no_load_key))\n print(\"\\n\\033[1;33;44m温馨提示,head部分没有载入是正常现象,Backbone部分没有载入是错误的。\\033[0m\")\n\n loss_history = LossHistory(save_dir, model, input_shape=input_shape)\n scaler = GradScaler() if fp16 else None\n model_train = model.train()\n if is_cuda:\n model_train = torch.nn.DataParallel(model)\n cudnn.benchmark = True\n model_train = model_train.cuda()\n \n with open('./train_cls.txt', encoding='utf-8') as f:\n train_lines = f.readlines()\n with open('./valid_cls.txt', encoding='utf-8') as f:\n val_lines = f.readlines()\n num_train = len(train_lines)\n num_val = len(val_lines)\n np.random.seed(10101)\n np.random.shuffle(train_lines)\n np.random.seed(None)\n \n train_dataset = ClassificationDataset(train_lines, input_shape, phase='train', data_aug=data_aug)\n val_dataset = ClassificationDataset(val_lines, input_shape, phase='valid')\n train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=num_worker, pin_memory=True, \n drop_last=True, collate_fn=detection_collate)\n valid_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=batch_size, num_workers=num_worker, pin_memory=True, \n drop_last=True, collate_fn=detection_collate)\n \n show_config(config)\n wanted_step = 3e4 if optimizer_type == \"SGD\" else 1e4\n total_step = num_train // batch_size * Epochs\n if total_step <= wanted_step:\n wanted_epoch = wanted_step // (num_train // batch_size) + 1\n print(\"\\n\\033[1;33;44m[Warning] 使用%s优化器时,建议将训练总步长设置到%d以上。\\033[0m\"%(optimizer_type, wanted_step))\n print(\"\\033[1;33;44m[Warning] 本次运行的总训练数据量为%d,Unfreeze_batch_size为%d,共训练%d个Epoch,计算出总训练步长为%d。\\033[0m\"%(num_train, batch_size, Epochs, total_step))\n print(\"\\033[1;33;44m[Warning] 由于总训练步长为%d,小于建议总步长%d,建议设置总世代为%d。\\033[0m\"%(total_step, wanted_step, wanted_epoch))\n optimizer = {\n 'Adam' : optim.Adam(model_train.parameters(), learning_rate, betas = (0.9, 0.999), weight_decay=5e-4),\n 'SGD' : optim.SGD(model_train.parameters(), learning_rate, momentum = 0.9, nesterov=True),\n 'Ranger': Ranger(model_train.parameters(), learning_rate)\n }[optimizer_type]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer = optimizer,T_max = Epochs)\n epoch_step = num_train // batch_size\n epoch_step_val = num_val // batch_size\n if epoch_step == 0 or epoch_step_val == 0:\n raise ValueError(\"数据集过小,无法继续进行训练,请扩充数据集。\")\n \n \n for epoch in range(Epochs):\n total_loss = 0\n total_accuracy = 0\n val_loss = 0\n val_accuracy = 0\n print(\"Start training!\")\n pbar = tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epochs}',postfix=dict,mininterval=0.3)\n model_train.train()\n for idx, batch in enumerate(train_dataloader):\n if idx >= epoch_step: \n break\n images, targets = batch\n with torch.no_grad():\n if is_cuda:\n images = images.cuda()\n targets = targets.cuda()\n optimizer.zero_grad()\n if not fp16:\n outputs = model_train(images)\n loss_value = loss_func(outputs, targets)\n loss_value.backward()\n optimizer.step()\n else:\n with autocast():\n outputs = model_train(images)\n loss_value = loss_func(outputs, targets)\n scaler.scale(loss_value).backward()\n scaler.step(optimizer)\n scaler.update()\n \n total_loss += 
loss_value.item()\n with torch.no_grad():\n accuracy = torch.mean((torch.argmax(F.softmax(outputs, dim=-1), dim=-1) == targets).type(torch.FloatTensor))\n total_accuracy += accuracy.item()\n pbar.set_postfix(**{'total_loss': total_loss / (idx + 1), \n 'accuracy' : total_accuracy / (idx + 1), \n 'lr' : scheduler.get_last_lr()[0]})\n pbar.update(1)\n scheduler.step()\n pbar.close()\n print('Finsh Training!')\n print('Start Validation')\n pbar = tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epochs}',postfix=dict,mininterval=0.3)\n model_eval = model_train.eval()\n if model_name in ['mobileone']:\n model_eval = reparameterize_model(model_eval)\n for idx, batch in enumerate(valid_dataloader):\n if idx >= epoch_step_val:\n break\n images, targets = batch\n with torch.no_grad():\n if is_cuda:\n images = images.cuda()\n targets = targets.cuda()\n\n optimizer.zero_grad()\n outputs = model_eval(images)\n loss_value = loss_func(outputs, targets)\n val_loss += loss_value.item()\n accuracy = torch.mean((torch.argmax(F.softmax(outputs, dim=-1), dim=-1) == targets).type(torch.FloatTensor))\n val_accuracy += accuracy.item()\n pbar.set_postfix(**{'total_loss': val_loss / (idx + 1),\n 'accuracy' : val_accuracy / (idx + 1), \n 'lr' : scheduler.get_last_lr()[0]})\n pbar.update(1)\n pbar.close()\n print('Finish Validation')\n loss_history.append_loss(epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)\n print('Epoch:' + str(epoch + 1) + '/' + str(Epochs))\n print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))\n \n if (epoch + 1) % save_period == 0 or epoch + 1 == Epochs:\n torch.save(model.state_dict(), os.path.join(save_dir, \"ep%03d-loss%.3f-val_loss%.3f.pth\" % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)))\n if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):\n print('Save best model to best_epoch_weights.pth')\n torch.save(model.state_dict(), os.path.join(save_dir, \"best_epoch_weights.pth\"))\n torch.save(model.state_dict(), os.path.join(save_dir, \"last_epoch_weights.pth\"))\n \n \n \n \n\n \n \n \n \n ","repo_name":"hao-ux/image-classification-pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9978087215","text":"import tensorflow as tf\nimport keras.backend.tensorflow_backend as KTF\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nKTF.set_session(sess)\nimport argparse\nimport numpy as np\nimport pickle\nfrom sklearn.cross_validation import StratifiedKFold,KFold\nfrom glob import glob\nfrom models import *\nfrom utils import *\nimport os\n\n\n\ndef train(cfg,data_dir):\n\n\n with open(data_dir+cfg['data_pkl'], 'rb') as f:\n dataset = pickle.load(f)\n with open(data_dir+'tool.pkl', 'rb') as f:\n tool = pickle.load(f)\n cfg['num_word'] = len(tool['word'][0])\n cfg['num_pg'] = len(tool['flag'][0])\n cfg['maxlen'] = len(dataset['text'][0])\n\n\n print(cfg)\n tr_file = list(sorted(set(filename.split('.')[0] for filename in os.listdir(tr_path))))\n tr_file = np.array([idx for idx in tr_file if idx != '']) # tr_path 下有个奇怪的隐藏文件夹删不掉\n folds = KFold(len(tr_file),cfg['nfold'],shuffle=True,random_state=66666)\n\n if cfg['use_adj_feat']:\n oof_y = np.load('./output/oof_aver.npy')\n\n for n_fold, (tr_idx, val_idx) in enumerate(folds):\n if n_fold not in cfg['fold']:\n continue\n\n 
print(n_fold,'-----------------')\n idx_t,tr_data = split_data(dataset,set(tr_file[tr_idx]))\n idx_v,val_data = split_data(dataset,set(tr_file[val_idx]))\n\n\n model = cfg['model'](cfg)\n if n_fold == 0:\n print(model.summary())\n\n f1_best = np.float('-inf')\n best_i = 0\n\n num_r = cal_total_relations(tr_file[val_idx])\n print('num_r',num_r)\n\n if cfg['use_adj_feat']:\n print(f\"oof score {f1_score_v2(val_data['y'],oof_y[idx_v],num_r)}\")\n\n for e in range(1000):\n if e - best_i > 3:\n break\n print(f'epochs_{e}.......')\n if cfg['use_adj_feat']:\n model.fit(tr_data, (tr_data['y']+oof_y[idx_t])/2,\n batch_size=cfg['bs'],\n epochs=1,\n verbose=2)\n else:\n model.fit(tr_data, tr_data['y'],\n batch_size=cfg['bs'],\n epochs=1,\n verbose=2)\n\n pred = model.predict(val_data, batch_size=256, verbose=0)\n f1 = f1_score_v2(val_data['y'],pred,num_r)\n\n if f1_best < f1:\n f1_best = f1\n best_i = e\n print(f'f1_score{f1}, improved save model.......')\n model.save_weights(f\"../weights/{cfg['name']}_fold{n_fold}.h5\")\n else:\n print(f'f1_score{f1}, best f1_score{f1_best}')\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Process some integers.')\n\n parser.add_argument('--stage', type=int, required=True)\n parser.add_argument('--fold', type=str, required=True)\n\n\n from config import *\n\n args = parser.parse_args()\n cfg = {}\n cfg['nfold'] = 10\n cfg['fold'] = [int(fold) for fold in args.fold]\n\n cfg['model'] = rnn_model\n cfg['word_dim'] = 300\n cfg['alpha'] = 0.55 # 正样本权重\n cfg['lr'] = 0.0005\n cfg['bs'] = 256\n cfg['unit1'] = 320\n cfg['unit2'] = 320\n cfg['emb'] = 0.2\n cfg['use_adj_feat'] = False\n cfg['data_pkl'] = None\n\n assert args.stage in [1,2],'stage error, stage in [1,2]'\n\n if args.stage == 1:\n cfg['data_pkl'] = 'dataset.pkl'\n cfg['use_adj_feat'] = False\n\n cfg['encode_name'] = 'gru'\n cfg['name'] = 'gru'\n train(cfg, data_path + 'test_data/')\n cfg['encode_name'] = 'lstm'\n cfg['name'] = 'lstm'\n train(cfg, data_path + 'test_data/')\n cfg['encode_name'] = 'grulstm'\n cfg['name'] = 'grulstm'\n train(cfg, data_path + 'test_data/')\n cfg['encode_name'] = 'lstmgru'\n cfg['name'] = 'lstmgru'\n train(cfg, data_path + 'test_data/')\n\n elif args.stage == 2:\n cfg['data_pkl'] = 'dataset_v2.pkl'\n cfg['use_adj_feat'] = True\n\n cfg['encode_name'] = 'gru'\n cfg['name'] = 'super_gru'\n train(cfg, data_path + 'test_data/')\n\n cfg['encode_name'] = 'lstm'\n cfg['name'] = 'super_lstm'\n train(cfg,data_path+'test_data/')\n\n cfg['encode_name'] = 'grulstm'\n cfg['name'] = 'super_grulstm'\n train(cfg, data_path + 'test_data/')\n\n cfg['encode_name'] = 'lstmgru'\n cfg['name'] = 'super_lstmgru'\n train(cfg, data_path + 'test_data/')\n\n\n\n\n\n\n\n","repo_name":"qrfaction/ruijin-kg-SuperGUTScode","sub_path":"复赛/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"18"} +{"seq_id":"41622822360","text":"from dataBase import *\nfrom api import *\nimport csv\n\n\ndef download_database():\n cur, conn = set_up_data_base('test.db')\n create_table(cur, conn)\n # add_data_to_LastFM(cur, conn)\n add_data_to_artist(cur, conn)\n # add_data_to_genre_track(cur, conn)\n\n\ndef get_data():\n cur, conn = set_up_data_base(\"test.db\")\n cur.execute(\"SELECT * FROM Artist\")\n rows = cur.fetchall()\n artist_list = [row[1] for row in rows]\n\n cur.execute(\n \"SELECT Track.title, Track.price FROM Track JOIN Artist ON Track.genre_id = Artist.id\")\n rows = 
cur.fetchall()\n track_list = [row[0] for row in rows]\n price_list = [row[1] for row in rows]\n\n cur.execute(\"SELECT Genre.name FROM Genre JOIN Artist ON Genre.id = Artist.id\")\n rows = cur.fetchall()\n genre_list = [row[0] for row in rows]\n\n cur.execute(\"SELECT LastFM.listeners, LastFM.url FROM LastFM\")\n rows = cur.fetchall()\n listeners = [row[0] for row in rows]\n url = [row[1] for row in rows]\n\n return artist_list, track_list, genre_list, price_list, listeners, url\n\n\ndef write_csv():\n artist_list, track_list, genre_list, price_list, listeners, url = get_data()\n with open('songs.csv', 'w') as f:\n csv_writer = csv.writer(f)\n csv_writer.writerow(['id',\n 'artist', 'song_name', 'price'])\n for i in range(len(artist_list)):\n csv_writer.writerow(\n [i + 1, artist_list[i], track_list[i], '$' + str(price_list[i])])\n\n\ndownload_database()\n# write_csv()\n","repo_name":"vilktor370/SI206-final-project","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11903121320","text":"import os\nfrom ftplib import FTP_TLS as FTP\nfrom multiprocessing import Pool\n\n\ndef download_all_in_one_path(targetdir,resultdir,check = True,num = 50):\n\tif(os.path.exists(resultdir) == False):\n\t\tos.makedirs(resultdir)\n\tftp = FTP('129.164.179.23')\n\tftp.login()\n\tftp.prot_p()\n\tftp.cwd(targetdir)\n\tfiles = ftp.nlst()\n\ttarget = 'https://heasarc.gsfc.nasa.gov/FTP' + targetdir\n\tc = None\n\tif(check):\n\t\tc = []\n\tdata1 = []\n\tftp.voidcmd('TYPE I')\n\tprint('正在获取校验信息........')\n\tfor i in files:\n\t\t#print(i)\n\t\tdata = os.path.join(target,i)\n\t\tprint(data)\n\t\tdata1.append(data)\n\t\tif(check):\n\t\t\tc.append(ftp.size(i))\n\tftp.quit()\n\tif(check == False):\n\t\tprint('忽略数据大小校验。')\n\tprint('正在校验...............')\n\tdown(data1,resultdir,check=c,threadnum = num)\n\tprint('\\n任务下载完成!!!')\n\n\ndef down(targlist,resultdir,check = None,threadnum = 50):\n\tif(os.path.exists(resultdir) == False):\n\t\tos.makedirs(resultdir)\n\tos.chdir(resultdir)\n\ttargnumber = len(targlist)\n\trea = os.listdir(resultdir)\n\tnu = len(rea)\n\tif (nu != 0):\n\t\teee = []\n\t\tfor i in rea:\n\t\t\tif (os.path.isfile(i)):\n\t\t\t\teee.append(i)\n\t\tif (len(eee) != 0):\n\t\t\ten = []\n\t\t\tfor index,i in enumerate(targlist):\n\t\t\t\tnn = True\n\t\t\t\tfor j in eee:\n\t\t\t\t\tif (os.path.split(i)[1] == j):\n\t\t\t\t\t\tif ((check == None) | (targnumber != len(check))):\n\t\t\t\t\t\t\tnn = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmyfilesize = os.path.getsize(j)\n\t\t\t\t\t\t\tif (myfilesize >= check[index]):\n\t\t\t\t\t\t\t\tnn = False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tos.system('rm '+j)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\tif (nn):\n\t\t\t\t\tkkk = check[index]\n\t\t\t\t\ten.append([i,kkk,resultdir])\n\t\t\ttarglist = en\n\t\telse:\n\t\t\ten = []\n\t\t\tfor index,i in enumerate(targlist):\n\t\t\t\tkkk = check[index]\n\t\t\t\ten.append([i,kkk,resultdir])\n\t\t\ttarglist = en\n\telse:\n\t\ten = []\n\t\tfor index,i in enumerate(targlist):\n\t\t\tkkk = check[index]\n\t\t\ten.append([i,kkk,resultdir])\n\t\ttarglist = en\n\tprint(targlist)\n\tpool = Pool(threadnum)\n\tpool.map(download,targlist)\n\tpool.close()\n\tpool.join()\n\tprint('目标下需要载数:',targnumber)\n\tprint('重复目标数:',targnumber-len(targlist))\n\tprint('实际下需要载数:',len(targlist))\n\n\ndef download(target1):\n\ttarget = target1[0]\n\tcheck = target1[1]\n\tlocal = 
target1[2]\n\tfilename = os.path.split(target)[1]\n\tt_link = 'wget --quiet --show-progress --read-timeout=5 --tries=0 -P '+local + ' '+target\n\t#print(t_link)\n\tos.system(t_link)\n\tif(os.path.exists(filename) == False):\n\t\tprint('\\n' + filename + ' 下载失败,即将重新下载!')\n\t\tdownload(target1)\n\telse:\n\t\tmyfilesize = os.path.getsize(filename)\n\t\tif(myfilesize < check):\n\t\t\tprint('\\n'+filename + '未通过校验,即将重新下载!!')\n\t\t\tos.system('rm '+filename)\n\t\t\tdownload(target1)\n\n#targetdir = '/fermi/data/gbm/daily/2017/02/08/current/'\n#resultdir = '/home/laojin/gbm_daily_database/2017/02/08/'\n#download_all_in_one_path(targetdir,resultdir,num = 10)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"zoujinhang/my_python","sub_path":"zjh_download.py","file_name":"zjh_download.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6016438303","text":"import asyncio\nimport logging\n\nfrom aiogram import Bot, Dispatcher\nfrom config import TOKEN_API\n\nfrom handlers import register_handlers, register_queries\n\n\nasync def main():\n logging.basicConfig(level=logging.INFO,\n format='%(levelname)s:%(asctime)s - %(message)s')\n\n bot = Bot(TOKEN_API)\n dp = Dispatcher(bot)\n\n register_handlers(dp)\n register_queries(dp)\n\n try:\n await dp.start_polling()\n except:\n logging.error(\"Executor object hasn't been started\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"nkorgik/aiogram-inline-mode","sub_path":"aiogram_inline_callback_data/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"3496873919","text":"from cs50 import get_int\n\n# Prompts the user for the height\nwhile True:\n h = get_int(\"Height: \")\n\n if h > 0 and h <= 8:\n break\n\n# Prints the pyramid\nfor i in range(h):\n # Prints the left spaces\n for s in range(h - (i + 1)):\n print(\" \", end=\"\")\n\n # Prints the left hashes\n for p in range(i + 1):\n print(\"#\", end=\"\")\n\n # Prints the gap spaces\n print(\" \", end=\"\")\n\n # Prints the right spaces\n for p in range(i + 1):\n print(\"#\", end=\"\")\n\n # Changes the line\n print()","repo_name":"LGalassi/cs50-2020-psets","sub_path":"psets/pset6/mario/more/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12981378061","text":"#!/usr/local/bin/python3\n\nimport sys\n# from copy import deepcopy\n# from collections import deque\nfrom collections import defaultdict\n# import functools\n# import numpy as np\n# from PIL import Image\n#import re\n\n\ndef getsides(B):\n sides = 0\n L = set()\n for j in B:\n for l in L:\n d = abs(j[0] - l[0]) + abs(j[1] - l[1]) + abs(j[2] - l[2])\n if d == 1:\n #print(f' {j} and {l} are touching')\n sides -= 2\n\n #print(f'adding {j}, sides {sides}')\n sides += 6\n L.add(j)\n return sides\n\n\nS1 = 0\nS2 = 0\n\ninfile = sys.argv[1] if len(sys.argv) > 1 else 'test.txt'\nprint(\"<<{}>>\".format(infile))\n\nif infile == 'puzzle.txt':\n U = 30\nelse:\n U = 7\n\nwith open(infile) as fin:\n lines = ((fin.read().strip()).split('\\n'))\n\n\nB = []\nfor line in lines:\n x, y, z = line.split(',')\n assert int(x) < 30\n assert int(y) < 30\n assert int(z) < 30\n B.append((int(x), int(y), int(z)))\n\n\n# Part I\nS1 = getsides(B)\n\n\n# 
Part II\nminZ = defaultdict(lambda: int(1e6))\nminY = defaultdict(lambda: int(1e6))\nminX = defaultdict(lambda: int(1e6))\nmaxZ = defaultdict(lambda: 0)\nmaxY = defaultdict(lambda: 0)\nmaxX = defaultdict(lambda: 0)\nxmi = ymi = zmi = int(1e6)\nxma = yma = zma = 0\n\nfor x,y,z in B:\n minZ[(x,y)] = min(z, minZ[(x,y)])\n maxZ[(x,y)] = max(z, maxZ[(x,y)])\n minY[(x,z)] = min(y, minY[(x,z)])\n maxY[(x,z)] = max(y, maxY[(x,z)])\n minX[(y,z)] = min(x, minX[(y,z)])\n maxX[(y,z)] = max(x, maxX[(y,z)])\n\n xmi = min(xmi, x)\n ymi = min(ymi, y)\n zmi = min(ymi, z)\n xma = max(xma, x)\n yma = max(yma, y)\n zma = max(zma, z)\n\nprint(f'## minX: ', minX)\nprint(f'## maxX: ', maxX)\nprint(f'## minY: ', minY)\nprint(f'## maxY: ', maxY)\nprint(f'## minZ: ', minZ)\nprint(f'## maxZ: ', maxZ)\nprint(xmi, xma, ymi, yma, zmi, zma)\n\n\n\nPI = set() # potentially internal 'holes'\nfor i in range(0, U): # i == x\n for j in range(0, U): # j == y\n for k in range(0, U): # k == z\n p = (i,j,k)\n print(p)\n if p not in B:\n if (minX[(j,k)] < i < maxX[(j,k)]) and (minY[(i,k)] < j < maxY[(i,k)]) and (minZ[(i,j)] < k < maxZ[(i,j)]):\n print(f'candidate {p}')\n PI.add(p)\n else:\n pass\n print('p outside range ', p)\n else:\n print(f'p {p} already in B')\n\n\nprint(PI)\nINT = []\nfor pi in PI:\n if not reaches_outside(pi, B):\n INT.append(pi)\n\np2sides = getsides(PI)\nprint('p2sides ', p2sides)\nS2 = S1 + p2sides\n\n\nprint(\"------------- A -------------\")\nprint('S1 ', S1)\nprint(\"------------- B -------------\")\nprint('S2 ', S2)\nprint(\"-----------------------------\")\n","repo_name":"mortenjc/aoc2022","sub_path":"2022/day18/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27030634820","text":"#!/usr/bin/env python3\nimport mysql.connector\n\n\ndef cmdsqlite(conn, query: str):\n cur = conn.execute(query)\n result = None\n if query.startswith(\"SELECT\"):\n result = cur.fetchall()\n else:\n conn.commit()\n\n return result\n\n\ndef connecsqlite():\n con = None\n try:\n con = mysql.connectior.connect(\n \"localhost\", user=\"root\", password=\"toor123\", databse=\"idk\"\n )\n print(\"connected\")\n\n except:\n print(\"connection error\")\n finally:\n return con\n\n\nqueries = [\n \"\"\"CREATE TABLE IF NOT EXISTS highscores (\n\tid INT AUTO_INCREMENT,\n\tname text NOT NULL,\n\tscore INT,\n\tCONSTRAINT userrole_pk PRIMARY KEY(id)\n);\"\"\",\n \"\"\"INSERT INTO highscores (name, score) VALUES\n ('Alice', 20),\n ('Bob', 25),\n ('Carol', 23),\n ('Dave', 27),\n ('H4ck3r', 2000)\n \"\"\",\n \"\"\"SELECT * FROM highscores WHERE score > 25\"\"\",\n \"\"\"UPDATE highscores SET score=0 WHERE score > 100\"\"\",\n \"\"\"DELETE FROM highscores WHERE name='H4CK3R'\"\"\",\n \"\"\"SELECT * FROM highscores WHERE score > 25\"\"\",\n]\n\nconn = connecsqlite()\n\nfor query in queries:\n cmdsqlite(conn, query)\n\nconn.close()\n","repo_name":"vdbe/school-python","sub_path":"exercises/09_03.py","file_name":"09_03.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22964535649","text":"import csv\nfrom datetime import datetime, timedelta\n\n# Reading the file\nwith open('assignment.csv', 'r') as file:\n reader = csv.DictReader(file)\n data = list(reader)\n\n# Checking the entries\nfor entry in data:\n if entry['Time'] != '':\n entry['Time'] = datetime.strptime(entry['Time'], '%m/%d/%Y %I:%M %p')\n 
if entry['Time Out'] != '':\n entry['Time Out'] = datetime.strptime(entry['Time Out'], '%m/%d/%Y %I:%M %p')\n\n# Sorting data by Employee Name and then by Time\ndata.sort(key=lambda x: (x['Employee Name'], x['Time']))\n\n# Track consecutive days and shift hours\nconsecutive_days = {}\nshift_hours = {}\nconsecutive_days_threshold = 7\nmin_shift_interval = timedelta(hours=1)\nmax_shift_hours = timedelta(hours=14)\n\n# Process the data\nfor entry in data:\n name = entry['Employee Name']\n if name not in consecutive_days:\n consecutive_days[name] = set()\n shift_hours[name] = timedelta()\n \n # Check for consecutive days worked\n if entry['Time'] != '':\n consecutive_days[name].add(entry['Time'].date())\n if len(consecutive_days[name]) >= consecutive_days_threshold:\n print(f\"{name} has worked for {len(consecutive_days[name])} consecutive days.\")\n\n # Calculate shift duration\n if entry['Time'] != '' and entry['Time Out'] != '':\n shift_duration = entry['Time Out'] - entry['Time']\n shift_hours[name] += shift_duration\n\n # Check for short shift intervals\n if entry['Time'] != '' and entry['Time Out'] != '' and shift_duration < min_shift_interval:\n print(f\"{name} has less than 10 hours between shifts on {entry['Time'].date()}.\")\n\n # Check for long shifts\n if shift_hours[name] > max_shift_hours:\n print(f\"{name} has worked for more than 14 hours on {entry['Time'].date()}.\")\n\n\n","repo_name":"Dhruvshelke15/Employee-analysis","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10466268490","text":"from django.shortcuts import render\nfrom django import forms\n\nfrom watson_developer_cloud import WatsonException\n\nfrom watsonlanguage.watsonutils.languagetranslation import LanguageTranslationUtils\nfrom watsonlanguage.watsonutils.naturallanguageclassification import NaturalLanguageClassifierUtils\n\nimport json\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass Form_language(forms.Form):\n txtdata = forms.CharField(required=True,\n label=\"Text to Process\",\n widget=forms.Textarea)\n\ndef index(request):\n allinfo = {}\n outputTxt = \"TBD\"\n targetlang = 'en'\n classification = None\n form = None\n if request.POST:\n form = Form_language(request.POST)\n if form.is_valid():\n data = form.cleaned_data['txtdata']\n logger.info(\"Text to be process is %s\" % data)\n lang = \"TBD\"\n\n try:\n ltu = LanguageTranslationUtils()\n nlcu = NaturalLanguageClassifierUtils()\n lang = ltu.identifyLanguage(data)\n primarylang = lang[\"language\"]\n confidence = lang[\"confidence\"]\n\n outputTxt = \"I am %s confident that the language is %s\" % (confidence, primarylang)\n if targetlang != primarylang:\n supportedModels = ltu.checkForTranslation(primarylang, targetlang)\n if supportedModels:\n englishTxt = ltu.performTranslation(data, primarylang, targetlang)\n outputTxt += \", which in english is %s\" % englishTxt\n\n classification = nlcu.classifyTheText(englishTxt)\n else:\n outputTxt += \", which unfortunately we can't translate into English\"\n else:\n classification = nlcu.classifyTheText(data)\n if classification:\n outputTxt += \"(and %s confident that it is %s classification)\" \\\n % (classification['confidence'],\n classification['className'])\n except WatsonException as err:\n \tallinfo['error'] = err\n\n allinfo['lang'] = outputTxt\n else:\n allinfo['error'] = \"The form is invalid\" \n else:\n form = Form_language\n \n 
allinfo['form'] = form\n return render(request, 'watson/wlindex.html', allinfo)\n\n\n\n\n\n","repo_name":"joaquinpunales1992/pythonWatsonAPIs","sub_path":"episode-3/django/src/projwatson/watsonlanguage/views/wl.py","file_name":"wl.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5883904694","text":"# encoding: utf-8\nimport os, sys\nimport random, pygame\nfrom pygame.locals import *\nfrom cellboard import CellBoard\nfrom config import *\n\nWhite = (255, 255, 255)\nRed = (255, 0, 0)\nGreen = (0, 255, 0)\nBlue = (0, 0, 255)\n\n\nclass Window(object):\n\n def __init__(self, screen, bg_image):\n\n self.bg_image = pygame.image.load(bg_image)\n rect = self.bg_image.get_rect()\n self.size = (rect.width, rect.height)\n self.screen = screen\n self.game_board = None\n self.hint_sound = None\n\n def game_board_create(self, level):\n\n ImageRect = self.bg_image.get_rect()\n cell_width = ImageRect.width // level\n cell_height = ImageRect.height // level\n self.game_board = CellBoard(level, level, cell_width, cell_height, self.hint_sound)\n\n def hint_sound_load(self, sound, volume):\n self.hint_sound = pygame.mixer.Sound(sound)\n self.hint_sound.set_volume(volume)\n\n # game start and end interface\n def show_start_interface(self):\n\n width, height = self.size\n image = pygame.image.load('./image/bg2.jpg')\n t_font = pygame.font.Font(FONT_FILE, width // 10)\n c_font = pygame.font.Font(FONT_FILE, width // 20)\n l_font = pygame.font.Font(FONT_FILE, width // 20)\n l_font.set_underline(1)\n l_font.set_italic(1)\n title = t_font.render('拼图游戏', True, Red)\n tips = c_font.render('-请选择游戏级别', True, Blue)\n level_1 = l_font.render('*level 1', True, Green)\n level_2 = l_font.render('*level 2', True, Green)\n level_3 = l_font.render('*level 3', True, Green)\n t_rect = title.get_rect()\n t_rect.midtop = (width / 2.5, height / 10)\n tips_rect = tips.get_rect()\n tips_rect.midtop = (width / 2.2, height / 4)\n start_h = height / 3.2\n level_1_rect = level_1.get_rect()\n level_1_rect.midtop = (width / 2, start_h)\n start_h += level_1_rect.height\n level_2_rect = level_1.get_rect()\n level_2_rect.midtop = (width / 2, start_h)\n start_h += level_2_rect.height\n level_3_rect = level_1.get_rect()\n level_3_rect.midtop = (width / 2, start_h)\n self.screen.blit(image, (0, 0))\n self.screen.blit(title, t_rect)\n self.screen.blit(tips, tips_rect)\n self.screen.blit(level_1, level_1_rect)\n self.screen.blit(level_2, level_2_rect)\n self.screen.blit(level_3, level_3_rect)\n pygame.display.update()\n size = 0\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n stop_game()\n if event.type == MOUSEBUTTONDOWN:\n if self.is_pos_in_rect(event.pos, level_1_rect):\n size = 3\n elif self.is_pos_in_rect(event.pos, level_2_rect):\n size = 4\n elif self.is_pos_in_rect(event.pos, level_3_rect):\n size = 5\n if size:\n break\n return size\n\n def show_end_interface(self):\n\n width, height = self.size\n t_font = pygame.font.Font(FONT_FILE, width // 15)\n t_font.set_italic(1)\n e_font = pygame.font.Font(FONT_FILE, width // 20)\n e_font.set_italic(1)\n e_font.set_underline(1)\n tip = t_font.render('success', True, Blue)\n tip_rect = tip.get_rect()\n tip_rect.midtop = width / 4, height / 8\n c_title = e_font.render('*continue*', True, Blue)\n c_rect = c_title.get_rect()\n c_rect.midtop = width / 2, height / 2\n e_title = e_font.render('*exit*', True, Blue)\n e_rect = e_title.get_rect()\n e_rect.midtop = width / 2, 
height / 3\n self.screen.fill(White)\n pygame.display.update()\n pygame.time.wait(100)\n self.screen.blit(self.bg_image, (0, 0))\n self.screen.blit(tip, tip_rect)\n self.screen.blit(c_title, c_rect)\n self.screen.blit(e_title, e_rect)\n pygame.display.update()\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n stop_game()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n stop_game()\n if event.type == MOUSEBUTTONDOWN:\n if self.is_pos_in_rect(event.pos, c_rect):\n return True\n elif self.is_pos_in_rect(event.pos, e_rect):\n stop_game()\n\n @staticmethod\n def is_pos_in_rect(pos, rect):\n left, top = pos\n if rect.left <= left <= (rect.left + rect.width) and rect.top <= top <= (rect.top + rect.height):\n return True\n return False\n\n\ndef stop_game():\n pygame.quit()\n sys.exit()\n\n\ndef event_loop(board):\n cell_h = board.height\n cell_w = board.width\n for event in pygame.event.get():\n if event.type == QUIT:\n stop_game()\n if event.type == KEYDOWN:\n board.cell_move(event.key)\n if event.type == MOUSEBUTTONDOWN:\n left, top = event.pos\n index = (left // cell_w) + (top // cell_h) * board.columns\n if index == board.i_blank + 1:\n board.cell_move(K_LEFT)\n elif index == board.i_blank - 1:\n board.cell_move(K_RIGHT)\n elif index == board.i_blank + board.columns:\n board.cell_move(K_UP)\n elif index == board.i_blank - board.columns:\n board.cell_move(K_DOWN)\n\n\ndef board_image_get(path):\n files = [f for f in os.listdir(path) if f.endswith('.jpg') and not f == 'bg2.jpg']\n if files:\n rand_num = random.randint(0, len(files) - 1)\n return os.path.join(path, files[rand_num])\n return None\n\n\ndef bg_music_load(bg_music, volume=0.2):\n pygame.mixer.music.load(bg_music)\n pygame.mixer.music.set_volume(volume)\n\n\ndef bg_music_play():\n pygame.mixer.music.play(-1)\n\n\ndef bg_music_pause():\n pygame.mixer.music.pause()\n\n\ndef bg_music_stop():\n pygame.mixer.music.stop()\n\n\ndef game_board_run(screen):\n image_file = board_image_get(IMAGE_PATH)\n if not image_file:\n print('can not load bg image')\n return -1\n window = Window(screen, image_file)\n window.hint_sound_load(HINT_SOUND_FILE, HINT_SOUND_VOLUME)\n level = window.show_start_interface()\n window.game_board_create(level)\n while True:\n # 事件轮训\n event_loop(window.game_board)\n # 判断游戏是否结束\n if window.game_board.is_board_be_restore():\n return window.show_end_interface()\n window.game_board.show(window)\n pygame.display.update()\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n main_clock = pygame.time.Clock()\n screen = pygame.display.set_mode((700, 700))\n pygame.display.set_caption('拼图游戏')\n # 加载音乐\n bg_music_load(BG_MUSIC_FILE, BG_MUSIC_VOLUME)\n bg_music_play()\n while True:\n game_board_run(screen)\n main_clock.tick(40)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"gaodb1210/python","sub_path":"pintu/game_main.py","file_name":"game_main.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"812243079","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/10/17 14:53\n# @Author : YaoGengqi\n# @FileName: downsample.py\n# @Software: PyCharm\n# @Description:\n\nimport os\nfrom PIL import Image\nfrom torchvision.transforms import Compose, CenterCrop, Resize\n\ndef hr_transform(crop_size):\n return Compose([\n CenterCrop(crop_size),\n ])\n\ndef lr_transform(crop_size):\n return Compose([\n Resize(crop_size, interpolation=Image.BICUBIC),\n ])\n\ndef down_bicubic(data_root):\n 
\"\"\"将文件夹内的图片进行下采样并保存\"\"\"\n\n hr_output = data_root\n lr_output = data_root + r'_LR\\x4' # 输出路径\n\n if not os.path.exists(lr_output):\n os.mkdir(lr_output)\n dirs = os.listdir(data_root)\n\n i = 0\n for file in dirs:\n\n i += 1\n if not file.endswith(('.png', '.jpg', '.jpeg', '.PNG', '.JPG', '.JPEG', 'bmp', 'BMP')):\n continue\n\n # 存储并修改hr_img边长为4的倍数\n hr_img = Image.open(os.path.join(data_root, file))\n w, h = hr_img.size\n crop_h, crop_w = h-(h%24), w-(w%24)\n hr_size = hr_transform((crop_h, crop_w))\n hr_img = hr_size(hr_img)\n hr_img.save(os.path.join(hr_output, file[:-4] + '.png'))\n\n # 下采样4倍数并存储\n lr_size = lr_transform((crop_h//4, crop_w//4))\n lr_img = lr_size(hr_img)\n lr_img.save(os.path.join(lr_output, file[:-4] + '.png'))\n\n print(\"\\rSaving [\" + str(i) + \"/\" + str(len(dirs)) + '] : ' + os.path.join(lr_output, file), end=\"\")\n\ndef jpg2png(data_root):\n\n dirs = os.listdir(data_root)\n\n i = 0\n for file in dirs:\n\n i += 1\n if not file.endswith(('.jpg')):\n continue\n\n jpg_img = Image.open(os.path.join(data_root, file))\n file = os.path.splitext(file)[0] + '.png'\n jpg_img.save(os.path.join(data_root, file))\n\n print(\"\\rSaving [\" + str(i) + \"/\" + str(len(dirs)) + '] : ' + os.path.join(data_root, file), end=\"\")\n\n return\n\ndef upsample_bicubic(lr_path, hr_output):\n\n if not os.path.exists(hr_output):\n os.mkdir(hr_output)\n dirs = os.listdir(lr_path)\n\n i = 0\n for file in dirs:\n\n i += 1\n if not file.endswith('.png'):\n continue\n\n # 存储并修改hr_img边长为4的倍数\n lr_img = Image.open(os.path.join(lr_path, file))\n w, h = lr_img.size\n\n hr_size = lr_transform((h*4, w*4))\n hr_img = hr_size(lr_img)\n hr_img.save(os.path.join(hr_output, file[:-4] + '.png'))\n\n print(\"\\rSaving [\" + str(i) + \"/\" + str(len(dirs)) + '] : ' + os.path.join(hr_output, file), end=\" \")\n\n\n# down_bicubic(data_root=r'D:\\OneDrive\\Project\\TT_SR\\Datasets\\Set14')\n\n# jpg2png(r'D:\\OneDrive\\Project\\TT_SR\\Datasets\\BSD100')\n\nupsample_bicubic(r'D:\\OneDrive\\Project\\TT_SR\\Datasets\\Manga109_LR\\x4', r'D:\\OneDrive\\Project\\TT_SR\\Result\\BICUBIC\\Manga109')","repo_name":"lizhangray/MTKDSR","sub_path":"downsample.py","file_name":"downsample.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"41496728610","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom lgr.tools.utils import parse_single_cp_input, parse_codepoint_input\nfrom lgr_advanced.lgr_editor.forms.fields import (ValidatingRepertoire,\n FILE_FIELD_ENCODING_HELP)\nfrom lgr_advanced.widgets import DataSelectWidget\nfrom .utils import MultipleChoiceFieldNoValidation\n\n\nclass CodepointField(forms.CharField):\n def __init__(self, *args, **kwargs):\n # single codepoint only?\n self.single = kwargs.pop('single', False)\n super(CodepointField, self).__init__(*args, **kwargs)\n\n def clean(self, value):\n \"\"\"\n Validates the given value and returns its \"cleaned\" value as an\n appropriate Python object.\n\n Raises ValidationError for any errors.\n \"\"\"\n\n # convert to python type and validate at the same time\n value = super(CodepointField, self).clean(value)\n try:\n if self.single:\n value = parse_single_cp_input(value)\n else:\n value = parse_codepoint_input(value)\n except ValueError as e:\n raise ValidationError(str(e))\n\n return 
value\n\n\nclass AddCodepointForm(forms.Form):\n codepoint = CodepointField(label=_(\"Code point\"))\n override_repertoire = forms.BooleanField(label=_(\"Override repertoire\"),\n required=False)\n\n\nclass AddMultiCodepointsForm(forms.Form):\n codepoint = forms.MultipleChoiceField(label=_('Code points'),\n widget=forms.CheckboxSelectMultiple(attrs={'checked': 'true'}))\n\n disabled_codepoint = forms.MultipleChoiceField(\n label=_('Disabled code points'),\n widget=forms.CheckboxSelectMultiple(attrs={'disabled': 'true'}))\n\n tmp_lgr = forms.CharField(widget=forms.HiddenInput, label='', initial=None)\n\n\nclass AddRangeForm(forms.Form):\n first_cp = CodepointField(label=_(\"First code point\"), single=True)\n last_cp = CodepointField(label=_(\"Last code point\"), single=True)\n\n def clean(self):\n cd = super(AddRangeForm, self).clean()\n # TODO check that we don't get sequences\n if 'first_cp' in cd and 'last_cp' in cd and cd['first_cp'] > cd['last_cp']:\n raise ValidationError(_('Last code point (%(last_cp)s) must not be '\n 'smaller than the first code point '\n '(%(first_cp)s)') % cd)\n\n\nclass AddCodepointFromScriptForm(forms.Form):\n validating_repertoire = forms.ChoiceField(label=_(\"Validating repertoire\"),\n required=True,\n initial=ValidatingRepertoire.default_choice(),\n widget=DataSelectWidget)\n script = forms.ChoiceField(label=_(\"Script\"),\n required=True)\n manual_import = forms.BooleanField(label=_(\"Manual import\"),\n required=False)\n\n def __init__(self, *args, **kwargs):\n unicode_database = kwargs.pop('unicode_database')\n scripts = ValidatingRepertoire.scripts(unicode_database)\n super(AddCodepointFromScriptForm, self).__init__(*args, **kwargs)\n self.fields['script'].choices = sorted(\n set((s, s) for vr_scripts in scripts.values() for s in vr_scripts))\n self.fields['validating_repertoire'].choices = ValidatingRepertoire.choices()\n self.fields['validating_repertoire'].widget.data = {\n k: {'scripts': ','.join(v)} for k, v in scripts.items()\n }\n\n\nclass ImportCodepointsFromFileForm(forms.Form):\n file = forms.FileField(label=_(\"Select a file\"),\n help_text=f\"{_('File containing data to be imported.')} {FILE_FIELD_ENCODING_HELP}\")\n manual_import = forms.BooleanField(label=_(\"Manual import\"),\n required=False)\n\n\nclass AddVariantForm(forms.Form):\n codepoint = CodepointField(label=_(\"Code point\"))\n override_repertoire = forms.BooleanField(label=_(\"Override repertoire\"),\n required=False)\n\n\nclass EditCodepointsForm(forms.Form):\n when = forms.ChoiceField(label='when', required=False)\n not_when = forms.ChoiceField(label='not-when', required=False)\n tags = MultipleChoiceFieldNoValidation(label='Tags', required=False, help_text='space-separated tags')\n cp_id = MultipleChoiceFieldNoValidation() # will contain a list of code points\n\n def __init__(self, *args, **kwargs):\n rule_names = kwargs.pop('rule_names', None)\n tags = kwargs.pop('tags', tuple())\n super(EditCodepointsForm, self).__init__(*args, **kwargs)\n if rule_names:\n self.fields['when'].choices = rule_names\n self.fields['not_when'].choices = rule_names\n self.fields['tags'].choices = tags\n\n def clean(self):\n cleaned_data = super(EditCodepointsForm, self).clean()\n if 'add-rules' in self.data:\n if cleaned_data['when'] and cleaned_data['not_when']:\n self.add_error('when', _('Cannot add when and not-when rules simultaneously'))\n self.add_error('not_when', _('Cannot add when and not-when rules simultaneously'))\n elif not cleaned_data.get('when') and not 
cleaned_data.get('not_when'):\n self.add_error('when', _('Please provide at least one value'))\n self.add_error('not_when', _('Please provide at least one value'))\n\n if 'add-tags' in self.data and not cleaned_data.get('tags'):\n self.add_error('tags', _('Please provide at least one value'))\n\n return cleaned_data\n","repo_name":"icann/lgr-django","sub_path":"src/lgr_advanced/lgr_editor/forms/codepoints.py","file_name":"codepoints.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22845301571","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 22 21:12:09 2020\nQ2 - RK4\n@author: Adi Pall (but mostly Prof. Wong)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef ode_func(t, y):\n \"\"\" ode function for lecture example (y' = 2ty) \"\"\"\n return 2*t*y\n\ndef sol_true(t, y0):\n \"\"\" true solution for the example, given y(0) = y0 \"\"\"\n return y0*np.exp(t**2)\n\ndef rk4(func, a, b, y0, h):\n \"\"\" 4th order RK method with fixed step size input, using while loop\"\"\"\n y = y0\n yvals = [y]\n tvals = [a]\n t = a # t0 = t = a\n while t < b - 1e-12: # (avoids rounding error where t misses b slightly)\n k1 = h*func(t,y)\n k2 = h*func(t + 0.5*h, y + 0.5*k1)\n k3 = h*func(t + 0.5*h, y + 0.5*k2)\n k4 = h*func(t + h, y + k3)\n \n y = y + (1./6.)*(k1 + 2*k2 + 2*k3 + k4)\n yvals.append(y) # update yvals tracking\n \n t = t + h \n tvals.append(t)\n \n return tvals, yvals\n\ndef example_plot():\n \"\"\" Solve y' = 2ty, y(0) = 1 using RK4 method.\n (Example from lecture with a plot)\n \"\"\"\n b = 1\n h = 0.01\n y0 = 1\n t, y_approx = rk4(ode_func, 0, b, y0, h)\n t_true = np.linspace(0, b, 200) \n y_true = sol_true(t_true, y0) \n\n print(\"{:.2f} \\t {:.2f}\".format(t[-1], y_approx[-1]))\n\n plt.figure()\n plt.plot(t, y_approx, '.--r')\n plt.plot(t_true, y_true, '-k')\n plt.legend(['approx', 'actual sol'])\n plt.ylabel('y')\n plt.xlabel('t')\n \ndef convergence_ex():\n \"\"\" Solve y' = 2ty, y(0) = 1 using RK4 method.\n Use the true solution to compute the max error,\n and show that it is O(h^4)\n \"\"\"\n hvals = [(0.1)*2**(-k) for k in range(8)]\n y0 = 1\n b = 1\n\n err = [0]*len(hvals)\n for k in range(len(hvals)): # err[k] is the max error given spacing h[k]\n t, u = rk4(ode_func, 0, b, y0, hvals[k])\n\n # compute errors at each t (point_errs), then max. error\n # (use zip/list comprehension trick to iterate over t and u)\n point_errs = [abs(sol_true(t1, y0) - u1) for t1, u1 in zip(t, u)]\n err[k] = max(point_errs) # max error\n\n plot_hvals = [(1e-4)*2**(-4*k) for k in range(8)]\n plt.figure()\n plt.loglog(hvals, plot_hvals, '--r')\n plt.loglog(hvals, err, '.--k')\n plt.legend(['slope 4', 'max. 
err.'])\n plt.xlabel('$h$')\n plt.ylabel('$err$')\n plt.savefig('convergence_HW8.png')\n \nif __name__ == \"__main__\":\n example_plot()\n convergence_ex()\n \n\n","repo_name":"adipall/math_260","sub_path":"HW8/Pall_Adi_HW8_Q2.py","file_name":"Pall_Adi_HW8_Q2.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43628133975","text":"from utils.parse import Parser\nfrom itertools import islice, repeat, chain, cycle\n\n\ndef get_pattern(position):\n base = [0, 1, 0, -1]\n repeats = (repeat(n, position + 1) for n in base)\n repeated = chain(*repeats)\n cycled = cycle(repeated)\n return islice(cycled, 1, None)\n\n\ndef compute_phase(data):\n size = len(data)\n calculated = (sum(n * p for n, p in zip(data, get_pattern(pos)))\n for pos in range(size))\n output = [abs(n) % 10 for n in calculated]\n return output\n\n\ndef phases(initial_data):\n current = initial_data\n while True:\n current = compute_phase(current)\n yield current\n\n\nparser = Parser(\"Day 16: Flawed Frequency Transmission - Part 1\")\nparser.parse()\nwith parser.input as input:\n line = input.readline().strip()\n data = [int(el) for el in line]\n\nbound_chain = islice(phases(data), 100)\n*_, result = bound_chain\n\nprinted = \"\".join(map(str, result[:8]))\nprint(printed)\n","repo_name":"tranzystorek-io/aoc2019-python","sub_path":"days/d16p1.py","file_name":"d16p1.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"17386847725","text":"\"\"\"\nCP1404 Prac 5 - Amber Hogarth - Emails\nEstimated: 25 minutes\nActual: 35 minutes\n\"\"\"\n\n\ndef main():\n email_to_name = {}\n email = input(\"Email: \")\n while email != \"\":\n name = get_name_from_email(email)\n name_confirmation = input(f\"Is your name {name}? 
Y/n \").upper()\n if name_confirmation != \"Y\" and name_confirmation != \"\":\n name = input(\"Name: \")\n email_to_name[email] = name\n email = input(\"Email: \")\n\n for email, name in email_to_name.items():\n print(f\"{name} ({email})\")\n\n\ndef get_name_from_email(email):\n \"\"\"Get name from email address.\"\"\"\n prefix = email.split(\"@\")[0]\n prefix_parts = prefix.split(\".\")\n name = \" \".join(prefix_parts).title()\n return name\n\n\nmain()\n","repo_name":"amberhogarth/cp1404practicals","sub_path":"prac_05/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9762322878","text":"import os\nimport sys\nfrom pathlib import Path\nimport pycksum\n\nfrom url_to_filename import url_to_filename\n\nEXIT_ERROR = 1\n\nif __name__ == '__main__':\n with open(sys.argv[1]) as fh:\n\n print(next(fh).strip(), file=sys.stderr)\n\n for line in fh:\n line = line.strip()\n url, bytes, chksum = line.split()\n file_name = url_to_filename(url)\n\n\n print(line, file=sys.stderr)\n\n path = Path('MIRROR') / file_name\n if bytes != '.':\n bytes = int(bytes)\n bytes_on_file_system = os.path.getsize(path)\n if bytes != bytes_on_file_system:\n print(f'Error: size of file from url {url} does not match (expected {bytes} bytes got {bytes_on_file_system} bytes)', file=sys.stderr)\n print('Exiting..', file=sys.stderr)\n sys.exit(EXIT_ERROR)\n\n if chksum != '.':\n chksum = int(chksum)\n chksum_on_file_system = pycksum.cksum(open(path, 'rb'))\n if bytes != bytes_on_file_system:\n print(f'Error: chksum of file from url {url} does not match (expected {chksum} got {chksum_on_file_system})', file=sys.stderr)\n print('Exiting..', file=sys.stderr)\n sys.exit(EXIT_ERROR)\n\n print(file=sys.stderr)\n print('All OK', file =sys.stderr)\n","repo_name":"varioustoxins/nmrpipe-tracker","sub_path":".github/workflows/scripts/cksum.py","file_name":"cksum.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32076012915","text":"import numpy as np\nimport torch\nimport sys\nfrom cvxopt import matrix, solvers\nsolvers.options['show_progress'] = False\n\n\ndef solve_QP(grad_list, c_G, c_h):\n l = len(grad_list)\n c_l = len(c_G)\n\n x = grad_list.cpu().data.numpy()\n y = np.dot(x, x.transpose())\n y = y.astype(np.double)\n\n Q = 2 * matrix(y)\n p = matrix([0.0] * l)\n\n #G = matrix([[-1.0, 0.0], [0.0, -1.0]])\n #h = matrix([0.0, 0.0])\n\n G_tmp = []\n h_tmp = []\n for i in range(l):\n x = [0.0]*l\n x[i] = -1\n G_tmp.append(x)\n h_tmp.append(0.0)\n for i in range(c_l):\n G_tmp.append(c_G[i])\n h_tmp.append(c_h[i])\n\n G = matrix(np.array(G_tmp))\n h = matrix(h_tmp)\n A = matrix([1.0] * l, (1, l))\n b = matrix(1.0)\n sol = solvers.qp(Q, p, G, h, A, b)\n return sol['x'], 0.0\n\n\nif __name__ == \"__main__\":\n print('ok')\n","repo_name":"CFCF-anonymous/Counterfactual-Review-based-Recommendation","sub_path":"utils/QPC.py","file_name":"QPC.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"39"} +{"seq_id":"34767833958","text":"from flask import Flask\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:111111@localhost:3306/demo'\n# 配置多个数据库绑定\n# 
app.config['SQLALCHEMY_BINDS'] = {\n# 'users': 'sqlite:///:memory:',\n# 'main': 'mysql+pymysql://root:111111@localhost:3306/demo'\n# }\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app) # 数据库对象\n# db = SQLAlchemy()\n# 延迟绑定\n# db.init(app)\nmigrate = Migrate(app, db)\n\n\n# 数据模型定义\n# class User(db.Model):\n# id = db.Column(db.Integer, primary_key=True)\n# # status = db.Column(db.SmallInteger, primary_key=True, nullable=True)\n# username = db.Column(db.String(80), unique=True) # 必须指明长度\n# email = db.Column(db.String(120), unique=True)\n\n# xuanke = db.Table(\n# 'xuanke',\n# db.Column('user_id', db.Integer, db.ForeignKey('user.id')),\n# db.Column('subject_id', db.Integer, db.ForeignKey('subject.id'))\n# )\nclass Xuanke(db.Model):\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)\n subject_id = db.Column(db.Integer, db.ForeignKey('subject.id'), primary_key=True)\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n # subjects = db.relationship(\n # 'Subject', secondary=xuanke,\n # backref=db.backref('users'), lazy='dynamic'\n # )\n subjects = db.relationship('Xuanke', backref='student')\n\n\nclass Subject(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20))\n subjects = db.relationship('Xuanke', backref='subject')\n\n\nclass Project(db.Model):\n # __tablename__ = 'project_info'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n # modules = db.relationship('Module', backref='module_project') # 和数据库的表是没有关系的\n modules = db.relationship('Module', back_populates='module_project') # 另一种\n # 当查询到一个project的时候,---> project.modules\n # module = Module.query.get(1) module.module_project\n # 多对一关系 backref back_populates\n # 一对一关系 relationship(userlist=False)\n\n\nclass Module(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n project_id = db.Column(db.Integer, db.ForeignKey('project.id'))\n module_project = db.relationship('Project', back_populates='modules')\n\n\n@app.route('/insert')\ndef insert():\n # 添加数据\n # new_user = User(username='apple', email='demo')\n # db.session.add(new_user) # 保存到会话\n # db.session.commit() # 事务\n # try:\n # db.session.commit()\n # except ValueError:\n # db.session.rollback()\n user1 = User(name='apple')\n user2 = User(name='python')\n user3 = User(name='orange')\n subject1 = Subject(name='自动化')\n subject2 = Subject(name='测试开发')\n subject3 = Subject(name='前端')\n user1.subjects.append(subject1)\n user2.subjects.append(subject2)\n\n db.session.add_all([user1, user2, user3, subject1, subject2, subject3])\n db.session.commit()\n return 'success'\n\n\n@app.route('/select')\ndef select():\n user = User.query.get(1)\n a = user.subjects\n for xuanke in a:\n print(xuanke.subject)\n print(a)\n return 'select true'\n\n\n@app.route('/')\ndef index():\n # 查询数据\n # users = User.query.all() # 查询所有的\n # users = User.query.filter_by(username='apple').all()\n # users = User.query.filter(User.username=='apple').order_by(User.id.desc()).all()\n # User.query.get() # 获取主键\n # User.query.filter_by()\n users = db.session.execute(\"SELECT * FROM user\") # \n users = users.fetchall() # [(1, 'python', '123445556@qq.com')]\n print(users)\n return 'hello'\n\n\n@app.route('/update')\ndef update():\n user = User.query.get(1)\n user.username = 'python'\n user.email = '123445556@qq.com'\n db.session.add(user)\n db.session.commit()\n print(user)\n return 'update'\n\n\nif __name__ == 
'__main__':\n app.run(debug=True)\n\n\n'''\nORM\n将PySql和SQL语句封装成对象\n好处:避免SQL注入;各个不同的数据需要写不同的查询语句,这个不同\n坏处:每一个的具体语法是不一样的,而SQL语句却是大体相同的\n数据库创建步骤:\n0. 安装sqlalchemy\n1. 配置数据库\n2. 定义表结构,设计表\n3. 创建表\n\n创建数据库\n1. 在command创建:\nset FLASK_APP=app.py\nflask shell 进入python shell\nfrom app import db\ndb.create_all()\n2. 代码创建\ndef create_app():\n app = Flask(__name__)\n db.init_app(app)\n return app\n # 推入上下文更好\n with app.app_context as ctx:\n db.create_all()\n3. 通过migrate创建:迁移的时候方便;动态修改数据库结构\nset FLASK_APP=app.py\nflask db init\nflask db migrate 生成脚本\nflask db upgrade 更新到数据库\nflask db downgrade 退回\n'''\n\n\n\n\n","repo_name":"fengzhiziLy/PythonFlask","sub_path":"08database/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72990177075","text":"#!/usr/bin/python\n\n# This snippet requires:\n# 1) The official [Google API Client Libraries: \n# https://code.google.com/p/google-api-python-client/\n# (pip install --upgrade google-api-python-client)\n#\n# 2) A simple API key from Google: \n# https://developers.google.com/console/help/#generatingdevkeys\n#\n\nimport sys\nfrom apiclient.discovery import build \n\nkey = \"myKeyFromGoogleApiConsole\"\n\nservice = build('urlshortener', 'v1', developerKey=key)\n\nurl = service.url()\n\n# Create a shortened URL by inserting the URL into the url collection.\nbody = {'longUrl': '%clipboard' }\nresp = url.insert(body=body).execute()\n\nsys.stdout.write(resp['id'])\n","repo_name":"grahams/textexpander","sub_path":"googl-shortener.py","file_name":"googl-shortener.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"39"} +{"seq_id":"33104701107","text":"import asyncio\nimport aiohttp\nimport backoff\nimport math\nfrom typing import Any, AsyncGenerator, Dict, Iterable, Optional, TypedDict\nimport logging\n\nfrom depobs.clients.aiohttp_client_config import AIOHTTPClientConfig\nfrom depobs.scanner.models.package_meta_result import Result\nfrom depobs.util.serialize_util import grouper\nfrom depobs.util.traceback_util import exc_to_str\n\nlog = logging.getLogger(__name__)\n\n\nclass NPMRegistryClientConfig(\n AIOHTTPClientConfig, total=False\n): # don't require keys defined below\n\n # an npm registry access token for fetch_npm_registry_metadata. Defaults NPM_PAT env var. Should be read-only.\n npm_auth_token: str\n\n\ndef aiohttp_session(config: NPMRegistryClientConfig) -> aiohttp.ClientSession:\n # \"Accept\": \"application/json vnd.npm.install-v1+json; q=1.0, # application/json; q=0.8, */*\"\n # doesn't include author and maintainer info\n\n # alternatively npm login then\n # npm view [<@scope>/][@] [[.]...]\n\n # the registry does support GET·/{package}/{version}\n #\n # https://github.com/npm/registry/blob/master/docs/REGISTRY-API.md#getpackageversion\n #\n # but it seems to be busted for scoped packages e.g.\n # e.g. 
https://registry.npmjs.com/@hapi/bounce/2.0.8\n #\n # https://replicate.npmjs.com/ (flattened scopes) seems to be busted\n headers = {\"Accept\": \"application/json\", \"User-Agent\": config[\"user_agent\"]}\n # from 'npm token create --read-only' to give us a higher rate limit\n if config.get(\"npm_auth_token\", None):\n headers[\"Authorization\"] = f\"Bearer {config['npm_auth_token']}\"\n\n return aiohttp.ClientSession(\n headers=headers,\n timeout=aiohttp.ClientTimeout(total=config[\"total_timeout\"]),\n connector=aiohttp.TCPConnector(limit=config[\"max_connections\"]),\n raise_for_status=True,\n )\n\n\nasync def async_query(\n session: aiohttp.ClientSession, package_name: str, dry_run: bool\n) -> Optional[Dict]:\n # NB: scoped packages OK e.g. https://registry.npmjs.com/@babel/core\n url = f\"https://registry.npmjs.com/{package_name}\"\n response_json: Optional[Dict] = None\n if dry_run:\n log.warn(f\"in dry run mode: skipping GET {url}\")\n return response_json\n\n log.debug(f\"GET {url}\")\n try:\n response = await session.get(url)\n response_json = await response.json()\n return response_json\n except aiohttp.ClientResponseError as err:\n if is_not_found_exception(err):\n log.info(f\"got 404 for package {package_name}\")\n log.debug(f\"{url} not found: {err}\")\n return None\n raise err\n\n\ndef is_not_found_exception(err: Exception) -> bool:\n is_aiohttp_404 = isinstance(err, aiohttp.ClientResponseError) and err.status == 404\n return is_aiohttp_404\n\n\nasync def fetch_npm_registry_metadata(\n config: NPMRegistryClientConfig,\n package_names: Iterable[str],\n total_packages: Optional[int] = None,\n) -> AsyncGenerator[Result[Dict[str, Dict]], None]:\n \"\"\"\n Fetches npm registry metadata for one or more node package names\n \"\"\"\n total_groups: Optional[int] = None\n if total_packages:\n total_groups = math.ceil(total_packages / config[\"package_batch_size\"])\n async with aiohttp_session(config) as s:\n async_query_with_backoff = backoff.on_exception(\n backoff.expo,\n (aiohttp.ClientResponseError, aiohttp.ClientError, asyncio.TimeoutError),\n max_tries=config[\"max_retries\"],\n giveup=is_not_found_exception,\n logger=log,\n )(async_query)\n\n for i, group in enumerate(grouper(package_names, config[\"package_batch_size\"])):\n log.info(f\"fetching group {i} of {total_groups}\")\n try:\n group_results = await asyncio.gather(\n *[\n async_query_with_backoff(s, package_name, config[\"dry_run\"])\n for package_name in group\n if package_name is not None\n ]\n )\n for result in group_results:\n if result is not None:\n yield result\n except Exception as err:\n log.error(\n f\"error fetching group {i} for package names {group}: {err}:\\n{exc_to_str()}\"\n )\n yield err\n","repo_name":"psiinon/dependency-observatory","sub_path":"depobs/clients/npm_registry.py","file_name":"npm_registry.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"} +{"seq_id":"7330538669","text":"import pulumi\nimport pulumi_azure as azure\nfrom genetic import Genetics\n\nclass AKS(Genetics):\n def __init__(self, cluster_count, resource_group_name, location, project, environment, tags):\n self.cluster_count = cluster_count\n self.cluster_names = []\n self.kube_configs = []\n super().__init__(resource_group_name, location, project, environment, tags)\n\n def create_cluster(self):\n aks_name = f'{self.project}-aks-{self.environment}'\n dns_prefix = f'{aks_name}-{self.environment}'\n\n for i in range(self.cluster_count):\n 
cluster_name = f'{aks_name}{i}'\n dns_prefix_multi = f'{dns_prefix}{i}'\n pulumi_poc_aks = azure.containerservice.KubernetesCluster(cluster_name,\n name=cluster_name,\n location=self.location,\n resource_group_name=self.resource_group_name,\n dns_prefix=dns_prefix_multi,\n default_node_pool={\n \"name\": \"default\",\n \"node_count\": 1,\n \"vm_size\": \"Standard_D2_v2\",\n },\n identity={\n \"type\": \"SystemAssigned\",\n },\n tags=self.tags\n )\n\n cert = f'clientCertificate{i}'\n kubeConfig = f'kubeConfig{i}'\n pulumi.export(cert, pulumi_poc_aks.kube_configs[0][\"clientCertificate\"])\n pulumi.export(kubeConfig, pulumi_poc_aks.kube_config_raw)\n self.cluster_names.append(cluster_name)\n self.kube_configs.append(pulumi_poc_aks.kube_config_raw)","repo_name":"tonedefdev/pulumi-poc","sub_path":"aks.py","file_name":"aks.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"18093317437","text":"\"\"\"Common mixins for the project.\"\"\"\n\nfrom kombu import Connection, Exchange, Queue\nfrom sqlalchemy import Engine, create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom project_settings import amqp_credentials, database_credentials\n\n\nclass DatabaseMixin:\n \"\"\"Mixin to add database dependencies for class.\n\n Attributes:\n db_engine (Engine): Database engine.\n db_session_maker (sessionmaker): Database session maker.\n\n \"\"\"\n\n db_engine: Engine\n db_session_maker: sessionmaker\n\n def init_database(self) -> None:\n \"\"\"Initializes database dependencies.\"\"\"\n self.db_engine = create_engine(database_credentials().build_url())\n self.db_session_maker = sessionmaker(bind=self.db_engine)\n\n\nclass AMQPMixin:\n \"\"\"Mixin to add AMQP dependencies for class.\n\n Attributes:\n amqp_routing_keys (dict[str, str]): AMQP routing keys.\n amqp_connection (Connection): AMQP connection.\n amqp_exchange (Exchange): AMQP direct exchange.\n amqp_task_queue (Queue): AMQP queue for collection tasks.\n amqp_result_queue (Queue): AMQP queue for collection results.\n\n \"\"\"\n\n amqp_routing_keys: dict[str, str] = {\n \"task\": \"task_key\",\n \"result\": \"result_key\",\n \"reply\": \"reply_key\",\n }\n\n amqp_connection: Connection\n amqp_exchange: Exchange\n amqp_task_queue: Queue\n amqp_result_queue: Queue\n amqp_reply_queue: Queue\n\n def init_amqp(\n self, init_task_queue: bool = False, init_result_queue: bool = False, init_reply_queue: bool = False\n ) -> None:\n \"\"\"Initializes AMQP dependencies.\n\n Args:\n init_task_queue (bool): Whether to init AMQP task queue. Default is `False`.\n init_result_queue (bool): Whether to init AMQP result queue. Default is `False`.\n init_reply_queue (bool): Whether to init AMQP reply queue. 
Default is `False`.\n\n \"\"\"\n amqp_creds = amqp_credentials()\n self.amqp_connection = Connection(amqp_creds.build_url(), connect_timeout=10)\n self.amqp_exchange = Exchange(amqp_creds.AMQP_DIRECT_EXCHANGE, \"direct\", durable=True)\n\n if init_task_queue:\n self.amqp_task_queue = Queue(\n amqp_creds.AMQP_TASK_QUEUE,\n exchange=self.amqp_exchange,\n routing_key=self.amqp_routing_keys[\"task\"],\n )\n\n if init_result_queue:\n self.amqp_result_queue = Queue(\n amqp_creds.AMQP_RESULT_QUEUE,\n exchange=self.amqp_exchange,\n routing_key=self.amqp_routing_keys[\"result\"],\n )\n\n if init_reply_queue:\n self.amqp_reply_queue = Queue(\n amqp_creds.AMQP_REPLY_QUEUE,\n exchange=self.amqp_exchange,\n routing_key=self.amqp_routing_keys[\"reply\"],\n )\n","repo_name":"ilarionkuleshov/marketplace-monitoring-bot","sub_path":"src/utils/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71908587314","text":"def valores(): #función estática, nunca se modifican\n global num1,num2 # si ponemos la palabra reservada global podemos acceder desde fuera del contexto de la funcion\n num1=110\n num2=40\n resultado = num1 +num2\n return resultado\n\nprint(valores())\n\n# resta = num1 - num2 vemos que esto falla (sin poner el global), al variable num1, num2 existen dentro del contexto de la funcion\n# print(resta)\n\nresta = num1 - num2 \nprint(resta)","repo_name":"itorlou/CursoPython3","sub_path":"9.- Funciones/Bloque3/VariablesGlobales.py","file_name":"VariablesGlobales.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36015870213","text":"import numpy as np\nimport pandas as pd\n\n#使用一个分类的数据\nimport pandas as pd\ndf = pd.read_csv('./datas/iris.data')\ndf['Iris-setosa'] = pd.Categorical(df['Iris-setosa']).codes\ndf = df[df['Iris-setosa'] != 2]\nx = df.drop(['Iris-setosa'],1)\ny = df['Iris-setosa']\n\n#使用梯度下降法求解\n\n#概率转换成01的类数据\ndef prob2class(y_prob):\n y_class = [1 if i>=0.5 else 0 for i in y_prob]\n return y_class\n\n#sigmoid函数\ndef sigmoid(theta,x):\n prob = 1/(1+np.exp(-x.dot(theta)))\n return prob.values\n\n#损失函数\ndef lr_loss(y_true,prob):\n laplace = 1e-10\n lr_loss = - sum(y_true * np.log(prob).ravel() + (1-y_true) * np.log(1-prob+laplace).ravel()) + 1/len(y)*np.power(theta,2).sum()\n return lr_loss\n#初始化aplha 和 theta\nalpha =0.02\ntheta = np.zeros((4,1))\ntheta\n\n#梯度下降法开始迭代\ny_true = y.reshape(-1,1)\nlr_prob = sigmoid(theta,x)\n#print(lr_prob)\nlr_class = prob2class(lr_prob)\nprint(lr_class)\ntheta = theta + alpha * x.T.dot(y_true - sigmoid(theta,x))\nlr_loss(y,lr_prob)\n","repo_name":"NongfuSpring-wu/line-regression-and-logistic-regression","sub_path":"逻辑回归求解/逻辑回归求解.py","file_name":"逻辑回归求解.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19040060401","text":"import subprocess\nimport yaml\nfrom datetime import datetime, timedelta\nfrom tqdm import tqdm\nimport os\nimport shutil\nimport tempfile\nimport xarray as xr\nfrom logging import getLogger\nimport pandas as pd\n\nfrom utils.logging import LOG_NAME, NOTIFICATION\nimport configparser\n\nlog = getLogger(LOG_NAME)\n\ndef run_command(cmd):\n \"\"\"Safely runs a command, and returns the returncode silently in case of no error. 
Otherwise,\n raises an Exception\n \"\"\"\n res = subprocess.run(cmd, check=True, capture_output=True)\n \n if res.returncode != 0:\n log.error(f\"Error with return code {res.returncode}\")\n raise Exception\n return res.returncode\n\ndef determine_precip_version(date):\n \"\"\"Determines which version of IMERG to download. Most preferred is IMERG Late, followed by\n IMERG Early. IMERG-Final has some issues. Currently only running using IMERG Early and Late\n \"\"\"\n version = None\n # if date < (datetime.today() - timedelta(days=4*30)):\n # version = \"IMERG-FINAL\"\n # elif date < (datetime.today() - timedelta(days=10)):\n # version = \"IMERG-LATE\"\n # else:\n # version = \"IMERG-EARLY\"\n if date < (datetime.today() - timedelta(days=2)):\n version = \"IMERG-LATE\"\n else:\n version = \"IMERG-EARLY\"\n return version\n\ndef download_precip(date, version, outputpath, secrets):\n \"\"\"\n Parameters:\n date: datetime object that defines the date for which data is required\n version: which version of data to download - IMERG-LATE or IMERG-EARLY\n outputpath: path where the data should be downloaded\n =======\n TODO: Add ability to select either CHIRPS or IMERG data\n \"\"\"\n if version == \"IMERG-FINAL\":\n link = f\"ftp://arthurhou.pps.eosdis.nasa.gov/gpmdata/{date.strftime('%Y')}/{date.strftime('%m')}/{date.strftime('%d')}/gis/3B-DAY-GIS.MS.MRG.3IMERG.{date.strftime('%Y%m%d')}-S000000-E235959.0000.V06A.tif\"\n elif version == \"IMERG-LATE\":\n if date >= datetime(2022, 5, 8): # Version was changed from V06B to V06C\n link = f\"https://jsimpsonhttps.pps.eosdis.nasa.gov/imerg/gis/{date.strftime('%Y')}/{date.strftime('%m')}/3B-HHR-L.MS.MRG.3IMERG.{date.strftime('%Y%m%d')}-S233000-E235959.1410.V06C.1day.tif\"\n else:\n link = f\"https://jsimpsonhttps.pps.eosdis.nasa.gov/imerg/gis/{date.strftime('%Y')}/{date.strftime('%m')}/3B-HHR-L.MS.MRG.3IMERG.{date.strftime('%Y%m%d')}-S233000-E235959.1410.V06B.1day.tif\"\n else:\n if date >= datetime(2022, 5, 8): # Version was changed from V06B to V06C\n link = f\"https://jsimpsonhttps.pps.eosdis.nasa.gov/imerg/gis/early/{date.strftime('%Y')}/{date.strftime('%m')}/3B-HHR-E.MS.MRG.3IMERG.{date.strftime('%Y%m%d')}-S233000-E235959.1410.V06C.1day.tif\"\n else:\n link = f\"https://jsimpsonhttps.pps.eosdis.nasa.gov/imerg/gis/early/{date.strftime('%Y')}/{date.strftime('%m')}/3B-HHR-E.MS.MRG.3IMERG.{date.strftime('%Y%m%d')}-S233000-E235959.1410.V06B.1day.tif\"\n\n # Define the command (different for FINAL, same for EARLY and LATE)\n if version == \"IMERG-FINAL\":\n cmd = [\n \"curl\",\n '-o',\n outputpath,\n '--ssl-reqd',\n '-u',\n f'{secrets[\"imerg\"][\"username\"]}:{secrets[\"imerg\"][\"pwd\"]}',\n link\n ]\n else:\n cmd = [\n \"wget\",\n \"-O\",\n outputpath,\n \"--user\",\n f'{secrets[\"imerg\"][\"username\"]}',\n '--password',\n f'{secrets[\"imerg\"][\"pwd\"]}',\n link,\n '--no-proxy'\n ]\n log.debug(\"Downloading precipitation file: %s (%s)\", date.strftime('%Y-%m-%d'), version)\n return run_command(cmd)\n\ndef download_tmax(year, outputpath):\n \"\"\"\n Parameters:\n year: year for which data is to be downloaded, as a string\n outputpath: path where the data has to be saved\n \"\"\"\n cmd = [\n 'wget', \n '-O', \n f'{outputpath}', \n f'ftp://ftp.cdc.noaa.gov/Datasets/cpc_global_temp/tmax.{year}.nc'\n ]\n log.debug(\"Downloading tmax: %s\", year)\n return run_command(cmd)\n\ndef download_tmin(year, outputpath):\n \"\"\"\n Parameters:\n year: year for which data is to be downloaded, as a string\n outputpath: path where the data has to be saved\n \"\"\"\n 
cmd = [\n 'wget', \n '-O', \n f'{outputpath}', \n f'ftp://ftp.cdc.noaa.gov/Datasets/cpc_global_temp/tmin.{year}.nc'\n ]\n log.debug(\"Downloading tmin: %s\", year)\n return run_command(cmd)\n\ndef download_uwnd(year, outputpath):\n \"\"\"\n Parameters:\n year: year for which data is to be downloaded, as a string\n outputpath: path where the data has to be saved\n \"\"\"\n cmd = [\n 'wget', \n '-O', \n f'{outputpath}', \n f'ftp://ftp2.psl.noaa.gov/Datasets/ncep.reanalysis/surface_gauss/uwnd.10m.gauss.{year}.nc'\n ]\n log.debug(\"Downloading uwnd: %s\", year)\n return run_command(cmd)\n\ndef download_vwnd(year, outputpath):\n \"\"\"\n Parameters:\n year: year for which data is to be downloaded, as a string\n outputpath: path where the data has to be saved\n \"\"\"\n cmd = [\n 'wget', \n '-O', \n f'{outputpath}', \n f'ftp://ftp2.psl.noaa.gov/Datasets/ncep.reanalysis/surface_gauss/vwnd.10m.gauss.{year}.nc']\n log.debug(\"Downloading vwnd: %s\", year)\n return run_command(cmd)\n\ndef download_data(begin, end, datadir, secrets):\n \"\"\"Downloads the data between dates defined by begin and end\n\n Parameters:\n begin: Data will start downloading from this date, including this date\n end: Data will be downloaded until this date, including this date\n datedir: Base directory for downloading data\n \"\"\"\n\n # Obtain list of dates to be downloaded\n # required_dates = [begin+timedelta(days=n) for n in range((end-begin).days)]\n required_dates = pd.date_range(begin, end)\n required_years = list(set([d.strftime(\"%Y\") for d in required_dates]))\n\n # Download Precipitation\n log.debug(\"Downloading Precipitation\")\n # with tqdm(required_dates) as pbar:\n for date in required_dates:\n # determine what kind of data is required\n data_version = determine_precip_version(date)\n outputpath = os.path.join(datadir, \"precipitation\", f\"{date.strftime('%Y-%m-%d')}_IMERG.tif\")\n # pbar.set_description(f\"{date.strftime('%Y-%m-%d')} ({data_version})\")\n download_precip(date, data_version, outputpath, secrets)\n # pbar.update(1)\n \n # Download other forcing data\n log.debug(\"Downloading TMax, TMin, UWnd, and VWnd\")\n # with tqdm(required_years, total=len(required_years)*4) as pbar:\n for year in required_years:\n # pbar.set_description(f\"{year} (TMax)\")\n download_tmax(year, os.path.join(datadir, \"tmax\", year+'.nc'))\n # pbar.update(1)\n\n # pbar.set_description(f\"{year} (TMin)\")\n download_tmin(year, os.path.join(datadir, \"tmin\", year+'.nc'))\n # pbar.update(1)\n\n # pbar.set_description(f\"{year} (UWnd)\")\n download_uwnd(year, os.path.join(datadir, \"uwnd\", year+'.nc'))\n # pbar.update(1)\n\n # pbar.set_description(f\"{year} (VWnd)\")\n download_vwnd(year, os.path.join(datadir, \"vwnd\", year+'.nc'))\n # pbar.update(1)\n\ndef process_precip(srcpath, dstpath, temp_datadir=None):\n \"\"\"For any IMERG Precipitation file located at `srcpath` is clipped, scaled and converted to\n ASCII grid file and saved at `dstpath`. 
All of this is done in a temporarily created directory\n which can be controlled by the `datadir` path\n \"\"\"\n if temp_datadir is not None and not os.path.isdir(temp_datadir):\n raise Exception(f\"ERROR: {temp_datadir} directory doesn't exist\")\n \n log.debug(\"Processing Precipitation file: %s\", srcpath)\n\n with tempfile.TemporaryDirectory(dir=temp_datadir) as tempdir:\n clipped_temp_file = os.path.join(tempdir, 'clipped.tif')\n cmd = [\n \"gdalwarp\",\n \"-dstnodata\", \n \"-9999.0\",\n \"-tr\",\n \"0.0625\",\n \"0.0625\",\n \"-te\",\n \"93.875\",\n \"9.5625\",\n \"108.6875\",\n \"33.8125\",\n '-of',\n 'GTiff',\n '-overwrite', \n f'{srcpath}',\n clipped_temp_file\n ]\n run_command(cmd)\n\n # Scale down (EARLY)\n scaled_temp_file = os.path.join(tempdir, 'scaled.tif')\n cmd = [\n \"gdal_calc.py\", \n \"-A\", \n clipped_temp_file, \n f\"--calc=A*0.1\", \n f\"--outfile={scaled_temp_file}\", \n \"--NoDataValue=-9999\", \n \"--format=GTiff\"\n ]\n run_command(cmd)\n\n # Change format, and save as processed file\n aai_temp_file = os.path.join(tempdir, 'processed.tif')\n cmd = [\n 'gdal_translate',\n '-of', \n 'aaigrid', \n scaled_temp_file, \n aai_temp_file\n ]\n run_command(cmd)\n\n # Move to destination\n shutil.move(aai_temp_file, dstpath)\n\ndef process_nc(date, srcpath, dstpath, temp_datadir=None):\n \"\"\"For TMax, TMin, UWnd and VWnd, the processing steps are same, and can be performed using\n this function.\n\n Parameters:\n date: Datetime object of the date of data\n srcpath: path of the nc file\n dstpath: path where the final ascii file will be saved\n temp_datadir: directory where the temporary data will be stored\n \"\"\"\n if temp_datadir is not None and not os.path.isdir(temp_datadir):\n raise Exception(f\"ERROR: {temp_datadir} directory doesn't exist\")\n \n log.debug(\"Processing NC file: %s for date %s\", srcpath, date.strftime('%Y-%m-%d'))\n\n with tempfile.TemporaryDirectory(dir=temp_datadir) as tempdir:\n # Convert from NC to Tif\n band = date.strftime(\"%-j\") # required band number is defined by `day of year`\n converted_tif_temp_file = os.path.join(tempdir, \"converted.tif\")\n\n cmd = [\"gdal_translate\", \"-of\", \"Gtiff\", \"-b\", band, srcpath, converted_tif_temp_file]\n run_command(cmd)\n\n # Change resolution\n scaled_temp_file = os.path.join(tempdir, \"scaled.tif\")\n cmd = [\n \"gdalwarp\",\n \"-dstnodata\", \n \"-9999.0\",\n \"-tr\",\n \"0.0625\",\n \"0.0625\",\n \"-te\",\n \"93.875\",\n \"9.5625\",\n \"108.6875\",\n \"33.8125\",\n '-of',\n 'GTiff',\n '-overwrite', \n converted_tif_temp_file, \n scaled_temp_file]\n run_command(cmd)\n\n # Convert GeoTiff to AAI\n aai_temp_file = os.path.join(tempdir, \"final_aai.tif\")\n cmd = [\"gdal_translate\", \"-of\", \"aaigrid\", scaled_temp_file, aai_temp_file]\n run_command(cmd)\n\n # Move file to destination\n shutil.move(aai_temp_file, dstpath)\n\ndef process_data(raw_datadir, processed_datadir, begin, end, temp_datadir):\n if not os.path.isdir(temp_datadir):\n os.makedirs(temp_datadir)\n\n #### Process precipitation ####\n log.debug(\"Processing Precipitation\")\n raw_datadir_precip = os.path.join(raw_datadir, \"precipitation\")\n processed_datadir_precip = os.path.join(processed_datadir, \"precipitation\")\n\n\n # with tqdm(os.listdir(raw_datadir_precip)) as pbar:\n ds = pd.date_range(begin, end)\n for srcname in os.listdir(raw_datadir_precip):\n if datetime.strptime(srcname.split(os.sep)[-1].split(\"_\")[0], \"%Y-%m-%d\") in ds:\n srcpath = os.path.join(raw_datadir_precip, srcname)\n dstpath = 
os.path.join(processed_datadir_precip, srcname.replace(\"tif\", \"asc\"))\n\n # pbar.set_description(f\"Precipitation: {srcname.split('_')[0]}\")\n process_precip(srcpath, dstpath, temp_datadir)\n # pbar.update(1)\n\n #### Process NC files ####\n # required_dates = [begin+timedelta(days=n) for n in range((end-begin).days)]\n required_dates = pd.date_range(begin, end)\n #### Process TMAX ####\n log.debug(\"Processing TMAX\")\n raw_datadir_tmax = os.path.join(raw_datadir, \"tmax\")\n processed_datadir_tmax = os.path.join(processed_datadir, \"tmax\")\n\n # with tqdm(required_dates) as pbar:\n for date in required_dates:\n srcpath = os.path.join(raw_datadir_tmax, date.strftime('%Y')+'.nc')\n dstpath = os.path.join(processed_datadir_tmax, f\"{date.strftime('%Y-%m-%d')}_TMAX.asc\")\n\n # pbar.set_description(f\"TMAX: {date.strftime('%Y-%m-%d')}\")\n process_nc(date, srcpath, dstpath, temp_datadir)\n # pbar.update(1)\n \n #### Process TMin ####\n log.debug(\"Processing TMIN\")\n raw_datadir_tmin = os.path.join(raw_datadir, \"tmin\")\n processed_datadir_tmin = os.path.join(processed_datadir, \"tmin\")\n\n # with tqdm(required_dates) as pbar:\n for date in required_dates:\n srcpath = os.path.join(raw_datadir_tmin, date.strftime('%Y')+'.nc')\n dstpath = os.path.join(processed_datadir_tmin, f\"{date.strftime('%Y-%m-%d')}_TMIN.asc\")\n\n # pbar.set_description(f\"TMIN: {date.strftime('%Y-%m-%d')}\")\n process_nc(date, srcpath, dstpath, temp_datadir)\n # pbar.update(1)\n\n #### Process UWND ####\n log.debug(\"Processing UWND\")\n raw_datadir_uwnd = os.path.join(raw_datadir, \"uwnd\")\n daily_datadir_uwnd = os.path.join(raw_datadir, \"uwnd_daily\")\n processed_datadir_uwnd = os.path.join(processed_datadir, \"uwnd\")\n\n uwnd_files = [os.path.join(raw_datadir_uwnd, f) for f in os.listdir(raw_datadir_uwnd)]\n\n for uwnd_f in uwnd_files:\n xr.open_dataset(uwnd_f).resample(time='1D').mean().to_netcdf(os.path.join(daily_datadir_uwnd, uwnd_f.split(os.sep)[-1]))\n # xr.open_dataset(vwnd_f).resample(time='1D').mean().to_netcdf(os.path.join(vwnd_outdir, vwnd_f.split(os.sep)[-1]))\n\n # with tqdm(required_dates) as pbar:\n for date in required_dates:\n srcpath = os.path.join(daily_datadir_uwnd, date.strftime('%Y')+'.nc')\n dstpath = os.path.join(processed_datadir_uwnd, f\"{date.strftime('%Y-%m-%d')}_UWND.asc\")\n\n # pbar.set_description(f\"UWND: {date.strftime('%Y-%m-%d')}\")\n process_nc(date, srcpath, dstpath, temp_datadir)\n # pbar.update(1)\n\n #### Process VWND ####\n log.debug(\"Processing VWND\")\n raw_datadir_vwnd = os.path.join(raw_datadir, \"vwnd\")\n daily_datadir_vwnd = os.path.join(raw_datadir, \"vwnd_daily\")\n processed_datadir_vwnd = os.path.join(processed_datadir, \"vwnd\")\n\n vwnd_files = [os.path.join(raw_datadir_vwnd, f) for f in os.listdir(raw_datadir_vwnd)]\n\n for vwnd_f in vwnd_files:\n xr.open_dataset(vwnd_f).resample(time='1D').mean().to_netcdf(os.path.join(daily_datadir_vwnd, vwnd_f.split(os.sep)[-1]))\n\n # with tqdm(required_dates) as pbar:\n for date in required_dates:\n srcpath = os.path.join(daily_datadir_vwnd, date.strftime('%Y')+'.nc')\n dstpath = os.path.join(processed_datadir_vwnd, f\"{date.strftime('%Y-%m-%d')}_VWND.asc\")\n\n # pbar.set_description(f\"VWND: {date.strftime('%Y-%m-%d')}\")\n process_nc(date, srcpath, dstpath, temp_datadir)\n # pbar.update(1)\n\n\ndef get_newdata(project_base, startdate, enddate, download=True, process=True):\n datadir = os.path.join(project_base, \"data\")\n raw_datadir = os.path.join(datadir, \"raw\")\n processed_datadir = 
os.path.join(datadir, \"processed\")\n temp_datadir = os.path.join(datadir, \"temp\")\n\n secrets = configparser.ConfigParser()\n secrets_path = os.path.join(project_base, 'params/secrets.ini')\n secrets.read(secrets_path) # assuming there's a secret ini file with user/pwd\n\n enddate = enddate\n\n startdate_str = startdate.strftime(\"%Y-%m-%d\")\n enddate_str = enddate.strftime(\"%Y-%m-%d\")\n\n log.log(NOTIFICATION, \"Started Downloading and Processing data from %s -> %s\", startdate_str, enddate_str)\n log.debug(\"Raw data directory: %s\", raw_datadir)\n log.debug(\"Processed data directory: %s\", processed_datadir)\n\n #### DATA DOWNLOADING ####\n if download:\n download_data(startdate, enddate, raw_datadir, secrets)\n\n #### DATA PROCESSING ####\n if process:\n process_data(raw_datadir, processed_datadir, startdate, enddate, temp_datadir)\n\n\n\ndef main():\n #### INITIALIZATION ####\n try:\n project_base = os.environ[\"PROJECT_BASE\"]\n except:\n project_base = \"/houston2/pritam/rat_mekong_v3/backend\"\n\n # metapath = os.path.join(project_base, \"metadata.yml\")\n # meta = yaml.load(open(metapath).read(), yaml.SafeLoader)\n\n # startdate = datetime.strptime(meta['lastran'], \"%Y-%m-%d\") + timedelta(days=1)\n # enddate = datetime.strptime(meta['enddate'], \"%Y-%m-%d\")\n\n # log.debug(f\"We need data from {startdate} to {enddate}\")\n \n datadir = os.path.join(project_base, \"data\")\n raw_datadir = os.path.join(datadir, \"raw\")\n processed_datadir = os.path.join(datadir, \"processed\")\n temp_datadir = os.path.join(datadir, \"temp\")\n \n \n #### OPTIONALLY OVERRIDE START ####\n # raw_datadir = os.path.join(project_base, \"temp\", \"data\", \"imerg_early_raw\")\n # processed_datadir = os.path.join(project_base, \"temp\", \"data\", \"imerg_early_processed\")\n\n startdate = datetime.strptime(\"2001-01-01\", \"%Y-%m-%d\")\n enddate = datetime.strptime(\"2021-06-14\", \"%Y-%m-%d\")\n #### OPTIONALLY OVERRIDE END ####\n\n\n # # #### DATA DOWNLOADING ####\n # download_data(startdate, enddate, raw_datadir)\n\n # #### DATA PROCESSING ####\n # process_data(raw_datadir, processed_datadir, startdate, enddate, temp_datadir)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pritamd47/rat_v2","sub_path":"backend/scripts/data_processing/newdata.py","file_name":"newdata.py","file_ext":"py","file_size_in_byte":17601,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"39"} +{"seq_id":"24036073572","text":"\"\"\"\nhttps://leetcode.com/problems/partition-labels\n\"\"\"\n\n\ndef partition_labels(s: str) -> list[int]:\n \"\"\"\"\"\"\n\n # https://leetcode.com/problems/partition-labels/solution\n # https://leetcode.com/problems/partition-labels/discuss/298474/Python-two-pointer-solution-with-explanation\n\n # https://leetcode.com/problems/partition-labels/solution/132107: \"How about an approach using intervals. Compute\n # interval (start, end) for each letter [a-z], where start is first occurrence of letter, and end is last occurrence\n # of letter. Then we merge any overlapping intervals, and the resulting intervals can form the answer.\"\n # Me: Exactly! 
(Easier to sink it in.)\n\n # 1) Optimal (Form Intervals, Merge, Yield Size): TC = O(n); SC = O(1) {s consists of lowercase English letters.}\n\n # Forming Partitions as Intervals:\n '''\n first_index, last_index = {}, {}\n for i, char in enumerate(s):\n if char not in first_index.keys():\n first_index[char] = i # add only once\n last_index[char] = i # update everytime\n # intervals = [[a, b] for a, b in zip(first_index.values(), last_index.values())]\n intervals = list(map(list, zip(first_index.values(), last_index.values())))\n '''\n interval = {}\n for i, char in enumerate(s):\n if char not in interval.keys():\n interval[char] = [i, i] # init interval of char\n else:\n interval[char][1] = i # update interval (to be precise: interval's end)\n intervals = list(interval.values()) # now we only want the resultant intervals formed\n # print(intervals) #debugging\n\n # Merging Overlapping Intervals in order to calculate final size of Partitions:\n # https://github.com/samyak1409/DSA/blob/main/SDE%20Sheet/01%29%20Arrays/008%29%20Merge%20Intervals.py\n prev = intervals[0]\n for i in range(1, len(intervals)):\n curr = intervals[i]\n if curr[0] <= prev[1]: # => intervals are overlapping!\n prev[1] = max(prev[1], curr[1]) # merging\n else:\n yield prev[1]-prev[0]+1 # adding the non-overlapping intervals to the output\n prev = curr # updating previous in order to check if it can be merged with the following interval\n yield prev[1]-prev[0]+1 # adding the last (overlapping/non-overlapping) interval to the output\n","repo_name":"samyak1409/DSA","sub_path":"SDE Sheet/01) Arrays/008.3) Partition Labels.py","file_name":"008.3) Partition Labels.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"} +{"seq_id":"15769308148","text":"import twitter\nimport requests\n\nfrom spiders.config import api_credentials\n\nrequests.packages.urllib3.disable_warnings()\n\nclass SweepRequester:\n def __init__(self):\n self.api = twitter.Api(consumer_key=api_credentials['consumer_key'],\n consumer_secret=api_credentials['consumer_secret'],\n access_token_key=api_credentials['access_token_key'],\n access_token_secret=api_credentials['access_token_secret']\n )\n\n def request_sweepstake_statuses(self):\n keywords = {\n # 'main': ['#contest', '#giveaway', 'RT to win']\n 'main': ['#contest RT', '#giveaway RT']\n }\n self.statuses = []\n\n # loop through keywords main and append to an array\n for word in keywords['main']:\n results = self.api.GetSearch(term=word, count=50)\n for result in results:\n if self.not_yet_included(result):\n self.statuses.append(result)\n\n return self.statuses\n\n def not_yet_included(self, status):\n for s in self.statuses:\n if s.id == status.id:\n return False\n return True\n","repo_name":"AnthonyBobsin/sweepstakes","sub_path":"Sweep/sweep_requester.py","file_name":"sweep_requester.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"39"} +{"seq_id":"27469056888","text":"import subprocess\nimport numpy\nfrom numpy.core.fromnumeric import clip\n\nPROG = \"./matmult -n {size} -v {version}\"\nRUNS_PER_SIZE = 25\n\ndef run_program(size, version):\n command = PROG.format(size=size, version=version)\n return subprocess.check_output(command, shell=True, encoding=\"UTF8\")\n\ndef get_running_time(result: str):\n last_line = result.split(\"\\n\")[2]\n running_time = last_line.split(\" \")[2]\n return float(running_time)\n\ndef 
run_n_times(n, size, version) -> list:\n results = []\n for i in range(n):\n result = run_program(size, version)\n running_time = get_running_time(result)\n results.append(running_time)\n return results\n\ndef get_average(results : list):\n results.sort()\n n = len(results)\n five_percent_of_n = int(n * 0.05)\n without_extremes = results[five_percent_of_n : -five_percent_of_n]\n return numpy.mean(without_extremes)\n\ndef test_version(size,version):\n results = run_n_times(RUNS_PER_SIZE, size, version)\n return get_average(results)\n\ndef benchmark_versions():\n sizes = [ x * 16 for x in range(4,45) ] \n versions = [0,1,2,3]\n version_results = [ [], [], [], [] ]\n for size in sizes:\n for version in versions:\n print(\"Testing {version} size {size}\".format(version=version,size=size))\n r = test_version(size, version)\n version_results[version].append(r)\n with open(\"versions.dat\", \"w\") as file:\n n_of_sizes = len(sizes)\n for i in range(n_of_sizes):\n file.write(str(sizes[i]) + \" \")\n for version in versions:\n file.write( str(version_results[version][i]) + \" \")\n file.write(\"\\n\")\n\ndef benchmark_blocks_offset():\n sizes = [ x * 16 for x in range(4,45) ]\n results = [[], []]\n with open(\"versions.dat\",\"r\") as file:\n content = file.read()\n lines = content.split(\"\\n\")\n for line in lines:\n prev_result = line.split(\" \")[4]\n results[0].append(prev_result)\n for size in sizes:\n print(\"Testing {version} size {size}\".format(version=3,size=size))\n r = test_version(size, 3)\n results[1].append(r)\n with open(\"blocks_offset.dat\", \"w\") as file:\n n_of_sizes = len(sizes)\n for i in range(n_of_sizes):\n file.write(str(sizes[i]) + \" \")\n file.write(str(results[0][i]) + \" \")\n file.write(str(results[1][i]) + \" \")\n file.write(\"\\n\")\n\n# benchmark_versions()\n# benchmark_blocks_offset()","repo_name":"MusicFreak456/Uniwroc","sub_path":"SemestrIV/ASK/Pracownia/opt-matmult-MusicFreak456/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41149791635","text":"from .group import ChatGroup\n\n\nclass SimpleChat:\n\n DEFAULT_INDEX = 'default'\n\n def __init__(\n self,\n nlp,\n matched_threshold=0.95\n ):\n self.nlp = nlp\n self.chats = {}\n self.matched_threshold = matched_threshold\n\n def learn(\n self,\n cid: str,\n say: str,\n reply: str,\n index=''\n ):\n if not cid or not say or not reply:\n return\n\n doc = self.nlp(say)\n if not doc.has_vector:\n return\n\n index = index or self.DEFAULT_INDEX\n if index not in self.chats:\n self.chats[index] = ChatGroup(self.matched_threshold)\n group = self.chats[index]\n group.add(cid, say, doc, reply)\n\n def reply(\n self,\n say: str,\n threshold=0.85,\n index=''\n ):\n index = index or self.DEFAULT_INDEX\n if not say:\n return self.empty_reply()\n\n if index not in self.chats:\n return self.empty_reply()\n\n chat = self.chats[index]\n say_doc = self.nlp(say)\n result = chat.reply(say_doc, threshold)\n return {\"matched\": result[0], \"reply\": result[1], \"likely\": result[2]}\n\n @staticmethod\n def empty_reply():\n return {\"matched\": \"\", \"reply\": \"\", \"likely\": 0.0}\n","repo_name":"thirdgerb/spaCy-nlu","sub_path":"simpleChat/simpleChat.py","file_name":"simpleChat.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"31618976381","text":"from data.util import 
load_tokenizer\nimport torch\nimport torch.nn as nn\nfrom torch.nn import TransformerEncoder, TransformerEncoderLayer, LayerNorm\nimport math\n\n\n# helper Module that adds positional encoding to the token embedding to introduce a notion of word order.\nclass PositionalEncoding(nn.Module):\n def __init__(self, emb_size, dropout, maxlen = 5000):\n super(PositionalEncoding, self).__init__()\n den = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)\n pos = torch.arange(0, maxlen).reshape(maxlen, 1)\n pos_embedding = torch.zeros((maxlen, emb_size))\n pos_embedding[:, 0::2] = torch.sin(pos * den)\n pos_embedding[:, 1::2] = torch.cos(pos * den)\n pos_embedding = pos_embedding.unsqueeze(-2)\n\n self.dropout = nn.Dropout(dropout)\n self.register_buffer(\"pos_embedding\", pos_embedding)\n\n def forward(self, token_embedding):\n return self.dropout(token_embedding + self.pos_embedding[: token_embedding.size(0), :])\n\n\n# helper Module to convert tensor of input indices into corresponding tensor of token embeddings\nclass TokenEmbedding(nn.Module):\n def __init__(self, vocab_size, emb_size):\n super(TokenEmbedding, self).__init__()\n self.embedding = nn.Embedding(vocab_size, emb_size)\n self.emb_size = emb_size\n\n def forward(self, tokens):\n return self.embedding(tokens.long()) * math.sqrt(self.emb_size)\n\n\ndef compute_accs(logits, tgt):\n batch_size = tgt.size(0)\n preds = torch.argmax(logits, dim=-1)\n correct = (preds == tgt)\n correct[tgt == 0] = True\n\n acc_elem = correct[tgt != 0].float().mean()\n acc_seq = correct.view(batch_size, -1).all(dim=0).float().mean()\n\n return acc_elem, acc_seq\n\n# Seq2Seq Network\nclass TransformerDecoder(nn.Module):\n def __init__(\n self,\n num_encoder_layers,\n emb_size,\n nhead,\n dim_feedforward,\n dropout,\n code_dim,\n ):\n super(TransformerDecoder, self).__init__()\n encoder_layer = TransformerEncoderLayer(emb_size, nhead, dim_feedforward, dropout, \"gelu\")\n encoder_norm = LayerNorm(emb_size)\n self.transformer = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)\n\n self.tokenizer = load_tokenizer()\n vocab_size = self.tokenizer.get_vocab_size()\n self.generator = nn.Linear(emb_size, vocab_size)\n self.tok_emb = TokenEmbedding(vocab_size, emb_size)\n self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)\n self.code_encoder = nn.Linear(code_dim, emb_size)\n \n def forward(self, batched_sequence_data, codes):\n batched_sequence_data = batched_sequence_data.transpose(0, 1)\n\n mask, key_padding_mask = self.create_mask(batched_sequence_data)\n outs = self.positional_encoding(self.tok_emb(batched_sequence_data)) + self.code_encoder(codes).unsqueeze(0)\n outs = self.transformer(outs, mask, key_padding_mask)\n logits = self.generator(outs)\n\n logits = logits.transpose(0, 1)\n return logits\n \n def sample(self, codes, argmax, max_len):\n batch_size = codes.size(0)\n ys = torch.ones(batch_size, 1).fill_(self.tokenizer.token_to_id(\"[BOS]\")).type(torch.long).to(codes.device)\n \n ended = torch.zeros(batch_size, 1, dtype=torch.bool, device=codes.device)\n for _ in range(max_len-1):\n prob = self(ys, codes)[:, -1]\n if argmax:\n next_word = torch.argmax(prob, dim=-1).unsqueeze(1)\n else:\n assert False\n\n next_word[ended] = self.tokenizer.token_to_id(\"[PAD]\")\n ys = torch.cat([ys, next_word], dim=1)\n ended = ended | (next_word == self.tokenizer.token_to_id(\"[EOS]\"))\n \n return ys\n\n def generate_square_subsequent_mask(self, sz, device):\n mask = (torch.triu(torch.ones((sz, sz), device=device)) 
== 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float(\"-inf\")).masked_fill(mask == 1, float(0.0))\n return mask\n\n\n def create_mask(self, seq):\n seq_len = seq.shape[0]\n mask = self.generate_square_subsequent_mask(seq_len, device=seq.device)\n key_padding_mask = (seq == self.tokenizer.token_to_id(\"[PAD]\")).transpose(0, 1)\n return mask, key_padding_mask \n\n","repo_name":"sungsoo-ahn/mol-hrl","sub_path":"src/module/decoder/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2905547849","text":"from io import StringIO\nimport numpy as np\nimport pandas as pd\nimport scanpy as sc\nimport anndata\nfrom typing import Optional\nimport matplotlib.pyplot as plt\nimport bioquest as bq\n\ndef labeled(\n adata: anndata.AnnData, \n cluster_names: str, \n reference_key: str, \n cell_type_key: str = 'CellType', \n inplace: bool = True\n ) -> Optional[anndata.AnnData]:\n '''\n labeled(hdata,cluster_names=new_cluster_names,reference_key='leiden',cell_type_key='CellType')\n '''\n\n _adata = adata if inplace else adata.copy()\n _ref_df = _adata.obs.loc[:, [reference_key]]\n _annot_df = pd.read_csv(StringIO(cluster_names), header=None, dtype='object')\n _adata.obs[cell_type_key] = pd.merge(\n _ref_df, _annot_df, left_on=reference_key, right_on=0, how='left')[1].values\n return None if inplace else _adata\n\n\ndef label_helper(number_of_cluster: int):\n '''\n number_of_cluster: 最后一个cluster的数字\n '''\n _s1 = \",\\n\".join([str(i) for i in range(number_of_cluster+1)])\n _s2 = \"\\nnew_cluster_names ='''\\n\" + _s1 + \",\\n'''\\n\"\n print(_s2)\n\ndef auc_heatmap(adata,marker,out_prefix,ref_key=\"Cluster\",figsize=(12,6),use_raw=True):\n import decoupler\n net=marker.melt(var_name=\"source\",value_name=\"target\").dropna()\n decoupler.run_aucell(adata,net,source=\"source\",target=\"target\",min_n=1,seed=1314,use_raw=use_raw)\n dt2=adata.obsm[\"aucell_estimate\"].groupby(by=adata.obs.loc[:,ref_key]).agg(np.mean)\n import seaborn\n seaborn.clustermap(dt2.T,method='complete',z_score=0,cmap=\"viridis\",figsize=figsize);\n plt.savefig(f\"{out_prefix}.pdf\",bbox_inches='tight')\n dt2.index.name=\"CellType\"\n dt2.to_csv(f\"{out_prefix}_score.csv.gz\")\n\ndef score_heatmap(adata,marker_df,reference_key=\"Cluster\",figsize=(9,6),return_score=False,save_fig=False):\n obs = adata.obs\n markers_dict = {x:np.intersect1d(marker_df.loc[:,x].dropna(),adata.raw.var_names) for x in marker_df.columns}\n for x in markers_dict.keys():\n sc.tl.score_genes(adata,gene_list=markers_dict[x],score_name=f\"{x}_Marker_Score\")\n dt = bq.tl.select(adata.obs,columns=[reference_key],pattern=\"_Marker_Score$\")\n adata.obs = obs\n a=dt.groupby(by=reference_key).apply(np.mean,axis=0)\n a.columns = bq.st.removes(string=a.columns,pattern=r\"_Marker_Score$\")\n import seaborn as sns\n sns.clustermap(a.T,method='complete',standard_scale=0,cmap=\"viridis\",figsize=figsize);\n if return_score:\n return dt\n if save_fig:\n plt.savefig(f\"{save_fig}/anno_heatmap.pdf\",bbox_inches='tight')\n a.to_csv(f\"{out_prefix}_score.csv.gz\")","repo_name":"BioQuestX/sckit","sub_path":"cell_type_annotation.py","file_name":"cell_type_annotation.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"14840483355","text":"#-----------------------------------------------------\r\n#-----------------Data 
Cleaning-----------------------\r\n#-----------------------------------------------------\r\n\r\n#File Name:dailyCalories_merged.csv\r\n#--------------------Authors--------------------------\r\n#Syhu Nowsath Ali syhu91@gmail.com\r\n#Sathyanarayanan S narayanansathya2108@gmail.com\r\n#-----------------------------------------------------\r\n\r\n#-----------------------------------------------------\r\n#We considered three process of Data Cleaning:\r\n# 1.Eliminating Redundancy/Duplicates\r\n# 2.Eliminating Missing Values\r\n# 3.Outliers Analysis\r\n#-----------------------------------------------------\r\n\r\n#Importing Necessary libraries\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import stats\r\nimport numpy as np\r\n\r\n#Importing CSV file as Pandas Dataframe\r\ndaily_activity = pd.read_csv(r'dailyActivity_merged.csv')\r\nprint(\"The uploaded csv contains:\",daily_activity.shape[0],\"Rows\")\r\n#print(daily_activity)\r\n\r\n#Data Cleaning\r\n\r\n\r\n#-------------------Scatter Plot----------------------\r\n\r\n#Creating a ScatterPlot to view Outliers\r\nfig, ax = plt.subplots(figsize = (18,10))\r\nax.scatter(daily_activity['Id'], daily_activity['TotalSteps'], color='orange')\r\n\r\n# X axis label\r\nax.set_xlabel('User Id')\r\n\r\n# Y axis label\r\nax.set_ylabel('TotalSteps')\r\nplt.title(\"How does raw data look like\")\r\nplt.show()\r\n\r\n#------------------------------------------------------\r\n\r\n\r\n#------------------------------------------------------\r\n\r\n\r\n\r\n#-----------------Redundency Analysis-----------------\r\n\r\n#created a list to check if there are any duplicates\r\nduplicates_list = daily_activity[daily_activity.duplicated(keep = False, subset=['Id','ActivityDate'])]\r\nprint(\"Number of Duplicates:\",duplicates_list.shape[0])\r\nprint(duplicates_list)\r\n\r\n#---------OR---------\r\n\r\n#Return Data frame removing all duplicates occurrences\r\ndata_without_duplicates = daily_activity.drop_duplicates(subset=['Id', 'ActivityDate'], keep=False)\r\nprint(data_without_duplicates)\r\n#-----------------------------------------------------\r\n\r\n\r\n\r\n#-----------------Missing Values----------------------\r\n\r\n#-----------------NA/Null Values----------------------\r\n\r\n#Created list in order to find if any null values in the table\r\nna_list = daily_activity[daily_activity.isna().any(axis=1)]\r\nprint(\"There are \",na_list.shape[0],\" rows contains NA\")\r\nprint(na_list)\r\n\r\n#-----------------Zero Values----------------------\r\n\r\n#To find out if any values in TotalSteps column has zero values(Its impossible to have a person taking 0 steps a day)\r\n\r\nrows_with_zeroes = daily_activity.loc[(daily_activity['TotalSteps'] == 0)]\r\nprint(\"There are \",rows_with_zeroes.shape[0],\" number of entries containing Zero steps.\")\r\nprint(rows_with_zeroes)\r\n\r\n#creating rows_without_zeroes to remove any zero values in Total steps.\r\nrows_without_zeros = daily_activity.loc[(daily_activity['TotalSteps'] != 0)]\r\nprint(\"After Removing Rows with 0 Calories, Entries count:\",rows_without_zeros.shape[0])\r\nprint(rows_without_zeros)\r\n\r\n#-----------------------------------------------------------\r\n\r\n\r\n\r\n#-----------------Outliers Analysis-------------------\r\n\r\n#-------------------Scatter Plot------------------------\r\n\r\n#Creating a ScatterPlot to view Outliers\r\n\r\nfig, ax = plt.subplots(figsize = (18,10))\r\nax.scatter(rows_without_zeros['Id'], rows_without_zeros['TotalSteps'], color='orange')\r\n\r\n# X axis 
label\r\nax.set_xlabel('User')\r\n\r\n# Y axis label\r\nax.set_ylabel('Total Steps')\r\nplt.title(\"Data with outliers\")\r\nplt.show()\r\n\r\n#----------------------Z-Score-----------------------\r\n#Identifying and Eliminating Outliers using Z-Score\r\n#Zscore = (data_point -mean) / std. deviation\r\n#Outliers are defined as data points having Z-Score -/+3(Gaussian Distribution approach)\r\n\r\n#upper_threshold = 3\r\n#lower_threshold = -3\r\n\r\nz = np.abs(stats.zscore(rows_without_zeros['TotalSteps']))\r\nprint(\"Z-Scores for the data:\")\r\nprint(z)\r\n\r\n#View outlier Data to decide if outliers to be eliminated or not\r\n\r\noutliers = (rows_without_zeros.iloc[np.where(z> 3) or np.where(z < -3)])\r\nprint(\"The data contains \",outliers.shape[0],\" outliers.\")\r\nprint(outliers)\r\n\r\n#Final cleaned data after removing outliers\r\ndata_cleaned_daily_activity = pd.concat([rows_without_zeros, outliers]).drop_duplicates(keep=False)\r\nprint(\"The final cleaned data:\")\r\nprint(data_cleaned_daily_activity)\r\n\r\n#-------------------Scatter Plot----------------------\r\n\r\n#Creating a ScatterPlot to view Outliers\r\nfig, ax = plt.subplots(figsize = (18,10))\r\nax.scatter(data_cleaned_daily_activity['Id'], data_cleaned_daily_activity['TotalSteps'], color='orange')\r\n\r\n# X axis label\r\nax.set_xlabel('User Id')\r\n\r\n# Y axis label\r\nax.set_ylabel('TotalSteps')\r\nplt.title(\"How does cleaned data look like\")\r\nplt.show()\r\n\r\n#------------------------------------------------------\r\n\r\n\r\n#Save the final cleaned data on a new csv to take it to \"Process\" phase\r\n\r\ndata_cleaned_daily_activity.to_csv(r'dailyactivity_merged_datacleaned.csv')\r\nprint(\"---------------------Data Cleaned Successfully---------------------\")\r\nprint(\"Final cleaned Data stored in: \\\"dailyactivity_merged_datacleaned.csv\\\"\")\r\nprint(\"-------------------------------------------------------------------\")\r\n\r\n\r\n","repo_name":"SyhuNowsath/Capstone-bellabeat-using-python","sub_path":"dailyActivity_merged_datacleaning.py","file_name":"dailyActivity_merged_datacleaning.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"11703052971","text":"import pandas as pd\nbooks = pd.read_excel('filling.xlsx', index_col='DATE')\nfor i in books.index:\n books['TPRICE'].at[i] = books['PRICE'].at[i]*books['NUM'].at[i]\nbooks['TPRICE'] = books['TPRICE'].apply(lambda x: x+2)\nprint(books)\n# ------------------------------------------------------\n# books.sort_values(by='TPRICE', inplace=True, ascending=False)\nbooks.sort_values(by=['INSTORE', 'TPRICE'],\n inplace=True, ascending=[True, False])\nprint(books)\n# ------------------------------------------------------\n\n\n# def price_20_40(a):\n# return a >= 20 and a <= 40\n\n\n# def level_a(s):\n# return s >= 100 and s <= 1000\n\n\n# books = books.loc[books['PRICE'].apply(\n# price_20_40)].loc[books['TPRICE'].apply(level_a)]\n# books = books.loc[books.PRICE.apply(\n# price_20_40)].loc[books.TPRICE.apply(level_a)]\nbooks = books.loc[books.PRICE.apply(\n lambda a:a >= 20 and a <= 40)].loc[books.TPRICE.apply(lambda x:x >= 100 and x <= 1000)]\nprint(books)\n#--------------------------------------------------------\n#","repo_name":"Kidays/Testcode","sub_path":"Python for excel/3.blank filling_function.py","file_name":"3.blank 
filling_function.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"34656632041","text":"# 编写一个程序,找到两个单链表相交的起始节点。\n#\n# 例如,下面的两个链表:\n#\n# A: a1 → a2\n# ↘\n# c1 → c2 → c3\n# ↗\n# B: b1 → b2 → b3\n# 在节点c1开始相交。\n#\n# 注意:\n# 如果两个链表没有交点,返回null.\n# 在返回结果后,两个链表仍须保持原有的结构。\n# 可假定整个链表结构中没有循环。\n# 程序尽量满足O(n)时间复杂度,且仅用O(1)内存。\n\nclass ListNode(object):\n def __init__(self, num):\n self.value = num\n self.next = None\n\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :param headA: ListNode\n :param headB: ListNode\n :return: ListNode\n \"\"\"\n if headA == None or headB == None:\n return\n a, b = headA, headB\n la, lb = 1, 1 # A,B的长度\n while a.next:\n la += 1\n a = a.next\n while b.next:\n lb += 1\n b = b.next\n if a.value != b.value:\n return\n\n a, b = headA, headB # 回到头节点\n if lb > la:\n # 如果B比A长,A右移动\n for _ in range(lb - la):\n b = b.next\n else:\n for _ in range(la - lb):\n a = a.next\n while a: # 重新遍历,一定能发现重复的点,重复一个点也交做相交了\n if a.value == b.value:\n return a\n a = a.next\n b = b.next\n\n\nif __name__ == \"__main__\":\n test = Solution()\n headA = ListNode(1)\n l2 = ListNode(2)\n l3 = ListNode(3)\n l4 = ListNode(4)\n l5 = ListNode(5)\n headA.next = l2\n l2.next = l3\n l3.next = l4\n l4.next = l5\n headB = ListNode(1)\n ll1 = ListNode(1)\n ll2 = ListNode(1)\n ll3 = ListNode(3)\n ll4 = ListNode(4)\n ll5 = ListNode(5)\n headB.next = ll1\n ll1.next = ll2\n ll2.next = ll3\n ll3.next = ll4\n ll4.next = ll5\n print(test.getIntersectionNode(headA, headB))\n","repo_name":"Yara7L/python_algorithm","sub_path":"leetcode2/59_getIntersectionNode.py","file_name":"59_getIntersectionNode.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19334354386","text":"#Finding the closest Starbucks and the distance to that Starbucks.\n\n#First, calculate the distance to each Starbucks for each house, then sort for closest and save that store location and distance.\n\n\n\n\n#algorithm to find distance, borrowed from \"http://www.johndcook.com/blog/python_longitude_latitude/\"\nimport math\n \ndef miles_distance_on_unit_sphere(lat1, long1, lat2, long2):\n \n # Convert latitude and longitude to \n # spherical coordinates in radians.\n degrees_to_radians = math.pi/180.0\n \n # phi = 90 - latitude\n phi1 = (90.0 - lat1)*degrees_to_radians\n phi2 = (90.0 - lat2)*degrees_to_radians\n \n # theta = longitude\n theta1 = long1*degrees_to_radians\n theta2 = long2*degrees_to_radians\n \n # Compute spherical distance from spherical coordinates.\n \n # For two locations in spherical coordinates \n # (1, theta, phi) and (1, theta', phi')\n # cosine( arc length ) = \n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n \n cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) + \n math.cos(phi1)*math.cos(phi2))\n arc = math.acos( cos )\n \n # Remember to multiply arc by the radius of the earth \n # in your favorite set of units to get length.\n # to get length in km, multiply arc by 6373\n # to get length in miles, multiply arc by 3960\n miles_distance = arc * 3960\n return miles_distance\n\nimport pandas as pd\n\nsbux = pd.read_csv('C:/incubator/sbux.csv')\n\nzillhomes = pd.read_csv('C:/incubator/zillhomes.csv')\nzillhomes\n\n#loop to go through and create columns in homes data for distance to each starbucks\nnumsbux = len(sbux)\n\nfor x in 
range(0,numsbux):\n zillhomes[ ( sbux ['Store ID'][x] ) ] = 'NA'\n\nnumzills = len(zillhomes)\n\nfor z in range(0,numzills):\n #get lat of home for lat1 as a\n a = zillhomes['latitude'][z]\n #get long of home for long1 as b\n b = zillhomes['longitude'][z]\n for y in range(0,numsbux):\n #get lat of sbux for lat2 as c\n c = sbux['Latitude'][y]\n #get long of sbux for long2 as d\n d = sbux['Longitude'][y]\n #get distance using lat1, long1, lat2, long2 as a,b,c,d\n zillhomes[(sbux['Store ID'] [y])][z] = miles_distance_on_unit_sphere(a, b, c, d)\n\n#create a vector of starbucks locations\nsbuxlocations = sbux['Store ID']\n\n#finding minimum starbucks distance each house\nzillhomes['Smallest Sbux Distance'] = zillhomes[sbuxlocations].min(axis=1)\n\n#matching that distance to sbux store id\nzillhomes['Closest Sbux'] = id(zillhomes[sbuxlocations].min(axis=1))\n","repo_name":"DistrictDataLabs/03-gentrifuge","sub_path":"sbuxDistances.py","file_name":"sbuxDistances.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19596387371","text":"# -*- coding: utf-8 -*-\nimport os\nimport hashlib\nfrom datetime import timedelta\n\nfrom sqlalchemy.sql import func\n\nimport models\nfrom core.db import db\n\n\nclass PublicModel(models.saveable_model.SaveableModel):\n __tablename__ = 'public_model'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n model_name = db.Column(db.String(128))\n md5 = db.Column(db.String(32))\n\n def __init__(self, model_name, md5):\n super().__init__()\n\n # Store the data in the object\n self.model_name = str(model_name).strip()\n self.md5 = md5\n\n @classmethod\n def init_public_models(cls):\n # Get all models in the database\n models_set = set([model.model_name for model in cls.get_public_models()])\n public_models_path = os.path.join('.', 'lora_models', 'public')\n for filename in os.listdir(public_models_path):\n name, extension = os.path.splitext(filename)\n model_path = os.path.join(public_models_path, filename)\n if name in models_set:\n models_set.remove(name)\n continue\n with open(model_path, 'rb') as f:\n file_data = f.read()\n md5_hash = hashlib.md5(file_data).hexdigest()\n new_public_model = PublicModel(name, md5_hash)\n new_public_model.save(commit=False)\n removed_models_list = list(models_set)\n for model_name in removed_models_list:\n db.session.delete(cls.find_public_model_by_model_name(model_name))\n db.session.commit()\n \n @classmethod\n def get_public_models(cls):\n return cls.query.all() \n \n @classmethod\n def find_public_model_by_model_name(cls, model_name):\n return cls.query.filter_by(model_name=model_name).first()\n \n","repo_name":"RapDoodle/LoRA-Playground","sub_path":"web/models/public_model.py","file_name":"public_model.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"43008963775","text":"# -*- coding: utf-8 -*-\r\n\r\n###############################################################################\r\n# This is a Python REST WEB Service client for the 4th assigment of distributed \r\n# systems.\r\n#\r\n# Assigment: Arquitetura Cliente-Servidor Middleware para Comunicação entre \r\n# Ambientes heterogêneos\r\n# Creator: Samuel Pelegrinello Caipers (1097261)\r\n# Professor: Ana Cristina B. 
Kochem Vendramin\r\n###############################################################################\r\n\r\nimport accomodationControl\r\nimport airFlightTicketControl\r\nimport sys\r\n\r\nclass main():\r\n \"\"\"\r\n Main class that receives user commands\r\n \"\"\"\r\n \r\n while True:\r\n print(\"What do you need?\")\r\n print(\"[1] buy an accommodation\")\r\n print(\"[2] query hotels\")\r\n print(\"[3] query accommodation\")\r\n print(\"[4] buy a ticket\")\r\n print(\"[5] query flights\")\r\n print(\"[6] query airTickets booked\")\r\n print(\"[0] exit\")\r\n\r\n code = raw_input()\r\n \r\n if code == \"\":\r\n continue\r\n elif code == \"1\":\r\n accomodationControl.buyAccommodation()\r\n elif code == \"2\":\r\n accomodationControl.getHotels()\r\n elif code == \"3\":\r\n accomodationControl.getAccommodation()\r\n elif code == \"4\":\r\n airFlightTicketControl.buyFlightTicket()\r\n elif code == \"5\":\r\n airFlightTicketControl.getFlights()\r\n elif code == \"6\":\r\n airFlightTicketControl.getAirTicketsBooked()\r\n elif code == \"0\":\r\n sys.exit(0)","repo_name":"Caipers/t4_webservices","sub_path":"client_python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"43168359563","text":"# import and load necessary package and mosules\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nURL = \"http://sec.gov.ng/statistical-bulletin/\"\n\n\ndef scrape_data_links(sec_url=URL):\n \"\"\"\n Get links where our data is housed\n \"\"\"\n # scraping to get required links to data of interest\n res = requests.get(sec_url)\n resp = res.text\n soup = BeautifulSoup(resp, \"html.parser\")\n containerz = soup.find(\"div\", {\"class\": \"the-contain\"})\n child = containerz.find_all(\"p\")\n sec_statiscal_bulletine = []\n for i in child:\n for j in i.find_all(\"a\"):\n if j.has_attr(\"href\"):\n sec_statiscal_bulletine.append(j.attrs[\"href\"])\n for link in sec_statiscal_bulletine:\n print(f\"Our link:{link}\")\n return sec_statiscal_bulletine\n\n\ndata = scrape_data_links()\n\n\n# injest and transform selected items of financial statements\ndef clean_col(col):\n return (\n col.strip()\n .replace(\"(N'000)\", \"N(K)\")\n .replace(\"/\", \"\")\n .replace(\" \", \"_\")\n .replace(\"(%)\", \"pct\")\n .replace(\"\\n\", \"\")\n .replace(\"-\", \"\")\n .lower()\n )\n\n\ndef company_financials(fin_data=data[-2]):\n \"\"\"\n Selected Items from companies Income and Financial position of\n Quoted Companies\n Quarterly fillings of listed companies Financials with SEC and NSE\n NOTE:\n\n # 1) QUARTER: Note that since the Financial-year of different companies can\n vary, it is possible that a company's Q1 is March while it falls into\n another month for a different company.\n # 2) NUMBER OF MONTHS: This is the number of months for which a\n quarter's account is presented. It is usually 3-, 6-, 9-\n and 12-months for Q1, Q2, Q3 and Q4 respectively.\n # In some few cases however, a company may report 3-months\n account for Q2 or Q3. 3) TURNOVER/REVENUE/ GROSS EARNINGS/\n GROSS PREMIUM INCOME: Turnover/Revenue, Gross Earnings and Gross Premium\n # Income are used to capture the Income of Non-financial firms,\n Banks and Insurance companies respectively. When 'Gross Earnings'\n is not supplied by a bank,\n # it is computed as: Gross Earnings = Interest Income + Fee\n and Commission Income + Net Gain/ (Losses) on Financial Instruments +\n Other Income. 
In this case,\n # the summation may slightly vary from the actual Gross Earnings\n when a bank presents some of these components in the net form; e.g.\n 'Net Interest Income' instead of 'Interest Income'.\n # 4) STOCK PRICES: Month3=3rd and end month of a quarter;\n Month2=2nd month of a quarter; Month1=1st month of a quarter.\n For example, if the Q2 of a firm ends in June, then Month1 are Apr,\n # Month2 are May and Month3 are Jun end prices respectively\n \"\"\"\n df = pd.read_excel(\n fin_data, sheet_name=\"D.2\", skiprows=[0, 1, 2], dtype=\"object\"\n ) # noqa\n\n cleancol_df = df.rename(columns=clean_col)\n # transformws to csv\n cleancol_df.to_csv(\"./docs/abridge-financials.csv\", index=False)\n\n\ndef money_mkt_indicators(mm_data=data[-1]):\n \"\"\"\n Money market indicator updated monthly by CBN\n \"\"\"\n # transformed money mkt indicator data\n money_mkt = pd.read_excel(\n mm_data, sheet_name=\"E.1\", skiprows=[0, 1], dtype=\"object\"\n )\n mm_rep_name = money_mkt.rename(columns={\"Unnamed: 0\": \"date\"})\n clean_col_mm = mm_rep_name.rename(columns=clean_col)\n clean_col_mm.to_csv(\"./docs/money-mkt-indicators.csv\", index=False)\n\n\ndef money_credit_stat(mc_data=data[-1]):\n \"\"\"\n Money and credit statistics updated monthly by CBN\n \"\"\"\n # transformed money and credit statistics data\n money_credit = pd.read_excel(\n mc_data,\n sheet_name=\"E.2\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3, 4, 5, 6],\n dtype=\"object\",\n )\n\n m_c_name = money_credit.rename(columns={\"Unnamed: 0\": \"date\"})\n mc_clean_col = m_c_name.rename(columns=clean_col)\n mc_clean_col.to_csv(\"./docs/money-credit-stat.csv\", index=False)\n\n\ndef gross_domestic_prod(gdp_data=data[-1]):\n \"\"\"\n Gross Domestic Product,production updated yearly by NBS\n \"\"\"\n # transformed the GDP-Yearly data\n gdpby_yearly = pd.read_excel(\n gdp_data, sheet_name=\"E.4\", skiprows=[0, 1], dtype=\"object\"\n )\n gdpname = gdpby_yearly.rename(columns={\"Unnamed: 0\": \"sectors\"})\n gdp_drop_nacol = gdpname.dropna(axis=1)\n gdp_drop_nacol.to_csv(\"./docs/gdp-yearly.csv\", index=False)\n\n\ndef labour_force_stats(labour_data=data[-1]):\n \"\"\"\n Unemployment and Underemployment watch Updated quarterly by NBS\n \"\"\"\n # Transform the Labour force-Quarterly data\n\n labour_stat = pd.read_excel(\n labour_data,\n sheet_name=\"E.8\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n dtype=\"object\",\n )\n labour_clean_col = labour_stat.rename(columns=clean_col)\n labour_clean_col.to_csv(\n \"./docs/unemployment-underemployment-watch.csv\", index=False\n )\n\n\ndef crude_oil_production(crude_data=data[-1]):\n \"\"\"\n Oil Market report Updated monthly by OPEC\n \"\"\"\n # Transformed crude oil production data\n crude_production = pd.read_excel(\n crude_data,\n sheet_name=\"E.9\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3],\n dtype=\"object\",\n )\n\n crude_chg_name = crude_production.rename(columns={\"Unnamed: 0\": \"date\"})\n crude_prod_clean = crude_chg_name.rename(columns=clean_col)\n crude_prod_clean.to_csv(\"./docs/crude-oil-production.csv\", index=False)\n\n\ndef nigeria_top_traders(top_trader_data=data[-1]):\n \"\"\"\n Nigeria top trade partners updated Quarterly by NBS\n \"\"\"\n # Transformed top trader partners data\n top_trade_partners = pd.read_excel(\n top_trader_data,\n sheet_name=\"E.10\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3, 4, 6, 7, 8],\n dtype=\"object\",\n )\n top_dropna = top_trade_partners.dropna(thresh=5)\n top_partner_clean = top_dropna.rename(columns=clean_col)\n 
top_partner_clean.to_csv(\"./docs/top-trade-partners.csv\", index=False)\n\n\ndef summary_foreign_trade(sum_trade_data=data[-1]):\n \"\"\"\n summary od foreign trade data updated monthly by NBS (N'millions)\n \"\"\"\n # Transformed summarized foreign trade data\n sumary_foreign_trade = pd.read_excel(\n sum_trade_data,\n sheet_name=\"E.11\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3, 4, 5, 6],\n dtype=\"object\",\n )\n sum_foreign_clean = sumary_foreign_trade.rename(columns=clean_col)\n sum_foreign_clean.to_csv(\"./docs/summarized-foreign-trade.csv\", index=False) # noqa\n\n\ndef external_reserve(ext_res_data=data[-1]):\n \"\"\"\n Movement in Foreign Reserves-30 day moving average-updated monthly by CBN\n \"\"\"\n # Transformed foreign reserves data\n ext_reserved = pd.read_excel(\n ext_res_data, sheet_name=\"E.14\", skiprows=[0, 1], dtype=\"object\"\n )\n\n ext_res_clean = ext_reserved.rename(columns=clean_col)\n ext_res_clean.to_csv(\"./docs/foreign-reserves.csv\", index=False)\n\n\ndef nse_allshare_index(nse_index_data=data[2]):\n \"\"\"\n NSE all share index data updated monthly\n \"\"\"\n # transformed NSE all share index data\n nse_all_share_index = pd.read_excel(\n nse_index_data, sheet_name=\"B.4\", skiprows=[0, 1], dtype=\"object\"\n )\n nse_index_clean = nse_all_share_index.rename(columns=clean_col)\n nse_index_clean.to_csv(\"./docs/nse-all-share-index.csv\", index=False)\n\n\ndef equities_mkt_cap(equity_cap_data=data[2]):\n \"\"\"\n Nigerian Stock Exchange Market Capitalization-Equities (since 1985)\n \"\"\"\n # transformed nse equity capitalization data\n mkt_cap_equities = pd.read_excel(\n equity_cap_data,\n sheet_name=\"B.5\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],\n dtype=\"object\",\n )\n mkt_cap_clean = mkt_cap_equities.rename(columns=clean_col)\n mkt_cap_clean.to_csv(\"./docs/nse-equities-mkt-cap.csv\", index=False)\n\n\ndef nse_foreign_port_movt(foreign_port_data=data[3]):\n \"\"\"\n Foreign and domestic Transctions on the NSE\n Foreign portfolio investment report\n \"\"\"\n # Transformed foreign portfolio transaction data\n nse_foreign_portfolio = pd.read_excel(\n foreign_port_data,\n sheet_name=\"C.2\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3, 4, 5, 6, 7],\n dtype=\"object\",\n )\n foreign_port_clean = nse_foreign_portfolio.rename(columns=clean_col)\n foreign_port_clean.to_csv(\"./docs/foreign-portfolio.csv\", index=False)\n\n\ndef capital_import_by_invest(capt_imp_data=data[3]):\n \"\"\"\n Capital importation by type of investment (USD'millions)\n capital importation report\n \"\"\"\n # transformed capital importation data\n\n cap_imp_inv = pd.read_excel(\n capt_imp_data,\n sheet_name=\"C.3\",\n skiprows=[0, 1],\n usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n dtype=\"object\",\n )\n\n cap_imp_col = cap_imp_inv.rename(columns={\"Unnamed: 0\": \"date\"})\n cap_imp_clean = cap_imp_col.rename(columns=clean_col)\n cap_imp_clean.to_csv(\n \"./docs/capital-importation-investment.csv\", index=False\n ) # noqa\n\n\ndef pension_assetby_invest(pen_asset_data=data[3]):\n \"\"\"\n Pension Fund Asset by Investment classes (N'Billions)\n \"\"\"\n # trandformed pension Fund by investment data\n pension_asset_invest = pd.read_excel(\n pen_asset_data,\n sheet_name=\"C.5\",\n skiprows=[0, 1],\n usecols=[\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 13,\n 14,\n 15,\n 17,\n 18,\n 19,\n 20,\n 21,\n 22,\n 23,\n ],\n dtype=\"object\",\n )\n\n pen_asset_col = pension_asset_invest.rename(columns={\"Row Labels\": \"date\"})\n pen_asset_clean 
= pen_asset_col.rename(columns=clean_col)\n pen_asset_clean.to_csv(\"./docs/pension-investments.csv\", index=False)\n\n\n# the control\ndef main():\n company_financials()\n money_mkt_indicators()\n money_credit_stat()\n gross_domestic_prod()\n labour_force_stats()\n crude_oil_production()\n nigeria_top_traders()\n summary_foreign_trade()\n external_reserve()\n nse_allshare_index()\n equities_mkt_cap()\n nse_foreign_port_movt()\n capital_import_by_invest()\n pension_assetby_invest()\n print(\"Data refreshed!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ajakaiye33/data-pipeline","sub_path":"data_etl.py","file_name":"data_etl.py","file_ext":"py","file_size_in_byte":10533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"7520893892","text":"import yaml\nimport pkg_resources\n\n_RESOURCES_PATH = pkg_resources.resource_filename('langtoolkit', 'resources')\n_CONFIG_FILE = pkg_resources.resource_filename('langtoolkit', 'resources/config.yaml')\n_CONFIG = None\n\n\n\ndef set_config(config_file):\n global _CONFIG_FILE\n _CONFIG_FILE = config_file\n\n\ndef get_config():\n global _CONFIG\n \n if _CONFIG is None:\n with open(_CONFIG_FILE, 'r', encoding='utf-8') as config_data:\n _CONFIG = yaml.load(config_data, Loader=yaml.FullLoader)\n\n return _CONFIG\n\n\ndef get_resources_path():\n return _RESOURCES_PATH\n","repo_name":"maguelo/langtoolkit","sub_path":"src/langtoolkit/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1756748444","text":"from torch import nn\r\nfrom torch.optim import Adam\r\nfrom trunk.pyt_nasnet.dense_model import DenseModel\r\nfrom sklearn.metrics import accuracy_score\r\nimport numpy as np\r\n\r\n\r\nclass NetManager:\r\n def __init__(self, num_input,\r\n num_classes,\r\n learning_rate,\r\n train_loader,\r\n test_loader,\r\n device,\r\n bathc_size=100):\r\n\r\n self.num_input = num_input\r\n self.num_classes = num_classes\r\n self.learning_rate = learning_rate\r\n self.train_loader = train_loader\r\n self.test_loader = test_loader\r\n\r\n self.bathc_size = bathc_size\r\n self.device = device\r\n\r\n def get_reward(self, action):\r\n model = DenseModel(self.num_input, self.num_classes, action).to(self.device)\r\n print(model)\r\n loss_func = nn.CrossEntropyLoss()\r\n optimizer = Adam(model.parameters(), lr=self.learning_rate)\r\n model.train()\r\n all_mean_val_acc = []\r\n for epoch in range(30):\r\n for step, (tx, ty) in enumerate(self.train_loader):\r\n tx = tx.view(-1, self.num_input).to(self.device)\r\n optimizer.zero_grad()\r\n to = model(tx)\r\n loss = loss_func(to, ty.to(self.device))\r\n loss.backward()\r\n optimizer.step()\r\n model.eval()\r\n mean_val_acc = []\r\n for step, (tx, ty) in enumerate(self.test_loader):\r\n tx = tx.view(-1, self.num_input).to(self.device)\r\n to = model(tx)\r\n out = np.argmax(to.cpu().detach().numpy(), axis=1)\r\n val_acc = accuracy_score(ty.cpu().detach().numpy(), out)\r\n mean_val_acc.append(val_acc)\r\n all_mean_val_acc.append(mean_val_acc)\r\n return np.mean(all_mean_val_acc)\r\n","repo_name":"dgai91/nasnet-basic-model","sub_path":"pyt_nasnet/dense_net_manager.py","file_name":"dense_net_manager.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"} +{"seq_id":"42300823966","text":"# 使用tensorflow来解决MNIST手写体数字识别问题\nimport tensorflow as 
tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# MNIST数据集相关的常数\ninput_node = 784 # 输入层的节点数,图片的像素\noutput_node = 10 # 输出层的节点数,0-9的类别数\n\n# 配置神经网络的参数\nlayer_node = 500 # 隐藏层节点数\nbatch_size = 100 # 一个训练batch中数据个数,数字越小,越接近随机梯度下降,越大,越接近梯度下降\nlearning_rate_base = 0.8 # 基础的学习率\nlearning_rate_decay = 0.99 # 学习率的衰减率\nregularization_rate = 0.0001 # 描述模型复杂度的正则化项在损失函数中的系数\ntraining_steps = 30000 # 训练轮数\nmoving_average_decay = 0.99 # 滑动平均衰减率\n\n\n# 给定神经网络的输入与所有参数,计算神经网络的前向通达的传播结果\ndef inference(input_tensor, avg_class, weight1, biases1, weight2, biases2):\n if avg_class == None:\n layer1 = tf.nn.relu(tf.matmul(input_tensor, weight1) + biases1)\n return tf.matmul(layer1, weight2) + biases2\n else:\n layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weight1)) + avg_class.average(biases1))\n return tf.matmul(layer1, avg_class.average(weight2)) + avg_class.average(biases2)\n # 训练模型的过程\n\n\ndef train(mnist):\n x = tf.placeholder(tf.float32, [None, input_node], name='x-input')\n y_ = tf.placeholder(tf.float32, [None, output_node], name='y-input')\n\n # 生成隐藏层的参数\n weight1 = tf.Variable(tf.truncated_normal([input_node, layer_node], stddev=0.1))\n biases1 = tf.Variable(tf.constant(0.1, shape=[layer_node]))\n\n # 生成输出层的参数\n weight2 = tf.Variable(tf.truncated_normal([layer_node, output_node], stddev=0.1))\n biases2 = tf.Variable(tf.constant(0.1, shape=[output_node]))\n\n # 计算当前参数下神经网络前向通道的结果,不使用滑动平均值\n y = inference(x, None, weight1, biases1, weight2, biases2)\n # 代表训练轮数的变量指定为不可训练的参数\n global_step = tf.Variable(0, trainable=False)\n # 给定滑动平均衰减率与训练轮数的变量,初始化滑动平均类\n variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)\n # 在所有代表神经网络参数的变量上使用滑动平均\n variable_averages_op = variable_averages.apply(tf.trainable_variables())\n # 计算使用了滑动平均之后的前向传播结果\n average_y = inference(x, variable_averages, weight1, biases1, weight2, biases2)\n # 计算交叉熵作为刻画预测值与真实值之间差距的损失函数\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))\n # 计算在当前batch中所有样例的交叉熵平均值\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n\n # 计算L2正则化损失函数\n regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)\n # 计算模型的正则化损失\n regularization = regularizer(weight1) + regularizer(weight2)\n # 总损失\n loss = cross_entropy_mean + regularization\n # 设置指数衰减的学习率\n learning_rate = tf.train.exponential_decay(learning_rate_base, global_step,\n mnist.train.num_examples / batch_size,\n learning_rate_decay)\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n\n # 在训练神经网络模型时,每过一遍数据就要通过BP更新神经网络的参数以及每个参数的滑动平均值\n with tf.control_dependencies([train_step, variable_averages_op]):\n train_op = tf.no_op(name='train')\n\n # 检验使用了滑动平均模型的神经网络前向传播结果是否挣正确\n correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))\n # 计算模型的正确率\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # 初始化会话并开始训练过程\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n # 准备验证数据\n validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}\n # 准备测试数据\n test_feed = {x: mnist.test.images, y_: mnist.test.labels}\n # 迭代的训练神经网络\n for i in range(training_steps):\n if i % 1000 == 0:\n validate_acc = sess.run(accuracy, feed_dict=validate_feed)\n print(\"After %d training steps, validation accuracy using average model is %g\" % (i, validate_acc))\n # 产生这一轮使用的batch的训练数据,并运行训练过程\n xs, ys = mnist.train.next_batch(batch_size)\n sess.run(train_op, feed_dict={x: xs, y_: ys})\n\n # 
测试结束后,在测试数据上检测神经网络模型的最终正确率\n test_acc = sess.run(accuracy, feed_dict=test_feed)\n print('After %d training steps ,test accuracy using average model is %g' % (training_steps, test_acc))\n\n\n# 主程序入口\ndef main(argv=None):\n # 声明处理MNIST 数据集的类,这个类在初始化时会自动下载\n mnist = input_data.read_data_sets(\"../../../data/MNIST_data\", one_hot=True)\n train(mnist)\n\n\n# Tensorflow 提供的一个主程序入口,tf.app.run 会调用上面定义的main函数\nif __name__ == '__main__':\n tf.app.run()","repo_name":"cking0821/DLLearning","sub_path":"src/ck/CNN/cnn_mnist_test.py","file_name":"cnn_mnist_test.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"40213004672","text":"from util import *\nfrom emulator import Emulator\nfrom test import *\nimport os\nimport shutil\n\n\nclass SameBoy(Emulator):\n def __init__(self):\n super().__init__(\"SameBoy\", \"https://sameboy.github.io/\", startup_time=4.5, features=(PCM,))\n \n def setup(self):\n downloadGithubRelease(\"LIJI32/SameBoy\", \"downloads/sameboy.zip\")\n if extract(\"downloads/sameboy.zip\", \"emu/sameboy\"):\n os.unlink(\"emu/sameboy/cgb_boot.bin\")\n os.unlink(\"emu/sameboy/dmg_boot.bin\")\n os.unlink(\"emu/sameboy/sgb_boot.bin\")\n download(\"https://gbdev.gg8.se/files/roms/bootroms/cgb_boot.bin\", \"emu/sameboy/cgb_boot.bin\")\n download(\"https://gbdev.gg8.se/files/roms/bootroms/dmg_boot.bin\", \"emu/sameboy/dmg_boot.bin\")\n download(\"https://gbdev.gg8.se/files/roms/bootroms/sgb_boot.bin\", \"emu/sameboy/sgb_boot.bin\")\n setDPIScaling(\"emu/sameboy/sameboy.exe\")\n os.makedirs(os.path.join(os.environ[\"APPDATA\"], \"SameBoy\"), exist_ok=True)\n\n def startProcess(self, rom, *, model, required_features):\n if model == DMG:\n shutil.copyfile(os.path.join(os.path.dirname(__file__), \"sameboy.prefs.dmg.bin\"), os.path.join(os.environ[\"APPDATA\"], \"SameBoy\", \"prefs.bin\"))\n self.startup_time = 6.5\n elif model == CGB:\n shutil.copyfile(os.path.join(os.path.dirname(__file__), \"sameboy.prefs.gbc.bin\"), os.path.join(os.environ[\"APPDATA\"], \"SameBoy\", \"prefs.bin\"))\n self.startup_time = 3.5\n elif model == SGB:\n shutil.copyfile(os.path.join(os.path.dirname(__file__), \"sameboy.prefs.sgb.bin\"), os.path.join(os.environ[\"APPDATA\"], \"SameBoy\", \"prefs.bin\"))\n self.startup_time = 6.5\n else:\n return None\n return subprocess.Popen([\"emu/sameboy/sameboy.exe\", os.path.abspath(rom)], cwd=\"emu/sameboy\")\n","repo_name":"daid/GBEmulatorShootout","sub_path":"emulators/sameboy.py","file_name":"sameboy.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"39"} +{"seq_id":"71243993393","text":"import os\nimport subprocess\nimport sys\nfrom time import sleep\n\n\n\npath=r'./player'\nls=os.listdir(path)\ntotal_num=len(ls)\n\nvs_time=[]\nid_score=[]\nfor i in range(0,total_num):\n vs_time.append([])\n id_score.append(0)\n for j in range(0,total_num):\n vs_time[i].append(0)\n\n\n#print(ls)\n\nl=len(ls)\nfor i in range(0,l):\n for j in range(i+1,l):\n echo=' echo foo> /dev/null 2>&1'\n ls1='./player/'+ls[i]+echo\n ls2='./player/'+ls[j]+echo\n cm1='mkdir '+ls[i]+'_'+ls[j]\n subprocess.call(cm1,shell=True)\n print(ls1,ls2)\n for count in range(0,4):\n ser=subprocess.Popen('python3 main.py echo foo> /dev/null 2>&1',shell=True)\n sleep(2)\n if count<2:\n pl1=subprocess.Popen(ls1,shell=True)\n sleep(1)\n while True:\n try:\n with open('connection','r') as f:\n a=f.read()\n if a=='1':\n with 
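 # main.py signals that a player has connected by writing '1' to the shared\n # 'connection' file; poll for that flag, then reset it to '0' so the same\n # handshake can be reused before launching the opponent.\n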
 while True:\n try:\n with open('connection','r') as f:\n a=f.read()\n if a=='1':\n with open('connection','w') as f:\n f.write('0')\n break\n else:\n sleep(1)\n except :\n sleep(1)\n\n pl2=subprocess.Popen(ls2,shell=True)\n r='player0:'+ls[i]+' player1:'+ls[j]+' winner:'\n else:\n pl2=subprocess.Popen(ls2,shell=True)\n sleep(1)\n while True:\n try:\n with open('connection','r') as f:\n a=f.read()\n if a=='1':\n with open('connection','w') as f:\n f.write('0')\n break\n else:\n sleep(1)\n except :\n sleep(1)\n pl1=subprocess.Popen(ls1,shell=True)\n r='player0:'+ls[j]+' player1:'+ls[i]+' winner:'\n while ser.poll() is None:\n sleep(2)\n pl1.kill()\n pl2.kill()\n kl1='kill '+str(pl1.pid)\n kl2='kill '+str(pl2.pid)\n subprocess.call(kl1,shell=True)\n subprocess.call(kl2,shell=True)\n with open('this_path','r') as f:\n save_path=f.read()\n cm=\"mv \"+save_path+\" ./\"+ls[i]+'_'+ls[j]+'/'+save_path+'_'\n if count<2:\n cm=cm+ls[i]+'_'+ls[j]+'.zip'\n else:\n cm=cm+ls[j]+'_'+ls[i]+'.zip'\n subprocess.call(cm,shell=True)\n sleep(1)\n res=None\n with open('temp_result','r') as f:\n res=f.read()\n r=r+res+'\\n'\n res=int(res)\n with open('result','a') as f:\n f.write(r)\n if count==3:\n f.write('\\n')\n if count<2:\n vs_time[i][j]+=1\n else:\n vs_time[j][i]+=1\n if count<2:\n if res==0:\n id_score[i]+=2\n elif res==1:\n id_score[j]+=2\n else:\n id_score[j]+=1\n id_score[i]+=1\n else:\n if res==0:\n id_score[j]+=2\n elif res==1:\n id_score[i]+=2\n else:\n id_score[i]+=1\n id_score[j]+=1\nwith open('final_score','w') as f:\n for i in range(0,total_num):\n tes='team: '+ls[i]+' score:'+str(id_score[i])+'\\n'\n f.write(tes)\n\nwith open('total_com','w') as f:\n for i in range(0,total_num):\n f.write(ls[i])\n f.write(' ')\n f.write('\\n')\n for i in range(0,total_num):\n for j in range(0,total_num):\n f.write(str(vs_time[i][j]))\n f.write(' ')\n f.write('\\n')\n","repo_name":"eesast/teamstyle19new","sub_path":"ts19tattack.py","file_name":"ts19tattack.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"9196107625","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\ntf.enable_eager_execution()\n\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.keras.models import *\nfrom tensorflow.python.keras.layers import *\nfrom keras.optimizers import *\nfrom keras.losses import *\nfrom keras import backend as K\n\nIMG_SIZE = 160\nBATCH_SIZE = 64\nBUFFER_SIZE = 60000\nEPOCHS = 150\nNOISE_DIM = 100\nTRAINING_RATIO = 5 # WGAN loss parameters\nGRADIENT_PENALTY_WEIGHT = 10 # As per the paper\n\n#check if GPU is being used\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n\n''' Get training data '''\ntrain_df = pd.read_csv('../train_input.csv')\ntraining_imgs = np.load('../train_output.npz')\nseq_lengths = train_df['length'].values\n\nregions = []\nfor i in range(len(train_df)):\n for j in range(seq_lengths[i] // IMG_SIZE):\n regions.append(training_imgs['arr_'+str(i)][j*IMG_SIZE:(j+1)*IMG_SIZE, j*IMG_SIZE:(j+1)*IMG_SIZE])\nregions = np.array(regions)\nregions = np.tanh(regions.reshape((-1, IMG_SIZE, IMG_SIZE, 1))).astype(np.float32)\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices(regions).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n\n\n# In[2]:\n\n\n''' Models '''\ndef get_generator(img_size):\n in_size = img_size // (2**4) # integer size; the four Conv2DTranspose layers below upsample by 2**4\n\n z = Input(shape = (NOISE_DIM, ))\n x = Dense(256 * in_size * in_size, use_bias=False)(z)\n x = ReLU()(x)\n x = BatchNormalization()(x)\n x = Dropout(0.1)(x)\n x = Reshape([in_size, in_size, 256])(x)\n\n x = Conv2DTranspose(128, (4, 4), strides=(2, 2), use_bias=False, padding='same')(x)\n x = ReLU()(x)\n x = BatchNormalization()(x)\n x = Dropout(0.1)(x)\n\n x = Conv2DTranspose(64, (4, 4), strides=(2, 2), use_bias=False, padding='same')(x)\n x = ReLU()(x)\n x = BatchNormalization()(x)\n x = Dropout(0.1)(x)\n\n x = Conv2DTranspose(32, (4, 4), strides=(2, 2), use_bias=False, padding='same')(x)\n x = ReLU()(x)\n x = BatchNormalization()(x)\n x = Dropout(0.1)(x)\n\n x = Conv2DTranspose(1, (4, 4), strides=(2, 2), use_bias=False, padding='same')(x)\n y = Activation('tanh')(x) # normalize output into [-1, 1]\n\n return Model(z, y)\n\ndef get_discriminator(img_size):\n img = Input(shape = (img_size, img_size, 1, ))\n\n x = Conv2D(32, (4, 4), strides=(2, 2), padding='same')(img)\n x = LeakyReLU(0.2)(x)\n\n x = Conv2D(64, (4, 4), strides=(2, 2), padding='same')(x)\n x = LeakyReLU(0.2)(x)\n x = BatchNormalization()(x)\n x = Dropout(0.1)(x)\n\n x = Conv2D(128, (4, 4), strides=(2, 2), padding='same')(x)\n x = LeakyReLU(0.2)(x)\n x = BatchNormalization()(x)\n x = Dropout(0.1)(x)\n\n x = Flatten()(x)\n y = Dense(1, use_bias=False)(x)\n\n return Model(img, y)\n\ngenerator_model = get_generator(IMG_SIZE)\ngenerator_model.summary()\n\ndiscriminator_model = get_discriminator(IMG_SIZE)\ndiscriminator_model.summary()\n\n''' Losses '''\ndef wasserstein_loss(y, y_):\n return K.mean(y * y_)\n\ndef gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):\n gradients = K.gradients(y_pred, averaged_samples)[0]\n gradients_sqr = K.square(gradients)\n gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))\n gradient_l2_norm = K.sqrt(gradients_sqr_sum)\n # penalize deviation of the per-sample gradient norm from 1\n gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)\n return K.mean(gradient_penalty)\n\n\ngen_optim = tf.train.AdamOptimizer( 0.0002, beta1=0.5, beta2=0.9)\ndis_optim = tf.train.AdamOptimizer( 0.0002, beta1=0.5, beta2=0.9)\n\ngenerator_model.compile(optimizer=gen_optim, loss=wasserstein_loss)\ndiscriminator_model.compile(optimizer=dis_optim, loss=wasserstein_loss)\n\n\n'''\nSetup checkpoints in case of failure\nProvide function for storing generated images\n'''\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n\ncheckpoint = tf.train.Checkpoint(generator_optimizer=gen_optim,\n discriminator_optimizer=dis_optim,\n generator=generator_model,\n discriminator=discriminator_model)\n\nnum_examples_to_generate = 16\nrandom_vector_for_generation = tf.random_normal([num_examples_to_generate, NOISE_DIM])\ndef generate_and_save_images(model, epoch, test_input):\n predictions = model(test_input, training=False)\n fig = plt.figure(figsize=(4,4))\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i+1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5)\n plt.axis('off')\n plt.savefig('./gan_images/image_at_epoch_{:04d}.png'.format(epoch))\n plt.close()\n\n''' Training\n\n This training was written back when the vanilla losses were still in this\n file. This is no longer the case. Essentially this needs to be updated to\n use train_on_batches. Models need to be properly connected and WGAN loss\n used to properly update.\n See improved WGAN paper:\n https://arxiv.org/pdf/1704.00028.pdf\n See a Keras implementation of improved WGAN loss:\n https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py\n'''\n\n\n# In[37]:\n\n\ndef make_trainable(net, val):\n net.trainable = val\n for l in net.layers:\n l.trainable = val\n\ndef train(dataset, epochs, noise_dim):\n positive_y = np.ones((BATCH_SIZE, 1), dtype=np.float32)\n negative_y = -positive_y\n dummy_y = np.zeros((BATCH_SIZE, 1), dtype=np.float32)\n\n for epoch in range(epochs):\n start = time.time()\n\n print(\"Epoch: \", epoch)\n print(\"Number of batches: \", int(dataset.shape[0] // BATCH_SIZE))\n print(\"Dataset.shape[0]: \", dataset.shape[0])\n\n discriminator_loss = []\n generator_loss = []\n\n minibatches_size = BATCH_SIZE * TRAINING_RATIO\n\n for i in range(int(dataset.shape[0]) // (BATCH_SIZE * TRAINING_RATIO)):\n discriminator_minibatches = dataset[i * minibatches_size:(i + 1) * minibatches_size]\n print(\"discriminator_minibatches.shape: \", discriminator_minibatches.shape)\n for j in range(TRAINING_RATIO):\n ## Generator Images ##\n image_batch = discriminator_minibatches[j * BATCH_SIZE : (j+1) * BATCH_SIZE]\n noise_gen = np.random.uniform(0,1,size=[BATCH_SIZE,100])\n generated_images = generator_model.predict(noise_gen)\n\n X = np.concatenate((image_batch, generated_images))\n #print(\"X.shape: \", X.shape)\n y = np.zeros([2*BATCH_SIZE,2])\n y[0:BATCH_SIZE,1] = 1\n y[BATCH_SIZE:,0] = 0\n\n ## Train Discriminator ##\n make_trainable(discriminator_model,True)\n discriminator_loss.append(discriminator_model.train_on_batch(X , y))\n\n noise_tr = np.random.uniform(0,1,size=[BATCH_SIZE,100])\n y2 = np.ones([BATCH_SIZE,160,160,1])\n y2[:,1] = 1\n #print(y2)\n\n ## Train Generator ##\n make_trainable(discriminator_model,False) # freeze the discriminator while updating the generator\n generator_loss.append(generator_model.train_on_batch(noise_tr, y2))\n\n generate_and_save_images(generator_model, epoch + 1, random_vector_for_generation)\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix = checkpoint_prefix)\n\n print('Time taken for epoch {} is {} sec'.format(epoch + 1, time.time()-start))\n generator_model.save('gen.h5')\n discriminator_model.save('dis.h5')\n\ntrain(regions, EPOCHS, NOISE_DIM)\n","repo_name":"denizulcay/Projects","sub_path":"Protein_Tertiary_Structure_Prediction/src/dcgan2.0.py","file_name":"dcgan2.0.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"15174842443","text":"from bestchannel import find_best_channel\nimport re, sys\ndef set_hostapd_channel(channel, configfile):\n new_config = \"\"\n reChannel = r'^channel=\\\d+'\n with open(configfile, \"r\") as roconfig:\n for line in roconfig:\n if re.match(reChannel, line):\n new_config += \"channel=\"+str(channel)+\"\\\n\"\n else:\n new_config += line\n with open(configfile, \"w\") as writeconfig:\n writeconfig.write(new_config)\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 4:\n print(\"usage : \" + sys.argv[0] + \" [interface] [max_rssi] [max_bssid_per_chan]\")\n sys.exit(1)\n interface = sys.argv[1]\n max_rssi = float(sys.argv[2])\n max_bssid_per_chan = int(sys.argv[3])\n\n
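 # Example invocation (the interface name and thresholds are illustrative only):\n #   sudo python3 hostapd_autochannel.py wlan0 -65 3\n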
\"/etc/hostapd/hostapd.conf\")\n","repo_name":"furest/tbWebgui","sub_path":"installers/hostapd_autochannel.py","file_name":"hostapd_autochannel.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5968118293","text":"import numpy as np\nimport matplotlib.pyplot as plt\nalpha = 1\n#numpy.linspace 函数用于创建一个一维数组,数组是一个等差数列构成的,格式如下:\n#np.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)\n#start    序列的起始值\n#stop    序列的终止值,如果endpoint为true,该值包含于数列中\n#num    要生成的等步长的样本数量,默认为50\n#endpoint    该值为 ture 时,数列中中包含stop值,反之不包含,默认是True。\n#retstep    如果为 True 时,生成的数组中会显示间距,反之不显示。\n#dtype    ndarray 的数据类型\ntheta = np.linspace(0,2*np.pi,num=500)\nx = alpha * np.sqrt(2) * np.cos(theta) / (np.sin(theta)**2+1)\ny = alpha * np.sqrt(2) * np.cos(theta) * np.sin(theta)/(np.sin(theta)**2+1)\nplt.title(r\"$\\rho^{2}=a^{2}\\cos 2\\theta\\quad a=1$\")\nplt.plot(x,y)\nplt.grid()\nplt.show()\n","repo_name":"kong-ling/scripts","sub_path":"python/algo/shuangniuxian.py","file_name":"shuangniuxian.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"10382182425","text":"\"\"\" Copy rights 2019, Mahmoud Mansour\"\"\"\n\nfrom network import LTE\nimport time\n\nclass Connection:\n\n def __init__(self):\n self.lte = LTE()\n\n def nb_connect (self, band=20, apn=\"nb.inetd.gdsp\"):\n counter1 = 0\n counter2 = 0\n\n if not self.lte.isattached():\n print(\"Attaching to LTE...\")\n self.lte.attach(band=band, apn=apn)\n while not self.lte.isattached():\n counter1 += 1\n print(str(counter1) + ' seconds elapsed')\n if counter1 >= 50 :\n import machine\n machine.reset()\n time.sleep(1)\n\n if not self.lte.isconnected():\n print(\"Obtaining IP address...\")\n self.lte.connect()\n while not self.lte.isconnected():\n counter2 += 1\n print(str(counter2) + ' seconds elapsed')\n time.sleep(0.25)\n\n print(\"Network ready ...\")\n\n def nb_disconnect(self):\n if self.lte.isconnected():\n self.lte.disconnect()\n while self.lte.isattached():\n try:\n self.lte.dettach()\n except OSError as e:\n print(e, type(e))\n else:\n print(\"Network is now disconnected\")\n\n\n ","repo_name":"mansour1991/socks5_micropython","sub_path":"lib/connect_nb.py","file_name":"connect_nb.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"30996526676","text":"#2. 
","repo_name":"mansour1991/socks5_micropython","sub_path":"lib/connect_nb.py","file_name":"connect_nb.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"30996526676","text":"#2. program which accepts the radius of a circle from the user and computes the area.\r\n\r\ndef f(num):\r\n    appx_pi=22/7\r\n    area= appx_pi*num**2\r\n    print(\"{} * {}^2 = \".format(appx_pi,num),end=\"\")\r\n    print(\"{0:.3f}\".format(area))\r\n\r\nradius=int(input(\"Enter the radius: \"))\r\n\r\nf(radius)\r\n","repo_name":"shreya643/python_assignment_dec1","sub_path":"area_of_the _circle.py","file_name":"area_of_the _circle.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"7314702637","text":"from pmkoalas.conformance.dataaware import compute_guard_precision,compute_guard_recall\nfrom pmkoalas.models.petrinet import parse_pnml_for_dpn\nfrom pmkoalas.read import read_xes_complex\n\nfrom os.path import join\n\nWORKING_DIR = join(\".\", \"paper example\")\n\nEXAMPLE_MODELS = [\n    join(WORKING_DIR, f\"paper_example_dpn_{letter}.pnml\")\n    for letter\n    in [\"a\",\"b\",\"c\"]\n]\nEXAMPLE_LOG = join(WORKING_DIR, \"paper_example_log.xes\")\n\ndef compute_measurements():\n    log = read_xes_complex(EXAMPLE_LOG)\n    for model,label in zip(EXAMPLE_MODELS, [\"a\", \"b\", \"c\"]):\n        recall = compute_guard_recall(log, parse_pnml_for_dpn(model))\n        prec = compute_guard_precision(log, parse_pnml_for_dpn(model))\n        print(f\"computed measurements for example model {label} are : grec - {recall:.3f} , gprec {prec:.3f}\")\n\nif __name__ == \"__main__\":\n    compute_measurements()","repo_name":"AdamBanham/data-aware-propositions","sub_path":"paper example/proposal_run.py","file_name":"proposal_run.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"37220874547","text":"from setuptools import setup\n\npackage_name = 'heading'\n\nsetup(\n    name=package_name,\n    version='0.0.0',\n    packages=[package_name],\n    data_files=[\n        ('share/ament_index/resource_index/packages',\n            ['resource/' + package_name]),\n        ('share/' + package_name, ['package.xml']),\n    ],\n    install_requires=['setuptools'],\n    zip_safe=True,\n    maintainer='autonav',\n    maintainer_email='rljudy4981@icloud.com',\n    description='TODO: Package description',\n    license='TODO: License declaration',\n    tests_require=['pytest'],\n    entry_points={\n        'console_scripts': [\n            \"fusion=heading.fusion:main\",\n            \"teensy=heading.teensy:main\",\n            \"gps_publisher=heading.gps_reader:main\",\n        ],\n    },\n)\n","repo_name":"rjudy1/autonav","sub_path":"ros2_ws/src/driving/heading/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"42823385902","text":"#========================================================================================\n# TOPIC: PYTHON - XML DOM Programming\n#========================================================================================\n# NOTES: * PYTHON provides mechanisms to parse and examine XML files\n#        * This program demonstrates DOM parsing of the test XML, in PYTHON\n#        * The Document Object Model (DOM) is very useful to randomly\n#          access elements of an XML.\n#        * this program uses xml dom minidom\n#        * First operation is to parse the XML document\n#        * Each XML is made up of elements, its values (optional) and attributes\n#          (optional). The trick is to get an XML element using getElementsByTagName,\n#          within that, extract the element value and attribute as needed\n# 
\n#========================================================================================\n#\n# FILE-NAME : 033_python_xml_dom.py\n# DEPENDANT-FILES : These are the files and libraries needed to run this program ;\n# text.xml (This is included in the comments in the program\n#\n# AUTHOR : tinitiate.com / Venkata Bhattaram\n# (c) 2014\n#\n# DESC : PYTHON XML Programming\n#\n#========================================================================================\n\n###########\n# text.xml\n###########\n# Save the following in c:\\tinitiate\\text.xml file\n# -- test.xml START --\n#\n# \n# JAVA\n# UNIX\n# PERL\n# PYTHON\n# \n# Python XML Parsing \n# Python File writing \n#\n# -- test.xml END --\n\n# IMPORT the XML.DOM minidom module\nfrom xml.dom import minidom\n\n\n# Create a DOM object by reading the XML\nxmldoc = minidom.parse('c:\\\\tinitiate\\\\test.xml')\n\n\n# Extract the TINITIATE element and its children from the xmldoc\ntinitiate_elements = xmldoc.getElementsByTagName(\"tinitiate\")\n\n\n# Since we know the tinitiate element has children we use the loop\nfor tinitiate_element in tinitiate_elements:\n # For every tinitiate element, extract the next clind node, i.e, training\n training_elements = tinitiate_element.getElementsByTagName(\"training\")\n\n\n # Get Values of the training_elements list\n for training_element in training_elements:\n # For every course element, extract the next clind node, i.e, COURSE\n course_elements = training_element.getElementsByTagName(\"course\")\n\n\n # Get Values of the course_elements list\n for course_element in course_elements:\n # For every course element, print the nodeValue,\n # NOTE: There is only ONE child Node at the lowest level so use index ZERO\n print(course_element.childNodes[0].nodeValue)\n\n\n # READING Attributes, in CODE element which is at the same level\n # as the training_element\n code_elements = xmldoc.getElementsByTagName(\"code\")\n\n print(\"Total Count of 'CODE' elements in XML: \",len(code_elements))\n\n # Loop through the code_elements list\n for code_element in code_elements:\n\n # print the attributes of every code_element\n print(code_element.attributes[\"id\"].value)\n # print the NodeValue of every code_element\n print(code_element.childNodes[0].nodeValue)\n\n\n#========================================================================================\n# END OF CODE\n#========================================================================================\n#TAGS: PYTHON - XML parsing python DOM SAX parsing tutorial\n#\n#========================================================================================\n","repo_name":"YaswanthPonnam/python-1","sub_path":"advanced/033_python_xml_dom.py","file_name":"033_python_xml_dom.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3077812914","text":"import asyncio\nimport functools\nimport logging\nimport typing as t\n\nfrom aiohttp import web\nfrom odss.http.common import Request, Response, RouteInfo, HttpError\n\nlogger = logging.getLogger(__name__)\n\n\nHandler = t.Callable[[Request], t.Awaitable[Response]]\n\nRequestHandler = t.Callable[[Handler, Request], t.Awaitable[Response]]\n\n\nasync def route_handler(handler: Handler, request: Request) -> web.Response:\n response = handler(request)\n if asyncio.iscoroutine(response):\n response = await response\n return response\n\n\nclass ServerEngineFactory:\n def create(self, request_handler: RequestHandler, host: str, port: 
int):\n return ServerEngine(request_handler, host, port)\n\n\nclass Application(web.Application):\n def __init__(self, request_handler: RequestHandler) -> None:\n super().__init__(middlewares=[])\n self.request_handler = request_handler\n\n async def _handle(self, request: Request) -> web.StreamResponse:\n match_info = await self._router.resolve(request)\n request._match_info = match_info\n try:\n response = await self.request_handler(match_info.handler, request)\n response.finish()\n return web.Response(\n body=response.body,\n status=response.code,\n content_type=response.content_type,\n charset=response.charset,\n headers=response.headers,\n )\n except HttpError as ex:\n return web.json_response(\n ex.to_json(),\n status=ex.code,\n reason=ex.status,\n content_type=ex.content_type,\n # charset=ex.charset,\n headers=ex.headers,\n )\n\n\nclass ServerEngine:\n def __init__(\n self, request_handler: RequestHandler, host: str = \"0.0.0.0\", port: int = 8765\n ):\n self.request_handler = request_handler\n self.host = host\n self.port = port\n\n async def open(self):\n self.app = Application(self.request_handler)\n self.app._router.freeze = lambda: None # remove freeze\n self.runner = web.AppRunner(self.app)\n await self.runner.setup()\n self.site = web.TCPSite(self.runner, self.host, self.port, ssl_context=None)\n await self.site.start()\n\n async def close(self):\n logger.info(\"Stop http server http://%s:%d\", self.host, self.port)\n await self.site.stop()\n await self.runner.shutdown()\n self.site = None\n self.runner = None\n self.app = None\n\n def add_route(\n self,\n route_info: RouteInfo,\n ) -> t.Callable[[], None]:\n logger.info(\n \"Add route: %s %s (name=%s)\",\n route_info.method,\n route_info.path,\n route_info.name,\n )\n handler = functools.partial(route_handler, route_info.handler)\n app_route = self.app.router.add_route(\n route_info.method, route_info.path, handler, name=route_info.name\n )\n\n def unregister_route(app_route):\n logger.info(\n \"Remove route: %s %s (name=%s)\",\n route_info.method,\n route_info.path,\n route_info.name,\n )\n resource = app_route.resource\n resource._routes.remove(app_route)\n if not resource._routes and self.app:\n router = self.app.router\n router._resources.remove(resource)\n if resource.name:\n del router._named_resources[resource.name]\n\n return functools.partial(unregister_route, app_route)\n","repo_name":"odss/py-odss","sub_path":"odss.http.core/src/odss/http/core/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74337120754","text":"from collections import Counter\n\nclass Solution:\n def partitionLabels(self, s: str) -> List[int]:\n counted = Counter(s)\n \n temp = {s[0]:counted[s[0]]}\n ans = []\n count = 0\n \n for i in s:\n # print(i,counted[i], temp, \"top\", ans)\n if len(temp) == 0:\n ans.append(count)\n count = 0\n if i in temp:\n temp[i] -= 1\n if i not in temp and counted[i] > 1:\n temp[i] = counted[i] - 1\n count += 1\n # print(temp, i, counted[i], \"mid\")\n if i in temp and temp[i] == 0:\n del temp[i]\n # print(ans)\n if count > 0:\n ans.append(count)\n return ans","repo_name":"Stargazing-11/A2SV","sub_path":"Oct-Week1/Partition Labels -- Leetcode -- 763.py","file_name":"Partition Labels -- Leetcode -- 763.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"40622809948","text":"# link 
to the problem - https://leetcode.com/problems/valid-sudoku/\n\n# Determine if a 9 x 9 Sudoku board is valid. Only the filled cells need to be validated according to the following rules:\n# Each row must contain the digits 1-9 without repetition.\n# Each column must contain the digits 1-9 without repetition.\n# Each of the nine 3 x 3 sub-boxes of the grid must contain the digits 1-9 without repetition.\n# Note:\n# A Sudoku board (partially filled) could be valid but is not necessarily solvable.\n# Only the filled cells need to be validated according to the mentioned rules.\n\n# link to submission - https://leetcode.com/submissions/detail/765670269/\n\nclass Solution:\n    def isValidSudoku(self, board: List[List[str]]) -> bool:\n        for i in range(9):\n            row = \"\"\n            col = \"\"\n            box = \"\"\n            for j in range(9):\n\n                if board[i][j] != \".\":\n                    if board[i][j] not in row:\n                        row += board[i][j]\n                    else:\n                        return False\n\n                if board[j][i] != \".\":\n                    if board[j][i] not in col:\n                        col += board[j][i]\n                    else:\n                        return False\n\n                bi = (i//3)*3 + j//3\n                bj = (i%3)*3 + j%3\n                if board[bi][bj] != \".\":\n                    if board[bi][bj] not in box:\n                        box += board[bi][bj]\n                    else:\n                        return False\n        return True","repo_name":"boomhaa/leetcode","sub_path":"Medium/36 - ValidSudoku.py","file_name":"36 - ValidSudoku.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"24695258009","text":"from django.shortcuts import render, HttpResponseRedirect, redirect, HttpResponse\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom .forms import ContactForm, SignupForm  # SignupForm is used below; assumed to live in .forms (its import was missing)\nfrom dashboard.models import UserReview\nfrom .models import Contact\n\n# Create your views here.\n\ndef home(request):\n    review_object = UserReview.objects.all()\n    if request.method == \"POST\":\n        form = ContactForm(request.POST)\n        if form.is_valid():\n            form.save()\n            messages.success(request, \"Your message has been sent\")\n            return redirect(\"/#contact\")\n\n    else:\n        form = ContactForm()\n\n    context = {\"form\":form, \"review_object\":review_object}\n    return render(request, \"home/index.html\", context)\n\ndef signup_user(request):\n    if not request.user.is_authenticated:\n        if request.method == \"POST\":\n            forms = SignupForm(request.POST)\n            if forms.is_valid():\n                forms.save()\n                return redirect(\"login\")\n        else:\n            forms = SignupForm()\n\n        context = {\"forms\":forms}\n        return render(request, \"home/signup.html\", context)\n\n    else:\n        return redirect(\"login\")\n\ndef login_user(request):\n    if not request.user.is_authenticated:\n        if request.method == \"POST\":\n            forms = AuthenticationForm(request=request, data=request.POST)\n            if forms.is_valid():\n                login_uname = forms.cleaned_data[\"username\"]\n                login_upass = forms.cleaned_data[\"password\"]\n                user = authenticate(username = login_uname, password = login_upass)\n\n                if user is not None:\n                    login(request, user)\n                    return redirect(\"home\")\n        else:\n            forms = AuthenticationForm()\n\n        context = {\"forms\":forms}\n        return render(request, \"home/login.html\", context)\n    else:\n        return redirect(\"home\")\n\ndef logout_user(request):\n    if request.user.is_authenticated:\n        logout(request)\n    return redirect(\"login\")","repo_name":"aryandev17/osmsProject","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"13983051303","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"MGE 
modelling module\n\nThis module includes the main MGE class inheriting from the BaseModel\nFor questions, please contact Eric Emsellem at eric.emsellem@eso.org\n\nThis module requires astropy - Raises an Exception if not available\n\"\"\"\n\n__authors__ = \"Eric Emsellem\"\n__copyright__ = \"(c) 2015, ESO + CRAL\"\n__license__ = \"3-clause BSD License\"\n__contact__ = \" \"\n\n# Importing Modules\nimport numpy as np\n\ntry:\n import astropy as apy\n from astropy.modeling import models as astropy_models\n from astropy.modeling import Parameter, Model, Fittable1DModel\n from astropy import constants as constants, units as units\nexcept ImportError:\n raise Exception(\"astropy is required for this module\")\n\n__version__ = '0.0.1 (14 August 2014)'\n\nclass MGEModel(BaseModel) :\n \"\"\" MGE model \n\n This class defines the basic MGE model, which should include both\n a reference 2D Base model made of n_gaussians Gaussians, and the\n associated 3D Gaussians, using the viewing Euler Angles\n \"\"\"\n\n def __init__(self, **kwargs) :\n \"\"\"Initialise the MGE model\n \"\"\"\n\n # General verbose parameter for the MGE model\n self.verbose = kwargs.get(\"verbose\", False)\n\n # Truncation Method = Default is Ellipsoid, (can also be Cylindre)\n self.truncation_method = kwargs.get(\"truncation_method\", \"Ellipsoid\") \n # Default Truncation radius in parsec\n self.mcut = kwargs.get(\"mcut\", 50000.)\n\n # Initial value for the Gravitational Constant\n # G is in (km/s)2. Msun-1 . pc .\n self.GGRAV = constants.G.to(units.km**2 * units.pc \n / units.s**2 / units.M_sun).value\n # Distance of the model in parsec\n self.distance = kwargs.get(\"distance\", 1.0e6)\n\n # Viewing angles in degrees\n # Second value is inclination for axisymmetric systems\n # 90 degrees inclination means edge-on view\n self.euler_angles = kwargs.get(\"euler_angles\", np.array([0., 90., 0.]))\n\n # Input Parameters\n self.n_gaussians = np.int(kwargs.get(\"n_Gaussians\", 1))\n\n # Now setting the 2D / 3D Gaussians\n BaseModel.__init__(self, **kwargs)\n\n # =================================================\n # Distance is a property which defines the scale\n # including the parsec per arsec scale\n # =================================================\n @property\n def distance(self) :\n return self._distance\n\n @distance.setter\n def distance(self, value) :\n if value is None :\n if self.verbose : print(\"WARNING: setting default\" \n \"Distance to 10.0 Mpc (10^6 pc)\")\n # Setting the default in case the Distance is None\n value = 1.0e6 \n elif value <= 0. :\n if self.verbose:\n print(\"WARNING: you provided a negative Distance value\")\n print(\"WARNING: it will be set to the default (10 Mpc)\")\n # Setting the default in case the Distance is negative\n value = 1.0e6 \n\n # Deriving the scale conversion factor between pc and arcsec\n self._pc_per_arcsec = np.float32(np.pi * value / 648000.0)\n # Default truncation - in arcseconds at 10 Mpc\n self._mcutarc = self.mcut / self._pc_per_arcsec \n\n # Gravitation constant is in (km/s)2. Msun-1 . pc\n # We need to include arcseconds in there to deal \n # with MGE models (which depends on distance)\n # We multiply it by pc . arcsec-1\n # so the unit becomes: (km/s)2. Msun-1 . pc2 . arcsec-1\n self._GGRAV_arc = self.GGRAV * self._pc_per_arcsec\n # We now calculate 4 * PI * G in units of Garc\n self._PIG = 4. 
* np.pi * self._GGRAV_arc\n\n self._distance = value\n # --------------------------------------------------\n\n","repo_name":"emsellem/pygme2","sub_path":"pygme2/mge.py","file_name":"mge.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"10150767105","text":"# -*- coding: utf-8 -*-\n\n# Import standard library\nfrom collections import namedtuple\n\n# Import modules\nimport pytest\nfrom pandas import DataFrame\nfrom sqlalchemy.sql.expression import ClauseElement\n\nSpellDB = namedtuple(\"SpellDB\", [\"spell\", \"dburl\"])\n\n\nclass BaseTestSpell:\n \"\"\"Base Test class for all spell implementations\"\"\"\n\n @pytest.fixture\n def spelldb(self):\n \"\"\"Return an instance of SpellDB\n\n A spelldb is simply a namedtuple with fields \"spell\" and \"dburl.\" The\n first parameter consists of an initialized instance of the\n :code:`Spell`, whereas the second one is the :code:`dburl` from which\n the spell will be made.\n \"\"\"\n raise NotImplementedError\n\n @pytest.mark.usefixtures(\"spelldb\", \"sample_points\")\n def test_query_return_type(self, spelldb, sample_points):\n \"\"\"Test if query() returns the correct type\"\"\"\n\n core = spelldb.spell.get_core(spelldb.dburl)\n engine = core.get_engine()\n\n source_table, target_table = core.get_tables(\n source_uri=spelldb.spell.source_table,\n target=sample_points,\n engine=engine,\n )\n # Perform the test\n query = spelldb.spell.query(\n source=source_table,\n target=target_table,\n core=core,\n column=\"WKT\",\n pkey=\"__index_level_0__\",\n )\n assert isinstance(query, ClauseElement)\n\n @pytest.mark.usefixtures(\"spelldb\", \"sample_points\")\n def test_cast_return_type(self, spelldb, sample_points):\n \"\"\"Test if cast() returns the correct type\"\"\"\n results = spelldb.spell.cast(target=sample_points, dburl=spelldb.dburl)\n assert isinstance(results, DataFrame)\n\n @pytest.mark.usefixtures(\"spelldb\", \"sample_points\")\n def test_cast_return_not_empty(self, spelldb, sample_points):\n \"\"\"Test if cast() returns a set of values. 
All our test cases should not be empty\"\"\"\n        results = spelldb.spell.cast(target=sample_points, dburl=spelldb.dburl)\n        assert results.values.size != 0\n\n    @pytest.mark.usefixtures(\"spelldb\")\n    @pytest.mark.parametrize(\"on\", [\"fclass:embassy\", \"embassy\"])\n    def test_extract_columns_return_values(self, on, spelldb):\n        \"\"\"Test if extract_columns() returns a tuple (source_column, source_filter)\"\"\"\n        source_column, source_filter = spelldb.spell.extract_columns(on)\n        assert source_column == \"fclass\"\n        assert source_filter == \"embassy\"\n","repo_name":"thinkingmachines/geomancer","sub_path":"tests/spells/base_test_spell.py","file_name":"base_test_spell.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":211,"dataset":"github-code","pt":"39"}
+{"seq_id":"28559884168","text":"#!/usr/bin/python3\n\"\"\"\nSearches and updates a file at specific sections\n\"\"\"\n\n\ndef check_same(haystack, needle):\n    \"\"\"\n    checks for the same\n    \"\"\"\n    for y in range(len(needle)):\n        if y == len(haystack):\n            return False\n        if haystack[y] != needle[y]:\n            return False\n    return True\n\n\ndef find_str(system, component):\n    \"\"\"\n    the parent same checker\n    \"\"\"\n    idx = 0\n    c_p = ''\n    for char in system:\n        if char == component[0]:\n            c_p = system[idx:]\n            if check_same(c_p, component):\n                return True\n        idx += 1\n    return False\n\n\ndef append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n    \"\"\"\n    inserts new_string after every line that contains search_string\n    \"\"\"\n    with open(filename, 'r') as f:\n        lines = f.readlines()\n    with open(filename, 'w') as f:\n        for line in lines:\n            f.write(line)\n            if find_str(line, search_string):\n                f.write(new_string)\n\n\n# append_after(\"README.md\", 'ALXSE', \"Authority Place\")\n# test_str = \"Yoho pp pythod wpd ooood Pythonx x\"\n# search = \"Pythonx \"\n\n# if find_str(test_str, search):\n#     print('The string was found!')\n# else:\n#     print('No such metadata')\n","repo_name":"Dadaauth/alx-higher_level_programming","sub_path":"0x0B-python-input_output/100-append_after.py","file_name":"100-append_after.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"20590110948","text":"import time\nfrom dual_g2_hpmd_rpi import MAX_SPEED\nfrom RPi import GPIO\n\nencoder_clk_1 = 17\nencoder_data_1 = 18\nbutton = 27\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(encoder_clk_1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(encoder_data_1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\nval_1 = MAX_SPEED / 2 # start at 50% speed\n\nstep_size = MAX_SPEED / 30 # 16 speed per step\n\nclkLastState_1 = GPIO.input(encoder_clk_1)\n\nbutton_pressed = False\n\ntry:\n    while not button_pressed:\n        clkState_1 = GPIO.input(encoder_clk_1)\n        dtState_1 = GPIO.input(encoder_data_1)\n\n        if clkState_1 != clkLastState_1:\n            if dtState_1 != clkState_1:\n                val_1 = val_1 + step_size\n            else:\n                val_1 = val_1 - step_size\n\n            print('Motors: ' + str(val_1))\n\n            if val_1 >= MAX_SPEED:\n                val_1 = MAX_SPEED\n            if val_1 <= 0:\n                val_1 = 0\n\n            time.sleep(0.002)\n\n        clkLastState_1 = clkState_1\n        button_pressed = not GPIO.input(button)\n\n        time.sleep(0.01)\n\n    else:\n        # Stop the motors slowly.\n        while val_1 > 0:\n            val_1 = val_1 - step_size\n\n            if val_1 < 0:\n                val_1 = 0\n            print('Motors: ' + str(val_1))\n            time.sleep(0.3)\n\nfinally:\n    GPIO.cleanup()\n","repo_name":"TechXTT/Pitching_Machine","sub_path":"push_button_test.py","file_name":"push_button_test.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"39926914952","text":"from django.core.exceptions import ValidationError\nfrom django.forms.formsets import (BaseFormSet,\n        ORDERING_FIELD_NAME, DELETION_FIELD_NAME)\nfrom django.utils.functional import cached_property\nfrom collections import OrderedDict\n\nfrom .forms import MergingProxyForm\n\n##############################################################################\n\nclass InvalidFormsetsError(ValueError):\n    pass\n\n##############################################################################\n\nclass SubFormSetsBuildMixin(BaseFormSet):\n    formset_classes = OrderedDict()\n\n    @cached_property\n    def formsets(self):\n        return OrderedDict((name, self._construct_formset(name))\n                           for name in self.formset_classes.keys())\n\n    def _construct_formset(self, name, **kwargs):\n        klass = self.formset_classes[name]\n        defaults = {\n            'auto_id': self.auto_id,\n            'prefix': self.add_prefix(name),\n            'error_class': self.error_class,\n        }\n        if self.is_bound:\n            defaults['data'] = self.data\n            defaults['files'] = self.files\n        if self.initial:\n            defaults['initial'] = self.initial\n        defaults.update(kwargs)\n        return klass(**defaults)\n\n##############################################################################\n\nclass InlineSubFormSetsMixin(SubFormSetsBuildMixin):\n    def __init__(self, *args, **kwargs):\n        self.instances = kwargs.pop('instances', {})\n        super(InlineSubFormSetsMixin, self).__init__(*args, **kwargs)\n        for name in self.instances:\n            if not name in self.formsets:\n                raise KeyError('Instance for unknown formset %r', name)\n\n    def _construct_formset(self, name, **kwargs):\n        defaults = {\n            'instance': self.instances.get(name),\n        }\n        defaults.update(kwargs)\n        return super(InlineSubFormSetsMixin, self)._construct_formset(name, **defaults)\n\n    def save(self, only=None, **kwargs):\n        keys = self.formsets.keys() if only is None else only\n        return OrderedDict((name, self._save_formset(name, **kwargs)) for name in keys)\n\n    def _save_formset(self, name, **kwargs):\n        return self.formsets[name].save(**kwargs)\n\n##############################################################################\n\nclass SubFormSetsProxyMixin(BaseFormSet):\n    form = MergingProxyForm\n    formset_group_fields = OrderedDict()\n    validate_max = False\n\n    @property\n    def management_form(self):\n        forms = dict((name, formset.management_form) for name, formset in self.formsets.items())\n        kwargs = {\n            'forms': forms,\n        }\n        if self.is_bound:\n            kwargs['data'] = self.data\n            kwargs['files'] = self.files\n        form = MergingProxyForm(**kwargs)\n        if self.is_bound and not form.is_valid():\n            raise ValidationError('Invalid management form')\n        return form\n\n    def _construct_forms(self):\n        \"\"\" On django < 1.6, this method was used instead of a cached property \"\"\"\n        pass\n\n    @cached_property\n    def forms(self):\n        #TODO: peek at initial data to guess the number beforehand and alter\n        # subformset's management form TOTAL/extra\n        groups, extras = OrderedDict(), []\n        # Fill groups with forms from formsets\n        first = True\n        for formset_name, formset in self.formsets.items():\n            # Group initial forms by key (generated from fields in formset_group_fields)\n            for form in 
formset.initial_forms:\n key = tuple(form.initial.get(field, form.fields[field].initial)\n for field in self.formset_group_fields.keys())\n if first:\n groups[key] = {formset_name: form}\n else: # ensure a mismatch key raises an exception\n groups[key][formset_name] = form\n\n # Check that formset grouping is correct\n if not first:\n if formset.initial_form_count() != len(groups):\n raise InvalidFormsetsError(\n 'formsets do not have the same number of initial form groups: %d != %d' %\n (formset.initial_form_count(), len(groups))\n )\n if len(formset.extra_forms) != len(extras):\n raise InvalidFormsetsError(\n 'formsets do not have the same number of extra forms: %d != %d' %\n (len(formset.extra_forms), len(extras))\n )\n\n # Simply group extra forms by their index\n for index, form in enumerate(formset.extra_forms):\n if first:\n extras.append({formset_name: form})\n else:\n extras[index][formset_name] = form\n\n first = False\n\n linked_fields = self.formset_group_fields.copy()\n if self.can_order:\n linked_fields[ORDERING_FIELD_NAME] = None\n if self.can_delete:\n linked_fields[DELETION_FIELD_NAME] = None\n\n forms = (tuple(self._construct_form(i, forms=group, linked_fields=linked_fields)\n for i, group in enumerate(groups.values()))\n +\n tuple(self._construct_form(i, forms=group, linked_fields=linked_fields)\n for i, group in enumerate(extras, len(groups))))\n return forms\n\n def initial_form_count(self):\n count = next(iter(self.formsets.values())).initial_form_count()\n if any(formset.initial_form_count() != count for formset in self.formsets.values()):\n raise InvalidFormsetsError('initial_form_count()s differ amongst sub-formsets')\n return count\n\n def total_form_count(self):\n count = next(iter(self.formsets.values())).total_form_count()\n if any(formset.total_form_count() != count for formset in self.formsets.values()):\n raise InvalidFormsetsError('total_form_count()s differ amongst sub-formsets')\n return count\n\n def full_clean(self):\n self._errors = []\n self._non_form_errors = self.error_class()\n if not self.is_bound:\n return\n\n for i in range(0, self.total_form_count()):\n self.forms[i].push_linked_fields()\n\n unique_non_form_errors = set()\n for formset in self.formsets.values():\n unique_non_form_errors.update(formset.non_form_errors())\n # do not add form errors as we will get them right after\n self._non_form_errors.extend(unique_non_form_errors)\n\n for form in self.forms:\n self._errors.append(form.errors)\n try:\n self.clean()\n except ValidationError as e:\n self._non_form_errors = self.error_class(e.messages)\n\n @property\n def min_num(self):\n \"\"\" Django >= 1.7 \"\"\"\n return max(formset.min_num for formset in self.formsets.values())\n\n @property\n def can_order(self):\n return all(formset.can_order for formset in self.formsets.values())\n\n @property\n def can_delete(self):\n return all(formset.can_delete for formset in self.formsets.values())\n\n @property\n def empty_form(self):\n form_list = tuple(formset.empty_form for formset in self.formsets.values())\n form = self.form(forms=form_list)\n self.add_fields(form, None)\n return form\n\n##############################################################################\n\nclass ProxyFormSet(SubFormSetsProxyMixin, BaseFormSet):\n def __init__(self, *args, **kwargs):\n self.formsets = kwargs.pop('formsets')\n self.formset_group_fields = self.formset_group_fields.copy()\n self.formset_group_fields.update(kwargs.pop('formset_group_fields', {}))\n super(ProxyFormSet, self).__init__(*args, 
**kwargs)\n\nclass CompoundFormSet(SubFormSetsBuildMixin, SubFormSetsProxyMixin, BaseFormSet):\n pass\n\nclass CompoundInlineFormSet(InlineSubFormSetsMixin, SubFormSetsProxyMixin, BaseFormSet):\n pass\n\ndef compoundformset_factory(formsets, base=CompoundFormSet, formset_group_fields=None):\n attrs = {\n 'formset_classes': formsets,\n }\n if formset_group_fields is not None:\n attrs['formset_group_fields'] = formset_group_fields\n return type(base.__name__, (base,), attrs)\n","repo_name":"spectras/django-compound-forms","sub_path":"compound_forms/formsets.py","file_name":"formsets.py","file_ext":"py","file_size_in_byte":8357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"40251477050","text":"import os\nfrom matplotlib.pyplot import plot\n\n\n\nresultats_str = open(\"resultat.txt\", \"r\")\nresultats = []\nresultats_str\n\n##\nligne = resultats_str.readline()\nwhile ligne != \"\" :\n \n \n ligne.strip()\n resultats.append(ligne.split())\n ligne = resultats_str.readline()\n\nresultats_str.close()\n\n\n##\n\n\ndef cv_strList_into_floatList(liste):\n for k in range(len(liste)):\n liste[k] = float(liste[k])\n return liste\n\nfor i in range (len(resultats)):\n cv_strList_into_floatList(resultats[i])\n \nresultats\n##\ndef pourcentagePersonnesSorties (liste):\n n = len(liste)\n for k in range (1,len(liste)):\n if abs(liste[k] - liste[0]) < 10**-14 :\n n -= 1\n return round(n/len(liste),3)\n \n##\n\ndef nombrePersonnesSorties(liste):\n n = len(liste)\n for k in range (1,len(liste)):\n if abs(liste[k] - liste[0]) < 10**-14 :\n n -= 1\n return n\n \n \n##\ndef pourcentageSortie(liste, t):\n temps = 0\n sortie = []\n baseTemps = []\n while temps < 9.8 :\n k = 0\n \n while liste[k][0] < temps :\n k+= 1\n sortie.append(pourcentagePersonnesSorties(liste[k]))\n baseTemps.append(temps)\n temps += t\n return baseTemps, sortie\n \ntest = pourcentageSortie(resultats, 0.01)\nplot(*test)\n\n## Debit\n\ndef nombreSorties(liste, t):\n temps = 0\n sortie = []\n baseTemps = []\n while temps < 9.8 :\n k = 0\n \n while liste[k][0] < temps :\n k+= 1\n sortie.append(pourcentagePersonnesSorties(liste[k]))\n baseTemps.append(temps)\n temps += t\n temps = round(temps, 2)\n return baseTemps, sortie","repo_name":"Poroing/CrowdDeplacementOptimisation","sub_path":"ModelisationPhysique/src/exploitation_debit.py","file_name":"exploitation_debit.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"35938312224","text":"import random\nimport psycopg2\nimport names\n\n\ndef populate():\n courses = [i for i in range(0, 10)]\n students = [i for i in range(0, 100)]\n faculty = [i for i in range(0, 100)]\n\n conn = psycopg2.connect(database=\"postgres\", user=\"postgres\", password=\"postgres\", host=\"127.0.0.1\", port=\"5432\")\n cur = conn.cursor()\n\n cur.execute('DROP TABLE IF EXISTS COURSES')\n cur.execute('DROP TABLE IF EXISTS STUDENTS')\n cur.execute('DROP TABLE IF EXISTS FACULTY')\n cur.execute(\"CREATE TABLE IF NOT EXISTS COURSES (CID INT PRIMARY KEY NOT NULL, NAME TEXT NOT NULL)\")\n cur.execute(\"CREATE TABLE IF NOT EXISTS STUDENTS (SID INT PRIMARY KEY NOT NULL, NAME TEXT NOT NULL, AGE INT NOT NULL)\")\n cur.execute(\"CREATE TABLE IF NOT EXISTS FACULTY (FID INT PRIMARY KEY NOT NULL, NAME TEXT NOT NULL, FIELD TEXT NOT NULL)\")\n for i in courses:\n j = random.randint(0,4)\n pre = ['CSE', 'EE', 'ME', 'CE', 'IC']\n name = pre[j] + str(i)\n cur.execute(\"INSERT 
INTO COURSES (CID, NAME) VALUES (\" + str(i) + \", '\" + name + \"')\")\n for i in students:\n name = names.get_full_name()\n age = random.randint(18, 25)\n cur.execute(\"INSERT INTO STUDENTS (SID, NAME, AGE) VALUES (\" + str(i) + \", '\" + name + \"', \" + str(age) + \")\")\n for i in faculty:\n name = names.get_full_name()\n j = random.randint(0,3)\n field = ['CSE', 'EE', 'ME', 'CE']\n cur.execute(\"INSERT INTO FACULTY (FID, NAME, FIELD) VALUES (\" + str(i) + \", '\" + name + \"', '\" + str(field[j]) + \"')\")\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n populate()\n\n\n\n# Some expensive Queries to test \n# SELECT COURSES.NAME, STUDENTS.NAME FROM COURSES, STUDENTS;\n# SELECT COURSES.NAME, STUDENTS.NAME, STUDENTS.AGE, FACULTY.NAME, FACULTY.FIELD FROM COURSES, STUDENTS, FACULTY LIMIT 500000;\n\n# Get rejected if queried again within 30s and execution time greater than 5s. ","repo_name":"akhilsinghal1234/Postgres-Assignment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41082796624","text":"from flask import Blueprint, jsonify, abort, request\nfrom ..healthtrace_models import db, Practitioners\nimport sqlalchemy\n\nbp = Blueprint('practitioners', __name__, url_prefix='/practitioners')\n\n\n@bp.route('', methods=['GET']) # decorator takes path and list of HTTP verbs\ndef get_all_practitioners():\n practitioners = Practitioners.query.all() # ORM performs SELECT query\n result = []\n for p in practitioners:\n result.append(p.serialize()) # build list of Tweets as dictionaries\n return jsonify(result) # return JSON response\n\n\n@bp.route('', methods=['POST'])\ndef create_practitioner():\n if 'first_name' not in request.json or 'last_name' not in request.json:\n return abort(400)\n\n practitioner = Practitioners(\n id = request.json['id'],\n first_name = request.json['first_name'],\n last_name = request.json['last_name'],\n specility = request.json['specility'],\n registration_number = request.json['registration_number'],\n phone = request.json['phone'],\n email = request.json['email'],\n created_at = request.json['created_at'],\n )\n try:\n db.session.add(practitioner) \n db.session.commit() \n return jsonify(practitioner.serialize())\n except:\n # something went wrong :(\n return jsonify(False)\n \n\n@bp.route('/', methods=['GET'])\ndef get_practitioner_by_id(id: int):\n practitioner = Practitioners.query.get_or_404(id)\n return jsonify(practitioner.serialize())\n\n\n@bp.route('/', methods=['DELETE'])\ndef delete_practitioner(id: int):\n practitioner = Practitioners.query.get_or_404(id)\n try:\n db.session.delete(practitioner) \n db.session.commit() \n return jsonify(message=\"Practitioner deleted successfully\")\n except:\n # something went wrong :(\n return jsonify(False)\n\n@bp.route('/practitioners/', methods=['PUT', 'PATCH'])\ndef modify_practitioner(practitioner_id):\n practitioner = Practitioners.query.get_or_404(practitioner_id)\n req_data = request.get_json()\n if 'first_name' not in request.json or 'last_name' not in request.json:\n return abort(400)\n \n practitioner.id = req_data['id']\n practitioner.first_name = req_data['first_name']\n practitioner.last_name = req_data['last_name']\n practitioner.specility = req_data['specility']\n practitioner.registration_number = req_data['registration_number']\n practitioner.phone = req_data['phone']\n practitioner.email = req_data['email']\n practitioner.created_at = 
req_data['created_at'] \n \n try:\n db.session.commit()\n return jsonify(practitioner.serialize(), message=\"Practitoner modified successfully\")\n except:\n return jsonify(message = \"something went wrong :(\")\n ","repo_name":"pvmartins/Nucamp_bootcamp","sub_path":"2-SQL/healthtrace/src/api/practitioners.py","file_name":"practitioners.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26051149421","text":"import copy\n\n# 2d list\nlist_a = [[1, 2, 3], [4, 5, 6]]\nlist_b = copy.copy(list_a)\n\nlist_b[0][0] = 10\n\nprint(list_a)\nprint(list_b)\n\n# as it's a 2d list(which is more than 1d data structure) and copy.copy() provides a shallow copy,\n# so, it's changing the original list.\n\n","repo_name":"Sudipta0102/PyBasic","sub_path":"13.Shallow_Deep_copy/03.shallow_copy_bigger_than_1d_datastructure.py","file_name":"03.shallow_copy_bigger_than_1d_datastructure.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17829067492","text":"from round import Round, RoundSettings\nfrom points_estimator import OddsPointEstimator\nfrom surfer import Surfer, SurferPool\n#from heat_mappings import round1_m, round2_m, round3_m, round4_m, roundQ_m, roundS_m, roundF_m\nimport heat_mappings\n\nclass Event:\n def __init__(self):\n round1_set = RoundSettings(\"1\", 12, 3, False, heat_mappings.round1_m, heat_mappings.round1_s)\n round2_set = RoundSettings(\"2\", 4, 3, True, heat_mappings.round2_m, heat_mappings.round2_s)\n round3_set = RoundSettings(\"3\", 16, 2, False, heat_mappings.round3_m, heat_mappings.round3_s)\n round4_set = RoundSettings(\"4\", 8, 2, False, heat_mappings.round4_m)\n roundQ_set = RoundSettings(\"Q\", 4, 2, False, heat_mappings.roundQ_m)\n roundS_set = RoundSettings(\"S\", 2, 2, False, heat_mappings.roundS_m)\n roundF_set = RoundSettings(\"F\", 1, 2, False, heat_mappings.roundF_m)\n\n self.rounds = [Round(round1_set),\n Round(round2_set),\n Round(round3_set),\n Round(round4_set),\n Round(roundQ_set),\n Round(roundS_set),\n Round(roundF_set)]\n\n def update_surfers(self, surfer_pool: SurferPool):\n for r in self.rounds:\n r.update_heat_maps(surfer_pool)\n r.update_surfer_points(surfer_pool)\n r.progress_winners(surfer_pool)\n\n def calculate_average_points(self, surfer_pool: SurferPool, iterations):\n surfer_dict = {}\n for i in range(0, iterations):\n surfer_pool.reset()\n self.update_surfers(surfer_pool)\n # print(\"{} got {}\".format(surfer_pool.surfers[0].name, surfer_pool.surfers[0].expected_totals))\n for s in surfer_pool.surfers:\n if s.name not in surfer_dict:\n surfer_dict[s.name] = 0\n surfer_dict[s.name] += (sum(s.expected_totals) - surfer_dict[s.name])/(i+1) # This is an incremental average\n return surfer_dict","repo_name":"calhamd/WSL_Fantasy","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75255582759","text":"\"\"\"\nPolygon path.\n\n\"\"\"\n\nfrom __future__ import absolute_import\n#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.\nimport __init__\n\nfrom fabmetheus_utilities.geometry.creation import lineation\nfrom fabmetheus_utilities.geometry.geometry_tools import path\nfrom 
fabmetheus_utilities.geometry.geometry_utilities import evaluate\nfrom fabmetheus_utilities.vector3 import Vector3\nfrom fabmetheus_utilities import euclidean\nimport math\n\n\n__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'\n__credits__ = 'Art of Illusion '\n__date__ = '$Date: 2008/02/05 $'\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n\n\ndef getGeometryOutput(derivation, xmlElement):\n\t\"Get vector3 vertexes from attribute dictionary.\"\n\tif derivation == None:\n\t\tderivation = PolygonDerivation(xmlElement)\n\tloop = []\n\tspiral = lineation.Spiral(derivation.spiral, 0.5 * derivation.sideAngle / math.pi)\n\tfor side in xrange(derivation.start, derivation.start + derivation.extent + 1):\n\t\tangle = float(side) * derivation.sideAngle\n\t\tunitPolar = euclidean.getWiddershinsUnitPolar(angle)\n\t\tvertex = spiral.getSpiralPoint(unitPolar, Vector3(unitPolar.real * derivation.radius.real, unitPolar.imag * derivation.radius.imag))\n\t\tloop.append(vertex)\n\tloop = euclidean.getLoopWithoutCloseEnds(0.000001 * max(derivation.radius.real, derivation.radius.imag), loop)\n\tsideLength = derivation.sideAngle * lineation.getRadiusAverage(derivation.radius)\n\tlineation.setClosedAttribute(derivation.revolutions, xmlElement)\n\treturn lineation.getGeometryOutputByLoop(lineation.SideLoop(loop, derivation.sideAngle, sideLength), xmlElement)\n\ndef getGeometryOutputByArguments(arguments, xmlElement):\n\t\"Get vector3 vertexes from attribute dictionary by arguments.\"\n\tevaluate.setAttributeDictionaryByArguments(['sides', 'radius'], arguments, xmlElement)\n\treturn getGeometryOutput(None, xmlElement)\n\ndef getNewDerivation(xmlElement):\n\t'Get new derivation.'\n\treturn PolygonDerivation(xmlElement)\n\ndef processXMLElement(xmlElement):\n\t\"Process the xml element.\"\n\tpath.convertXMLElement(getGeometryOutput(None, xmlElement), xmlElement)\n\n\nclass PolygonDerivation:\n\t\"Class to hold polygon variables.\"\n\tdef __init__(self, xmlElement):\n\t\t'Set defaults.'\n\t\tself.sides = evaluate.getEvaluatedFloat(4.0, 'sides', xmlElement)\n\t\tself.sideAngle = 2.0 * math.pi / self.sides\n\t\tcosSide = math.cos(0.5 * self.sideAngle)\n\t\tself.radius = lineation.getComplexByMultiplierPrefixes(cosSide, ['apothem', 'inradius'], complex(1.0, 1.0), xmlElement)\n\t\tself.radius = lineation.getComplexByPrefixes(['demisize', 'radius'], self.radius, xmlElement)\n\t\tself.radius = lineation.getComplexByMultiplierPrefixes(2.0, ['diameter', 'size'], self.radius, xmlElement)\n\t\tself.sidesCeiling = int(math.ceil(abs(self.sides)))\n\t\tself.start = evaluate.getEvaluatedInt(0, 'start', xmlElement)\n\t\tend = evaluate.getEvaluatedInt(self.sidesCeiling, 'end', xmlElement)\n\t\tself.revolutions = evaluate.getEvaluatedInt(1, 'revolutions', xmlElement)\n\t\tself.extent = evaluate.getEvaluatedInt(end - self.start, 'extent', xmlElement)\n\t\tself.extent += self.sidesCeiling * (self.revolutions - 1)\n\t\tself.spiral = evaluate.getVector3ByPrefix(None, 'spiral', xmlElement)\n\n\tdef __repr__(self):\n\t\t\"Get the string representation of this PolygonDerivation.\"\n\t\treturn str(self.__dict__)\n","repo_name":"amsler/skeinforge","sub_path":"fabmetheus_utilities/geometry/creation/polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"} +{"seq_id":"10190893192","text":"from scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport pandas as 
pd\nimport numpy as np\nimport pickle\n\ndef poly(x, c2, c3):\n return c2*x**2 + c3*x**3\n\ndf = pd.read_excel('curved_data.xlsx', sheet_name = 'FEA (3D, c3=0.25, F=-10)')\n\nskip_lines = 0 # -1 of what you think it should be\n\nplt.figure()\nfor i in range(1): #range(int(len(df.columns)/2)):\n ii = 2*i\n x = np.array(df.values[skip_lines:, ii])\n y = np.array(df.values[skip_lines:, ii+1])\n # Remove NaN\n x = np.array(list(x[~np.isnan(list(x))]))\n y = np.array(list(y[~np.isnan(list(y))]))\n\n # Fit\n popt, pcov = curve_fit(poly, x, y)\n print(popt)\n plt.plot(x, y, 'b', label = 'Raw %i' % i)\n plt.plot(x, poly(x, *popt), 'r--', label = 'Fit %i' % i)\nplt.show()\n","repo_name":"leal26/AeroPy","sub_path":"examples/structural/fit_any.py","file_name":"fit_any.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"18"} +{"seq_id":"10570745399","text":"from rest_framework.serializers import ModelSerializer\n\nfrom apps.course.models import Course, CourseCategory\n\n\nclass CourseCategorySerializer(ModelSerializer):\n class Meta:\n model = CourseCategory\n fields = [\"id\", \"title\", \"icon\"]\n\n\nclass CourseListSerializer(ModelSerializer):\n category = CourseCategorySerializer(read_only=True)\n\n class Meta:\n model = Course\n fields = [\n \"id\",\n \"title\",\n \"author\",\n \"lang_code\",\n \"price\",\n \"discounted_price\",\n \"discounted_expire_date\",\n \"category\",\n \"level\",\n ]\n","repo_name":"fnabiyevuz/uzchess","sub_path":"apps/course/api_endpoints/course/CourseList/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32140930033","text":"def solution(N, stages):\n ### 마지막 단계를 성공한 사람은 실패한게 아니니까 아예 제외\n ans = {}\n step = {}\n rest = len(stages) # 남은사람\n stages.sort()\n for i in range(N+1):\n step[i] = stages.count(i+1)\n for i in range(N):\n if step[i] == 0:\n cal = 0\n else:\n cal = step[i] / rest\n\n ans[i+1] = cal\n rest -= step[i]\n\n ans = sorted(ans, key=lambda k :ans[k], reverse=True)\n return ans\n\n\nprint(solution(N=5,stages=[2,1,2,6,2,4,3,3]))\nprint(solution(N=4,stages=[4,4,4,4,4]))\n\n### 한줄쏘쓰\ndef solution(N, stages):\n result = {}\n denominator = len(stages)\n for stage in range(1, N+1):\n if denominator != 0:\n count = stages.count(stage)\n result[stage] = count / denominator\n denominator -= count\n else:\n result[stage] = 0\n return sorted(result, key=lambda x : result[x], reverse=True)","repo_name":"newjini/Algo_study","sub_path":"Python/Programmers/Lv.1/Prog_실패율.py","file_name":"Prog_실패율.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38826916891","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@Author : Alfred T\n@Time : 2020/5/16 22:17\n@description : \n\"\"\"\n\nimport xlrd\nfrom selenium import webdriver\nimport time\n\n\n# web初始化\ndef web_init():\n # # 打开ie浏览器\n # driver = webdriver.Ie()\n # 打开Chrome浏览器\n options = webdriver.ChromeOptions()\n options.add_argument('lang=zh_CN.UTF-8')\n driver = webdriver.Chrome(chrome_options=options)\n\n driver.get(\"http://usp.cdrcbank.com/usp/\")\n # driver.get(\"https://www.baidu.com/\")\n\n # 等待输入后开始\n print(\"=====前置步骤结束后按下回车后开始程序=====\")\n\n input()\n\n print(\"=====开始执行=====\")\n # 获取打开的多个窗口句柄\n windows = driver.window_handles\n # 切换到当前最新打开的窗口\n driver.switch_to.window(windows[-1])\n # 
切换到iframe\n driver.switch_to.frame(\"main-content-container\")\n\n return driver\n\n\n# 执行一次新增操作\n# driver:浏览器驱动\ndef new_costumer(driver, name):\n # 点击新增按钮\n driver.find_element_by_xpath(\"//*[@id='global-query-table-div']/div/div[1]/div/div/a[3]/span/span/span[1]\").click()\n # /html/body/div[2]/div/div[1]/div/div/a[3]/span/span/span[1]\n # /html/body/div[2] == //*[@id='global-query-table-div']\n time.sleep(1)\n\n # 第一个个输入框\n # 下拉按钮 orderBusiType 1198\n driver.find_element_by_xpath(\n \"//*[@id='pms.service.booking.bookingCreateWindow']/div[2]/div/div[1]/span/div/table[1]/tbody/tr/td[2]/div/div/div/table[1]/tbody/tr/td[2]/table/tbody/tr/td[2]/div\").click()\n # /html/body/div[9]/div[2]/div == //*[@id=\"ext-comp-1075\"]\n # //*[@id='pms.service.booking.bookingCreateWindow'] == /html/body/div[6]\n # /html/body/div[6]/div[2]/div/div[1]/span/div/table[1]/tbody/tr/td[2]/div/div/div/table[1]/tbody/tr/td[2]/table/tbody/tr/td[2]/div\n\n # 选择第二个\n driver.find_element_by_xpath(\"//*[@role = 'option' and text()='个人存款']\").click()\n # /html/body/div[10]\n\n # 第二个输入框\n driver.find_element_by_xpath(\"//*[@name = 'cusName']\").send_keys(name)\n\n # 第三个输入框(时间)ext-gen1195\n # 时间按钮\n driver.find_element_by_xpath(\n \"//*[@id='pms.service.booking.bookingCreateWindow']/div[2]/div/div[1]/span/div/table[2]/tbody/tr/td[2]/div/div/div/table/tbody/tr/td[2]/table/tbody/tr/td[2]/div\").click()\n\n # 选择今天 button-1064-btnIconEl\n driver.find_element_by_xpath(\"//*[contains(@class,'x-datepicker')]/div/div[2]/a/span/span/span[2]\").click()\n # /html/body/div[11]/div/div[2]/a/span/span/span[2]\n # /html/body/div[11] == //*[@class= 'x-datepicker']\n\n # 第四个��入框(金额) textfield-1050-inputEl\n time.sleep(0.5)\n driver.find_element_by_xpath(\"//*[@name = 'orderAmt']\").send_keys(\"100\")\n\n # 机构选择按钮 button-1054-btnIconEl\n driver.find_element_by_xpath(\n \"//*[@id='pms.service.booking.bookingCreateWindow']/div[2]/div/div[1]/span/div/table[4]/tbody/tr/td[2]/div/div/div/a/span/span/span[2]\").click()\n # /html/body/div[8]/div[2]/div/div[1]/span/div/table[4]/tbody/tr/td[2]/div/div/div/a/span/span/span[2]\n\n # 一级下拉菜单展开 //*[@id=\"each-busi-org-select-window\"] == /html/body/div[10]\n driver.find_element_by_xpath(\n \"//*[@id='each-busi-org-select-window']/div[2]/div/div[2]/div/table/tbody/tr/td[1]/div/img[1]\").click()\n # 睡眠两秒避免系统反应不及时\n time.sleep(1.5)\n\n # 金牛支行 ext-gen1303 /html/body/div[10]/div[2]/div/div[2]/div/table/tbody/tr[14]/td[1]/div/span //*[@id=\"each-busi-org-select-window\"]\n # /html/body/div[11]/div[2]/div/div[2]/div/table/tbody/tr[17]/td[1]/div/span\n # driver.find_element_by_xpath(\n # \"//*[@id='each-busi-org-select-window']/div[2]/div/div[2]/div/table/tbody/tr[14]/td[1]/div/img[2]\").click()\n driver.find_element_by_xpath('//*[contains(text(), \"金牛支行管理中心\")]/../img[2]').click()\n time.sleep(2.5)\n\n # 金牛成化支行 ext-gen1373\n # /html/body/div[11]/div[2]/div/div[2]/div/table/tbody/tr[25]/td[1]/div/span\n # driver.find_element_by_xpath(\n # \"//*[@id='each-busi-org-select-window']/div[2]/div/div[2]/div/table/tbody/tr[23]/td[1]/div/span\").click()\n try:\n bank_name = driver.find_element_by_xpath('//span[contains(text(), \"金牛化成支行\")]/..')\n bank_name.click()\n except Exception as e:\n print(\"支行名字没取到\")\n time.sleep(1.5)\n\n # 确认 button-1071-btnIconEl\n driver.find_element_by_xpath(\n \"//*[@id='each-busi-org-select-window']/div[2]/div/div[3]/div/div/a[1]/span/span/span[2]\").click()\n # /html/body/div[12]/div[2]/div/div[3]/div/div/a[1]/span/span/span[2]\n time.sleep(1.5)\n\n # 保存按钮 button-1056-btnIconEl\n 
driver.find_element_by_xpath(\n \"//*[@id='pms.service.booking.bookingCreateWindow']/div[2]/div/div[2]/div/div/a[1]/span/span/span[2]\").click()\n # /html/body/div[6]/div[2]/div/div[2]/div/div/a[1]/span/span/span[2]\n\n time.sleep(1)\n\n # 操作成功的确定 button-1005-btnIconEl\n driver.find_element_by_xpath(\"//*[contains(@class,'x-message-box')]/div[3]/div/div/a[1]/span/span/span[2]\").click()\n # /html/body/div[8]/div[3]/div/div/a[1]/span/span/span[2]\n # /html/body/div[8] = //*[contains(@class,'x-message-box')]\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n data = xlrd.open_workbook(\"./格式模板 - 副本.xlsx\")\n # 选择Sheet2\n table = data.sheet_by_name('Sheet2')\n rowNum = table.nrows\n # web初始化\n driver = web_init()\n # 每一行入录一次\n for i in range(rowNum):\n name = table.row_values(i)[0]\n print(str(\"==================正在写入:\" + name + \"==================\"))\n new_costumer(driver, name)\n","repo_name":"YuAlfred/my_python_scripts","sub_path":"excel_to_web.py","file_name":"excel_to_web.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10927947903","text":"import math\nclass Ejercicio6:\n def __init__(self,titulo):\n self.mensaje = titulo\n \n def hipotenusa(self,n1,n2):\n\n hip = math.sqrt((n1**2) + (n2**2))\n print(\"La hipotenusa es: {}\".format(hip))\n\na1 = float(input(\"Ingrese el primer lado:\"))\nb1 = float(input(\"Ingrese el primer lado:\"))\n\nejercicio6Variable = Ejercicio6(\"Titulo\")\nejercicio6Variable.hipotenusa(a1,b1)","repo_name":"wandrestv/ejercicios","sub_path":"ejercicio6.py","file_name":"ejercicio6.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40243457521","text":"# coding: utf-8\n# 057_square_root_convergents.py\n\n\nnumerator_list = [1, 3]\ndenominator_list = [1, 2]\n\nfor i in range(1, 1001):\n\tnumerator_list.append(numerator_list[i]*2+numerator_list[i-1])\n\tdenominator_list.append(denominator_list[i]*2+denominator_list[i-1])\n\n# 분자, 분모 각각 다음항은 현재 항에 2를 곱한 후 이전 항을 더하면 구할 수 있다.\n\ncount = 0\nfor i in range(1, 1001):\n\tif len(str(numerator_list[i])) > len(str(denominator_list[i])):\n\t\tcount += 1\n\n\nprint(count)\n\n\n\n\n","repo_name":"lee-seul/project-euler","sub_path":"057_square_root_convergents.py","file_name":"057_square_root_convergents.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21532933788","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom home import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('signup/', views.signup_view, name='signup_view'),\n path('login/', views.login_view, name='login_view'),\n path('logout/', views.logoutUser, name='logout'),\n path('add_product/', views.addproduct, name='add_product'),\n path('contact/', views.contactus, name='contactus'),\n path('prod_detail//',views.prod_detail,name = 'prod_detail'),\n path('search',views.search,name = 'search'),\n path('userprofile',views.userprofile,name = 'userprofile'),\n path('category//',views.category_view,name = 'category_view')\n\n]","repo_name":"devAdikavi/Merokitab_","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"40950435343","text":"from enum import 
Enum\n\n\nclass Palo(Enum):\n DIAMANTE = 1\n CORAZON = 2\n TREBOL = 3\n PICA = 4\n\n\nclass Carta:\n def __init__(self, palo, numero, es_joker=False):\n self.palo = palo\n self.numero = numero\n self.es_joker = es_joker\n","repo_name":"ericbrandwein/tapada","sub_path":"src/juego/mazo/carta.py","file_name":"carta.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20852589755","text":"# RESTful API standard operations\nPOST = 'POST'\nGET = 'GET'\nPUT = 'PUT'\nDELETE = 'DELETE'\nPATCH = 'PATCH'\nOPTIONS = 'OPTIONS'\n\n# HTTP Response Status Codes\nOK = 200\nACCEPTED = 201\nNOTREGISTERED = 403\nPAGENOTFOUND = 404\nCONFLICT = 409\nINVALIDINPUT = 422\nINTERNALSERVERERROR = 500\nSERVICEUNAVAILABLE = 503\n\n# Error Messages\nSERVERTIMEOUT = 'The endpoint server is down. Please try again later.'\nINVALIDPATH = {\"error\":\"The page requested is not found. Please check the URL.\"}\n\n# Headers\nCONTENTTYPE = 'Content-Type'\nJSON = 'application/json'\n","repo_name":"pyj4104/ExpenseReportSystemBE","sub_path":"constants/webCommunications.py","file_name":"webCommunications.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14215926740","text":"import numpy as np\nimport itertools\nimport sys\nfrom Node import Node\n\ndef naive_sort(bin_, boxes):\n # metoda naiwna z sortowaniem\n bin_copy = bin_.copy()\n # plot_naive_sort = [0]\n\n volumes = [l * w * h for l, w, h in boxes]\n sortedIndices = np.argsort(volumes).tolist()[::-1]\n\n # sort a by descending volumes\n boxes_sorted = [boxes[i] for i in sortedIndices]\n\n for box in boxes_sorted:\n bin_copy[2] += min(box)\n # plot_naive_sort.append(bin_copy[2])\n\n return bin_copy[2]\n\n\ndef naive(bin_, boxes):\n bin_copy = bin_.copy()\n a_copy = boxes.copy()\n # plot_naive = [0]\n\n cubes = []\n for box in a_copy:\n bin_copy[2] += min(box)\n # plot_naive.append(bin_copy[2])\n\n return bin_copy[2]\n\n\ndef search(boxes, bin_copy):\n plot = [0]\n left_upper_corner = 0\n previous_max_length = 0\n max_length_in_row = 0\n current_height = 0\n max_height_in_basis = 0\n\n for box_ in boxes:\n flaga = True\n\n while flaga:\n if left_upper_corner + box_[0] < bin_copy[0]:\n if previous_max_length + box_[1] < bin_copy[1]:\n if box_[2] > max_height_in_basis:\n max_height_in_basis = box_[2]\n if max_length_in_row < previous_max_length + box_[1]:\n max_length_in_row = previous_max_length + box_[1]\n left_upper_corner += box_[0]\n flaga = False\n else:\n left_upper_corner = 0\n previous_max_length = 0\n max_length_in_row = 0\n current_height += max_height_in_basis\n max_height_in_basis = 0\n plot.append(max_height_in_basis)\n else:\n previous_max_length = max_length_in_row\n left_upper_corner = 0\n\n current_height += max_height_in_basis\n\n return current_height, plot\n\n\ndef systematic_search(bin_, boxes):\n bin_copy = bin_.copy()\n a_copy = boxes.copy()\n\n # kolejnosc boxów\n permutations_of_a = list(itertools.permutations(a_copy))\n\n best_result = sys.maxsize\n\n for permut_a in permutations_of_a:\n boxes_perm = []\n for current_box in permut_a:\n boxes_perm.append(list(dict.fromkeys(itertools.permutations(current_box))))\n\n combinations = list(itertools.product(*boxes_perm))\n\n for combination in combinations:\n result = search(combination, bin_copy)\n if result[0] < best_result:\n best_result = result[0]\n # plot_sys_search = result[1]\n\n return 
best_result\n\n\ndef layer_search(bin_, boxes):\n bin_copy = bin_.copy()\n a_copy = boxes.copy()\n\n # obrócenie boxów tak, żeby ich wysokość była jak najmniejsza\n for box in a_copy:\n box[::-1].sort()\n\n # sortowanie nierosnąco po objętości\n a_copy = sorted(a_copy, key=lambda box: box[0] * box[1] * box[2], reverse=True)\n\n result = search(a_copy, bin_copy)\n best_result = result[0]\n # plot_layers = result[1]\n\n return best_result\n\n\ndef tree_search(bin_, boxes):\n bin_copy = bin_.copy()\n a_copy = boxes.copy()\n\n # obrócenie boxów tak, żeby ich wysokość była jak najmniejsza\n for box in a_copy:\n box[::-1].sort()\n\n # sortowanie nierosnąco po objętości\n a_copy = sorted(a_copy, key=lambda box: box[0] * box[1] * box[2], reverse=True)\n\n overall_height = 0\n curr_height = 0\n plot_tree = [0]\n\n root = Node(0, 0, bin_copy[0], bin_copy[1])\n\n for box in a_copy:\n new_insert = root.insert(box)\n if new_insert is None:\n # new tree\n root = Node(0, 0, bin_copy[0], bin_copy[1])\n overall_height += curr_height\n curr_height = 0\n else:\n if new_insert > curr_height:\n curr_height = new_insert\n # plot_tree.append(overall_height + curr_height)\n\n overall_height += curr_height\n\n return overall_height","repo_name":"bindas1/AAL-project","sub_path":"src/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10931990798","text":"import uvicorn\nfrom fastapi import FastAPI\nfrom fastapi.responses import RedirectResponse\nfrom fastapi.testclient import TestClient\n\napp = FastAPI()\n\n\n@app.get(\"/redirector\")\nasync def redirector():\n return RedirectResponse(\"https://docs.python.org/\")\n\n\nclient = TestClient(app)\n\n\ndef test_redirector():\n resp = client.get(\"/redirector\", allow_redirects=True)\n # returns a 404 instead of 200\n assert resp.status_code == 404\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n","repo_name":"falkben/fastapi_experiments","sub_path":"experiments/test_external_request_testclient.py","file_name":"test_external_request_testclient.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"27047530803","text":"import pandas as pd\nimport json\nfrom scripts_dataLoading_aws.p2_files import get_files_paths\nimport datetime\n\n##### GET FILES \nfiles = get_files_paths()\n# files = files[0:100]\nlen(files)\ntotal_count = len(files)\n\nerrors = []\n\nstarttime = datetime.datetime.now().strftime(\"%H:%M:%S\")\nfor i in range(len(files)):\n\n print('working on file: ', files[i])\n\n ## load in a xml file\n file_name = files[i].split('/')[-1].split('.')[0]\n df = pd.read_xml(f'{files[i]}')\n\n ## create some study object\n studyobject = []\n\n ## loop through each column in df, getting the values, \n ## dropping the NaNs, and appending to studyobject\n for col in df.columns:\n col_values = df[col].dropna().values\n col_name = col \n values = {col: col_values}\n studyobject.append(values)\n\n ## flatten the studyobject // nice simple dictionary \n studyobject_flat = {k: v for d in studyobject for k, v in d.items()}\n\n ## save file locally\n with open(f'temp/{file_name}.json', 'w') as f:\n json.dump(studyobject_flat, default=lambda x: x.tolist(), fp=f)\n\n ## print number of files remaining\n print('files remaining: ', total_count - i)\n \nendtime = datetime.datetime.now().strftime(\"%H:%M:%S\")\ntotal_time_minutes = (datetime.datetime.strptime(endtime, \"%H:%M:%S\") - datetime.datetime.strptime(starttime, \"%H:%M:%S\")).total_seconds() / 60\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n### note - decided to not take this approach, manually force evey value to be part of a list/array, versus using xmltodict\n### in which sometimes things will be strings (single value) versus multiple values (list/array) - best just to force everything to be a list/array\n## for simlicity sake for now\n# import xml.etree.ElementTree as ET\n# import numpy as np\n# import xmltodict\n# files = os.listdir('data/sample')\n# for i in range(len(files)):\n# file_name = files[i]\n# with open(f\"data/sample/{files[i]}\", \"r\") as f:\n# xml_content = f.read()\n# data_dict = xmltodict.parse(xml_content)\n# ## save json to file\n# with open(f'temp/{file_name}.json', 'w') as f:\n# json.dump(data_dict, indent=4, fp=f)\n# print('completed: ', files[i])\n \n","repo_name":"hantswilliams/clinicaltrials_trec_2022","sub_path":"scripts/p1_scripts_dataLoading_aws/p3_xml_json.py","file_name":"p3_xml_json.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29294434001","text":"from socket import*\nimport time\n\nserverPort = 9999 \nserverSocket = socket(AF_INET, SOCK_STREAM)\nserverSocket.bind(('',serverPort))\nserverSocket.listen(1)\nprint('The server is ready to receive')\nwhile True:\n \n connectionSocket,addr= serverSocket.accept()\n output = connectionSocket.recv(1024)\n capitalizedSentence = output.upper()\n time.sleep(10)\n connectionSocket.send(capitalizedSentence.encode())\n connectionSocket.close()\n","repo_name":"MichaelOdumosu57/ManualTCP","sub_path":"ServerTCPelasped.py","file_name":"ServerTCPelasped.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"26097511474","text":"# 1678. Goal Parser Interpretation\n# You own a Goal Parser that can interpret a string command. The command consists of an alphabet of \"G\", \"()\" and/or \"(al)\" in some order. The Goal Parser will interpret \"G\" as the string \"G\", \"()\" as the string \"o\", and \"(al)\" as the string \"al\". 
The interpreted strings are then concatenated in the original order.\n# Given the string command, return the Goal Parser's interpretation of command.\n\nclass Solution:\n def interpret(self, command: str) -> str:\n i = 0\n output = ''\n while i < len(command):\n if command[i] == 'G':\n output += 'G'\n i += 1\n elif command[i:i+2] == '()':\n output += 'o'\n i += 2\n else: \n output += 'al'\n i += 4\n return output\n\ns = Solution()\nprint(s.interpret(\"G()(al)\"))\nprint(s.interpret(\"G()()()()(al)\"))\nprint(s.interpret(\"(al)G(al)()()G\"))","repo_name":"tpett20/LeetCode","sub_path":"1678.py","file_name":"1678.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3997751914","text":"import tensorflow as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_where_tests(options):\n \"\"\"Make a set of tests to do where.\"\"\"\n\n test_parameters = [\n {\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape_set\": [([1, 2, 3, 4], [1, 2, 3, 4]),],\n \"use_where_v2\": [False, True],\n \"fully_quantize\": [False],\n },\n {\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape_set\": [([], []),],\n \"use_where_v2\": [],\n \"fully_quantize\": [False],\n },\n {\n \"input_dtype\": [tf.float32],\n \"input_shape_set\": [\n ([1, 2, 3, 4], [1, 2, 3, 4]),\n ([], []),\n ],\n \"use_where_v2\": [False, True],\n \"fully_quantize\": [True],\n },\n # High dimension broadcasting support in MLIR converter.\n {\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape_set\": [([8, 7, 6, 5, 4, 3, 2, 1], [4, 3, 2, 1]),\n ([8, 7, 6, 5, 4, 3, 2, 1], [None, 3, 2, 1]),\n ([8, 7, 6, 5, None, 3, 2, 1], [None, 3, 2, 1])],\n \"use_where_v2\": [True],\n \"fully_quantize\": [False],\n \"dynamic_size_value\": [4, 1],\n },\n {\n \"input_dtype\": [tf.float32],\n \"input_shape_set\": [([8, 7, 6, 5, 4, 3, 2, 1], [4, 3, 2, 1])],\n \"use_where_v2\": [True],\n \"fully_quantize\": [True],\n \"dynamic_size_value\": [4],\n },\n {\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape_set\": [([], []), ([1], []), ([], [1])],\n \"use_where_v2\": [False, True],\n \"fully_quantize\": [False],\n },\n ]\n\n def populate_dynamic_shape(parameters, input_shape):\n return [\n parameters[\"dynamic_size_value\"] if x is None else x\n for x in input_shape\n ]\n\n def build_graph(parameters):\n \"\"\"Build the where op testing graph.\"\"\"\n input_value1 = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_set\"][0])\n input_value2 = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input3\",\n shape=parameters[\"input_shape_set\"][1])\n less = tf.less(input_value1, input_value2)\n where = tf.compat.v2.where if parameters[\n \"use_where_v2\"] else tf.compat.v1.where\n out = where(less, input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_shape_1 = populate_dynamic_shape(parameters,\n parameters[\"input_shape_set\"][0])\n input_shape_2 = populate_dynamic_shape(parameters,\n parameters[\"input_shape_set\"][1])\n\n input_value1 = create_tensor_data(\n parameters[\"input_dtype\"], input_shape_1, min_value=-1, max_value=1)\n input_value2 = create_tensor_data(\n 
parameters[\"input_dtype\"], input_shape_2, min_value=-1, max_value=1)\n return [input_value1, input_value2], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(\n options,\n test_parameters,\n build_graph,\n build_inputs,\n expected_tf_failures=4)\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/lite/testing/op_tests/where.py","file_name":"where.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"35694428205","text":"#Import libraries\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\nimport RPi.GPIO as GPIO\nimport epd2in7\nimport Image\nimport ImageFont\nimport ImageDraw\nimport time\n\n\nepd = epd2in7.EPD()\nepd.init()\nEPD_WIDTH = epd2in7.EPD_WIDTH\nEPD_HEIGHT = epd2in7.EPD_HEIGHT\nGPIO.setmode(GPIO.BCM)\nkey1 = 5\nkey2 = 6\nkey3 = 13\nkey4 = 19\nGPIO.setup(key1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(key2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(key3, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(key4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nimage = Image.new('1', (epd2in7.EPD_WIDTH, epd2in7.EPD_HEIGHT), 255)\n \n\ndef updateDisplay(string):\n\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf', 18)\n\n draw.text((20, 50), string, font = font, fill = 0)\n #draw.rectangle((epd2in7.EPD_WIDTH/2-10, epd2in7.EPD_HEIGHT/2-10, epd2in7.EPD_WIDTH/2+10, epd2in7.EPD_HEIGHT/2+10), fill = 0)\n print('update display')\n epd.display_frame(epd.get_frame_buffer(image))\n\ndef makeImage():\n #Image Size\n EPD_WIDTH = 176\n EPD_HEIGHT = 264\n # Create a white mask \n mask = Image.new('1', (EPD_HEIGHT,EPD_WIDTH), 255) \n #Create a Draw object than allows to add elements (line, text, circle...) \n draw = ImageDraw.Draw(mask)\n #Some Text\n draw.text((EPD_HEIGHT/4,EPD_WIDTH/2), 'Demo Python PILL', fill = 0)\n #Horizontal line\n draw.line((0,EPD_WIDTH/2 + 12, EPD_HEIGHT, EPD_WIDTH/2 + 12), fill = 0)\n #Save the picture on disk\n mask.save('demopill.bmp',\"bmp\")\n \n\nupdateDisplay(\"Hello\")\n","repo_name":"SPConrad/epaper-weather-station","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36168650822","text":"#If the bill was $150.00, split between 5 people, with 12% tip. \n\n#Each person should pay (150.00 / 5) * 1.12 = 33.6\n#Format the result to 2 decimal places = 33.60\n\n#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪\n\n#Write your code below this line 👇\n\n\n\n\n#STEVE: So, I figured this exercise out...except for the formatting to round out decimal two spaces. Angela's solution of course worked (see line 19), but it was also a bit of an example of \"Draw a circle, now draw an owl.\" This is the first time the formatting function has been introduced, and saying \"Google it\" without explaining how or why it works isn't particularly helpful. I'm still not sure why it works?\n\nbill = float(input(\"What was the total bill? $\"))\ntip = float(input(\"What percentage would you like to tip? \")) / 100\npeople = float(input(\"How many people to split the bill? 
\"))\ntotal = (bill * tip + bill) / people\nfinal_total = \"{:.2f}\".format(total)\n# final_total = round(total, 2) \n# final_total = \"{.2f}\".format(final_total)\nprint(f\"Each person pays ${final_total}.\")","repo_name":"StevenGaughran/python_learning","sub_path":"DAY_2_tip_calculator.py","file_name":"DAY_2_tip_calculator.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73992868200","text":"#!/usr/bin/env python3\n\nimport Jetson.GPIO as GPIO\nimport time\n\nled_pin = 12\n\nprint(\"Start led blinking example on pin: \" + str(led_pin) + \".\")\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(led_pin, GPIO.OUT)\n\ni = 0\nwhile i < 10:\n GPIO.output(led_pin, GPIO.HIGH)\n time.sleep(1)\n GPIO.output(led_pin, GPIO.LOW)\n time.sleep(1)\n i = i + 1\n\nprint(\"End of led blinking example.\")\n","repo_name":"ChampiB/Lucy","sub_path":"mind/test/gpio.py","file_name":"gpio.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31427921421","text":"# Switched from js to python bc of very large numbers\n'''\n https://projecteuler.net/problem=16\n 2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.\n\n What is the sum of the digits of the number 2^1000?\n just pow of number to pow then add digits\n'''\nnumber = 2\npower = 1000\n\n\ndef main():\n numberPow = pow(number,power)\n sum = 0\n numberPow = str(numberPow)\n for i in numberPow:\n sum += int(i)\n print(sum)\n\nif __name__ == \"__main__\":\n main()","repo_name":"KaelPearson/ProjectEulerChallenges","sub_path":"Completed/problem16.py","file_name":"problem16.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24502427485","text":"from __init__ import *\nimport utils as _U\nreload(_U)\n\n\nSUPPORTED_INDICATORS = ['MA']\n\ndef cal_indicators(tabular_df, indicator_name, parameters):\n if indicator_name == \"MA\":\n assert len(parameters) == 1, f'Wrong parameters num, expected 1, got {len(parameters)}'\n slice_win_size = int(parameters[0])\n MA = tabular_df['close'].rolling(slice_win_size, min_periods=1).mean()\n return MA # pd.Series\n\n\n\ndef single_symbol_image(tabular_df, image_size, start_date, sample_rate, indicators, show_volume, mode):\n ''' generate Candlelist images\n \n parameters: [\n tabular_df -> pandas.DataFrame: tabular data,\n image_size -> tuple: (H, W), size shouble (32, 15), (64, 60)\n start_date -> int: truncate extra rows after generating images,\n indicators -> dict: technical indicators added on the image, e.g. {\"MA\": [20]},\n show_volume -> boolean: show volume bars or not\n mode -> 'train': for train & validation; 'test': for test; 'inference': for inference\n ]\n \n Note: A single day's data occupies 3 pixel (width). First rows's dates should be prior to the start date in order to make sure there are enough data to generate image for the start date.\n \n return -> list: each item of the list is [np.array(image_size), binary, binary, binary]. The last two binary (0./1.) 
are the label of ret5, ret20\n \n '''\n \n \n ind_names = []\n if indicators:\n for i in range(len(indicators)//2):\n ind = indicators[i*2].NAME\n ind_names.append(ind)\n params = str(indicators[i*2+1].PARAM).split(' ')\n tabular_df[ind] = cal_indicators(tabular_df, ind, params)\n \n dataset = []\n valid_dates = []\n lookback = image_size[1]//3\n for d in range(lookback-1, len(tabular_df)):\n # random skip some trading dates\n if np.random.rand(1) > sample_rate:\n continue\n # skip dates before start_date\n if tabular_df.iloc[d]['date'] < start_date:\n continue\n \n price_slice = tabular_df[d-(lookback-1):d+1][['open', 'high', 'low', 'close']+ind_names].reset_index(drop=True)\n volume_slice = tabular_df[d-(lookback-1):d+1][['volume']].reset_index(drop=True)\n\n # number of no transactions days > 0.2*look back days\n if (1.0*(price_slice[['open', 'high', 'low', 'close']].sum(axis=1)/price_slice['open'] == 4)).sum() > lookback//5: \n continue\n \n valid_dates.append(tabular_df.iloc[d]['date']) # trading dates surviving the validation\n \n # project price into quantile\n price_slice = (price_slice - np.min(price_slice.values))/(np.max(price_slice.values) - np.min(price_slice.values))\n volume_slice = (volume_slice - np.min(volume_slice.values))/(np.max(volume_slice.values) - np.min(volume_slice.values))\n\n if not show_volume:\n price_slice = price_slice.apply(lambda x: x*(image_size[0]-1)).astype(int)\n else:\n if image_size[0] == 32:\n price_slice = price_slice.apply(lambda x: x*(25-1)+7).astype(int)\n volume_slice = volume_slice.apply(lambda x: x*(6-1)).astype(int)\n else:\n price_slice = price_slice.apply(lambda x: x*(51-1)+13).astype(int)\n volume_slice = volume_slice.apply(lambda x: x*(12-1)).astype(int)\n \n image = np.zeros(image_size)\n for i in range(len(price_slice)):\n # draw candlelist \n image[price_slice.loc[i]['open'], i*3] = 255.\n image[price_slice.loc[i]['low']:price_slice.loc[i]['high']+1, i*3+1] = 255.\n image[price_slice.loc[i]['close'], i*3+2] = 255.\n # draw indicators\n for ind in ind_names:\n image[price_slice.loc[i][ind], i*3:i*3+2] = 255.\n # draw volume bars\n if show_volume:\n image[:volume_slice.loc[i]['volume'], i*3+1] = 255.\n \n label_ret5 = 1 if np.sign(tabular_df.iloc[d]['ret5']) > 0 else 0\n label_ret20 = 1 if np.sign(tabular_df.iloc[d]['ret20']) > 0 else 0\n \n entry = [image, label_ret5, label_ret20]\n dataset.append(entry)\n \n if mode == 'train' or mode == 'test':\n return dataset\n else:\n return [tabular_df.iloc[0]['code'], dataset, valid_dates]\n\n\nclass ImageDataSet():\n def __init__(self, win_size, start_date, end_date, mode, label, indicators=[], show_volume=False, parallel_num=-1):\n ## Check whether inputs are valid\n assert isinstance(start_date, int) and isinstance(end_date, int), f'Type Error: start_date & end_date shoule be int'\n assert start_date < end_date, f'start date {start_date} cannnot be later than end date {end_date}'\n assert win_size in [5, 20], f'Wrong look back days: {win_size}'\n assert mode in ['train', 'test', 'inference'], f'Type Error: {mode}'\n assert label in ['RET5', 'RET20'], f'Wrong Label: {label}'\n assert indicators is None or len(indicators)%2 == 0, 'Config Error, length of indicators should be even'\n if indicators:\n for i in range(len(indicators)//2):\n assert indicators[2*i].NAME in SUPPORTED_INDICATORS, f\"Error: Calculation of {indicators[2*i].NAME} is not defined\"\n \n ## Attributes of ImageDataSet\n if win_size == 5:\n self.image_size = (32, 15)\n self.extra_dates = datetime.timedelta(days=40)\n else:\n 
self.image_size = (64, 60)\n self.extra_dates = datetime.timedelta(days=40)\n \n self.start_date = start_date\n self.end_date = end_date \n self.mode = mode\n self.label = label\n self.indicators = indicators\n self.show_volume = show_volume\n self.parallel_num = parallel_num\n \n ## Load data from zipfile\n self.load_data()\n \n # Log info\n if indicators:\n ind_info = [(self.indicators[2*i].NAME, str(self.indicators[2*i+1].PARAM).split(' ')) for i in range(len(self.indicators)//2)]\n else:\n ind_info = []\n print(f\"DataSet Initialized\\n \\t - Mode: {self.mode.upper()}\\n \\t - Image Size: {self.image_size}\\n \\t - Time Period: {self.start_date} - {self.end_date}\\n \\t - Indicators: {ind_info}\\n \\t - Volume Shown: {self.show_volume}\")\n \n \n @_U.timer('Load Data', '8')\n def load_data(self):\n if 'data' not in os.listdir():\n print('Download Original Tabular Data')\n os.system(\"mkdir data && cd data && wget 'https://cloud.tsinghua.edu.cn/f/f0bc022b5a084626855f/?dl=1' -O tabularDf.zip\")\n \n if 'data' in os.listdir() and 'tabularDf.zip' not in os.listdir('data'):\n print('Download Original Tabular Data')\n os.system(\"cd data && wget 'https://cloud.tsinghua.edu.cn/f/f0bc022b5a084626855f/?dl=1' -O tabularDf.zip\")\n \n with ZipFile('data/tabularDf.zip', 'r') as z:\n f = z.open('tabularDf.csv')\n tabularDf = pd.read_csv(f, index_col=0)\n f.close()\n z.close()\n \n # add extra rows to make sure image of start date and returns of end date can be calculated\n padding_start_date = int(str(pd.to_datetime(str(self.start_date)) - self.extra_dates).split(' ')[0].replace('-', ''))\n paddint_end_date = int(str(pd.to_datetime(str(self.end_date)) + self.extra_dates).split(' ')[0].replace('-', ''))\n self.df = tabularDf.loc[(tabularDf['date'] > padding_start_date) & (tabularDf['date'] < paddint_end_date)].copy(deep=False)\n tabularDf = [] # clear memory\n \n self.df['ret5'] = np.zeros(self.df.shape[0])\n self.df['ret20'] = np.zeros(self.df.shape[0])\n self.df['ret5'] = (self.df['close'].pct_change(5)*100).shift(-5)\n self.df['ret20'] = (self.df['close'].pct_change(20)*100).shift(-20)\n \n self.df = self.df.loc[self.df['date'] <= self.end_date]\n \n \n def generate_images(self, sample_rate):\n dataset_all = Parallel(n_jobs=self.parallel_num)(delayed(single_symbol_image)(\\\n g[1], image_size = self.image_size,\\\n start_date = self.start_date,\\\n sample_rate = sample_rate,\\\n indicators = self.indicators,\\\n show_volume = self.show_volume, \\\n mode = self.mode\n ) for g in tqdm(self.df.groupby('code'), desc=f'Generating Images (sample rate: {sample_rate})'))\n \n if self.mode == 'train' or self.mode == 'test':\n image_set = []\n for symbol_data in dataset_all:\n image_set = image_set + symbol_data\n dataset_all = [] # clear memory\n \n if self.mode == 'train': # resample to handle imbalance\n image_set = pd.DataFrame(image_set, columns=['img', 'ret5', 'ret20'])\n image_set['index'] = image_set.index\n smote = SMOTE()\n if self.label == 'RET5':\n num0_before = image_set.loc[image_set['ret5'] == 0].shape[0]\n num1_before = image_set.loc[image_set['ret5'] == 1].shape[0]\n resample_index, _ = smote.fit_resample(image_set[['index', 'ret20']], image_set['ret5'])\n image_set = image_set[['img', 'ret5', 'ret20']].loc[resample_index['index']]\n num0 = image_set.loc[image_set['ret5'] == 0].shape[0]\n num1 = image_set.loc[image_set['ret5'] == 1].shape[0]\n image_set = image_set.values.tolist()\n \n else:\n num0_before = image_set.loc[image_set['ret20'] == 0].shape[0]\n num1_before = 
image_set.loc[image_set['ret20'] == 1].shape[0]\n resample_index, _ = smote.fit_resample(image_set[['index', 'ret5']], image_set['ret20'])\n image_set = image_set[['img', 'ret5', 'ret20']].loc[resample_index['index']]\n num0 = image_set.loc[image_set['ret20'] == 0].shape[0]\n num1 = image_set.loc[image_set['ret20'] == 1].shape[0]\n image_set = image_set.values.tolist()\n \n print(f\"LABEL: {self.label}\\n\\tBefore Resample: 0: {num0_before}/{num0_before+num1_before}, 1: {num1_before}/{num0_before+num1_before}\\n\\tResampled ImageSet: 0: {num0}/{num0+num1}, 1: {num1}/{num0+num1}\")\n \n \n return image_set\n \n else:\n return dataset_all","repo_name":"RichardS0268/CNN-for-Trading","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10810,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"18"} +{"seq_id":"70912301800","text":"#!/usr/bin/env python3\n\nimport fnmatch\nimport os\nimport re\nimport sys\n\n\ndef get_files():\n # Allow running from root directory and tools directory\n root_dir = \"..\"\n if os.path.exists(\"addons\"):\n root_dir = \".\"\n\n sqf_files = []\n\n for root, _, files in os.walk(root_dir):\n for file in fnmatch.filter(files, \"*.sqf\"):\n sqf_files.append(os.path.join(root, file))\n\n sqf_files.sort()\n\n return sqf_files\n\n\ndef filter_files(filepaths):\n filtered_files = []\n\n # Return only files that have a docblock\n for filepath in filepaths:\n with open(filepath, 'r') as file_contents:\n for line in file_contents:\n contents = line.strip()\n\n # A possible docblock starts\n if contents.startswith('/*'):\n # Find the `* Return Value:` comment\n lines = list(map(\n # Remove \\n from all the lines\n (lambda s: s.strip()), file_contents.readlines()\n ))\n\n return_value_comment_index = lines.index('* Return Value:')\n return_value_index = return_value_comment_index + 1\n\n # Drop the first two characters (e.g. 
`* `) so it returns the return type\n return_value = lines[return_value_index][2:]\n\n filtered_files.append([filepath, return_value])\n\n break\n\n return filtered_files\n\n\ndef get_last_line(filepath):\n with open(filepath, 'r') as file_contents:\n lines = file_contents.readlines()\n last_line = lines[-1].strip()\n\n # Handle multiple blank lines at the end of the file\n if last_line == \"\":\n i = -2\n\n while lines[i].strip() == \"\":\n i -= 1\n\n return lines[i].strip()\n return last_line\n\n\ndef check_last_character(filepath, return_value):\n last_line = get_last_line(filepath)\n last_line_character = last_line[-1]\n\n # If return type is None and the last line has a semicolon OR the last thing is just the nil keyword OR last thing is a closing bracket\n if return_value == 'None' and (last_line_character == ';' or last_line == 'nil' or last_line == '};'):\n return True\n elif return_value != 'None' and (last_line_character != ';' or last_line == '};'):\n return True\n else:\n return False\n\n\ndef get_expected_last_line(last_line, return_value):\n last_line_character = last_line[-1]\n\n if return_value == 'None':\n # If last character is a letter or a number\n if re.search(r'[A-Za-z0-9]', last_line_character):\n return '{};'.format(last_line)\n else:\n return 'nil'\n else:\n if last_line_character == ';':\n return last_line[:-1]\n\n return 'Unknown'\n\n\ndef main():\n print('Validating Return Types')\n print('-----------------------')\n\n bad_files = []\n\n files = get_files()\n filtered_files = filter_files(files)\n\n for file_details in filtered_files:\n filepath, return_value = file_details\n\n status = check_last_character(filepath, return_value)\n\n if not status:\n bad_files.append(\n [filepath, return_value, get_last_line(filepath)])\n\n error_count = len(bad_files)\n print('Found {} error(s)'.format(error_count))\n\n for bad_file in bad_files:\n filepath, return_value, last_line = bad_file\n\n expected_last_line = get_expected_last_line(last_line, return_value)\n\n print('\\nERROR: In file {}'.format(filepath))\n print('Incorrect return type, expected `{}`'.format(return_value))\n print('Found line `{}`'.format(last_line))\n print('Expected line `{}`'.format(expected_last_line))\n\n if error_count:\n print('\\nReturn Validation FAILED')\n else:\n print('\\nReturn Validation PASSED')\n\n return error_count\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"ArmaForces/Mods","sub_path":"tools/return_checker.py","file_name":"return_checker.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"4594964293","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 6 18:22:04 2011\n\n@author: -\n\"\"\"\nimport os\nimport numpy\nfrom matplotlib import pyplot\nfrom neuronpy.graphics import spikeplot\nfrom bulbspikes import *\nfrom neuronpy.util import spiketrain\nfrom params import sim_var\n\nhomedir = os.path.join(os.path.relpath('..'))\nanalysis_path = homedir\n\ndef format_axes(ax, dt=1, ylim=(0.,4.)):\n #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.))\n #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18)\n xlim = ax.get_xlim()\n timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.)\n ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5))\n ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int))\n ax.set_xlabel('lag (ms)')\n ax.set_ylim(ylim)\n ax.set_ylabel('Synchronization magnitude')\n\ndef 
draw_cell(cellid, ax, color='black'):\n xloc = 10+cellid*20\n # Lateral dends\n y = numpy.abs(numpy.subtract(range(101), xloc))\n yvec = numpy.log(numpy.add(y,1))\n ax.plot(range(101), yvec, color=color)\n # Soma\n ax.fill_between(range(101), numpy.ones(101), yvec, \\\n where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\\n color=color, linewidth=0.)\n # Glom\n ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color)\n ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25)\n ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2)\n # Primary dendrite\n ax.plot([xloc, xloc], [0,8], color=color, linewidth=2)\n format_schematic_axis(ax)\n \ndef draw_weights(cellids, ax, color='black',scale=1.):\n \"\"\"Draw granule cells\"\"\"\n import synweightsnapshot\n sws = synweightsnapshot.SynWeightSnapshot( \\\n nummit=sim_var['num_mitral'], \\\n numgran=sim_var['num_granule'])\n \n raw=sws.read_file(sim_var['wt_input_file'], \n os.path.join(homedir, sim_var['weight_dir']))\n sws.parse_data(raw)\n for cellid in cellids:\n wts = sws.m2g[cellid,:,0]\n wts = wts/numpy.max(wts)\n \n for i in range(len(wts)):\n if wts[i] > 0.0001:\n cellloc = 10+cellid*20\n y = numpy.abs(i - cellloc)\n yloc = numpy.log(numpy.add(y,1))\n gloc = -3.5+((i%2)*1.5)\n ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color)\n ax.plot([i,i],[yloc, gloc], color=color)\n ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color)\n format_schematic_axis(ax)\n \ndef format_schematic_axis(ax):\n ax.set_xlim((0,100))\n xticks = [10,30,50,70,90]\n ax.set_xticks(xticks)\n ax.set_xticklabels(numpy.multiply(xticks,10))\n ax.set_xlabel('distance in microns')\n ax.set_ylim((-5,11))\n ax.spines['left'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.set_yticks([])\n ax.spines['top'].set_color('none')\n ax.spines['bottom'].set_color('black')\n ax.xaxis.set_ticks_position('bottom')\n\n\ndef read_weightevents():\n M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt'))\n data = []\n for i in range(5):\n data.append([])\n for m in M:\n data[int(m[0])].append(m[1])\n return data\n\ndef read_delayevents():\n M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt'))\n data = []\n for i in range(5):\n data.append([])\n for m in M:\n data[int(m[0])].append(m[1])\n return data\n\n\ndef raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)):\n# pos1 = (10+pair[0]*20, cluster_width, 1, pair)\n# pos2 = (10+pair[1]*20, cluster_width, 1, pair)\n# stim_odor_mags = numpy.ones(5)*.55\n\n fig = pyplot.figure(figsize=(9.5,5.7))\n raster_ax = fig.add_axes([.1,.1,.8,.27])\n schematic_ax = fig.add_axes([.1,.85,.8,.1])\n syn_ax = fig.add_axes([.1,.45,.8,.225])\n\n draw_cell(pair[0], schematic_ax, color='red')\n draw_cell(pair[1], schematic_ax, color='blue')\n draw_weights(pair, schematic_ax, color='black')\n\n # Analyze an output file in some_dir\n bulb_spikes = BulbSpikes(sim_time=sim_var['tstop'])\n bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk'))\n breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt'))\n\n wts = read_weightevents()\n delays = read_delayevents()\n \n dt = 1\n tstop = xlim[1]\n x = numpy.arange(0,tstop,dt)\n y0 = numpy.zeros(tstop/dt)\n y1 = numpy.zeros(tstop/dt)\n EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\\n numpy.multiply(x,-1./20.))\n \n idx = 0\n for b in breath_events:\n if b >= 
tstop:\n break\n else:\n dtidx = int((b+delays[pair[0]][idx])/dt)\n y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx]\n dtidx = int((b+delays[pair[1]][idx])/dt)\n y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx]\n idx += 1\n redplt = syn_ax.plot(x,y0, color='red')\n blueplt = syn_ax.plot(x,y1, color='blue')\n for breath in breath_events:\n breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\\n color='gray', linewidth=2)\n syn_ax.set_xlim(xlim)\n syn_ax.set_ylim(0,1.6)\n syn_ax.set_yticks([])\n syn_ax.set_xticks([])\n syn_ax.set_ylabel('EPSC onto tuft')\n leg = syn_ax.legend([breathplt, redplt, blueplt], \\\n ['sniff event', 'input onto red', 'input onto blue'], \\\n bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\\n borderaxespad=0., handletextpad=.2)\n # Mark sniff interval\n for i in range(len(breath_events)):\n if breath_events[i] > xlim[0]:\n span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data',\n xytext=(breath_events[i+1], .28), \\\n textcoords='data', \\\n arrowprops=dict(arrowstyle=\"|-|\", linewidth=2)\n )\n \n syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\\n 'sniff every\\n150 - 250 ms', \\\n horizontalalignment='center', verticalalignment='top', \\\n backgroundcolor='white')\n break\n\n # Mark amplitude interval\n span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data',\n xytext=(1190, 1.12), \\\n textcoords='data', \\\n arrowprops=dict(arrowstyle=\"|-|\", linewidth=2)\n )\n\n syn_ax.text(1215, 1.21, \\\n '+/- 5%', \\\n horizontalalignment='left', verticalalignment='center')\n \n # Mark delay interval\n for i in range(len(breath_events)):\n if breath_events[i] > 1400:\n span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data',\n xytext=(breath_events[i]+17, .5), \\\n textcoords='data', \\\n arrowprops=dict(arrowstyle=\"|-|\", linewidth=2)\n )\n \n syn_ax.text(breath_events[i]+7.5, .28, \\\n 'delay 0-15 ms', \\\n horizontalalignment='center', verticalalignment='top', \\\n backgroundcolor='white')\n break\n \n\n spikes = bulb_spikes.get_mitral_spikes()\n ref=spikes[pair[0]]\n comp=spikes[pair[1]]\n gcspikes = bulb_spikes.get_granule_spikes()\n mididx = 10+pair[0]*20\n gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]\n mididx = 10+pair[1]*20\n gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]\n\n sp = spikeplot.SpikePlot(fig=fig, savefig=False)\n sp.set_markercolor('blue')\n sp.set_markeredgewidth(2.)\n sp.set_markerscale(4)\n sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\\n draw=False )\n sp.set_markercolor('red')\n sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\\n draw=False)\n sp.set_markerscale(1.3)\n\n sp.set_markeredgewidth(1.5)\n sp.set_markercolor('blue')\n sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\\n draw=False)\n sp.set_markercolor('red')\n sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\\n draw=False)\n\n coincidences, mask_a, mask_b, ratio = \\\n spiketrain.get_sync_traits(ref, comp, window=5)\n# idx = 0\n# for i in mask_a:\n# if i == 1:\n# raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red')\n# idx += 1\n idx = 0\n for i in mask_b:\n if i == 1:\n if comp[idx] >= xlim[0] and comp[idx] < xlim[1]:\n raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\\n color='purple', fontweight='bold', \\\n horizontalalignment='center', verticalalignment='center')\n #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue')\n idx += 
1\n\n raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\\n horizontalalignment='center', verticalalignment='center',\n fontsize=11)\n\n raster_ax.set_yticks([])\n ylim = (0.5, cluster_width*2+7.5)\n for breath in breath_events:\n raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2)\n\n sp.update_xlim(xlim)\n raster_ax.set_ylim(ylim)\n raster_ax.set_xlabel('time (ms)')\n raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center')\n\n pos = schematic_ax.get_position()\n schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, \n verticalalignment='baseline')\n pos = syn_ax.get_position()\n syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, \n verticalalignment='baseline') \n pos = raster_ax.get_position()\n raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, \n verticalalignment='baseline') \n\n# fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1], fi))\n fig.savefig(os.path.join(analysis_path, 'fig1.pdf'))\nraster()\n","repo_name":"JustasB/OlfactoryBulb","sub_path":"prev_ob_models/McTavish2012/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":10367,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"2769936388","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import ConstantKernel, RBF\n\n# Set dimension.\nd = 1\n# Number of training points.\nn = 100\n# Length of the training set.\nL = 2\n# Generate training features.\nx = np.linspace(start=0, stop=L, num=n)\nX = x.reshape(n, d)\n\nprint(X)\nprint(X[1])\nprint(x)\nprint(x[1])\n\nsigma_n = 0.4\n# Errors. #il s'agit de l'epsilon dans la formule y = f(x) +epsilon\nepsilon = np.random.normal(loc=0, scale=sigma_n, size=n)\nprint(\"epsilon\", epsilon)\n\n\n# Generate non-linear function.\ndef f(x):\n f = np.sin((4 * np.pi) * x) + np.sin((7 * np.pi) * x) + np.sin((3 * np.pi) * x)\n return f\n\n\nf_x = f(x)\n\n# Observed target variable.\ny = f_x + epsilon\n\nprint(\"prout\", y)\n\nn_star = n\nx_star = np.linspace(start=0, stop=(L + 0.5), num=n_star)\n\nX_star = x_star.reshape(n_star, d)\n############################################################################\n\n\n# Define kernel parameters. # ce sont les parametres principaux pour construire notre processus gaussien\nl = 0.1\nsigma_f = 2\n\n# Define kernel object. 
# il s'agit de la fonction de covariance: la fonction avec laquelle nous allons definir notre\n# processus gaussien\nkernel = ConstantKernel(constant_value=sigma_f, constant_value_bounds=(1e-2, 1e2)) \\\n * RBF(length_scale=l, length_scale_bounds=(1e-2, 1e2))\n\n# Define GaussianProcessRegressor object.\ngp = GaussianProcessRegressor(kernel=kernel, alpha=sigma_n ** 2, n_restarts_optimizer=10, )\n\n# Fit to data using Maximum Likelihood Estimation of the parameters.\ngp.fit(X, y)\n\ny_pred = gp.predict(X_star)\n\n#############################################################################\nfig, ax = plt.subplots(figsize=(15, 8))\n# Plot training data.\nsns.scatterplot(x=x, y=y, label='training data', ax=ax)\n# Plot \"true\" linear fit.\nsns.lineplot(\n x=x_star,\n y=f(x_star),\n color='red',\n label='f(x)',\n ax=ax\n)\n# Plot prediction\nsns.lineplot(x=x_star, y=y_pred, color='green', label='pred')\nax.set(title='Prediction & Credible Interval')\nax.legend(loc='lower left')\n\nplt.show()\n","repo_name":"AlySh4/STAGE-2A","sub_path":"OriginalTemplates/Algo GPR exemple.py","file_name":"Algo GPR exemple.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32244247648","text":"import csv\r\nimport os\r\nfrom enum import Enum\r\nfrom typing import List\r\n\r\nCSV_DELIMITER = ','\r\nDATA_DIR = \"data\"\r\nAUTH_DIR = \"auth\"\r\nDATA_EXTENSION = \"csv\" # DO NOT change the extension\r\n\r\n\r\nclass Category(Enum):\r\n BOARD = \"board\"\r\n CARD = \"card\"\r\n LIST = \"list\"\r\n LABEL = \"label\"\r\n\r\n def __str__(self) -> str:\r\n return str(self.value)\r\n\r\n\r\ndef write_to_db(category: Category, data: List[str]) -> None:\r\n with open(_get_file_path(category), \"a\", newline='') as f:\r\n csv_writer = csv.writer(f, delimiter=CSV_DELIMITER, quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n csv_writer.writerow(data)\r\n\r\n\r\ndef read_auth() -> List[str]:\r\n with open(os.path.join(AUTH_DIR, f\"{AUTH_DIR}.{DATA_EXTENSION}\")) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n for row in csv_reader:\r\n return [row[0], row[1], row[2]]\r\n return []\r\n\r\n\r\ndef get_list_id(board_name: str, list_name: str) -> str:\r\n with open(_get_file_path(Category.LIST)) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n for row in csv_reader:\r\n if row[2] == list_name and row[0] == get_board_id(board_name):\r\n return row[1]\r\n return ''\r\n\r\n\r\ndef _get_file_path(cat: Category) -> str:\r\n return os.path.join(DATA_DIR, f\"{cat}.{DATA_EXTENSION}\")\r\n\r\n\r\ndef get_board_id(board_name: str) -> str:\r\n with open(_get_file_path(Category.BOARD)) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n for row in csv_reader:\r\n if row[1] == board_name:\r\n return row[0]\r\n return ''\r\n\r\n\r\ndef get_board_name(board_id: str) -> str:\r\n with open(_get_file_path(Category.BOARD)) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n for row in csv_reader:\r\n if row[0] == board_id:\r\n return row[1]\r\n return ''\r\n\r\n\r\ndef get_card_id(board_name: str, list_name: str, card_name: str) -> str:\r\n with open(_get_file_path(Category.CARD)) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n for row in csv_reader:\r\n if (row[0] == get_board_id(board_name) and\r\n row[1] == get_list_id(board_name, list_name) and\r\n row[3] == card_name):\r\n return row[2]\r\n return ''\r\n\r\n\r\ndef get_label_id(board_name: str, color: str) -> 
str:\r\n with open(_get_file_path(Category.LABEL)) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n for row in csv_reader:\r\n if row[1] == get_board_id(board_name) and row[3] == color:\r\n return row[0]\r\n return ''\r\n\r\n\r\ndef get_all_boards() -> List[List[str]]:\r\n with open(_get_file_path(Category.BOARD)) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n return [board for board in csv_reader]\r\n\r\n\r\ndef get_all_lists() -> List[List[str]]:\r\n with open(_get_file_path(Category.LIST)) as f:\r\n csv_reader = csv.reader(f, delimiter=CSV_DELIMITER)\r\n return [list_ for list_ in csv_reader]\r\n\r\n\r\ndef confirmation(entity: Category, name: str) -> bool:\r\n answer = \"\"\r\n while answer not in [\"y\", \"n\"]:\r\n answer = input(f\"{entity} with name of {name} exists. Do you want to create duplicate? [Y/N]\").lower()\r\n return answer == \"y\"\r\n","repo_name":"erayozer17/trello_tool","sub_path":"trello_cli/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"28786053426","text":"import pandas as pd\nimport numpy as np\n\n\ndef table2fasta(table, file_out):\n file = open(file_out, 'w')\n for index, row in table.iterrows():\n file.write('>{0}\\n'.format(row['id']))\n file.write('{0}\\n'.format(row['seq']))\n file.close()\n print('Write finished')\n","repo_name":"kingstdio/BioUniprot","sub_path":"tools/commontools.py","file_name":"commontools.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"30872533260","text":"# Runtime: 80 ms, faster than 10.44% of Python3 online submissions for Roman to Integer.\n# Memory Usage: 14.2 MB, less than 60.38% of Python3 online submissions for Roman to Integer.\n# https://leetcode.com/submissions/detail/592111945/\n\nclass Solution:\n def romanToInt(self, s: str) -> int:\n roman = {\n 'I':1, \n 'V':5,\n 'X':10,\n 'L':50,\n 'C':100,\n 'D':500,\n 'M':1000,\n 'IV':4,\n 'IX':9,\n 'XL':40,\n 'XC':90,\n 'CD':400,\n 'CM':900\n }\n i = 0\n num = 0\n while i < len(s):\n if i+1 -> s_num_fc and num_bin_edges_fc\ndef num_fc(s_num, bin_num_fineclass = 20):\n '''\n params:\n - bin_num_fineclass: number of fineclass bins, by default 20. at least 2.\n Return the binned series and a numpy array of the bin edges\n Example:\n s_num_fc, num_bin_edges_fc = num_fc(df['Borrower_rate'])\n num_bin_edges_fc\n \n array([0.0274, 0.0294, 0.0314, 0.0333, 0.0334, 0.0381, 0.0496, 0.0582,\n 0.0724, 0.0754, 0.092 , 0.116 , 0.133 , 0.149 , 0.168 , 0.2 ,\n 0.243 , 0.32 ])\n '''\n s_num_fc = pd.Series(name = s_num.name)\n num_bin_edges_fc = []\n \n if len(s_num)>0:\n s_interval = pd.qcut(s_num, q=bin_num_fineclass, duplicates = 'drop')\n num_bins = s_interval.value_counts().shape[0] #sometimes there is a bin with 0 record, have to use value_counts to see it instead of unique\n print('num of bins is ',num_bins)\n s_interval_bin_number = pd.qcut(s_num, q=bin_num_fineclass, duplicates = 'drop', labels = range(num_bins)).astype(str)\n\n #Add number in front of the interval, convert interval to string for better compatibility with matplotlib in later functions\n s_label = s_interval.map(lambda x: str(round(x.left, 4)) + '< - <=' + str(round(x.right,4))).astype('str')\n s_num_fc = s_interval_bin_number.str.cat(s_label, sep='. 
')\n\n unique_intervals = s_interval.unique().sort_values()\n num_bin_edges_fc = np.append(unique_intervals.map(lambda x: x.left).astype(float).min(), unique_intervals.map(lambda x: x.right).astype(float))\n \n #avoid min or max missed out from data due to truncation to 4 digit float format\n num_bin_edges_fc[0] = np.minimum(np.min(s_num), num_bin_edges_fc[0])\n num_bin_edges_fc[-1] = np.maximum(np.max(s_num), num_bin_edges_fc[-1])\n \n return s_num_fc, num_bin_edges_fc\n\n# Fineclass for s_cat s_cat -> -> s_cat_cc and cat_bin_values_fc\ndef cat_fc(s_cat, missing_label = 'Missing', special_value_label_dict = {}):\n '''\n fineclass for categorical series\n Return the binned series and a numpy array of the bin values\n Example:\n s_cat_fc, cat_bin_values_fc = cat_fc(s_cat=(df['Days_in_arrears']))\n cat_bin_values_fc\n \n array(['Missing', '1-30 days', '>90 days', '61-90 days', '31-60 days'],\n dtype=object)\n '''\n s_cat_fc = s_cat.fillna(missing_label)\n # Replace special values with the labels \n s_cat_fc = s_cat_fc.replace(special_value_label_dict)\n \n cat_bin_values_fc = np.sort(s_cat_fc.unique())\n \n return s_cat_fc, cat_bin_values_fc\n\n# Stack s_num_fc and s_cat_fc\ndef combine_num_cat_fc(s_num_fc, s_cat_fc):\n s_fc = s_num_fc.append(s_cat_fc)\n return s_fc\n \n# WOE IV Stats\ndef bin_woe_iv(s, target, desc = 'Fineclass'):\n '''\n Pass the feature and target series in (both should have the same index)\n Return the dataframe of woe IV stats and plots\n params:\n desc: by default 'Fineclass', can set as Coarse class with customised description\n '''\n eval_df = pd.DataFrame({'feat': s, 'target': target})\n eval_df.sort_values(by='feat')\n eval_df['good'] = (eval_df['target']==0)\n eval_df['bad'] = (eval_df['target']==1)\n eval_df_summary = eval_df.groupby('feat')[['good', 'bad']].sum().rename(columns = {'good':'N_good', 'bad':'N_bad'}).reset_index()\n #get the numeric order of num part so 10. 
is not ranked before 2.\n eval_df_summary['feat_num_index'] = pd.to_numeric(eval_df_summary['feat'].map(lambda x: x.split('.')[0]),errors='coerce')\n eval_df_summary.sort_values(by=['feat_num_index', 'feat'], inplace = True)\n eval_df_summary = eval_df_summary.reset_index(drop=True)\n eval_df_summary['N_count'] = eval_df_summary['N_good'] + eval_df_summary['N_bad']\n num_good_total = eval_df_summary['N_good'].sum()\n num_bad_total = eval_df_summary['N_bad'].sum()\n\n eval_df_summary['dist_good']=eval_df_summary['N_good']/num_good_total\n eval_df_summary['dist_bad']=eval_df_summary['N_bad']/num_bad_total\n eval_df_summary['bin_count_perc'] = eval_df_summary['N_count']/(num_good_total + num_bad_total)\n eval_df_summary['woe'] = np.log(eval_df_summary['dist_good']/eval_df_summary['dist_bad']).replace(np.inf, np.nan)\n eval_df_summary['logodds'] = np.log(eval_df_summary['N_good']/eval_df_summary['N_bad']).replace(np.inf, np.nan)\n eval_df_summary['p_bad'] = (eval_df_summary['N_bad']/eval_df_summary['N_count']).replace(np.inf, np.nan)\n eval_df_summary['iv'] = (eval_df_summary['dist_good']-eval_df_summary['dist_bad'])*eval_df_summary['woe']\n \n s_total = eval_df_summary.sum().drop(['feat_num_index', 'woe', 'logodds', 'p_bad'])\n s_total.iloc[0] = 'Total'\n \n eval_df_summary = eval_df_summary.append(s_total, ignore_index = True)\n eval_df_summary['var'] = s.name\n eval_df_summary['desc'] = desc\n \n print(desc)\n print('IV is ' + str(eval_df_summary['iv'].sum()))\n# if eval_df_summary['N_count'].sum()==((target==0)|(target==1)).sum():\n# print('total number of rows match')\n# else:\n# print('total number of rows DOES NOT match')\n \n eval_df_summary_forplot = eval_df_summary.loc[eval_df_summary['feat']!='Total']\n plt.plot(eval_df_summary_forplot['woe'], label='woe', marker = '.')\n plt.title(s.name + ' woe')\n plt.xticks(ticks = eval_df_summary_forplot.index.values, labels=eval_df_summary_forplot['feat'].values)\n plt.xticks(rotation=60)\n plt.legend(loc='upper left')\n plt.twinx()\n plt.bar(eval_df_summary_forplot.index, eval_df_summary_forplot['bin_count_perc'], alpha=0.1, label='bin_vol%')\n plt.legend(loc='upper right')\n plt.show()\n return eval_df_summary\n \n# Define coarseclass for s_num 1) s_num and num_bin_edges_fc -> -> num_bin_edges_cc -> -> num_bin_edges_cc \ndef num_cc_edges_nonzerobad(s_num, num_bin_edges_fc, eval_df_summary_fc):\n '''\n if there is any bin with 0 number of bads, combine it with the next non-zero bad bin\n Return a numpy array of the bin edges\n params: \n num_bin_edges_fc: the pandas series of target\n eval_df_summary_fc: the output dataframe of 20 bin fineclass\n Example:\n num_bin_edges_cc = num_cc_edges_nonzerobad(s_num, num_bin_edges_fc, eval_df_summary_fc)\n num_bin_edges_cc\n \n array([0.0274, 0.0724, 0.092 , 0.133 , 0.149 , 0.168 , 0.2 , 0.243 ,\n 0.32 ])\n '''\n # for the non 0 N_bad bins for numeric bins, get an array of the max in the intervals (min <- <= max) \n nonzero_bad_bin_edge_max = pd.to_numeric(eval_df_summary_fc.loc[(eval_df_summary_fc['N_bad']!=0) & (eval_df_summary_fc['feat_num_index'].notnull())]['feat']\\\n .map(lambda x: x.split('<=')[-1])).dropna().astype(float)\n # add min in front of it to make it friendly for pd.qcut\n num_bin_edges_cc = np.append(num_bin_edges_fc.min(), nonzero_bad_bin_edge_max)\n # In case the highest value bins have 0 bad, add the max to the end to make it friendly for pd.qcut\n if num_bin_edges_cc.max()=0]\n\n #comparing the woe_diff_abs of group backwards or forwards to drop min or max of the bin edges\n #if 
by default group forward (drop max), but if forward is missing (last row), or woe diff abs backward < forward, group backward (drop min)\n non_monoto_bins['bin_edges_index_to_drop'] = np.where(((non_monoto_bins['woe_diff_abs_forward'].isnull()) | \\\n (non_monoto_bins['woe_diff_abs_backward']=2, 'group at least two bins at a time'\n \n s_cat_cc[np.isin(s_cat_cc, cat_bin_values[group])] = ', '.join(cat_bin_values[group])\n \n cat_bin_values_cc = np.sort(s_cat_cc.unique())\n print('output bin values ', cat_bin_values_cc)\n \n return s_cat_cc, cat_bin_values_cc\n\ndef cc_cat_zerobad_lowvolwoesmooth(cat_bin_values, eval_df_summary, min_bin_count=20, smoothing=10, min_n_bad = 0.01, desc = '1. Coarseclass smoothe 0 bad bin woe'):\n '''\n Example:\n eval_df_summary_cc = cc_cat_zerobad_lowvolwoesmooth(cat_bin_values = cat_bin_values_fc, \\\n eval_df_summary = eval_df_summary_fc, min_bin_count=20, smoothing=10, min_n_bad = 0.01)\n eval_df_summary_cc\n '''\n #Assign 0.1 bad to zero bad bin\n eval_df_summary.loc[((eval_df_summary['N_bad']==0) & (eval_df_summary['feat_num_index'].isnull())), 'N_bad'] = min_n_bad\n \n #Recalculate woe iv\n eval_df_summary['N_count'] = eval_df_summary['N_good'] + eval_df_summary['N_bad']\n num_good_total = eval_df_summary['N_good'].sum()\n num_bad_total = eval_df_summary['N_bad'].sum()\n\n eval_df_summary['dist_good']=eval_df_summary['N_good']/num_good_total\n eval_df_summary['dist_bad']=eval_df_summary['N_bad']/num_bad_total\n eval_df_summary['bin_count_perc'] = eval_df_summary['N_count']/(num_good_total + num_bad_total)\n eval_df_summary['woe'] = np.log(eval_df_summary['dist_good']/eval_df_summary['dist_bad']).replace(np.inf, np.nan)\n \n #Smooth woe for low volume bins (at least 20 observations to be meaningful)\n eval_df_summary['smoothing_factor'] = 1 / (1 + np.exp(-(eval_df_summary['N_count'] - min_bin_count) / smoothing)) \n eval_df_summary['woe'] = eval_df_summary['smoothing_factor'] * eval_df_summary['woe']\n \n eval_df_summary['logodds'] = np.log(eval_df_summary['N_good']/eval_df_summary['N_bad']).replace(np.inf, np.nan)\n eval_df_summary['p_bad'] = (eval_df_summary['N_bad']/eval_df_summary['N_count']).replace(np.inf, np.nan)\n eval_df_summary['iv'] = (eval_df_summary['dist_good']-eval_df_summary['dist_bad'])*eval_df_summary['woe']\n \n var_name = eval_df_summary['var'].unique()[0]\n s_total = eval_df_summary.sum().drop(['feat_num_index', 'woe', 'logodds', 'p_bad', 'var'])\n s_total.iloc[0] = 'Total'\n \n eval_df_summary = eval_df_summary.append(s_total, ignore_index = True)\n \n eval_df_summary['var'] = var_name\n \n eval_df_summary['desc'] = desc\n \n return eval_df_summary\n\ndef cc_to_woe(s_cc, eval_df_summary_cc):\n '''\n Input post coarse classing series and the woe summary data frame\n Return the woe series\n Example:\n s_cc_woe = cc_to_woe(s_cc, eval_df_summary_cc)\n s_cc_woe\n \n {'0. 0.0274< - <=0.0724': 1.7180816231078904, '1. 0.0724< - <=0.133': 0.32514271926933125, \n '2. 0.133< - <=0.168': -0.591148012604824, '3. 0.168< - <=0.2': -0.6601408840917752, \n '4. 0.2< - <=0.243': -1.3532880646517205, '5. 0.243< - <=0.32': -1.7587531727598849}\n 0 1.718082\n 1 0.325143\n 2 1.718082\n 3 1.718082\n 4 1.718082\n ... 
\n 901 -1.353288\n 902 -1.353288\n 903 1.718082\n 904 -0.660141\n 905 -1.353288\n Name: Borrower_rate_woe, Length: 906, dtype: float64\n '''\n woe_dict = dict(zip(eval_df_summary_cc['feat'], eval_df_summary_cc['woe']))\n print('woe encoding', woe_dict)\n s_cc_woe = s_cc.replace(woe_dict)\n s_cc_woe.name = s_cc.name + '_woe'\n return s_cc_woe, woe_dict","repo_name":"cxl923cc/logistic_regression","sub_path":"logreg_binning.py","file_name":"logreg_binning.py","file_ext":"py","file_size_in_byte":18000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"36497506088","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torchvision.models import squeezenet1_1\nimport torch.nn.functional as F\n\nimport math\n\ndef default_conv(in_channelss, out_channels, kernel_size, bias=True):\n return nn.Conv2d(\n in_channelss, out_channels, kernel_size,\n padding=(kernel_size // 2), bias=bias)\n\nclass ResBlock(nn.Module):\n def __init__(\n self, conv, n_feat, kernel_size,\n bias=True, act=nn.ReLU(True), res_scale=1):\n super().__init__()\n\n modules_body = []\n for i in range(2):\n modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n if i == 0: modules_body.append(act)\n\n self.body = nn.Sequential(*modules_body)\n self.res_scale = res_scale\n\n def forward(self, x):\n res = self.body(x).mul(self.res_scale)\n res += x\n\n return res\n\nclass Upsampler(nn.Sequential):\n def __init__(self, conv, scale, n_feat, act=False, bias=True):\n\n modules = []\n modules.append(conv(n_feat, scale**2 * n_feat, 3, bias))\n modules.append(nn.PixelShuffle(scale))\n\n super().__init__(*modules)\n\nclass EDSR(nn.Module):\n def __init__(self, args, conv=default_conv):\n super().__init__()\n\n self.upscale = args.upscale\n\n n_resblock = args.n_blocks\n n_feats = args.n_feats\n kernel_size = 3 \n n_colors = 3\n act = nn.ReLU(True)\n\n # define head module\n modules_head = [conv(n_colors, n_feats, kernel_size)]\n # define body module\n modules_body = [\n ResBlock(\n conv, n_feats, kernel_size, act=act, res_scale=1) \\\n for _ in range(n_resblock)]\n modules_body.append(conv(n_feats, n_feats, kernel_size))\n # define tail module\n modules_tail = [\n Upsampler(conv, self.upscale[0], n_feats, act=False),\n conv(n_feats, n_colors, kernel_size)]\n\n self.head = nn.Sequential(*modules_head)\n self.body = nn.Sequential(*modules_body)\n self.tail = nn.Sequential(*modules_tail)\n\n if args.init_weight:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal(m.weight)\n\n def forward(self, x):\n SR = x\n output = []\n for _ in self.upscale:\n SR = self.head(SR)\n res = self.body(SR)\n res += SR\n SR = self.tail(res)\n output.append(SR)\n\n return output\n\n\n\n \n\n\n","repo_name":"colorjam/SR","sub_path":"code/model/edsr.py","file_name":"edsr.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25633222948","text":"# A great python progress bar module\n# See https://github.com/tqdm/tqdm for more information\n\nfrom tqdm import tqdm\nfrom tqdm import trange\nfrom time import sleep\n\nfor i in tqdm(range(10), desc= \"Text You want\"):\n\tsleep(0.1)\n\nfor i in trange(10):\n\tsleep(0.1)\n\npbar = tqdm([\"a\", \"b\", \"c\", \"d\"])\nfor char in pbar:\n sleep(0.25)\n pbar.set_description(\"Processing %s\" % char)\n\nwith tqdm(total=100) as pbar:\n for i in range(10):\n sleep(0.1)\n pbar.update(10)\n\npbar = 
tqdm(initial=900, total=1000)\nfor i in range(10):\n sleep(0.1)\n pbar.update(10)\npbar.close()\n\n# tqdm can also be used in a command line or a shell script directly, see the GitHub link on the top.\n\n\n","repo_name":"NoNo721/Python-Examples","sub_path":"tqdm_progressbar.py","file_name":"tqdm_progressbar.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"11073191665","text":"import json\nimport math\nfrom typing import List\n\nfrom requests import Response\n\nfrom crawl_utils.common_utils import delete_empty_value\nfrom crawl_utils.crawl_request import ConfigurableCrawlRequest\n\n####################\n# https://ebank.pingan.com.cn/aum/common/sales_list/index.html?initPage=true\n####################\nfrom crawl_utils.db_utils import getLocalDate\nfrom crawl_utils.logging_utils import get_logger\nfrom 平安银行_完成.payh_config import PC_REQUESTS_ITER, FIELD_VALUE_MAPPING, MASK, PC_METHOD\n\nlogger = get_logger(name=__name__)\n\n\nclass PayhPCCrawlRequest(ConfigurableCrawlRequest):\n\n def _row_processor(self, row: dict) -> dict:\n return row\n\n def __init__(self):\n super().__init__(name='平安银行PC端')\n self.request_iter_index = None\n self.page_no = None\n self.total_page = None\n self.check_props = ['logId', 'cpbm', 'bank']\n\n def _pre_crawl(self):\n self.candidate_check_props['cpbm'] = 'cpmc'\n logger.info(f\"准备爬取{self.name}的数据\")\n if self.crawl_config.state == 'DEV':\n self.total_page = 1\n self.mask = MASK\n self.check_props = ['logId', 'cpbm', 'bank']\n self.request_iter_index = 0\n self.field_value_mapping = FIELD_VALUE_MAPPING\n\n def _config_params(self):\n if self.page_no is None:\n self.page_no = 1\n else:\n self.page_no += 1\n self.field_name_2_new_field_name = PC_REQUESTS_ITER[self.request_iter_index]['field_name_2_new_field_name']\n for k, v in PC_REQUESTS_ITER[self.request_iter_index]['request'].items():\n if not hasattr(v, '__call__'):\n self.request[k] = v\n # 设置data参数\n self.request['data'] = PC_REQUESTS_ITER[self.request_iter_index]['request']['data'](self.page_no)\n # 设置method参数\n self.request['method'] = PC_METHOD\n\n def _parse_response(self, response: Response) -> List[dict]:\n resp_str = response.text.encode(response.encoding).decode('utf-8', errors='ignore') \\\n if response.encoding else response.text\n loads = json.loads(resp_str)\n rows = loads['data']['superviseProductInfoList']\n if self.total_page is None:\n self.total_page = math.ceil(int(float(loads['data']['totalSize'])) / len(rows))\n return rows\n\n def _config_end_flag(self):\n if self.page_no is not None and self.total_page is not None and self.page_no == self.total_page:\n if self.request_iter_index + 1 == len(PC_REQUESTS_ITER):\n self.end_flag = True\n else:\n self.request_iter_index += 1\n self.page_no = None\n if self.crawl_config.state == 'DEV':\n self.total_page = 1\n else:\n self.total_page = None\n\n def _row_post_processor(self, row: dict):\n row['logId'] = self.log_id\n row['createTime'] = getLocalDate()\n row['mark'] = 'PC'\n row['ywfl'] = PC_REQUESTS_ITER[self.request_iter_index]['title']\n row['bank'] = '平安银行'\n delete_empty_value(row)\n return row\n\n\nif __name__ == '__main__':\n PayhPCCrawlRequest().init_props(log_id=1).do_crawl()","repo_name":"wuyiping2019/crawl_finance_product","sub_path":"平安银行_完成/payh_pc.py","file_name":"payh_pc.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"604884428","text":"from geopy import Nominatim\nregion = [\"Dakar\",\n \"Thies\", \n \"Diourbel\", \n \"Kaolack\", \n \"Fatick\", \n \"Kaffrine\", \n \"Louga\", \n \"Matam\", \n \"Tambacounda\", \n \"Kédougou\", \n \"Kolda\", \n \"Ziguinchor\", \n \"Sédhiou\",\n \"Pikine\",\n \"Guediawaye\", \n \"Rufisque\", \n \"Keur Massar\",\n \"Mbour\", \n \"Tivaoune\",\n \"Bambey\", \n \"Mbacké\",\n \"Foundiougne\", \n \"Gossas\",\n \"Guingueneo\", \n \"Nioro Du Rip\",\n \"Birkelane\", \n \"Malem Hodar\", \n \"Koungheul\",\n \"Kébémer\",\n \"Dagana\", \n \"Podor\",\n \"Kanel\", \n \"Ranerou\",\n \"Koumpentoum\", \n \"Goudiry\", \n \"Bakel\",\n \"Sareya\", \n \"Salemata\",\n \"Vélingara\", \n \"Medina Yoro Foula\",\n \"Oussouye\", \n \"Bignona\",\n \"Goudomp\", \n \"Bounkiling\"\n ]\n\nCoord = []\ngeolocator = Nominatim(user_agent=\"Microsoft Edge\")\nfor i in region:\n location = geolocator.geocode(i)\n Coord += [[location.latitude, location.longitude]]\n print(i, \" a pour coordonnées : \", location.latitude, \",\", location.longitude)\n print()\n print(\"***************************************************************************\")","repo_name":"mouhamedsylla/python_exercise","sub_path":"region_coordonnees.py","file_name":"region_coordonnees.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37551386216","text":"from math import sqrt\nfrom datetime import datetime\n\nA = int(input(\"Ener a value: \"))\n\nstartTime = datetime.now()\nminimum = A+1\n\nfor factor1 in range(1,int(sqrt(A))):\n factor2 = A/factor1\n if (factor2%1) == 0:\n if (factor2 + factor1) < minimum:\n minimum = int(factor2 + factor1)\n final_factor1 = int(factor1)\n final_factor2 = int(factor2)\nprint(\"Code time:\", datetime.now() - startTime)\nprint (A, \"=>\", minimum)\n","repo_name":"marc-p-greenfield/daily_challenges","sub_path":"integer_complexity_1.py","file_name":"integer_complexity_1.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20181636576","text":"from rest_framework import serializers\nfrom rest_framework.authtoken.models import Token\n\nfrom users.models import User\n\n\nclass UserSobrietySerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\n 'id',\n 'sobriety_date',\n ]\n\n\nclass AppUserLoginSerializer(serializers.ModelSerializer):\n social_type_name = serializers.CharField(source='get_social_type_display', read_only=True)\n\n class Meta:\n model = User\n fields = [\n 'id',\n 'social_id',\n 'social_type',\n 'social_type_name',\n ]\n\n def create(self, validated_data):\n social_id = validated_data.get('social_id')\n social_type = validated_data.get('social_type')\n\n user_details = User.objects.filter(social_id=social_id, social_type=social_type)\n if user_details.exists():\n return user_details\n return User.objects.create_user(social_id=social_id, social_type=social_type)\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['token'] = str(Token.objects.update_or_create(user=instance)[0])\n return representation\n","repo_name":"cupid-22/sober_root","sub_path":"soberapp/users/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70460334441","text":"'''\nCOMP9321 2019 
Term 1 Assignment Two Code Template\nStudent Name: Taiyan Zhu\nStudent ID: z5089986\n'''\n\nimport json\n\n\nfrom flask import Flask\nfrom flask import request\nfrom flask_restplus import Resource, Api\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_restplus import fields\nfrom datetime import datetime\nimport urllib.request\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\napi = Api(app,\n default = \"Assignment2\", # Default namespace\n title = \"Worldbank Dataset\", # Documentation Title\n description = \"This is Assignment 2 implementation.\" # Documentation Description\n )\n\n\n# The following is the schema of Book\nclass WBmodel(db.Model):\n collection_id = db.Column(db.Integer, primary_key=True)\n indicator = db.Column(db.String(80), unique=True, nullable=False)\n indicator_value = db.Column(db.String(120), nullable=False)\n creation_time = db.Column(db.DateTime, nullable=False)\n entries = db.Column(db.JSON, nullable=True)\n\n def __repr__(self):\n return '' % self.indicator\n\ndef create_db(db_file):\n if(db_file != ''):\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_file\n\n db.drop_all()\n db.create_all()\n\n urlbyte = urllib.request.urlopen('http://api.worldbank.org/v2/indicators')\n xml_str = urlbyte.read().decode('utf-8')\n output = open('indicators.xml', 'w')\n output.write(xml_str)\n output.close()\n\n DOMTree = xml.dom.minidom.parse(\"indicators.xml\")\n collection = DOMTree.documentElement\n\n tags = collection.getElementsByTagName(\"wb:indicator\")\n\n output_indicators = open('indicators', 'w')\n\n for e in tags: output_indicators.write(e.getAttribute(\"id\")+'\\n')\n\n output_indicators.close()\n\n pass\n\n\nwmodel = api.model(\"WorldBank\", {\n \"indicator_id\": fields.String\n})\n\n@api.route(\"/ass\")\nclass WorldBankCollection(Resource):\n\n @api.response(201, \"created\")\n @api.response(200, \"OK\")\n @api.response(404, \"error\")\n @api.expect(wmodel, validate = True)\n\n def post(self):\n input = json.loads(request.data)\n indicator = input[\"indicator_id\"]\n data = WBmodel.query.filter(WBmodel.indicator == indicator).first()\n\n if(data): return {\"location\": \"/ass/\" + str(data.indicator)}\n\n url = \"http://api.worldbank.org/v2/countries/all/indicators/\" + indicator + \"?date=2013:2018&format=json&per_page=2000\"\n response = urllib.request.urlopen(url)\n data = json.loads(response.read())\n\n if(len(data) < 2):\n return {\"message\": \"Wrong Indicator, please double check.\"}, 404\n\n time = datetime.now()\n current_time = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n entries = []\n\n for e in data[1]:\n entrie={}\n entrie[\"country\"] = e[\"country\"][\"value\"]\n entrie[\"date\"] = e[\"date\"]\n entrie[\"value\"] = e[\"value\"]\n entries.append(entrie)\n\n collection = WBmodel(indicator=str(indicator), indicator_value=\"GDP (current US$)\", creation_time=time, entries= entries)\n\n db.session.add(collection)\n db.session.commit()\n\n res = {}\n res[\"location\"] = \"/ass/\" + str(collection.collection_id)\n res[\"collection_id\"] = str(collection.collection_id)\n res[\"creation_time\"] = str(current_time)\n res[\"indicator\"] = str(indicator)\n return res, 201\n\n @api.response(200, \"OK\")\n @api.response(404, \"error\")\n def get(self):\n data = WBmodel.query.all()\n if(len(data)<1): return {\"message\": \"There is no collection\"}, 404\n res = []\n for e in data:\n wb 
= {\n \"location\": \"/ass/\" + str(e.collection_id),\n \"collection_id\": str(e.collection_id),\n \"creation_time\": e.creation_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n \"indicator\": e.indicator\n }\n res.append(wb)\n return res, 200\n\n\n\n\n@api.route(\"/ass/<collection_id>\")\n@api.param(\"collection_id\", \"The collection_id\")\nclass WorldBank(Resource):\n @api.response(200, \"OK\")\n @api.response(404, \"The collection_id cannot be found\")\n #@api.doc(description=\"Q2 delete & Q4 - Retrieve a collection\")\n\n def get(self, collection_id):\n data = WBmodel.query.filter(WBmodel.collection_id == collection_id).first()\n if(not data): return {\"message\": \"There is no collection\"}, 404\n wb = {\n \"collection_id\" : collection_id,\n \"indicator\": str(data.indicator),\n \"indicator_value\": str(data.indicator_value),\n \"creation_time\" : data.creation_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n \"entries\" : data.entries\n }\n return wb, 200\n\n @api.response(200, \"OK\")\n @api.response(404, \"error\")\n def delete(self, collection_id):\n data = WBmodel.query.filter(WBmodel.collection_id == collection_id).first()\n if(data):\n db.session.delete(data)\n db.session.commit()\n return { \"message\": \"Collection = {} is removed from the database!\".format(collection_id)}, 200\n return {\"message\": \"Wrong Indicator, please double check.\"}, 404\n\n\n\n@api.route(\"/ass/<collection_id>/<year>/<country>\")\nclass Query_Year_Country(Resource):\n @api.response(200, \"OK\")\n @api.response(404, \"error\")\n def get(self, collection_id, year, country):\n\n data = WBmodel.query.filter(WBmodel.collection_id == collection_id).all()\n if( len(data) < 1): return {\"message\": \"Wrong Indicator, please double check.\"}, 404\n\n\n for indicator in data:\n for entry in indicator.entries: # collect data by requirement\n if entry[\"country\"] == country and entry[\"date\"] == year:\n output = {\n \"collection_id\": collection_id,\n \"indicator\": indicator.indicator,\n \"country\": country,\n \"year\":entry[\"date\"],\n \"value\":entry[\"value\"]\n }\n return output, 200\n\n\n return {\"message\":\"No information for {0} in {1}\".format(country,year)}, 404\n\n\n\n@api.route(\"/ass/<collection_id>/<year>\")\n@api.param(\"q\", \"Query\")\nclass Query_Year(Resource):\n\n @api.response(200, \"OK\")\n @api.response(404, \"error\")\n def get(self, collection_id, year):\n\n query = request.args.get(\"q\")\n\n data = WBmodel.query.filter(WBmodel.collection_id == collection_id).first()\n if(not data): return {\"message\": \"ID cannot be found in the collection\"}, 404\n res = []\n for entry in data.entries:\n if entry[\"date\"] == year :\n res.append(entry)\n\n if query:\n res = list(filter(lambda x: x[\"value\"] != None, res))\n\n if \"top\" in str(query).lower():\n try:\n N = int(query[3:])\n except:\n return {\"message\": \"The N should be an integer.\"}, 404\n res = sorted(res, key = lambda x: float(x[\"value\"]), reverse = True)[:N]\n\n elif \"bottom\" in str(query).lower():\n try:\n N = int(query[6:])\n except:\n return {\"message\": \"The N should be an integer.\"}, 404\n res = sorted(res, key = lambda x: float(x[\"value\"]), reverse = False)[:N]\n else:\n return {\"message\": \"wrong query.\"}, 404\n\n if N > 100:\n return {\"message\": \"N should be in range between 1 and 100.\"}, 404\n\n\n\n return {\n \"indicator\": data.indicator,\n \"indicator_value\": data.indicator_value,\n \"entries\" : res\n }, 200\n\n# if __name__ == '__main__':\n# create_db('data.db')\n# 
app.run()\n","repo_name":"zyjwarlock/CS9321_ASS2_Data-Service-for-World-Bank-Economic-Indicators","sub_path":"a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3626779327","text":"m, n = map(int, input().split())\r\narr = [list(map(int, input().split())) for _ in range(n)]\r\nq = [0] * (m * n)\r\nfirst = -1\r\nrear = -1\r\nday = 0\r\nfor i in range(n):\r\n for j in range(m):\r\n if arr[i][j] == 1:\r\n rear += 1\r\n q[rear] = (day, i, j)\r\nwhile first != rear:\r\n first += 1\r\n day, i, j = q[first]\r\n for di, dj in [[0, 1], [1, 0], [0, -1], [-1, 0]]:\r\n ni = di + i\r\n nj = dj + j\r\n if 0 <= ni < n and 0 <= nj < m and arr[ni][nj] == 0:\r\n arr[ni][nj] = 1\r\n rear += 1\r\n q[rear] = (day+1, ni, nj)\r\nfor i in range(n):\r\n for j in range(m):\r\n if arr[i][j] == 0:\r\n day = -1\r\n break\r\n if day == -1:\r\n break\r\nprint(day)","repo_name":"OctoHun/Baekjoon","sub_path":"백준/Gold/7576. 토마토/토마토.py","file_name":"토마토.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1876969291","text":"\"\"\"a=int(input())\nfor i in range (1,10,2):\n c=a*i\n print(c)\nelse:\n print(\"loop finished\")\n\nitem = [ [\"talib\", 1],[\"python\", 2], [\"saquib\", 3] ]\nfor name,number in item:\n print(number) \"\"\"\n\nitems = [\"king\", \"talib\", \"saquib\", 4,6,34,654,23,56]\nfor num in items:\n if str(num).isnumeric() and num>6:\n print(num)","repo_name":"Mohdtalibakhtar/Python-codes","sub_path":"for.py","file_name":"for.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11750089744","text":"# $Id: update_msg.py 40425 2011-09-21 13:16:42Z campbellbarton $\n# ***** BEGIN GPL LICENSE BLOCK *****\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ***** END GPL LICENSE BLOCK *****\n\n# \n\n# Write out messages.txt from blender\n\n# Execite:\n# blender --background --python po/update_msg.py\n\nimport os\n\nCURRENT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.normpath(os.path.abspath(os.path.join(CURRENT_DIR, \"..\")))\n\nFILE_NAME_MESSAGES = os.path.join(CURRENT_DIR, \"messages.txt\")\n\n\ndef dump_messages_rna(messages):\n import bpy\n\n # -------------------------------------------------------------------------\n # Function definitions\n\n def walkProperties(properties):\n import bpy\n for prop in properties:\n messages.add(prop.name)\n messages.add(prop.description)\n\n if isinstance(prop, bpy.types.EnumProperty):\n for item in prop.enum_items:\n messages.add(item.name)\n messages.add(item.description)\n\n def walkRNA(bl_rna):\n if bl_rna.name and bl_rna.name != bl_rna.identifier:\n messages.add(bl_rna.name)\n\n if bl_rna.description:\n messages.add(bl_rna.description)\n\n walkProperties(bl_rna.properties)\n\n def walkClass(cls):\n walkRNA(cls.bl_rna)\n\n def walk_keymap_hierarchy(hier):\n for lvl in hier:\n messages.add(lvl[0])\n\n if lvl[3]:\n walk_keymap_hierarchy(lvl[3])\n\n # -------------------------------------------------------------------------\n # Dump Messages\n\n for cls in type(bpy.context).__base__.__subclasses__():\n walkClass(cls)\n\n for cls in bpy.types.Space.__subclasses__():\n walkClass(cls)\n\n for cls in bpy.types.Operator.__subclasses__():\n walkClass(cls)\n\n from bl_ui.space_userpref_keymap import KM_HIERARCHY\n\n walk_keymap_hierarchy(KM_HIERARCHY)\n\n\n ## XXX. 
what is this supposed to do, we wrote the file already???\n #_walkClass(bpy.types.SpaceDopeSheetEditor)\n\n\ndef dump_messages_pytext(messages):\n \"\"\" dumps text inlined in the python user interface: eg.\n\n layout.prop(\"someprop\", text=\"My Name\")\n \"\"\"\n import ast\n\n # -------------------------------------------------------------------------\n # Gather function names\n\n import bpy\n # key: func_id\n # val: [(arg_kw, arg_pos), (arg_kw, arg_pos), ...]\n func_translate_args = {}\n\n # so far only 'text' keywords, but we may want others translated later\n translate_kw = (\"text\", )\n\n for func_id, func in bpy.types.UILayout.bl_rna.functions.items():\n # check it has a 'text' argument\n for (arg_pos, (arg_kw, arg)) in enumerate(func.parameters.items()):\n if ((arg_kw in translate_kw) and\n (arg.is_output == False) and\n (arg.type == 'STRING')):\n\n func_translate_args.setdefault(func_id, []).append((arg_kw,\n arg_pos))\n # print(func_translate_args)\n\n # -------------------------------------------------------------------------\n # Function definitions\n\n def extract_strings(fp, node_container):\n \"\"\" Recursively get strings, needed incase we have \"Blah\" + \"Blah\",\n passed as an argument in that case it wont evaluate to a string.\n \"\"\"\n for node in ast.walk(node_container):\n if type(node) == ast.Str:\n eval_str = ast.literal_eval(node)\n if eval_str:\n # print(\"%s:%d: %s\" % (fp, node.lineno, eval_str)) # testing\n messages.add(eval_str)\n\n def extract_strings_from_file(fn):\n filedata = open(fn, 'r', encoding=\"utf8\")\n root_node = ast.parse(filedata.read(), fn, 'exec')\n filedata.close()\n\n for node in ast.walk(root_node):\n if type(node) == ast.Call:\n # print(\"found function at\")\n # print(\"%s:%d\" % (fn, node.lineno))\n\n # lambda's\n if type(node.func) == ast.Name:\n continue\n\n # getattr(self, con.type)(context, box, con)\n if not hasattr(node.func, \"attr\"):\n continue\n\n translate_args = func_translate_args.get(node.func.attr, ())\n\n # do nothing if not found\n for arg_kw, arg_pos in translate_args:\n if arg_pos < len(node.args):\n extract_strings(fn, node.args[arg_pos])\n else:\n for kw in node.keywords:\n if kw.arg == arg_kw:\n extract_strings(fn, kw.value)\n\n # -------------------------------------------------------------------------\n # Dump Messages\n\n mod_dir = os.path.join(SOURCE_DIR, \"release\", \"scripts\", \"startup\", \"bl_ui\")\n\n files = [os.path.join(mod_dir, f)\n for f in os.listdir(mod_dir)\n if not f.startswith(\"_\")\n if f.endswith(\"py\")\n ]\n\n for fn in files:\n extract_strings_from_file(fn)\n\n\ndef dump_messages():\n messages = {\"\"}\n\n # get strings from RNA\n dump_messages_rna(messages)\n\n # get strings from UI layout definitions text=\"...\" args\n dump_messages_pytext(messages)\n\n messages.remove(\"\")\n\n message_file = open(FILE_NAME_MESSAGES, 'w', encoding=\"utf8\")\n message_file.writelines(\"\\n\".join(sorted(messages)))\n message_file.close()\n\n print(\"Written %d messages to: %r\" % (len(messages), FILE_NAME_MESSAGES))\n\n\ndef main():\n\n try:\n import bpy\n except ImportError:\n print(\"This script must run from inside blender\")\n return\n\n dump_messages()\n\n\nif __name__ == \"__main__\":\n print(\"\\n\\n *** Running %r *** \\n\" % __file__)\n 
main()\n","repo_name":"zakharov/blenderColladaKinematics","sub_path":"external/blender/po/update_msg.py","file_name":"update_msg.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"74838321640","text":"\"\"\"\nImplementation of Apriori algorithm. Use this to compare MODL to apriori.\n\nIt mines for association rules and not complexes per se, but can still be useful.\n\nCredits : based on code from https://github.com/coorty/apriori-agorithm-python combined with https://github.com/asaini/Apriori/blob/master/apriori.py\n\nTODO: Implement it as a potential alternative to MODL ? See notes in overlap_stats_compute.py for how to do it easily.\n\"\"\"\n\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\n\n\nclass Apriori:\n\n def __init__(self, min_support):\n \"\"\"\n Apriori-based itemset miner.\n \n min_support is the minimum frequency that an itemset must have to be considered \"frequent\".\n\n To run apriori, use the following steps, if X is a matrix with one line per transaction, one column per item and a '1' if the item is presnet in the transaction :\n\n >>> X = [[1,1,0],[1,0,1]]\n >>> names = ['A','B','C']\n >>> from pygtftk.stats.intersect.modl.apriori import Apriori, matrix_to_list_of_transactions, apriori_results_to_matrix\n >>> transactions = matrix_to_list_of_transactions(X, names)\n >>> myminer = Apriori(min_support = 0)\n >>> myminer.run_apriori(transactions)\n >>> results = myminer.produce_results()\n >>> apriori_results_df = apriori_results_to_matrix(results, names)\n \n \"\"\"\n self.min_support = min_support\n\n self.apriori_was_run = False\n\n # ---- Utility functions ---- #\n\n ## Initialization\n def get_one_item_set(self, transactions):\n \"\"\" Get unique 1-item set in `set` format \"\"\"\n itemSet = set()\n for line in transactions:\n for item in line: itemSet.add(frozenset([item]))\n return itemSet\n\n def get_joined_item_set(self, termSet, k):\n \"\"\" Generate new k-terms candiate itemset\"\"\"\n return set([term1.union(term2) for term1 in termSet for term2 in termSet\n if len(term1.union(term2)) == k])\n\n def get_support(self, item):\n \"\"\" Get the proportional support of an item\"\"\"\n return self.item_count_dict[item] / self.nb_transactions\n\n def get_items_with_min_support(self, transactions, item_set, frequent_items_set, min_support):\n \"\"\" Calculates the support for items in the itemSet and returns a subset\n of the itemSet each of whose elements satisfies the minimum support\"\"\"\n result_items_set = set()\n\n _local_set = defaultdict(int)\n\n for item in item_set:\n for transac in transactions:\n # Does the transaction contain the item ?\n if item.issubset(transac):\n frequent_items_set[item] += 1\n _local_set[item] += 1\n\n # Only conserve frequent item-set\n n = self.nb_transactions\n for item, count in _local_set.items():\n support = float(count) / len(transactions)\n\n if support >= self.min_support:\n result_items_set.add(item)\n\n return result_items_set\n\n # ---- Main function ---- #\n\n def run_apriori(self, transactions):\n \"\"\"\n transactions must have format of lst of transactions ? EXPLAIN\n It is a list that contains sets, eg. [(A,B),(B,C),(A,B,D)]\n Run the apriori algorithm, return the frequent itemsets. 
\n \"\"\"\n\n ## Initialization of results variables\n self.nb_transactions = len(transactions) # How many transactions are there ?\n\n frequent_item_sets = dict() # a dict store all frequent *-items set\n # Its structure is dict[k] = [all itemsets of length k]\n\n ## Dictionary to hold itemset counts\n # Key = candidate k_item set ; value = its count\n item_count_dict = defaultdict(int)\n\n # Begin the algorithm with all 1-item sets\n item_set = self.get_one_item_set(transactions)\n self.unique_items = item_set\n\n # Get the frequent 1-item sets\n freq_one_item_set = self.get_items_with_min_support(transactions, item_set, item_count_dict, self.min_support)\n\n # --- Main loop\n # Main idea is to \"grow\" in length\n k = 1\n current_frequent_term_set = freq_one_item_set\n\n while current_frequent_term_set != set():\n frequent_item_sets[k] = current_frequent_term_set # Save result\n k += 1\n\n # Get new candiate k-terms set\n current_candidate_item_sets = self.get_joined_item_set(current_frequent_term_set, k)\n\n # Now restrict to only frequent k-terms set\n current_frequent_term_set = self.get_items_with_min_support(transactions, current_candidate_item_sets,\n item_count_dict, self.min_support)\n\n # Save results\n self.item_count_dict = item_count_dict\n self.frequent_item_sets = frequent_item_sets\n # Only frequent items(a dict: freqSet[1] indicate frequent 1-term set)\n\n # Now for what to return\n self.apriori_was_run = True\n\n def produce_results(self):\n if not self.apriori_was_run: raise Exception(\"Must run apriori first\")\n\n # Return the items, along with their support\n to_return_items = list()\n for k, itemsets in self.frequent_item_sets.items():\n to_return_items.extend([(tuple(iset), self.get_support(iset))\n for iset in itemsets])\n\n # NOTE To get the association rules, simply go over all itemsets and \n # compute the confidence of rules.\n # A rule is: \"When X is present, Y is also present\" where X and Y can be\n # single items or itemsets themselves.\n # For example confidence (X --> Y) = support({X,Y})/support(X)\n\n return to_return_items\n\n\n# The apriori algo requires transactions to be in a list of list format, one transaction per list.\n# So we must convert our overlap/transaction matrix into such a list of lists.\ndef matrix_to_list_of_transactions(x, names):\n \"\"\"\n From a matrix with one line per transaction and one column per element with 1 if present and 0 if absent, \n returns a list of transaction\n \"\"\"\n # Enforce type\n names = np.array(names)\n x = np.array(x)\n\n # Get the list of all nonzero elements in each row\n result = []\n for row in x:\n current_transaction = np.nonzero(row)[0]\n current_transation_items = names[current_transaction]\n result += [current_transation_items.tolist()]\n return result\n\n\ndef apriori_results_to_matrix(results, names):\n \"\"\"\n Turns back apriori results of the form [(itemset, support)] to a matrix of words\n Does not further filter by support, keeps all itemsets. 
Filter by support before passing.\n \"\"\"\n\n matrix = []\n\n names = list(names) # Convert names to a list if an array\n\n for res in results:\n itemset = res[0]\n\n itemset_matrix = [0] * len(names)\n for elem in itemset:\n itemset_matrix[names.index(elem)] = 1\n\n matrix += [itemset_matrix]\n\n # Dataframe of results\n resdf = pd.DataFrame(matrix, columns=names)\n resdf['support'] = [res[1] for res in results]\n resdf.sort_values(by=['support'], inplace=True, ascending=False)\n\n return resdf\n","repo_name":"dputhier/pygtftk","sub_path":"pygtftk/stats/intersect/modl/apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":7295,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"18"} +{"seq_id":"40958994749","text":"#Your runtime beats 55.22% of python submissions. (One Time AC)\n#Given an array and a value, remove all instances of that value in place and return the new length.\n#Do not allocate extra space for another array, you must do this in place with constant memory.\n#The order of elements can be changed. It doesn't matter what you leave beyond the new length.\n#Example:Given input array nums = [3,2,2,3], val = 3\n#Your function should return length = 2, with the first two elements of nums being 2.\n\nclass Solution(object):\n def removeElement(self, nums, val):\n length=len(nums)\n numbers=0\n if length==0:\n return numbers\n for i in range(length): #Don't miss range\n if nums[i]!=val:\n nums[numbers]=nums[i]\n numbers+=1\n return numbers\n \n \n#A more concise version \nclass Solution(object):\n def removeElement(self,nums,val):\n dis_index=i=0\n for i in range(len(nums)):\n if nums[i]!=val:\n nums[dis_index]=nums[i]\n dis_index+=1\n return dis_index\n ","repo_name":"kyliewing/LeetCode","sub_path":"027_RemoveElement.py","file_name":"027_RemoveElement.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1572786784","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n w_avg = 0\n if my_list:\n product_sum = sum(x[0] * x[1] for x in my_list)\n weight_sum = sum(x[1] for x in my_list)\n\n return product_sum / weight_sum\n\n return w_avg\n","repo_name":"fr4nkln11/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4461967905","text":"from tkinter import Frame, Label, Button, SUNKEN, PhotoImage, Text, E, W, filedialog, StringVar, Entry\nfrom datastruct.ImageLabel import ImageLabel\nfrom datastruct.ImageProcessor import ImageProcessor\nfrom PIL import Image\nfrom datastruct.Eigenshape import EigenShape\n\n\nclass RecognitionController:\n def __init__(self, parent_window):\n self.parent_window = parent_window # of type Tk\n self.image_open = Image.open(\"etc/open2.png\") # set the default input image\n self.image_processor = ImageProcessor(self.image_open)\n self.image_result = self.image_processor.get_canny()\n self.imagelabel_open = None\n self.imagelabel_result = None\n self.text_recognition = None\n self.text_result = StringVar() # of type Text/stringvar, to display output\n self.text_output_label = None\n\n def process_image(self, input_img):\n self.image_processor = ImageProcessor(input_img)\n self.text_recognition = EigenShape(input_img)\n 
#print(self.text_recognition.get_detected_text())\n return self.image_processor.get_canny(), self.text_recognition.get_detected_text()\n\n def open_file(self):\n image_filename = filedialog.askopenfilename(title=\"Select text image\",\n filetypes=((\"PNG files\", \"*.png\"), (\"All files\", \"*.*\")))\n try:\n print(image_filename)\n self.image_open = Image.open(image_filename)\n self.image_result, text = self.process_image(self.image_open)\n self.imagelabel_open.change_image(self.image_open)\n self.imagelabel_result.change_image(self.image_result)\n self.text_result.set(text)\n\n except AttributeError:\n #self.message = \"user clicked cancel\"\n print(\"user clicked cancel\")\n pass\n\n\nclass Recognition(Frame, RecognitionController):\n def __init__(self, parent_window):\n RecognitionController.__init__(self, parent_window)\n Frame.__init__(self, self.parent_window, height=2, bd=1, relief=SUNKEN)\n\n open_button = Button(self, text=\"Open Image..\", command=self.open_file)\n open_button.grid(padx=10, pady=5, sticky=(E, W))\n self.imagelabel_open = ImageLabel(self, self.image_open)\n self.imagelabel_open.grid(padx=10, pady=5, sticky=(E, W))\n\n self.imagelabel_result = ImageLabel(self, self.image_result)\n self.imagelabel_result.grid(padx=10, pady=5, sticky=(E, W))\n\n Label(self, text=\"OUTPUT:\").grid()\n self.text_output_label = Label(self, textvariable=self.text_result)\n self.text_output_label.grid()\n\n\n","repo_name":"imamkhaira/swt_py","sub_path":"screen/Recognition.py","file_name":"Recognition.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33519754189","text":"from typing import Tuple, List\n\nfrom algosdk.v2client.algod import AlgodClient\nfrom algosdk.future import transaction\nfrom algosdk.logic import get_application_address\nfrom algosdk import account, encoding\n\nfrom pyteal import compileTeal, Mode, Keccak256\nfrom tellorflex.methods import report\n\nfrom utils.account import Account\nfrom tellorflex.contracts import approval_program, clear_state_program\nfrom utils.helpers import add_standalone_account, fund_account\nfrom utils.util import (\n waitForTransaction,\n fullyCompileContract,\n getAppGlobalState,\n)\n\nAPPROVAL_PROGRAM = b\"\"\nCLEAR_STATE_PROGRAM = b\"\"\n\nclass Scripts:\n\n def __init__(self, client, tipper, reporter, governance_address) -> None:\n \n self.client = client\n self.tipper = tipper\n self.reporter = reporter\n self.governance_address = governance_address.getAddress()\n\n\n def get_contracts(self, client: AlgodClient) -> Tuple[bytes, bytes]:\n \"\"\"Get the compiled TEAL contracts for the tellor contract.\n Args:\n client: An algod client that has the ability to compile TEAL programs.\n Returns:\n A tuple of 2 byte strings. 
The first is the approval program, and the\n second is the clear state program.\n \"\"\"\n global APPROVAL_PROGRAM\n global CLEAR_STATE_PROGRAM\n\n if len(APPROVAL_PROGRAM) == 0:\n APPROVAL_PROGRAM = fullyCompileContract(client, approval_program())\n CLEAR_STATE_PROGRAM = fullyCompileContract(client, clear_state_program())\n\n return APPROVAL_PROGRAM, CLEAR_STATE_PROGRAM\n\n def deploy_tellor_flex(\n self,\n query_id: str,\n query_data: str\n ) -> int:\n \"\"\"Create a new tellor reporting contract.\n Args:\n client: An algod client.\n sender: The account that will request data through the contract\n governance_address: the account that can vote to dispute reports\n query_id: the ID of the data requested to be put on chain\n query_data: the in-depth specifications of the data requested\n Returns:\n The ID of the newly created auction app.\n \"\"\"\n approval, clear = self.get_contracts(self.client)\n\n globalSchema = transaction.StateSchema(num_uints=7, num_byte_slices=5)\n localSchema = transaction.StateSchema(num_uints=0, num_byte_slices=0)\n\n app_args = [\n encoding.decode_address(self.governance_address),\n query_id.encode(\"utf-8\"),\n query_data.encode(\"utf-8\"),\n ]\n\n txn = transaction.ApplicationCreateTxn(\n sender=self.tipper.getAddress(),\n on_complete=transaction.OnComplete.NoOpOC,\n approval_program=approval,\n clear_program=clear,\n global_schema=globalSchema,\n local_schema=localSchema,\n app_args=app_args,\n sp=self.client.suggested_params(),\n )\n\n signedTxn = txn.sign(self.tipper.getPrivateKey())\n\n self.client.send_transaction(signedTxn)\n\n response = waitForTransaction(self.client, signedTxn.get_txid())\n assert response.applicationIndex is not None and response.applicationIndex > 0\n self.app_id = response.applicationIndex\n self.app_address = get_application_address(self.app_id)\n\n def stake(self) -> None:\n \"\"\"Place a bid on an active auction.\n Args:\n client: An Algod client.\n appID: The app ID of the auction.\n reporter: The account staking to report.\n \"\"\"\n appAddr = get_application_address(self.app_id)\n # appGlobalState = getAppGlobalState(client, appID)\n\n # if any(appGlobalState[b\"bid_account\"]):\n # # if \"bid_account\" is not the zero address\n # prevBidLeader = encoding.encode_address(appGlobalState[b\"bid_account\"])\n # else:\n # prevBidLeader = None\n\n stake_amount = 180*1000000 #200 dollars of ALGO\n\n suggestedParams = self.client.suggested_params()\n\n payTxn = transaction.PaymentTxn(\n sender=self.reporter.getAddress(),\n receiver=self.app_address,\n amt=stake_amount,\n sp=suggestedParams,\n )\n\n optInTx = transaction.ApplicationOptInTxn(\n sender=self.reporter.getAddress(),\n index=self.app_id,\n sp=suggestedParams,\n )\n\n transaction.assign_group_id([payTxn, optInTx])\n\n signedPayTxn = payTxn.sign(self.reporter.getPrivateKey())\n signedAppCallTxn = optInTx.sign(self.reporter.getPrivateKey())\n\n self.client.send_transactions([signedPayTxn, signedAppCallTxn])\n\n waitForTransaction(self.client, optInTx.get_txid())\n\n\n def closeAuction(self, client: AlgodClient, appID: int, closer: Account):\n \"\"\"Close an auction.\n This action can only happen before an auction has begun, in which case it is\n cancelled, or after an auction has ended.\n If called after the auction has ended and the auction was successful, the\n NFT is transferred to the winning bidder and the auction proceeds are\n transferred to the seller. 
If the auction was not successful, the NFT and\n all funds are transferred to the seller.\n Args:\n client: An Algod client.\n appID: The app ID of the auction.\n closer: The account initiating the close transaction. This must be\n either the seller or auction creator if you wish to close the\n auction before it starts. Otherwise, this can be any account.\n \"\"\"\n appGlobalState = getAppGlobalState(client, appID)\n\n nftID = appGlobalState[b\"nft_id\"]\n\n accounts: List[str] = [encoding.encode_address(appGlobalState[b\"seller\"])]\n\n if any(appGlobalState[b\"bid_account\"]):\n # if \"bid_account\" is not the zero address\n accounts.append(encoding.encode_address(appGlobalState[b\"bid_account\"]))\n\n deleteTxn = transaction.ApplicationDeleteTxn(\n sender=closer.getAddress(),\n index=appID,\n accounts=accounts,\n foreign_assets=[nftID],\n sp=client.suggested_params(),\n )\n signedDeleteTxn = deleteTxn.sign(closer.getPrivateKey())\n\n client.send_transaction(signedDeleteTxn)\n\n waitForTransaction(client, signedDeleteTxn.get_txid())\n\nif __name__ == \"__main__\":\n\n def setup():\n algo_address = \"http://localhost:4001\"\n algo_token = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n\n client = AlgodClient(algod_address=algo_address, algod_token=algo_token)\n\n gov_address = Account.FromMnemonic(\"figure adapt crumble always cart twist scatter timber smooth artist gaze raise genre say scissors arena hidden poem mimic worry race burst yard about key\")\n tipper = Account.FromMnemonic(\"lava side salad unit door frozen clay skate project slogan choose poverty magic arrow pond swing alcohol bachelor witness monkey iron remind team abstract mom\")\n reporter = Account.FromMnemonic(\"gaze hockey eight fog scrub bind calm scrub change cannon recipe face shield smart member toward turkey pyramid item quote explain witness music ability weapon\")\n\n\n print(\"gov\", gov_address.getAddress())\n print(\"tipper\", tipper.getAddress())\n print(\"reporter\", reporter.getAddress())\n\n s = Scripts(client=client, tipper=tipper, reporter=reporter, governance_address=gov_address)\n\n return s\n\n s = setup()\n app_id = s.deploy_tellor_flex(\n query_id=\"hi\",\n query_data=\"hi\",\n )\n\n s.stake()","repo_name":"tallywiesenberg/algorand-tellorflex","sub_path":"scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":7672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16045575966","text":"import logging\nfrom enum import unique, IntEnum\nfrom typing import Any\n\nfrom util.utils import baserepr, basestr\nfrom oom import *\n\n\nlogger = logging.getLogger(__name__)\n\n\n# The column width\nINDENT_SPACE = ' ' * 4\n\n# Prefix for the reserved attributes\nATTRIBUTE_RESERVED_PREFIX = '_'\n\n# Tags' properties\nPROPERTY_ENTITY = 'entity'\nPROPERTY_LENGTH = 'length'\nPROPERTY_INDEX = 'index'\nPROPERTY_TYPE = 'type'\n\n# Properties's values\nPROPERTY_VALUE_OBJECT = 'object'\nPROPERTY_VALUE_ATTRIBUTE = 'attribute'\n\n\nclass UnknownInterpreterError(Exception):\n \"\"\" Raised when it is requested an unknown interpreter. \"\"\"\n pass\n\n\nclass InterpretationError(Exception):\n \"\"\" Raised when an error occurs during the interpretation. \"\"\"\n pass\n\n\nclass Interpreter(object):\n \"\"\" The interpret that builds the representation of the attack scenario. 
\"\"\"\n\n @unique\n class Type(Enum):\n XML: str = 'xml'\n# JSON: str = 'json'\n# YAML: str = 'yaml'\n\n @classmethod\n def exist(cls, interpreter: str) -> bool:\n \"\"\" Checks if the given interpreter exists. \"\"\"\n if interpreter.lower() in tuple(e.value.lower() for e in cls.Type):\n return True\n return False\n\n @classmethod\n def interpret(cls, scenario: Scenario, interpreter: str) -> str:\n \"\"\" Interprets the given scenario by using the requested interpreter. \"\"\"\n if interpreter.lower() == cls.Type.XML.value.lower():\n return interpret_xml(scenario)\n# if interpreter.lower() == cls.Type.JSON.value.lower():\n# return interpret_json(scenario)\n# if interpreter.lower() == cls.Type.YAML.value.lower():\n# return interpret_yaml(scenario)\n else:\n raise UnknownInterpreterError(\"The interpreter '{}' is unknown\".format(interpreter))\n\n\ndef interpret_xml(statement: Any, indentation: int = 0, index: int = None) -> str:\n \"\"\" Provides the XML interpretation for the given scenario. \"\"\"\n logger.debug(\"interpret_xml: statement [{}], indentation [{}]\".format(\n statement.__class__.__name__,\n indentation))\n\n if statement is None:\n return ''\n\n xml = ''\n if indentation == 0:\n xml += '\\n'\n\n if index is None:\n xml += '{}<{} {}=\"{}\">\\n'.format(\n INDENT_SPACE * indentation,\n statement.__class__.__name__,\n PROPERTY_ENTITY,\n PROPERTY_VALUE_OBJECT)\n else:\n xml += '{}<{} {}=\"{}\" {}=\"{}\">\\n'.format(\n INDENT_SPACE * indentation,\n statement.__class__.__name__,\n PROPERTY_ENTITY,\n PROPERTY_VALUE_OBJECT,\n PROPERTY_INDEX,\n index)\n # Inspects the object's variables (discarding the reserved ones)\n for key in statement.__dict__.keys():\n if not key.startswith(ATTRIBUTE_RESERVED_PREFIX):\n logger.debug(\"attribute: {}\".format(key))\n if isinstance(statement.__dict__[key], (int, float, bool, str)):\n logger.debug(\"type: int, float, bool, str\")\n xml += '{}<{} {}=\"{}\" {}=\"{}\">\\n'.format(\n INDENT_SPACE * (indentation + 1),\n key,\n PROPERTY_ENTITY,\n PROPERTY_VALUE_ATTRIBUTE,\n PROPERTY_TYPE,\n statement.__dict__[key].__class__.__name__)\n xml += '{}{}\\n'.format(\n INDENT_SPACE * (indentation + 2),\n statement.__dict__[key])\n elif isinstance(statement.__dict__[key], (list, tuple)):\n logger.debug(\"type: list, tuple\")\n xml += '{}<{} {}=\"{}\" {}=\"{}\" {}=\"{}\">\\n'.format(\n INDENT_SPACE * (indentation + 1),\n key,\n PROPERTY_ENTITY,\n PROPERTY_VALUE_ATTRIBUTE,\n PROPERTY_TYPE,\n statement.__dict__[key].__class__.__name__,\n PROPERTY_LENGTH,\n len(statement.__dict__[key]))\n for index, item in enumerate(statement.__dict__[key]):\n xml += interpret_xml(item, indentation + 2, index)\n elif isinstance(statement.__dict__[key], dict):\n logger.debug(\"type: dict\")\n xml += '{}<{} {}=\"{}\" {}=\"{}\" {}=\"{}\">\\n'.format(\n INDENT_SPACE * (indentation + 1),\n key,\n PROPERTY_ENTITY,\n PROPERTY_VALUE_ATTRIBUTE,\n PROPERTY_TYPE,\n statement.__dict__[key].__class__.__name__,\n PROPERTY_LENGTH,\n len(statement.__dict__[key]))\n for index, subkey in enumerate(statement.__dict__[key].keys()):\n xml += interpret_xml(statement.__dict__[key][subkey], indentation + 2, index)\n else: # Anything else\n logger.debug(\"type: not built-in\")\n xml += '{}<{} {}=\"{}\" {}=\"{}\">\\n'.format(\n INDENT_SPACE * (indentation + 1),\n key,\n PROPERTY_ENTITY,\n PROPERTY_VALUE_ATTRIBUTE,\n PROPERTY_TYPE,\n statement.__dict__[key].__class__.__name__)\n xml += interpret_xml(statement.__dict__[key], indentation + 2)\n xml += '{}\\n'.format(\n INDENT_SPACE * (indentation + 1),\n 
key)\n logger.debug(\"closing class name {}\".format(statement.__class__.__name__))\n xml += '{}\\n'.format(\n INDENT_SPACE * indentation,\n statement.__class__.__name__)\n return xml\n\n\n#def interpret_json(scenario: Scenario) -> str:\n# \"\"\" Provides the YAML interpretation for the given scenario. \"\"\"\n# return ''\n\n\n#def interpret_yaml(scenario: Scenario) -> str:\n# \"\"\" Provides the JSON interpretation for the given scenario. \"\"\"\n# return ''\n","repo_name":"francescoracciatti/py-adele","sub_path":"src/model/interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32374814774","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport signal\nimport logging\nimport configparser\nimport paho.mqtt.client as mqtt\nimport argparse\nimport asyncio\nfrom pyvlx import Position, PyVLX, OpeningDevice, Window, Blind, Awning, RollerShutter, GarageDoor, Gate, Blade\nfrom pyvlx.log import PYVLXLOG\n\nfrom ha_mqtt.ha_device import HaDevice\nfrom ha_mqtt.mqtt_device_base import MqttDeviceSettings\nfrom ha_mqtt.util import HaDeviceClass\nfrom mqtt_cover import MqttCover\nfrom mqtt_switch_with_icon import MqttSwitchWithIcon\n\nparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Allows to control devices paired with Velux KLF200 via MQTT.\\n\" \\\n \"Registers the devices to Homeassistant using MQTT Autodiscovery.\")\nparser.add_argument('config_file', metavar=\"\", help=\"configuration file\")\nargs = parser.parse_args()\n\n# read and parse config file\nconfig = configparser.RawConfigParser()\nconfig.read(args.config_file)\n# [mqtt]\nMQTT_HOST = config.get(\"mqtt\", \"host\")\nMQTT_PORT = config.getint(\"mqtt\", \"port\")\nMQTT_LOGIN = config.get(\"mqtt\", \"login\", fallback=None)\nMQTT_PASSWORD = config.get(\"mqtt\", \"password\", fallback=None)\n# [homeassistant]\nHA_PREFIX = config.get(\"homeassistant\", \"prefix\", fallback=\"\")\nHA_INVERT_AWNING = config.get(\"homeassistant\", \"invert_awning\", fallback=False)\n# [velux]\nVLX_HOST = config.get(\"velux\", \"host\")\nVLX_PW = config.get(\"velux\", \"password\")\n# [log]\nVERBOSE = config.get(\"log\", \"verbose\", fallback=False)\nKLF200LOG = config.get(\"log\", \"klf200\", fallback=False)\nLOGFILE = config.get(\"log\", \"logfile\", fallback=None)\n\nAPPNAME = \"vlxmqttha\"\n\n# init logging \nLOGFORMAT = '%(asctime)-15s %(message)s'\n\nif VERBOSE:\n loglevel = logging.DEBUG\nelse:\n loglevel = logging.INFO\n\nif KLF200LOG:\n pyvlxLogLevel = logging.DEBUG\nelse:\n pyvlxLogLevel = logging.INFO\n\n\nif LOGFILE:\n logging.basicConfig(filename=LOGFILE, format=LOGFORMAT, level=loglevel)\nelse:\n logging.basicConfig(stream=sys.stdout, format=LOGFORMAT, level=loglevel)\n\nlogging.info(\"Starting \" + APPNAME)\nif VERBOSE:\n logging.info(\"DEBUG MODE\")\nelse:\n logging.debug(\"INFO MODE\")\n\nPYVLXLOG.setLevel(pyvlxLogLevel)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(pyvlxLogLevel)\nPYVLXLOG.addHandler(ch)\n\nclass VeluxMqttCover:\n \"\"\"\n This class represents the bridge between one MQTT cover device and the actual cover\n \n It is in charge of triggering the registration in MQTT (using Homeassistant AutoDiscovery)\n and forwarding commands and state changes between KLF 200 and MQTT\n\n Attributes\n ----------\n vlxnode : \n PyVLX Object to talk to the Cover through KLF 200\n haDevice :\n MQTT representation of the Homeassistant device\n 
coverDevice :\n MQTT representation of the Homeassistant cover entity\n limitSwitchDevice :\n MQTT representation of the Homeassistant limit switch entity\n \"\"\"\n def __init__(self, mqttc, vlxnode, mqttid):\n logging.debug(\"Registering %s to Homeassistant (Type: %s)\" % (vlxnode.name, type(vlxnode)))\n self.vlxnode = vlxnode\n self.mqttc = mqttc\n self.mqttid = mqttid\n self.haDevice = HaDevice(HA_PREFIX + vlxnode.name, HA_PREFIX + mqttid)\n self.coverDevice = self.makeMqttCover()\n self.limitSwitchDevice = self.makeMqttKeepOpenSwitch()\n \n def makeMqttCover(self):\n return MqttCover(\n MqttDeviceSettings(\"\", HA_PREFIX + self.mqttid, self.mqttc, self.haDevice),\n self.getHaDeviceClassFromVlxNode(self.vlxnode)\n )\n\n def makeMqttKeepOpenSwitch(self):\n return MqttSwitchWithIcon(\n MqttDeviceSettings(\"Keep open\", HA_PREFIX + self.mqttid + \"-keepopen\", self.mqttc, self.haDevice),\n \"mdi:lock-outline\"\n )\n\n def getHaDeviceClassFromVlxNode(self, vlxnode):\n if isinstance(vlxnode, Window):\n return HaDeviceClass.WINDOW\n if isinstance(vlxnode, Blind):\n return HaDeviceClass.BLIND\n if isinstance(vlxnode, Awning):\n return HaDeviceClass.AWNING\n if isinstance(vlxnode, RollerShutter):\n return HaDeviceClass.SHUTTER\n if isinstance(vlxnode, GarageDoor):\n return HaDeviceClass.GARAGE\n if isinstance(vlxnode, Gate):\n return HaDeviceClass.GATE\n if isinstance(vlxnode, Blade):\n return HaDeviceClass.SHADE\n \n async def registerMqttCallbacks(self):\n self.coverDevice.callback_open = self.mqtt_callback_open\n self.coverDevice.callback_close = self.mqtt_callback_close\n self.coverDevice.callback_stop = self.mqtt_callback_stop\n self.coverDevice.callback_position = self.mqtt_callback_position\n self.limitSwitchDevice.callback_on = self.mqtt_callback_keepopen_on\n self.limitSwitchDevice.callback_off = self.mqtt_callback_keepopen_off\n \n def updateNode(self):\n \"\"\" Callback for node state changes sent from KLF 200 \"\"\"\n logging.debug(\"Updating %s\", self.vlxnode.name)\n\n self.updateCover()\n self.updateLimitSwitch()\n \n def updateCover(self):\n position = self.vlxnode.position.position_percent\n target_position = self.vlxnode.target_position.position_percent\n\n mqtt_state = \"\"\n self.coverDevice.publish_position(position)\n if target_position < position:\n mqtt_state = \"opening\"\n elif target_position > position:\n mqtt_state = \"closing\"\n elif position == 100:\n mqtt_state = \"closed\"\n else:\n mqtt_state = \"open\"\n \n self.coverDevice.publish_state(mqtt_state)\n\n def updateLimitSwitch(self):\n max_position = self.vlxnode.limitation_max.position\n if max_position < 100:\n self.limitSwitchDevice.publish_state('on')\n else:\n self.limitSwitchDevice.publish_state('off')\n \n def mqtt_callback_open(self):\n logging.debug(\"Opening %s\", self.vlxnode.name)\n asyncio.run(self.vlxnode.open(wait_for_completion=False))\n\n def mqtt_callback_close(self):\n logging.debug(\"Closing %s\", self.vlxnode.name)\n asyncio.run(self.vlxnode.close(wait_for_completion=False))\n\n def mqtt_callback_stop(self):\n logging.debug(\"Stopping %s\", self.vlxnode.name)\n asyncio.run(self.vlxnode.stop(wait_for_completion=False))\n\n def mqtt_callback_position(self, position):\n logging.debug(\"Moving %s to position %s\" % (self.vlxnode.name, position))\n asyncio.run(self.vlxnode.set_position(Position(position_percent=int(position)), wait_for_completion=False))\n\n def mqtt_callback_keepopen_on(self):\n logging.debug(\"Enable 'keep open' limitation of %s\" % (self.vlxnode.name))\n 
asyncio.run(self.vlxnode.set_position_limitations(position_max=Position(position_percent=0), position_min=Position(position_percent=0)))\n\n def mqtt_callback_keepopen_off(self):\n logging.debug(\"Disable 'keep open' limitation of %s\" % (self.vlxnode.name))\n asyncio.run((self.vlxnode.clear_position_limitations()))\n\n def __del__(self):\n logging.debug(\"Unregistering %s from Homeassistant\" % (self.vlxnode.name))\n self.coverDevice.close()\n\nclass VeluxMqttCoverInverted (VeluxMqttCover):\n def __init__(self, mqttc, vlxnode, mqttid):\n super().__init__(mqttc, vlxnode, mqttid)\n \n def makeMqttCover(self):\n return MqttCover(\n MqttDeviceSettings(\"\", HA_PREFIX + self.mqttid, self.mqttc, self.haDevice),\n self.getHaDeviceClassFromVlxNode(self.vlxnode),\n True\n )\n\n def mqtt_callback_open(self):\n logging.debug(\"Opening %s\", self.vlxnode.name)\n asyncio.run(self.vlxnode.close(wait_for_completion=False))\n\n def mqtt_callback_close(self):\n logging.debug(\"Closing %s\", self.vlxnode.name)\n asyncio.run(self.vlxnode.open(wait_for_completion=False))\n\n def updateCover(self):\n position = self.vlxnode.position.position_percent\n target_position = self.vlxnode.target_position.position_percent\n\n mqtt_state = \"\"\n self.coverDevice.publish_position(position)\n if target_position < position:\n mqtt_state = \"closing\"\n elif target_position > position:\n mqtt_state = \"opening\"\n elif position == 0:\n mqtt_state = \"closed\"\n else:\n mqtt_state = \"open\"\n \n self.coverDevice.publish_state(mqtt_state)\n\n\n\n\nclass VeluxMqttHomeassistant:\n \"\"\"\n This class manages the connections to KLF 200 and MQTT Broker and holds a list\n of all registered device objects\n\n Attributes\n ----------\n mqttc :\n MQTT client\n pyvlx :\n Object representing KLF 200\n mqttDevices : list\n list of all registered devices\n \n \n \"\"\"\n def __init__(self):\n # MQTT\n MQTT_CLIENT_ID = APPNAME + \"_%d\" % os.getpid()\n self.mqttc = mqtt.Client(MQTT_CLIENT_ID)\n self.pyvlx = None\n self.mqttDevices = {}\n\n async def connect_mqtt(self):\n logging.debug(\"MQTT broker : %s\" % MQTT_HOST)\n if MQTT_LOGIN:\n logging.debug(\" port : %s\" % (str(MQTT_PORT)))\n logging.debug(\" login : %s\" % MQTT_LOGIN)\n\n # set login and password, if available\n if MQTT_LOGIN:\n self.mqttc.username_pw_set(MQTT_LOGIN, MQTT_PASSWORD)\n\n # Connect to the broker and enter the main loop\n result = self.mqttc.connect(MQTT_HOST, MQTT_PORT, 60)\n while result != 0:\n logging.info(\"Connection failed with error code %s. 
Retrying\", result)\n await asyncio.sleep(10)\n result = self.mqttc.connect(MQTT_HOST, MQTT_PORT, 60)\n\n self.mqttc.loop_start()\n await asyncio.sleep(1)\n\n async def connect_klf200(self, loop):\n logging.debug(\"klf200 : %s\" % VLX_HOST)\n self.pyvlx = PyVLX(host=VLX_HOST, password=VLX_PW, loop=loop)\n await self.pyvlx.load_nodes()\n\n logging.debug(\"vlx nodes : %s\" % (len(self.pyvlx.nodes)))\n for node in self.pyvlx.nodes:\n logging.debug(\" %s\" % node.name)\n\n async def register_devices(self):\n # register callbacks\n for vlxnode in self.pyvlx.nodes:\n if isinstance(vlxnode, OpeningDevice):\n vlxnode.register_device_updated_cb(self.vlxnode_callback)\n mqttid = self.generate_id(vlxnode)\n mqttCover = None\n if isinstance(vlxnode, Awning) and HA_INVERT_AWNING == True:\n mqttCover = VeluxMqttCoverInverted(self.mqttc, vlxnode, mqttid)\n else:\n mqttCover = VeluxMqttCover(self.mqttc, vlxnode, mqttid)\n self.mqttDevices[mqttid] = mqttCover\n await mqttCover.registerMqttCallbacks()\n logging.debug(\"watching: %s\" % vlxnode.name)\n \n async def update_device_state(self):\n for vlxnode in self.pyvlx.nodes:\n if isinstance(vlxnode, OpeningDevice):\n await self.pyvlx.get_limitation(vlxnode.node_id) \n\n async def vlxnode_callback(self, vlxnode):\n logging.debug(\"%s at %d%%\" % (vlxnode.name, vlxnode.position.position_percent))\n mqttid = self.generate_id(vlxnode)\n mqttDevice = self.mqttDevices[mqttid]\n if mqttDevice:\n mqttDevice.updateNode()\n\n def generate_id(self, vlxnode):\n return \"vlx-\" + vlxnode.name.replace(\" \", \"-\").lower()\n\n def __del__(self):\n for mqttDeviceId in self.mqttDevices:\n del self.mqttDevices[mqttDeviceId]\n self.mqttDevices.pop(mqttDeviceId)\n logging.info(\"Disconnecting from MQTT broker\")\n self.mqttc.disconnect()\n self.mqttc.loop_stop()\n\n logging.info(\"Disconnecting from KLF200\")\n self.pyvlx.disconnect()\n\n# Use the signal module to handle signals\nsignal.signal(signal.SIGTERM, lambda: asyncio.get_event_loop().stop())\nsignal.signal(signal.SIGINT, lambda: asyncio.get_event_loop().stop())\n\nif __name__ == '__main__':\n # pylint: disable=invalid-name\n try:\n LOOP = asyncio.get_event_loop()\n\n pid = str(os.getpid())\n pidfile = \"/tmp/vlxmqtthomeassistant.pid\"\n\n if os.path.isfile(pidfile):\n print(\"%s already exists, exiting\" % pidfile)\n sys.exit()\n file = open(pidfile, 'w')\n file.write(pid)\n file.close()\n\n veluxMqttHomeassistant = VeluxMqttHomeassistant()\n LOOP.run_until_complete(veluxMqttHomeassistant.connect_mqtt())\n LOOP.run_until_complete(veluxMqttHomeassistant.connect_klf200(LOOP))\n LOOP.run_until_complete(veluxMqttHomeassistant.register_devices())\n LOOP.run_until_complete(veluxMqttHomeassistant.update_device_state())\n\n LOOP.run_forever()\n except KeyboardInterrupt:\n logging.info(\"Interrupted by keypress\")\n finally:\n del veluxMqttHomeassistant\n os.unlink(pidfile)\n LOOP.close()\n sys.exit(0)\n","repo_name":"tjaehnel/vlxmqttha","sub_path":"vlxmqttha.py","file_name":"vlxmqttha.py","file_ext":"py","file_size_in_byte":13087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32516922099","text":"import smtplib\n\nserver = 'pikachu.udel.edu'\ndb = 'MG_priv_BSseq'\n \ndef sendmail():\n print('Sending job complete mail')\n to = 'kakrana@gmail.com'\n gmail_user = 'daemon.blake.lab@gmail.com'\n gmail_pwd = '*********'\n smtpserver = smtplib.SMTP(\"smtp.gmail.com\",587)\n smtpserver.ehlo()\n smtpserver.starttls()\n smtpserver.ehlo\n smtpserver.login(gmail_user, 
gmail_pwd)\n header = 'To:' + to + '\\n' + 'From: ' + gmail_user + '\\n' + 'Subject:Script run finished \\n'\n #print (header)\n msg = (header + 'Master, \\nYour BS seq clustering and methylation analysis script has just finished run at:%s on :%s\\n\\n' % (server,db))\n #msg = (header + '\\n This is test msg from your server \\n\\n')\n smtpserver.sendmail(gmail_user, to, msg)\n print ('Mail Sent!')\n smtpserver.close() \n#####\nsendmail()\n","repo_name":"atulkakrana/computational-biology","sub_path":"old/sendmail2.py","file_name":"sendmail2.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"2037253110","text":"from django.shortcuts import get_object_or_404\nfrom restaurants.models import Restaurant, Menu, Menu_item\nfrom decimal import Decimal\n\ndef get_cart_items_and_total(cart):\n\n cart_items = []\n total = 0\n for menu_item_id, item_quantity in cart.items():\n this_menu_item = get_object_or_404(Menu_item, pk=menu_item_id)\n this_menu = this_menu_item.menu\n this_restaurant = this_menu.restaurant\n this_total = this_menu_item.price * Decimal(item_quantity)\n total += this_total\n this_item = {\n 'restaurant': this_restaurant,\n 'menu': this_menu,\n 'menu_item': this_menu_item, \n 'quantity': item_quantity,\n 'total': this_total,\n }\n cart_items.append(this_item)\n\n return { 'cart_items': cart_items, 'total': total }\n \n ","repo_name":"Code-Institute-Submissions/feedFast","sub_path":"cart/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"71426823400","text":"import os\nimport time\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nfrom tqdm import tqdm\nimport math\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D\nfrom keras.layers import Bidirectional, GlobalMaxPool1D\nfrom keras.models import Model\nfrom keras import initializers, regularizers, constraints, optimizers, layers\ntrain_df = pd.read_csv(\"../input/train.csv\")\ntest_df = pd.read_csv(\"../input/test.csv\")\nprint(\"Train shape : \",train_df.shape)\nprint(\"Test shape : \",test_df.shape)\n## split to train and val\ntrain_df, val_df = train_test_split(train_df, test_size=0.1, random_state=2018)\n\n## some config values \nembed_size = 300 # how big is each word vector\nmax_features = 50000 # how many unique words to use (i.e num rows in embedding vector)\nmaxlen = 100 # max number of words in a question to use\n\n## fill up the missing values\ntrain_X = train_df[\"question_text\"].fillna(\"_na_\").values\nval_X = val_df[\"question_text\"].fillna(\"_na_\").values\ntest_X = test_df[\"question_text\"].fillna(\"_na_\").values\n\n## Tokenize the sentences\ntokenizer = Tokenizer(num_words=max_features)\ntokenizer.fit_on_texts(list(train_X))\ntrain_X = tokenizer.texts_to_sequences(train_X)\nval_X = tokenizer.texts_to_sequences(val_X)\ntest_X = tokenizer.texts_to_sequences(test_X)\n\n## Pad the sentences \ntrain_X = pad_sequences(train_X, maxlen=maxlen)\nval_X = pad_sequences(val_X, maxlen=maxlen)\ntest_X = pad_sequences(test_X, maxlen=maxlen)\n\n## Get the target values\ntrain_y = train_df['target'].values\nval_y = val_df['target'].values\nEMBEDDING_FILE1 = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'\ndef get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\nembeddings_index = dict(get_coefs(*o.split(\" \")) for o in open(EMBEDDING_FILE1))\n\nall_embs = np.stack(embeddings_index.values())\nemb_mean,emb_std = all_embs.mean(), all_embs.std()\nembed_size = all_embs.shape[1]\n\nword_index = tokenizer.word_index\nnb_words = min(max_features, len(word_index))\nembedding_matrix1 = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\nfor word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: embedding_matrix1[i] = embedding_vector\n \nfrom keras.layers import K, Activation\nfrom keras.engine import Layer\nfrom keras.layers import Dense, Input, Embedding, Dropout, Bidirectional, GRU, Flatten, SpatialDropout1D\ngru_len = 128\nRoutings = 5\nNum_capsule = 10\nDim_capsule = 16\ndropout_p = 0.25\nrate_drop_dense = 0.28\n\ndef squash(x, axis=-1):\n # s_squared_norm is really small\n # s_squared_norm = K.sum(K.square(x), axis, keepdims=True) + K.epsilon()\n # scale = K.sqrt(s_squared_norm)/ (0.5 + s_squared_norm)\n # return scale * x\n s_squared_norm = K.sum(K.square(x), axis, keepdims=True)\n scale = K.sqrt(s_squared_norm + K.epsilon())\n return x / scale\n\n\n# A Capsule Implement with Pure Keras\nclass Capsule(Layer):\n def __init__(self, num_capsule, dim_capsule, routings=3, kernel_size=(9, 1), share_weights=True,\n activation='default', **kwargs):\n super(Capsule, self).__init__(**kwargs)\n self.num_capsule = num_capsule\n self.dim_capsule = dim_capsule\n self.routings = routings\n self.kernel_size = kernel_size\n self.share_weights = share_weights\n if activation == 'default':\n self.activation = 
squash\n else:\n self.activation = Activation(activation)\n\n def build(self, input_shape):\n super(Capsule, self).build(input_shape)\n input_dim_capsule = input_shape[-1]\n if self.share_weights:\n self.W = self.add_weight(name='capsule_kernel',\n shape=(1, input_dim_capsule,\n self.num_capsule * self.dim_capsule),\n # shape=self.kernel_size,\n initializer='glorot_uniform',\n trainable=True)\n else:\n input_num_capsule = input_shape[-2]\n self.W = self.add_weight(name='capsule_kernel',\n shape=(input_num_capsule,\n input_dim_capsule,\n self.num_capsule * self.dim_capsule),\n initializer='glorot_uniform',\n trainable=True)\n\n def call(self, u_vecs):\n if self.share_weights:\n u_hat_vecs = K.conv1d(u_vecs, self.W)\n else:\n u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])\n\n batch_size = K.shape(u_vecs)[0]\n input_num_capsule = K.shape(u_vecs)[1]\n u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,\n self.num_capsule, self.dim_capsule))\n u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))\n # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]\n\n b = K.zeros_like(u_hat_vecs[:, :, :, 0]) # shape = [None, num_capsule, input_num_capsule]\n for i in range(self.routings):\n b = K.permute_dimensions(b, (0, 2, 1)) # shape = [None, input_num_capsule, num_capsule]\n c = K.softmax(b)\n c = K.permute_dimensions(c, (0, 2, 1))\n b = K.permute_dimensions(b, (0, 2, 1))\n outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))\n if i < self.routings - 1:\n b = K.batch_dot(outputs, u_hat_vecs, [2, 3])\n\n return outputs\n\n def compute_output_shape(self, input_shape):\n return (None, self.num_capsule, self.dim_capsule)\n\n\ndef get_model():\n input1 = Input(shape=(maxlen,))\n embed_layer = Embedding(max_features,\n embed_size,\n input_length=maxlen,\n weights=[embedding_matrix1],\n trainable=False)(input1)\n embed_layer = SpatialDropout1D(rate_drop_dense)(embed_layer)\n\n x = Bidirectional(\n CuDNNGRU(gru_len, return_sequences=True))(\n embed_layer)\n capsule = Capsule(num_capsule=Num_capsule, dim_capsule=Dim_capsule, routings=Routings,\n share_weights=True)(x)\n # output_capsule = Lambda(lambda x: K.sqrt(K.sum(K.square(x), 2)))(capsule)\n capsule = Flatten()(capsule)\n capsule = Dropout(dropout_p)(capsule)\n output = Dense(1, activation='sigmoid')(capsule)\n model = Model(inputs=input1, outputs=output)\n model.compile(\n loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n model.summary()\n return model\nmodel = get_model()\nfrom keras.callbacks import EarlyStopping\nearlystopping = EarlyStopping(patience=2, verbose=1, restore_best_weights=True)\nmodel.fit(train_X, train_y, batch_size=64, epochs=50, validation_data=(val_X, val_y), callbacks=[earlystopping])\npred_glove_val_y = model.predict([val_X], batch_size=1024, verbose=1)\nfor thresh in np.arange(0.1, 0.501, 0.01):\n thresh = np.round(thresh, 2)\n print(\"F1 score at threshold {0} is {1}\".format(thresh, metrics.f1_score(val_y, (pred_glove_val_y>thresh).astype(int))))\npred_glove_test_y = model.predict([test_X], batch_size=1024, verbose=1)\npred_test_y = (pred_glove_test_y>0.34).astype(int)\nout_df = pd.DataFrame({\"qid\":test_df[\"qid\"].values})\nout_df['prediction'] = pred_test_y\nout_df.to_csv(\"submission.csv\", 
index=False)\n","repo_name":"aorursy/new-nb-3","sub_path":"ghostiphate_capsule-net.py","file_name":"ghostiphate_capsule-net.py","file_ext":"py","file_size_in_byte":7787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11193940310","text":"from django.shortcuts import render\nfrom rest_framework.viewsets import ModelViewSet\nfrom testapp.models import Employee\nfrom testapp.serializers import EmployeeSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom io import BytesIO\nfrom rest_framework.parsers import JSONParser\n\n# Create your views here.\nclass EmployeeView(ModelViewSet):\n queryset = Employee.objects.all()\n serializer_class = EmployeeSerializer\n\n\nclass EmployeeAPIView(APIView):\n def get(self, request, *args, **kwargs):\n qs = Employee.objects.all()\n serializer = EmployeeSerializer(qs, many=True)\n return Response(serializer.data)\n\n def post(self, request, *args, **kwargs):\n json_data = request.body\n stream = BytesIO(json_data)\n python_data = JSONParser().parse(stream)\n emp_serializer = EmployeeSerializer(data=python_data)\n if emp_serializer.is_valid():\n emp_serializer.save()\n return Response('Employee record inserted.') \n return Response(emp_serializer.errors)\n\n def put(self, request, *args, **kwargs):\n json_data = request.body\n stream = BytesIO(json_data)\n python_data = JSONParser().parse(stream=stream)\n emp_no = python_data.get('eno')\n emp_obj = Employee.objects.get(eno=emp_no)\n emp_serializer = EmployeeSerializer(emp_obj, data=python_data, partial=True)\n if emp_serializer.is_valid():\n emp_serializer.save()\n return Response('Employee record updated.') \n return Response(emp_serializer.errors)\n\n def delete(self, request, *args, **kwargs):\n json_data = request.body\n stream = BytesIO(json_data)\n python_data = JSONParser().parse(stream=stream)\n emp_no = python_data.get('eno')\n emp_obj = Employee.objects.get(eno=emp_no)\n emp_obj.delete()\n return Response('Employee record deleted.') \n","repo_name":"mdmubeenkhan/djangobasicAPI","sub_path":"testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21475112317","text":"#!/usr/bin/env python\n\"\"\"\nUse this to execute and clean up notebooks by:\n\n - Execute the notebook to make sure it works as intended\n - Adding Last executed line\n - Converting references\n - Adding a References section\n - Rewriting the notebook\n\nNote: this does not preserve NGLView widgets. 
You will have to\nre-execute those notebooks and click Widgets > Save Notebook Widget State\nto keep functionality in the HTML version.\n\nTo run::\n\n ./clean_example_notebooks.py *.ipynb -vvv\n\nReferences\n==========\n\nThis script uses the sphinxcontrib-bibtex extension for references.\n\"\"\"\n\nimport argparse\nimport datetime\nimport glob\nimport logging\nimport os\nimport re\nimport shutil\nimport sys\n\nimport MDAnalysis as mda\nimport nbformat\nimport pybtex as tex\nimport pybtex.plugin\nfrom nbconvert.preprocessors import ExecutePreprocessor\nfrom pybtex.database import BibliographyData, parse_file\nfrom pybtex.style import formatting\n\nparser = argparse.ArgumentParser(description=\"Clean Jupyter notebooks.\")\nparser.add_argument(\"files\", type=str, nargs=\"+\", help=\"notebook files\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass References:\n \"\"\"\n Reference manager\n \"\"\"\n\n style_name = \"plain\"\n template = \"[{i}] {html}\"\n ref_header = \"## References\"\n\n def __init__(self, filename=\"../references.bib\"):\n logger.info(\"Reading references from {}\".format(filename))\n self.data = parse_file(filename)\n logger.info(self.data.entries.keys())\n\n # set up bibliography formatting\n self.style = pybtex.plugin.find_plugin(\n \"pybtex.style.formatting\", self.style_name\n )()\n self.backend = pybtex.plugin.find_plugin(\"pybtex.backends\", \"html\")()\n\n # set up inline reference and doi dictionaries\n entries = self.data.entries\n self.inline = {k: self._parse_entry(v) for k, v in entries.items()}\n self.doi = {k: self._get_doi(v) for k, v in entries.items()}\n\n # set up key regex\n pattern = \"(\" + \")|(\".join(self.data.entries.keys()) + \")\"\n self.regex = re.compile(pattern)\n\n def write_bibliography(self, keys=[]):\n \"\"\"\n Write ordered, numbered bibliography for each notebook.\n Output is in HTML.\n \"\"\"\n bibs = [self.ref_header]\n entries = [self.data.entries[k] for k in keys]\n for i, entry in enumerate(self.style.format_entries(entries), 1):\n html = entry.text.render(self.backend)\n bibs.append(self.template.format(i=i, html=html))\n return \"\\n\\n\".join(bibs)\n\n def _get_doi(self, entry):\n try:\n return \"https://doi.org/{}\".format(entry.fields[\"doi\"])\n except KeyError:\n return \"#References\"\n\n def _parse_entry(self, entry):\n authors = entry.persons[\"author\"]\n inline = authors[0].last_names[0]\n if len(authors) > 2:\n inline += \" *et al.*\"\n elif len(authors) == 2:\n inline += \" and {}\".format(authors[1].last_names[0])\n inline += \", \"\n year = entry.fields[\"year\"]\n inline += year\n return inline\n\n\nclass JupyterCell:\n \"\"\"\n Handles each Jupyter cell.\n \"\"\"\n\n last_executed = \"**Last executed:** {} with MDAnalysis {}\"\n time_fmt = \"%b %d, %Y\"\n tag = \"a\"\n close_tag = \"\".format(tag)\n tagline = (\n '<{} data-cite=\"{{key}}\" ' 'href=\"{{url}}\">{{authors}}'\n ).format(tag, tag)\n\n @classmethod\n def as_references(cls, refs, keys=[]):\n source = refs.write_bibliography(keys)\n return cls(source=source)\n\n def __init__(self, cell_type=\"markdown\", metadata={}, source=\"\", **kwargs):\n self.cell_type = cell_type\n self.metadata = metadata\n self.source = source\n self.lines = source.split(\"\\n\")\n self.kwargs = kwargs\n\n def update_last_executed(self, now, version):\n self._update_last_executed_lines(now, version)\n self.source = \"\\n\".join(self.lines)\n\n def _update_last_executed_lines(self, now, version):\n 
\"\"\"\n Add a **Last executed** line to the first cell.\n\n First tries to replace former **Last executed**.\n Then, sticks it in front of **Last updated**.\n If that is not there, then in front of **Minimum version**.\n Finally, the last option is just the last line of the first cell.\n \"\"\"\n copied = self.lines[:]\n time = now.strftime(self.time_fmt)\n newline = self.last_executed.format(time, version)\n for i, line in enumerate(copied):\n if \"last executed\" in line.lower():\n self.lines[i] = newline\n return\n for i, line in enumerate(copied):\n if \"last updated\" in line.lower():\n self.lines.insert(i - 1, \"\\n\" + newline)\n return\n for i, line in enumerate(copied):\n if \"minimum version\" in line.lower():\n self.lines.insert(i - 1, \"\\n\" + newline)\n return\n self.lines.append(newline + \"\\n\")\n\n def find_reference_keys(self, refs, keys=[]):\n \"\"\"\n Replace shorthand reference keys with formatted, linked\n inline references. In the Jupyter notebook these link to the\n paper DOI. In the HTML output they are displayed as Sphinx\n references.\n\n Track the order of the keys for a final bibliography cell.\n \"\"\"\n matches = [\n x for x in re.split(refs.regex, self.source) if x is not None\n ]\n new_source = \"\"\n\n while len(matches) > 1:\n key = matches[1]\n if key not in keys:\n keys.append(key)\n\n authors = refs.inline[key]\n url = refs.doi[key]\n\n before = matches[0]\n prev_char = before[-1]\n # shorthand\n if prev_char == \"#\":\n new_source += before[:-1]\n # already in an HTML tag\n elif prev_char in ('\"', \"'\"):\n new_source += before.rsplit(\"<\", maxsplit=1)[0]\n if len(matches) > 2:\n matches[2] = matches[2].split(self.close_tag, maxsplit=1)[\n -1\n ]\n tag = self.tagline.format(key=key, authors=authors, url=url)\n new_source += tag\n matches.pop(0)\n matches.pop(0)\n\n if len(matches):\n new_source += matches.pop(0)\n\n self.source = new_source\n self.lines = new_source.split(\"\\n\")\n\n def to_dict(self):\n \"\"\"\n Turn into a dictionary\n \"\"\"\n cell = {\n \"cell_type\": self.cell_type,\n \"metadata\": self.metadata,\n \"source\": self.source,\n }\n cell.update(self.kwargs)\n return cell\n\n\nclass JupyterNotebook:\n \"\"\"\n Handles a Jupyter notebook. It immediately backs up the original to\n .original, just in case.\n \"\"\"\n\n kernel_name = os.environ[\"CONDA_DEFAULT_ENV\"]\n version = mda.__version__\n\n def __init__(self, filename, refs):\n logger.info(\"Operating on notebook {}\".format(filename))\n with open(filename, \"r\") as f:\n self.contents = nbformat.reads(f.read(), as_version=4)\n self.cells = [JupyterCell(**c) for c in self.contents[\"cells\"]]\n self.metadata = self.contents[\"metadata\"]\n self.keys = []\n self.refs = refs\n self.filename = filename\n # make backup\n split = filename.split(\"/\")\n backup_name = \"/\".join(split[:-1] + [\".\" + split[-1]])\n shutil.copyfile(filename, backup_name)\n self.err = None\n self.nglview = False\n\n def clean(self):\n \"\"\"\n Write references and execute the notebook. If an error is\n raised, save the error. 
If no error is raised, the notebook\n is overwritten.\n \"\"\"\n self.get_references()\n try:\n self.execute_and_update()\n except Exception as err:\n self.err = err\n\n def get_references(self):\n \"\"\"\n Get references in all the cells and build a bibliography in the\n last cell.\n \"\"\"\n logger.info(\" Rewriting references\")\n for cell in self.cells:\n if cell.cell_type == \"markdown\":\n cell.find_reference_keys(self.refs, keys=self.keys)\n if \"## References\" in self.cells[-1].source:\n self.cells = self.cells[:-1]\n\n if self.keys:\n self.cells.append(JupyterCell.as_references(self.refs, self.keys))\n self.contents[\"cells\"] = [c.to_dict() for c in self.cells]\n self.contents = nbformat.from_dict(self.contents)\n\n def execute_and_update(self):\n \"\"\"\n Execute the notebook. Overwrite the notebook if\n \"\"\"\n original_contents = self.contents\n logger.info(\" Executing\")\n # Choosing a kernel name gets me errors?\n ep = ExecutePreprocessor(timeout=600, kernel_name=self.kernel_name)\n ep.preprocess(self.contents)\n\n logger.info(\" Updating last executed\")\n # on success, replace Last executed line\n first_cell = JupyterCell(**self.contents[\"cells\"][0])\n now = datetime.datetime.now()\n first_cell.update_last_executed(now, self.version)\n\n if \"nglview\" in first_cell.source:\n # never mind, this doesn't successfully preserve older widget states.\n # self.contents = original_contents\n self.nglview = True\n\n self.contents[\"cells\"][0] = first_cell.to_dict()\n self.contents[\"metadata\"] = self.metadata\n self.contents = nbformat.from_dict(self.contents)\n\n logger.info(\" Rewriting notebook\")\n with open(self.filename, \"w\", encoding=\"utf-8\") as f:\n nbformat.write(self.contents, f)\n\n\ndef clean_all_notebooks(notebooks):\n not_backups = [n for n in notebooks if not n.split(\"/\")[-1][0] == \".\"]\n if not not_backups:\n return\n refs = References()\n errs = []\n nglview = []\n for nb in not_backups:\n notebook = JupyterNotebook(nb, refs)\n notebook.clean()\n if notebook.err is not None:\n errs.append((nb, notebook.err))\n if notebook.nglview:\n nglview.append(nb)\n\n if nglview:\n print(\"Re-execute for NGLView: \")\n print(\"\\n\".join(nglview))\n\n if len(errs):\n errmsgs = [\"{}: {}\".format(nb, err) for nb, err in errs]\n delim = \"\\n\" + \"===\" * 10 + \"\\n\"\n raise ValueError(\n \"Notebooks have errors: {}\".format(delim.join(errmsgs))\n )\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n level = max(0, 50 - (args.verbose * 10))\n logging.basicConfig(\n level=level,\n stream=sys.stdout,\n )\n clean_all_notebooks(args.files)\n","repo_name":"MDAnalysis/UserGuide","sub_path":"doc/source/scripts/clean_example_notebooks.py","file_name":"clean_example_notebooks.py","file_ext":"py","file_size_in_byte":10814,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"18"} +{"seq_id":"8787334580","text":"import Chapter3.BinaryClassifier_2 as ch3Bin\r\n\r\ndef multioutputClassfier():\r\n from sklearn.neighbors import KNeighborsClassifier\r\n import numpy as np\r\n\r\n noise = np.random.randint(0, 100, (len(ch3Bin.X_train), 784))\r\n X_train_mod = ch3Bin.X_train + noise #원래 이미지에 노이즈 추가\r\n noise = np.random.randint(0, 100, (len(ch3Bin.X_test), 784))\r\n X_test_mod = ch3Bin.X_test + noise #원래 이미지에 노이즈 추가\r\n y_train_mod = ch3Bin.X_train #정답데이터는 노이즈 추가하기 전 데이터\r\n y_test_mod = ch3Bin.y_test #정답데이터는 노이즈 추가하기 전 데이터\r\n\r\n knn_clf = KNeighborsClassifier() # KNeighborsClassifier은 다중 레이블 분류를 지원\r\n 
knn_clf.fit(X_train_mod, y_train_mod)\r\n clean_digit = knn_clf.predict(X_test_mod[0].reshape(1, -1))\r\n\r\n import matplotlib.pyplot as plt\r\n\r\n def plot_digits(instances, images_per_row=10, **options): #없어서 내가 만듬, 신경안써도 됨\r\n import matplotlib\r\n\r\n size = 28\r\n images_per_row = min(len(instances), images_per_row)\r\n images = [instance.reshape(size, size) for instance in instances]\r\n n_rows = (len(instances) - 1) // images_per_row + 1\r\n row_images = []\r\n n_empty = n_rows * images_per_row - len(instances)\r\n images.append(np.zeros((size, size * n_empty)))\r\n for row in range(n_rows):\r\n rimages = images[row * images_per_row: (row + 1) * images_per_row]\r\n row_images.append(np.concatenate(rimages, axis=1))\r\n image = np.concatenate(row_images, axis=0)\r\n plt.imshow(image, cmap=matplotlib.cm.binary, **options)\r\n plt.axis(\"off\")\r\n\r\n plot_digits(clean_digit, images_per_row=1)\r\n plt.show()\r\n print(y_test_mod[0], \"복원\")\r\n\r\nif __name__ == '__main__':\r\n multioutputClassfier()","repo_name":"sunminky/tensorflow","sub_path":"Chapter3/MultioutputClassfier_6.py","file_name":"MultioutputClassfier_6.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14732140426","text":"# 대표값\ndef representative_value(n, list):\n # 5 <= n 100\n # 학생의 번호는 1 부터 시작.\n\n avg = sum(list) / n\n\n student_num = 1\n student_score = list[student_num]\n min_avg = abs(list[student_num] - avg)\n\n # print(avg)\n\n for x in range(1, n):\n if abs(list[x] - avg) < min_avg:\n print(x)\n student_num = x + 1\n student_score = list[x]\n min_avg = abs(list[x] - avg)\n\n print(student_num, student_score)\n\nrepresentative_value(10, [45, 73, 66, 87, 92, 67, 75, 79, 75, 80])\n# representative_value(20, [13, 34, 17, 6, 11, 15, 27, 42, 39, 31, 25, 36, 32, 25, 17, 45, 67, 89, 24, 65])\n\n# 개선\ndef representative_value2(n, list):\n # python 에서 round 는 사사오입의 개념. 
4.5 => 4, 5.5 => 6\n avg = int((sum(list) / n) + 0.5)\n\n min = abs(list[0] - avg)\n student_score = list[0]\n student_num = 1\n\n for index, score in enumerate(list):\n temp = abs(score - avg)\n\n if temp < min:\n min = temp\n student_score = score\n student_num = index + 1\n elif temp == min:\n if score > student_score:\n student_score = score\n student_num = index + 1\n print(avg, student_num)\n\nrepresentative_value2(10, [45, 73, 66, 87, 92, 67, 75, 79, 75, 80])\n# representative_value2(20, [13, 34, 17, 6, 11, 15, 27, 42, 39, 31, 25, 36, 32, 25, 17, 45, 67, 89, 24, 65])","repo_name":"talentceffort/python-algorithm","sub_path":"section2/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42282445241","text":"import scrapy\nfrom ..items import Douban250Item\n\nclass DoubanSpider(scrapy.Spider):\n name = 'douban'\n allowed_domains = ['movie.douban.com']\n\n start = 0\n end = '&filter='\n url = 'https://movie.douban.com/top250?start='\n start_urls = [url + str(start) + end]\n\n def parse(self, response):\n item = Douban250Item()\n lists = response.xpath('//li/div[@class=\"item\"]')\n for list in lists:\n item['title'] = list.xpath('./div[@class=\"info\"]/div[@class=\"hd\"]/a/span[@class=\"title\"]/text()').get()\n item['other'] = list.xpath('./div[@class=\"info\"]/div[@class=\"hd\"]/a/span[@class=\"other\"]/text()').get()\n item['info'] = list.xpath('./div[@class=\"info\"]/div[@class=\"bd\"]/p/text()').get()\n item['score'] = list.xpath('./div[@class=\"info\"]/div[@class=\"bd\"]/div[@class=\"star\"]/span[@class=\"rating_num\"]/text()').get()\n item['detail_href'] = list.xpath('./div[@class=\"pic\"]/a/@href').get()\n item['img_url'] = list.xpath('./div[@class=\"pic\"]/a/img/@src').get()\n yield item\n\n if self.start <= 255:\n self.start += 25\n yield scrapy.Request(self.url + str(self.start) + self.end, callback=self.parse)\n\n\n","repo_name":"three-body-zhangbeihai/spiderProjects","sub_path":"douban250/douban250/spiders/douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7565186970","text":"from keras.models import Model\r\nfrom keras.layers import Input\r\nfrom keras.layers import LSTM\r\nfrom keras.layers import Dense\r\nfrom keras.layers import GRU\r\nimport numpy as py\r\nimport keras\r\nfrom loaddata import *\r\n\r\nX1_train_stopfeatures,X2_train_headway,y_train_target_headway = get_train_dataset()\r\n\r\nn_headway_features = 28\r\nn_stop_features = X1_train_stopfeatures.shape[2]\r\n\r\n\r\n\r\nn_units = 64\r\n\r\nepochs = 30\r\n\r\n\r\nlearning_rate = 0.01\r\ndecay = 0 # Learning rate decay\r\noptimiser = keras.optimizers.Adam(lr=learning_rate, decay=decay)\r\n\r\nbatch_size = 64\r\n\r\n'''\r\n# define training encoder\r\nencoder_inputs = Input(shape=(None, n_stop_features))\r\nencoder = LSTM(n_units, return_state=True)\r\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\r\nencoder_states = [state_h, state_c]\r\n# define training decoder\r\ndecoder_inputs = Input(shape=(None, n_headway_features))\r\nprint(decoder_inputs)\r\nprint(decoder_inputs[:,:,0])\r\n\r\ndecoder_lstm = LSTM(n_units, return_sequences=True, return_state=True)\r\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)\r\n\r\n\r\ndecoder_dense = Dense(n_headway_features,activation='relu')\r\ndecoder_outputs = 
decoder_dense(decoder_outputs)\r\n\r\n\r\n\r\nmodel = Model(inputs = [encoder_inputs, decoder_inputs],outputs = decoder_outputs)\r\n'''\r\n\r\nencoder_inputs = Input(shape=(None, n_stop_features))\r\nencoder = GRU(n_units, return_state=True)\r\nencoder_outputs, state_h = encoder(encoder_inputs)\r\n\r\ndecoder_inputs = Input(shape=(None, n_headway_features))\r\ndecoder_gru = GRU(n_units, return_sequences=True)\r\ndecoder_outputs = decoder_gru(decoder_inputs, initial_state=state_h)\r\ndecoder_dense = Dense(n_headway_features,activation='relu')\r\ndecoder_outputs = decoder_dense(decoder_outputs)\r\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\r\n\r\n\r\nmodel.compile(optimizer=optimiser, loss='mse',metrics=['acc'])\r\n\r\nX1_test_stopfeatures,X2_test_headway,y_test_target_headway = get_test_dataset()\r\n\r\nmodel.fit([X1_train_stopfeatures,X2_train_headway],y_train_target_headway,batch_size = batch_size,epochs = epochs,validation_split=0.2)\r\n\r\n\r\nprint('-------------------------------------------------------------')\r\n\r\n\r\nx = model.predict([X1_test_stopfeatures,X2_test_headway])\r\n\r\n\r\nallnum_real = 0\r\nallnum_pre = 0\r\naccnum = 0\r\n\r\noffset = 0\r\nallnum = 0\r\n\r\nthreshold_time = float(3/30)\r\n\r\nfor i in range(0,x.shape[0]):\r\n\r\n for index in range(0,n_headway_features):\r\n allnum+=1\r\n offset+=abs(list(y_test_target_headway[i,0,:])[index] - list(x[i,0,:])[index])\r\n if list(y_test_target_headway[i,0,:])[index] <= threshold_time:\r\n allnum_real+=1\r\n if list(x[i,0,:])[index] <= threshold_time:\r\n allnum_pre+=1\r\n if (list(x[i,0,:])[index] <= threshold_time) and (list(y_test_target_headway[i,0,:])[index] <= threshold_time):\r\n accnum+=1\r\n\r\nprint(\"allnum_real:\")\r\nprint(allnum_real)\r\nprint(\"allnum_pre:\")\r\nprint(allnum_pre)\r\nprint(\"accnum:\")\r\nprint(accnum)\r\n\r\n\r\nprint()\r\nprint()\r\nprint(offset/allnum)\r\n\r\n\r\n\r\n","repo_name":"gzyszuuow/BusBunching","sub_path":"BusRoute400/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"33556335335","text":"import numpy as np\nfrom PIL import Image\nfrom icevision.all import *\n\ndef IoU(box1, box2, mode='xyxy'):\n \"\"\" Calculate the IoU of a box pair\n box1: (xmin1, ymin1, width1, height1)\n box2: (xmin2, ymin2, width2, height2)\n \"\"\"\n if mode == 'xywh':\n # Extract params\n xmin1, ymin1, width1, height1 = box1\n xmin2, ymin2, width2, height2 = box2\n \n # Compute the x, y maxes\n xmax1 = xmin1 + width1 - 1\n xmax2 = xmin2 + width2 - 1\n \n ymax1 = ymin1 + height1 - 1\n ymax2 = ymin2 + height2 - 1\n\n elif mode == 'xyxy':\n xmin1, ymin1, xmax1, ymax1 = box1\n xmin2, ymin2, xmax2, ymax2 = box2\n\n else:\n raise Exception(f\"Unsupported mode type '{mode}'\")\n \n xA = min(xmax1, xmax2)\n xB = max(xmin1, xmin2)\n x_overlap = max(xA - xB + 1, 0)\n \n yA = min(ymax1, ymax2)\n yB = max(ymin1, ymin2)\n y_overlap = max(yA - yB + 1, 0)\n \n intersection = x_overlap * y_overlap\n \n area1 = (xmax1 - xmin1 + 1) * (ymax1 - ymin1 + 1)\n area2 = (xmax2 - xmin2 + 1) * (ymax2 - ymin2 + 1)\n\n # area1 = width1 * height1\n # area2 = width2 * height2\n \n union = area1 + area2 - intersection\n \n iou = intersection / union\n \n return iou\n\ndef calculate_conf(gts, preds, confs, mode='xyxy'):\n confs_order = np.argsort(confs)[::-1]\n preds = preds[confs_order]\n\n main_ioumat = np.zeros((len(gts), len(preds)))\n\n for i, gt in enumerate(gts):\n for j, 
pred in enumerate(preds):\n main_ioumat[i, j] = IoU(gt, pred, mode=mode)\n\n # We'll need this at each threshold\n preds_set = set([x for x in range(0, len(preds))])\n\n tps = 0\n fps = 0\n fns = 0\n\n for thr in np.arange(0.3, 0.85, 0.05):\n # print(thr)\n\n ioumat = main_ioumat.copy()\n ioumat[ioumat < thr] = 0\n # ioumat\n\n mask = (ioumat != 0)\n res = np.where(mask.any(axis=1), mask.argmax(axis=1), -1)\n\n tp_set = set(res[res != -1])\n\n tps += len(tp_set)\n fps += len(preds_set - tp_set)\n fns += len(res) - len(tp_set)\n\n return tps, fps, fns\n\ndef calculate_conf_all_samples(gts, bboxes, scores, mode='xyxy'):\n total_tps = 0\n total_fps = 0\n total_fns = 0\n\n for i, _ in enumerate(gts):\n \n # If the image has no bboxes, and we predicted no bboxes, no accumulation\n if (len(gts[i]) == 0) and (len(bboxes[i]) == 0):\n tps, fps, fns = 0, 0, 0\n \n # If the image has no bboxes, and we predicted bboxes, all are false positives\n elif (len(gts[i]) == 0) and (len(bboxes[i]) != 0):\n tps, fps, fns = 0, len(bboxes[i]), 0\n \n # If the image has bboxes, and we predicted no bboxes, all are false negatives\n elif (len(gts[i]) != 0) and (len(bboxes[i]) == 0):\n tps, fps, fns = 0, 0, len(gts[i])\n \n # Otherwise let's do the full calculation\n else:\n tps, fps, fns = calculate_conf(gts[i], bboxes[i], scores[i], mode=mode)\n \n total_tps += tps\n total_fps += fps\n total_fns += fns\n\n return total_tps, total_fps, total_fns\n\ndef compute_f2(model_type,\n model,\n valid_ds,\n valid_tfms,\n class_map,\n mode='xyxy',\n thr_min=0.1,\n thr_max=0.9,\n thr_step=0.05):\n ### Get predictions from the trained model\n detection_threshold = 0.1\n preds = []\n\n # Make prediction in the original image coordinates\n # Note: iterating through valid_ds.records is siginificantly\n # faster than iterating through valid_ds\n for entry in tqdm(valid_ds.records):\n img = Image.open(entry.filepath)\n \n pred = model_type.end2end_detect(img, \n valid_tfms, \n model, \n class_map=class_map, \n detection_threshold=detection_threshold)\n preds.append(pred)\n \n img.close()\n\n thrs = np.arange(thr_min, thr_max + thr_step, thr_step)\n f2s = []\n\n for thr in thrs:\n # print(thr)\n # thr = 0.5\n\n ### Extract the relevant parameters from predictions\n bboxes = []\n scores = []\n\n for pred in preds:\n current_bboxes = pred['detection']['bboxes']\n current_scores = pred['detection']['scores']\n\n # Filter based on current threshold value thr\n output_bboxes, output_scores = [], []\n for i, _ in enumerate(current_bboxes):\n if current_scores[i] > thr:\n output_bboxes.append(current_bboxes[i].xyxy)\n output_scores.append(current_scores[i])\n\n bboxes.append(np.array(output_bboxes))\n scores.append(np.array(output_scores))\n\n ### Get the ground truth boxes\n\n gts = []\n\n for record in valid_ds.records:\n gts.append(np.array([bb.xyxy for bb in record.detection.bboxes]))\n\n # Calculate dataset-wide tps, fps, fns\n total_tps, total_fps, total_fns = calculate_conf_all_samples(gts, bboxes, scores, mode=mode)\n\n # Compute precision, recall, F2\n if (total_tps + total_fps) > 0:\n precision = total_tps / (total_tps + total_fps)\n else:\n precision = 0\n if (total_tps + total_fns) > 0:\n recall = total_tps / (total_tps + total_fns)\n else:\n recall = 0\n \n if (precision != 0) and (recall != 0):\n f2 = 5 * precision * recall / (4 * precision + recall)\n else:\n f2 = 0\n \n f2s.append(f2)\n \n return thrs, 
f2s","repo_name":"laserstonewall/kaggle-reef","sub_path":"src/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8803469118","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nKuplung - OpenGL Viewer, python port\nsupudo.net\n\"\"\"\n__author__ = 'supudo'\n__version__ = \"1.0.0\"\n\nimport imgui\nimport _thread\nimport time\nfrom settings import Settings\n\ndef add_slider(title, idx, step, min, limit, show_animate=True,\n animated_flag=False, animated_value=0.0,\n do_minus=False, is_frame=True):\n if title != '':\n imgui.text(title)\n if show_animate:\n c_id = \"##00\" + str(idx)\n if animated_flag is not None and show_animate:\n _, animated_flag = imgui.checkbox(c_id, animated_flag)\n if animated_flag:\n animate_value(is_frame, animated_flag, animated_value, step,\n limit, do_minus)\n if imgui.is_item_hovered():\n imgui.set_tooltip('Animate ' + title)\n imgui.same_line()\n s_id = '##10' + str(idx)\n _, values = imgui.slider_float(s_id, animated_value, min, limit, \"%.03f\", 1.0)\n return animated_flag, values\n\ndef add_slider_control(title, idx, min, limit, value=0.0):\n if title != '':\n imgui.text(title)\n s_id = '##10' + str(idx)\n _, values = imgui.slider_float(s_id, value, min, limit, \"%.03f\", 1.0)\n return values\n\ndef add_int_slider_control(title, idx, min, limit, value=0):\n if title != '':\n imgui.text(title)\n s_id = '##10' + str(idx)\n _, values = imgui.slider_int(s_id, value, min, limit, \"%.f\")\n return values\n\ndef add_color3(title, color, animate):\n ce_id = '##101' + title\n imgui.text_colored(title, color.r, color.g, color.b, 255.0)\n _, new_color = imgui.color_edit3(ce_id, color.r, color.g, color.b)\n color.r = new_color[0]\n color.g = new_color[1]\n color.b = new_color[2]\n imgui.same_line()\n imgui.push_style_color(imgui.COLOR_BUTTON, 0, 0, 0, 0)\n imgui.push_style_color(imgui.COLOR_BUTTON_HOVERED, 0, 0, 0, 0)\n imgui.push_style_color(imgui.COLOR_BUTTON_ACTIVE, 0, 0, 0, 0)\n imgui.push_style_color(imgui.COLOR_BORDER, 0, 0, 0, 0)\n # TODO: show color picker\n imgui.pop_style_color(4)\n imgui.new_line()\n return color, animate\n\ndef add_color4(title, color, animate):\n ce_id = '##101' + title\n imgui.text_colored(title, color.r, color.g, color.b, color.a)\n _, new_color = imgui.color_edit4(ce_id, color.r, color.g, color.b, color.a, True)\n color.r = new_color[0]\n color.g = new_color[1]\n color.b = new_color[2]\n color.a = new_color[3]\n imgui.same_line()\n imgui.push_style_color(imgui.COLOR_BUTTON, 0, 0, 0, 0)\n imgui.push_style_color(imgui.COLOR_BUTTON_HOVERED, 0, 0, 0, 0)\n imgui.push_style_color(imgui.COLOR_BUTTON_ACTIVE, 0, 0, 0, 0)\n imgui.push_style_color(imgui.COLOR_BORDER, 0, 0, 0, 0)\n # TODO: show color picker\n imgui.pop_style_color(4)\n imgui.new_line()\n return color, animate\n\ndef add_controls_slider_same_line(title, idx, step, min, limit, show_animate,\n animated_flag, animated_value, do_minus,\n is_frame):\n if show_animate:\n c_id = \"##00\" + str(idx)\n _, animated_flag = imgui.checkbox(c_id, animated_flag)\n if animated_flag:\n animate_value(is_frame, animated_flag, animated_value, step,\n limit, do_minus)\n if imgui.is_item_hovered():\n imgui.set_tooltip(\"Animate \" + title)\n imgui.same_line()\n s_id = \"##10\" + str(idx)\n _, animated_value = imgui.slider_float(s_id, animated_value, min,\n limit, \"%.03f\", 1.0)\n imgui.same_line()\n imgui.text(title)\n return animated_flag, animated_value\n\ndef 
animate_value(isFrame, animatedFlag, animatedValue, step, limit, doMinus):\n try:\n # TODO: animate in thread\n # _thread.start_new_thread(\n # animate_value_async,\n # (1, isFrame, animatedFlag, animatedValue, step, limit, doMinus)\n # )\n pass\n except:\n Settings.do_log(\"[UIHelpers] Error: cannot start animating thread!\")\n\ndef animate_value_async(delay, is_frame, animated_flag,\n animated_value, step, limit, do_minus):\n while (animated_flag):\n if is_frame:\n v = animated_value\n v += step\n if v > limit:\n v = (-1 * limit) if do_minus else 0\n animated_value = v\n is_frame = False\n time.sleep(delay)\n\n\ndef draw_tabs(tabs_labels, tabs_icons, value_init, style_padding=10.0, font_scale=2.0):\n selected_item = value_init\n\n imgui.push_style_color(imgui.COLOR_BUTTON, 153 / 255, 68 / 255, 61 / 255, 255 / 255)\n imgui.push_style_color(imgui.COLOR_BUTTON_HOVERED, 178 / 255, 64 / 255, 53 / 255, 255 / 255)\n imgui.push_style_color(imgui.COLOR_BUTTON_ACTIVE, 204 / 255, 54 / 255, 40 / 255, 255 / 255)\n imgui.push_style_var(imgui.STYLE_ITEM_SPACING, (style_padding, style_padding))\n imgui.push_style_var(imgui.STYLE_FRAME_ROUNDING, 2.0)\n imgui.set_window_font_scale(font_scale)\n\n for i in range(len(tabs_labels)):\n clicked = imgui.button(' ' + tabs_labels[i][0] + ' ')\n if imgui.is_item_hovered():\n imgui.set_tooltip(tabs_labels[i])\n if clicked:\n selected_item = i\n imgui.same_line()\n\n imgui.set_window_font_scale(1.0)\n imgui.pop_style_var(2)\n imgui.pop_style_color(3)\n\n imgui.new_line()\n\n return selected_item","repo_name":"supudo/Kuplung-Py","sub_path":"ui/ui_helpers/UIHelpers.py","file_name":"UIHelpers.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"36621515182","text":"import pygame, sys, copy\nimport requests\nfrom time import sleep\n\n\ndef new_board(diff=1):\n board = requests.get(f'https://sudoku--api.herokuapp.com/new-board?diff={diff}').json()['response']['unsolved-sudoku']\n\n return board\n\n\nclass Sudoku():\n def __init__(self, diff=1):\n self.org = new_board(diff)\n self.second = copy.deepcopy(self.org)\n self.solu_board = copy.deepcopy(self.org)\n self._solve()\n\n self.text_r = [[], [], [], [], [], [], [], [], []]\n\n def find_next(self):\n for y in range(9):\n for x in range(9):\n if self.solu_board[y][x] == 0:\n return y, x\n return None\n\n def valid(self, n, y, x):\n for i in range(9):\n if self.solu_board[y][i] == n:\n return False\n for i in range(9):\n if self.solu_board[i][x] == n:\n return False\n x_ = (x // 3) * 3\n y_ = (y // 3) * 3\n for i in range(3):\n for j in range(3):\n if self.solu_board[y_ + i][x_ + j] == n:\n return False\n return True\n\n def _solve(self):\n find = self.find_next()\n if find:\n col, row = find\n else:\n return True\n for i in range(1, 10):\n if self.valid(i, col, row):\n self.solu_board[col][row] = i\n if self._solve():\n return True\n self.solu_board[col][row] = 0\n return\n\n def fn(self):\n for y in range(9):\n for x in range(9):\n if self.org[y][x] == 0:\n return y, x\n return None\n\n def v(self, n, y, x):\n for i in range(9):\n if self.org[y][i] == n:\n return False\n for i in range(9):\n if self.org[i][x] == n:\n return False\n x_ = (x // 3) * 3\n y_ = (y // 3) * 3\n for i in range(3):\n for j in range(3):\n if self.org[y_ + i][x_ + j] == n:\n return False\n return True\n\n def _s(self):\n find = self.fn()\n if find:\n col, row = find\n else:\n return True\n for i in range(1, 10):\n if self.v(i, col, row):\n 
self.org[col][row] = i\n pygame.time.delay(60)\n self.draw_grid()\n if self._s():\n return True\n self.org[col][row] = 0\n pygame.time.delay(60)\n self.draw_grid()\n return\n\n def restart(self):\n self.org = copy.deepcopy(self.second)\n\n def draw_grid(self):\n x_const = 0\n y_const = 0\n for i in range(9):\n for j in range(9):\n\n self.text_r[i].append(pygame.Rect(305 + (j * 67) + x_const, 5 + (i * 67) + y_const, 67, 67))\n if (j + 1) % 3 == 0:\n x_const += 5\n else:\n x_const += 1\n center = list(self.text_r[i][j].center)\n center[0] -= 5\n center[1] -= 15\n\n if board.second[i][j] == 0:\n pygame.draw.rect(screen, (211, 211, 211), self.text_r[i][j])\n else:\n pygame.draw.rect(screen, (255, 255, 255), self.text_r[i][j])\n\n if board.org[i][j] != 0:\n screen.blit(game_font.render(str(self.org[i][j]), False, (0)), center)\n\n if board.second[i][j] == 0:\n if self.org[i][j] != 0 and self.org[i][j] == self.solu_board[i][j]:\n screen.blit(game_font.render(str(self.org[i][j]), False, (0)), center)\n elif board.org[i][j] != 0 and board.org[i][j] != self.solu_board[i][j]:\n screen.blit(game_font.render(str(self.org[i][j]), False, (255, 0, 0)), center)\n\n if (i + 1) % 3 == 0:\n y_const += 5\n else:\n y_const += 1\n x_const = 0\n pygame.display.flip()\n\n\npygame.init()\n\npygame.display.set_caption(\"Sudoku!\")\nscreen = pygame.display.set_mode((930, 630))\nscreen.fill(0)\n\ngame_font = pygame.font.Font(\"font.ttf\", 28)\nwin_font = pygame.font.Font(\"font.ttf\", 20)\n\nsettings = pygame.Rect(0, 0, 300, 630)\neasy_mode = pygame.Rect(20, 30, 260, 60)\nmedium_mode = pygame.Rect(20, 110, 260, 60)\nhard_mode = pygame.Rect(20, 190, 260, 60)\nrestart = pygame.Rect(20, 340, 260, 60)\nsolve = pygame.Rect(20, 420, 260, 60)\nexit_game = pygame.Rect(20, 500, 260, 60)\n\nboard = Sudoku()\n\n\ndef draw_menu(mode='easy'):\n pygame.draw.rect(screen, (255, 255, 255), settings)\n\n if mode == 'easy':\n select_box = pygame.Rect(18, 28, 264, 64)\n pygame.draw.rect(screen, (0), select_box, border_radius=3)\n pygame.draw.rect(screen, (124, 252, 0), easy_mode, border_radius=3)\n easy = game_font.render(\"easy\", False, (0))\n screen.blit(easy, (129, 45))\n\n if mode == 'medium':\n select_box = pygame.Rect(18, 108, 264, 64)\n pygame.draw.rect(screen, (0), select_box, border_radius=3)\n pygame.draw.rect(screen, (255, 255, 0), medium_mode, border_radius=3)\n medium = game_font.render(\"medium\", False, (0))\n screen.blit(medium, (114, 125))\n\n if mode == 'hard':\n select_box = pygame.Rect(18, 188, 264, 64)\n pygame.draw.rect(screen, (0), select_box, border_radius=3)\n pygame.draw.rect(screen, (250, 0, 0), hard_mode, border_radius=3)\n hard = game_font.render(\"hard\", False, (0))\n screen.blit(hard, (129, 205))\n\n pygame.draw.rect(screen, (123, 104, 238), restart, border_radius=3)\n restart_ = game_font.render(\"restart\", False, (0))\n screen.blit(restart_, (110, 355))\n pygame.draw.rect(screen, (255, 165, 0), solve, border_radius=3)\n solve_ = game_font.render(\"solve\", False, (0))\n screen.blit(solve_, (125, 435))\n pygame.draw.rect(screen, (0, 191, 255), exit_game, border_radius=3)\n exit_ = game_font.render(\"exit\", False, (0))\n screen.blit(exit_, (129, 515))\n\n\ntime = \"00:00:00\"\nmode = 'easy'\n\nleft_click = False\nright_click = False\n\ntimer_started = False\n\nmilliseconds = 0\nseconds = 0\nminutes = 0\nhours = 0\n\na_key = False\nkey = 0\ncomp_solved = False\nover = False\n\neditable = True\npygame.display.flip()\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # 
Checks if the user quit the game\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n left_click = True\n mx, my = pygame.mouse.get_pos()\n\n if event.button == 3:\n guess = True\n mx, my = pygame.mouse.get_pos()\n\n if event.type == pygame.KEYDOWN:\n a_key = True\n key = event.key % 48 if event.key >= 48 and event.key <= 58 else 0\n\n draw_menu(mode)\n if not over:\n screen.blit(game_font.render(time, False, (0)), (100, 280))\n else:\n screen.blit(game_font.render(time, False, (124, 252, 0)), (40, 280))\n\n if left_click:\n\n if easy_mode.collidepoint(mx, my):\n editable = True\n over = False\n mode = 'easy'\n board = Sudoku()\n timer_started = False\n comp_solved = False\n left_click = False\n\n\n elif medium_mode.collidepoint(mx, my):\n editable = True\n over = False\n mode = 'medium'\n board = Sudoku(2)\n timer_started = False\n comp_solved = False\n left_click = False\n\n\n elif hard_mode.collidepoint(mx, my):\n editable = True\n over = False\n mode = 'hard'\n timer_started = False\n comp_solved = False\n board = Sudoku(3)\n left_click = False\n\n elif restart.collidepoint(mx, my):\n editable = True\n over = False\n board.restart()\n comp_solved = False\n timer_started = False\n left_click = False\n\n elif exit_game.collidepoint(mx, my):\n pygame.quit()\n sys.exit()\n\n elif solve.collidepoint(mx, my):\n editable = False\n board._s()\n left_click = False\n a_key = False\n key = 0\n timer_started = False\n\n elif not editable:\n key = 0\n\n else:\n for i in range(9):\n for j in range(9):\n if editable and board.second[i][j] == 0 and board.text_r[i][j].collidepoint(mx, my):\n if not timer_started:\n start_time = pygame.time.get_ticks()\n milliseconds = 0\n minutes = 0\n seconds = 0\n hours = 0\n timer_started = True\n\n if a_key:\n board.org[i][j] = key\n a_key = False\n key = 0\n\n if timer_started and not over:\n milliseconds = pygame.time.get_ticks() - start_time\n\n if milliseconds >= 1000:\n seconds = milliseconds//1000\n\n if seconds >= 60:\n minutes = seconds // 60\n seconds %= 60\n\n if minutes >= 60:\n hours = minutes // 60\n minutes %= 60\n\n if hours == 23 and minutes == 59 and seconds == 60:\n milliseconds = 0\n\n dis_hour = str(hours) if len(str(hours)) == 2 else '0' + str(hours)\n dis_minute = str(minutes) if len(str(minutes)) == 2 else '0' + str(minutes)\n dis_second = str(seconds) if len(str(seconds)) == 2 else '0' + str(seconds)\n\n time = f'{dis_hour}:{dis_minute}:{dis_second}'\n last_time = copy.deepcopy(time)\n else:\n time = f'00:00:00'\n\n if board == board.solu_board and not comp_solved:\n over = True\n time = 'Solved in: ' + last_time\n editable = False\n\n board.draw_grid()\n\n","repo_name":"uglkjgj/sudoku","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73636917800","text":"from django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .forms import ReservaForm\nfrom .models import Reserva\n\n# Create your views here.\ndef reserva_consulta(request): \n if request.method=='POST':\n for key in request.POST:\n print(request.POST[key])\n codigo = request.POST.get(\"txtCodigo\", \"\")\n reserva = Reserva.objects.get(id__exact=codigo)\n if reserva == null :\n return render_to_response('reserva_consulta.html',{'mensaje':'Reserva no registrada'},RequestContext(request))\n else:\n return 
render_to_response('reserva_consulta.html',{'reserva':reserva},RequestContext(request))\n return render_to_response('reserva_consulta.html',{},RequestContext(request))\n \n@csrf_exempt \ndef reserva_registro(request):\n \n if request.method=='POST':\n print(request.POST)\n accion = request.POST.get(\"hdnAccion\", \"\")\n print(accion)\n if accion == \"1\":\n v={'view_documento':False,'view_libro':False,'libro':'Mi Libro','autor':'Steven','descripcion':'Un libro','material':'Papel','clasificacion':'Accion','isbn':'kdjfsldfjsd'}\n print(v)\n return render_to_response('reserva_registro.html',v,RequestContext(request))\n else:\n v={'view_documento':False,'view_libro':False}\n return render_to_response('reserva_registro.html',v,RequestContext(request))\n else:\n v={'view_documento':False,'view_libro':False}\n return render_to_response('reserva_registro.html',v,RequestContext(request))","repo_name":"arcegk/biblioteca","sub_path":"projects/ingsoft/biblioteca/reservas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28883590210","text":"# https://www.hackerrank.com/challenges/reverse-shuffle-merge/problem?isFullScreen=false\r\n\r\n#!/bin/python3\r\n \r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\nfrom collections import Counter\r\n#\r\n# Complete the 'reverseShuffleMerge' function below.\r\n#\r\n# The function is expected to return a STRING.\r\n# The function accepts STRING s as parameter.\r\n#\r\n \r\ndef reverseShuffleMerge(s):\r\n s=s[::-1]\r\n unused={}\r\n used={}\r\n required={}\r\n stack=[]\r\n \r\n unused=Counter(s)\r\n for ch in unused:\r\n used[ch]=0\r\n required[ch]=unused[ch]//2\r\n print(used)\r\n print(unused)\r\n print(required)\r\n for i in s:\r\n if required[i]>0:\r\n if len(stack)==0:\r\n stack.append(i)\r\n used[i]+=1\r\n unused[i]-=1\r\n required[i]-=1\r\n else:\r\n while (len(stack)>0 and ord(stack[-1])>ord(i)):\r\n if unused[stack[-1]]>required[stack[-1]]:\r\n ch=stack.pop()\r\n used[ch]-=1\r\n required[ch]+=1\r\n else:\r\n stack.append(i)\r\n used[i]+=1\r\n unused[i]-=1\r\n required[i]-=1\r\n break\r\n else:\r\n stack.append(i)\r\n used[i]+=1\r\n unused[i]-=1\r\n required[i]-=1\r\n else:\r\n unused[i]-=1\r\n return (\"\".join(stack))\r\n \r\n \r\n \r\n \r\n # Write your code here\r\n \r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n \r\n s = input()\r\n \r\n result = reverseShuffleMerge(s)\r\n \r\n fptr.write(result + '\\n')\r\n \r\n fptr.close()\r\n","repo_name":"harsh-srivastv/HackerRank-Problems","sub_path":"Reverse Shuffle Merge.py","file_name":"Reverse Shuffle Merge.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"28102274250","text":"import json\nimport logging\nimport math\nimport os\nimport urllib.request\n\nfrom flask import Flask, request\nfrom redis import Redis\nimport datetime as dt\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n# REDIS_HOST = os.getenv(\"REDIS_HOST\", \"redis\")\n# redis_db = Redis(host=REDIS_HOST, db=0, socket_connect_timeout=2, socket_timeout=2)\n\napp = Flask(__name__)\n\n\ndef rest_call(url=\"https://api.covid19api.com/dayone/country/np\"):\n 
user_agent = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) ' \\\n 'Chrome/6.0.472.63 Safari/534.3'\n headers = {'User-Agent': user_agent}\n\n try:\n req = urllib.request.Request(url, None, headers)\n response = urllib.request.urlopen(req)\n json_data = json.loads(response.read().decode('utf8'))\n if json_data and len(json_data) != 0:\n return json_data\n except Exception as e:\n logger.warning('Error fetching data from ' + url, e)\n return None\n return None\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return \"
404
\"\n\n\n@app.route('/country', methods=['GET'])\ndef country_list():\n countries = rest_call(\"https://api.covid19api.com/countries\")\n\n if not countries:\n return app.response_class(\n response=\"
Error
\",\n status=400,\n mimetype='text/html'\n )\n\n countries_data = []\n for country in countries:\n data = {\n \"country\": country[\"Country\"],\n \"country_code\": country[\"ISO2\"],\n }\n countries_data.append(data)\n\n response = app.response_class(\n response=json.dumps(countries_data, indent=2),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n@app.route('/', methods=['GET'])\ndef home():\n corona_data_world = rest_call(\"https://api.covid19api.com/summary\")\n if not corona_data_world:\n return app.response_class(\n response=\"
Error
\",\n status=400,\n mimetype='text/html'\n )\n total = {\n \"Total Confirmed\": corona_data_world[\"Global\"][\"TotalConfirmed\"],\n \"Total Recovered\": corona_data_world[\"Global\"][\"TotalRecovered\"],\n \"Total Death\": corona_data_world[\"Global\"][\"TotalDeaths\"]\n }\n new = {\n \"New Confirmed\": corona_data_world[\"Global\"][\"NewConfirmed\"],\n \"New Recovered\": corona_data_world[\"Global\"][\"NewRecovered\"],\n \"New Death\": corona_data_world[\"Global\"][\"NewDeaths\"]\n }\n data = {\n \"date\": \"latest\",\n \"country\": \"Global\",\n \"total\": total,\n \"new\": new\n }\n\n np_data = None\n for each_country_data in corona_data_world[\"Countries\"]:\n if each_country_data[\"CountryCode\"] == \"NP\":\n np_data = each_country_data\n break\n\n total = {\n \"Total Confirmed\": np_data[\"TotalConfirmed\"],\n \"Total Recovered\": np_data[\"TotalRecovered\"],\n \"Total Death\": np_data[\"TotalDeaths\"]\n }\n new = {\n \"New Confirmed\": np_data[\"NewConfirmed\"],\n \"New Recovered\": np_data[\"NewRecovered\"],\n \"New Death\": np_data[\"NewDeaths\"]\n }\n data[\"Nepal\"] = {\n \"date\": np_data[\"Date\"].replace(\"T\", \" \").replace(\"Z\", \" UTC\"),\n \"country\": np_data[\"Country\"],\n \"total\": total,\n \"new\": new\n }\n\n response = app.response_class(\n response=json.dumps(data, indent=2),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n@app.route('/all', methods=['GET'])\ndef all_country_data():\n corona_data_world = rest_call(\"https://api.covid19api.com/summary\")\n\n if not corona_data_world:\n return app.response_class(\n response=\"
Error
\",\n status=400,\n mimetype='text/html'\n )\n\n all_data = []\n for each_country_data in corona_data_world[\"Countries\"]:\n total = {\n \"Total Confirmed\": each_country_data[\"TotalConfirmed\"],\n \"Total Recovered\": each_country_data[\"TotalRecovered\"],\n \"Total Death\": each_country_data[\"TotalDeaths\"]\n }\n new = {\n \"New Confirmed\": each_country_data[\"NewConfirmed\"],\n \"New Recovered\": each_country_data[\"NewRecovered\"],\n \"New Death\": each_country_data[\"NewDeaths\"]\n }\n data = {\n \"date\": each_country_data[\"Date\"].replace(\"T\", \" \").replace(\"Z\", \" UTC\"),\n \"country\": each_country_data[\"Country\"],\n \"country_code\": each_country_data[\"CountryCode\"],\n \"total\": total,\n \"new\": new\n }\n all_data.append(data)\n\n response = app.response_class(\n response=json.dumps(all_data, indent=2),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n@app.route('/', methods=['GET'])\ndef country_data(country_code):\n corona_data_world = rest_call(\"https://api.covid19api.com/summary\")\n\n if not corona_data_world:\n return app.response_class(\n response=\"
Error
\",\n status=400,\n mimetype='text/html'\n )\n\n country_data = None\n for each_country_data in corona_data_world[\"Countries\"]:\n if country_code == each_country_data[\"CountryCode\"].lower():\n country_data = each_country_data\n break\n\n if not country_data:\n return app.response_class(\n response=\"
Error
\",\n status=400,\n mimetype='text/html'\n )\n\n total = {\n \"Total Confirmed\": country_data[\"TotalConfirmed\"],\n \"Total Recovered\": country_data[\"TotalRecovered\"],\n \"Total Death\": country_data[\"TotalDeaths\"]\n }\n new = {\n \"New Confirmed\": country_data[\"NewConfirmed\"],\n \"New Recovered\": country_data[\"NewRecovered\"],\n \"New Death\": country_data[\"NewDeaths\"]\n }\n data = {\n \"date\": country_data[\"Date\"].replace(\"T\", \" \").replace(\"Z\", \" UTC\"),\n \"country\": country_data[\"Country\"],\n \"total\": total,\n \"new\": new\n }\n response = app.response_class(\n response=json.dumps(data, indent=2),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n@app.route('/history/', methods=['GET'])\ndef country_history_data(country_code):\n # date = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) - dt.timedelta(days=30)\n # print(date.isoformat())\n # url = \"https://api.covid19api.com/live/country/{}/status/confirmed/date/{}\".format(country_code, date.isoformat()+\"Z\")\n\n\n\n date = dt.datetime.utcnow().date().isoformat()\n corona_data = None\n if os.path.exists(date+country_code):\n html = \"\"\n with open(date+country_code, \"r\") as f:\n html = f.read()\n f.close()\n response = app.response_class(\n response=html,\n status=200,\n mimetype='text/html'\n )\n return response\n else:\n url = \"https://api.covid19api.com/country/\"+country_code\n # url = \"https://api.covid19api.com/total/dayone/country/\"+country_code\n corona_data = rest_call(url)\n\n if not corona_data:\n return app.response_class(\n response=\"
Invalid country code {}
\".format(country_code),\n status=400,\n mimetype='text/html'\n )\n\n html = '''\n\n\n\n\n\n\n\n\n\n
Corona Data {{Country}}
\n\n\n \n \n \n \n \n \n {{row-of-table}}\n
DateConfirmedRecoveredDeath
\n\n\n '''\n row = '''\n \n {{Date}}\n {{Confirmed}} \n {{Recovered}}\n {{Death}}\n \n '''\n all_rows = \"\"\n country = None\n previousDate, currentDate = None, None\n for each_day_data in reversed(corona_data):\n country = str(each_day_data[\"Country\"])\n previousDate = str(each_day_data[\"Date\"]).split(\"T\")[0]\n break\n\n prevConfirmed, prevRecovered, prevDeath = [0] * 3\n confirmed, recovered, death = [0] * 3\n isFirst = True\n data_date = previousDate\n for each_day_data in reversed(corona_data):\n currentDate = str(each_day_data[\"Date\"]).split(\"T\")[0]\n if currentDate != previousDate:\n if isFirst:\n r = row.replace(\"{{Date}}\", \"Total\") \\\n .replace(\"{{Confirmed}}\", str(confirmed)) \\\n .replace(\"{{Recovered}}\", str(recovered)) \\\n .replace(\"{{Death}}\", str(death))\n all_rows = all_rows + r\n else:\n r = row.replace(\"{{Date}}\", data_date) \\\n .replace(\"{{Confirmed}}\", str(int(math.fabs(prevConfirmed - confirmed)))) \\\n .replace(\"{{Recovered}}\", str(int(math.fabs(prevRecovered - recovered)))) \\\n .replace(\"{{Death}}\", str(int(math.fabs(prevDeath - death))))\n all_rows = all_rows + r\n data_date = previousDate\n previousDate = currentDate\n prevConfirmed, prevRecovered, prevDeath = confirmed, recovered, death\n confirmed, recovered, death = [0] * 3\n isFirst = False\n\n confirmed = confirmed + each_day_data[\"Confirmed\"]\n recovered = recovered + each_day_data[\"Recovered\"]\n death = death + each_day_data[\"Deaths\"]\n\n html = html.replace(\"{{row-of-table}}\", all_rows) \\\n .replace(\"{{Country}}\", country)\n with open(date+country_code, \"w\") as f:\n f.write(html)\n f.close()\n response = app.response_class(\n response=html,\n status=200,\n mimetype='text/html'\n )\n return response\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=int(os.getenv(\"PORT\", \"8443\")), debug=False)\n","repo_name":"oneamitj/covid19","sub_path":"corona.py","file_name":"corona.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71376136360","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(cv2.samples.findFile(\"video/scene2.avi\"))\nret, frame1 = cap.read()\nprvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)\nhsv = np.zeros_like(frame1)\nhsv[...,1] = 255\n\nwhile(1):\n ret, frame2 = cap.read()\n next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\n\n img2 = frame2.copy()\n step = 20\n for r in range(0,img2.shape[0], step):\n for c in range(0,img2.shape[1], step):\n offset_x = np.cos(ang[r,c]) * mag[r,c] * 2\n offset_y = np.sin(ang[r,c]) * mag[r,c] * 2\n cv2.line(img2, (c,r), (c-int(offset_x), r-int(offset_y)), (0, 255, 255), 2)\n\n hsv[...,0] = ang*180/np.pi/2\n hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)\n bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\n\n cv2.imshow('optical_flow', np.concatenate((img2, bgr), axis=1))\n\n if cv2.waitKey(30) == 27:\n break\n\n prvs = next","repo_name":"lib26/Drone-Image-Processing","sub_path":"imgP7/7-3.py","file_name":"7-3.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15524996500","text":"\nimport spacy\n\nfrom old_version.combined_data_old_annotations import annotated_for_testing_with_all\n\ntest_model = spacy.load('ingredient_test')\n\nsuccess = {\n 
\"CARDINAL\": [0, 0], # first num is success, second num is all attempts\n \"QUANTITY\": [0, 0],\n \"INGREDIENT\": [0, 0]\n}\n\nfor line in annotated_for_testing_with_all:\n doc = test_model(line[0])\n line[1]['entities'].sort() # we sort because the annotations are not originally in order\n\n for ent, tup in zip(doc.ents, line[1]['entities']):\n print(ent.start_char, ent.end_char, ent.label_)\n print(tup)\n\n found_annotation = (ent.start_char, ent.end_char, ent.label_)\n\n if found_annotation == tup:\n success[tup[2]][0] += 1\n else:\n print(ent.text)\n\n success[tup[2]][1] += 1\n\n print()\n\ncardinal = success['CARDINAL']\nquantity = success['QUANTITY']\ningredient = success['INGREDIENT']\n\nprint(\"success rates as follows\")\nprint(f\"CARDINAL: {cardinal[0]} out of {cardinal[1]} for total of {cardinal[0]/cardinal[1]*100: .2f}%\")\nprint(f\"QUANTITY: {quantity[0]} out of {quantity[1]} for total of {quantity[0]/quantity[1]*100: .2f}%\")\nprint(f\"INGREDIENT: {ingredient[0]} out of {ingredient[1]} for total of {ingredient[0]/ingredient[1]*100: .2f}%\")\n\n","repo_name":"wenzstev/grocery-list-maker","sub_path":"model_scripts/model_tester.py","file_name":"model_tester.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"71285617959","text":"# !/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n# @Author: lodge\n# @Contact: lodgeinwh@gmail.com\n# @File: Bird Language.py\n# @Time: 2019/2/11 22:16\n# @Software: PyCharm\n\n\nVOWELS = \"aeiouy\"\n\n\ndef translate(phrase):\n result = ''\n phrase = list(phrase.lower())\n i = 0\n while i < len(phrase):\n if phrase[i] == ' ':\n result += ' '\n i += 1\n elif phrase[i] in VOWELS:\n result += phrase[i]\n i += 3\n else:\n result += phrase[i]\n i += 2\n return result\n\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert translate(\"hieeelalaooo\") == \"hello\", \"Hi!\"\n assert translate(\"hoooowe yyyooouuu duoooiiine\") == \"how you doin\", \"Joey?\"\n assert translate(\"aaa bo cy da eee fe\") == \"a b c d e f\", \"Alphabet\"\n assert translate(\"sooooso aaaaaaaaa\") == \"sos aaa\", \"Mayday, mayday\"\n","repo_name":"lodgeinwh/Study","sub_path":"Python/Py.Checkio/Home/Bird Language.py","file_name":"Bird Language.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41620091918","text":"'''\nPlots from Admiralty Inlet 65 meter simulation.\n'''\n\nimport scipy.io\nimport numpy as np\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nfrom add_colormaps import add_colormaps\nfrom skimage import color\nfrom matplotlib import cm\nimport matplotlib as mpl\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.ticker import MaxNLocator\nimport pdb\nimport cmocean.cm as cmo\nimport cmocean\n\nmpl.rcParams.update({'font.size': 16})\nmpl.rcParams['font.sans-serif'] = 'Arev Sans, Bitstream Vera Sans, Lucida Grande, Verdana, Geneva, Lucid, Helvetica, Avant Garde, sans-serif'\nmpl.rcParams['mathtext.fontset'] = 'custom'\nmpl.rcParams['mathtext.cal'] = 'cursive'\nmpl.rcParams['mathtext.rm'] = 'sans'\nmpl.rcParams['mathtext.tt'] = 'monospace'\nmpl.rcParams['mathtext.it'] = 'sans:italic'\nmpl.rcParams['mathtext.bf'] = 'sans:bold'\nmpl.rcParams['mathtext.sf'] = 
'sans'\nmpl.rcParams['mathtext.fallback_to_cm'] = 'True'\n\ndef comps():\n '''read in comparison points'''\n\n import scipy.io\n base = 'savedoutput/'\n data = dict()\n d = scipy.io.loadmat(base + 'zeta_adcpzetasites.mat')\n data['zeta'] = dict(); data['vel'] = dict(); data['ctd'] = dict()\n data['zeta']['lon'] = d['coords']['xm'][0,0][0,:]; data['zeta']['lat'] = d['coords']['ym'][0,0][0,:]\n d = scipy.io.loadmat(base + 'uADCPvel.mat')\n data['vel']['lon'] = d['datacoords']['xm'][0,0][0,0,:]; data['vel']['lat'] = d['datacoords']['ym'][0,0][0,0,:]\n d = scipy.io.loadmat(base + 'PTH005_2006_9.mat')\n lon0 = d['PTH']['lon'][0][0][0][0]; lat0 = d['PTH']['lat'][0][0][0][0]\n d = scipy.io.loadmat(base + 'ADM001_2006_9.mat')\n lon1 = d['ADM']['lon'][0][0][0][0]; lat1 = d['ADM']['lat'][0][0][0][0]\n data['ctd']['lon'] = np.array([[lon0, lon1]]); data['ctd']['lat'] = np.array([[lat0, lat1]])\n\n # data markers\n data['zeta']['plot'] = {'color': '#FF5733', 'marker': 'o', 'markersize': 8, 'linewidth': 0}\n data['vel']['plot'] = {'color': 'k', 'marker': 's', 'markersize': 10,\n 'markerfacecolor': 'None', 'markeredgewidth': 0.8, 'linewidth': 0}\n data['ctd']['plot'] = {'color': '#8E44AD', 'marker': '*', 'markersize': 15, 'linewidth': 0}\n\n # d = scipy.io.loadmat(base + 'ha_zetaadcp.mat')\n # cons = d['ha']['name'][0][0] # tidal constituents\n # freqs = d['ha']['freq'][0][0] # tidal frequencies\n # # tidal constituents: constituents by 4:\n # tidecons = [d['ha']['tidecon'][0][0][i] for i in range(cons.size)]\n #\n # zeta = dict()\n # for con, freq, tidecon in zip(cons, freqs, tidecons):\n # con = con.strip() # remove trailing spaces\n # zeta[con] = dict()\n # zeta[con]['f'] = freq\n # zeta[con]['tidecons'] = tidecon\n\n return data\n\n\ndef add_alpha(cmap, alpha):\n '''Add alpha to colormap. 
Return adjusted colormap.'''\n\n # Read in rgb of colormap\n rgb = cmocean.tools.print_colormaps([cmap], savefiles=False)[0]\n # make array of alpha values\n alphas = alpha*np.ones((rgb.shape[0],1))\n # set up new rgb\n rgbnew = np.hstack((rgb, alphas))\n # create new colormap object\n cmap = cmocean.tools.cmap(rgbnew)\n return cmap\n\n\ndef ai(grid, zoom='out'):\n '''\n Plot bathymetry of just Admiralty Inlet\n\n Inputs:\n grid tracpy grid dictionary\n zoom 'out'= zoomed out view, 'in'= zoomed in view\n '''\n\n # x and y limits for this plot\n if zoom=='out': # zoomed out\n lonlims = [-122.8, -122.55]\n latlims = [47.9665, 48.227]\n # levels to plot\n levs = np.arange(-200, 20, 20)\n elif zoom=='in': # zoomed in\n lonlims = [-122.71, -122.64]\n latlims = [48.135, 48.18]\n # levels to plot\n levs = np.arange(-60, 5, 5)\n\n cmap = 'Blues_r'\n xlims, ylims = grid.proj(lonlims, latlims)\n\n # Make plot\n fig = plt.figure(figsize=(14,12))\n ax = fig.add_subplot(111)\n grid.proj.drawcoastlines(ax=ax)\n grid.proj.fillcontinents('darkgreen', ax=ax)\n mappable = ax.contourf(grid.x_rho, grid.y_rho, -grid.h, cmap=cmap, levels=levs, extend='min')\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n cb = fig.colorbar(mappable, shrink=0.75, pad=0.025)\n cb.set_label('Height/depth [m]')\n fig.show()\n\n # # Save figure\n # fig.savefig('figures/ai_transect.png')\n\n\n# def ps(dosave=True, fname='figures/domains.png', lont=None, latt=None, ht=None, dd=None):\n# '''\n# Plot Bathymetry of Puget Sound, Admiralty Inlet, and Admiralty Head\n#\n# Inputs:\n# dosave Save figure\n# fname File name for figure\n# lont, latt transect points to plot if they are input\n# ht Depth along transect, if input\n# dd Distance in meters along transect\n# '''\n\nif __name__ == \"__main__\": # Puget sound plot with inlays\n # download bathymetry, which can be found at: http://figshare.com/preview/_preview/1165560 (27.3MB)\n fname = 'figures/domains'\n dosave = True\n doAH = False\n lont=None; latt=None; ht=None; dd=None\n\n # Read in bathymetry\n mat = scipy.io.loadmat('../form-drag/cascadia_gridded.mat')\n\n # x and y limits for these plots\n lonlimsPS = np.array([-124.21, -122.15]) #-123.21, -122.15])\n latlimsPS = np.array([47.02, 48.82])\n lonlimsAI = np.array([-122.8, -122.535])\n latlimsAI = np.array([47.9665, 48.225])\n lonlimsAH = np.array([-122.72, -122.64])\n latlimsAH = np.array([48.12, 48.18])\n\n # Functionality copied from https://github.com/clawpack/geoclaw/blob/master/src/python/geoclaw/topotools.py#L873\n land_cmap = cmo.speed_r\n land_cmap = add_alpha(land_cmap, 0.7)\n sea_cmap = cmo.deep_r\n cmapPS, norm = add_colormaps((land_cmap, sea_cmap), data_limits=[-375,2500], data_break=0.0)\n cmapAI = cmo.deep_r\n cmapAH = cmo.deep_r\n\n cland = '#4C6351'\n\n # levels to plot\n levsPS = np.concatenate((np.arange(-375, 0, 25), np.arange(0,3000,500)))\n levsAI = np.arange(-200, 20, 20)\n levsAH = np.arange(-120, 15, 15)\n\n # use basemap\n basemapPS = Basemap(llcrnrlon=lonlimsPS[0], llcrnrlat=latlimsPS[0],\n urcrnrlon=lonlimsPS[1], urcrnrlat=latlimsPS[1],\n lat_0=latlimsPS.mean(), lon_0=lonlimsPS.mean(),\n projection='lcc', resolution='f',\n area_thresh=0.)\n xPS, yPS = basemapPS(mat['lon_topo'], mat['lat_topo'])\n xlimsAI, ylimsAI = basemapPS(lonlimsAI, latlimsAI)\n xlimsAH, ylimsAH = basemapPS(lonlimsAH, latlimsAH)\n\n # data points to plot on Admiralty Inlet map, get projected coords\n data = comps()\n for key in data.keys():\n data[key]['x'], data[key]['y'] = basemapPS(data[key]['lon'], data[key]['lat'])\n\n # Make Puget 
Sound plot\n fig = plt.figure(figsize=(16,16))\n axPS = fig.add_subplot(111)\n # basemapPS.drawcoastlines(ax=axPS)\n mappablePS = axPS.contourf(xPS, yPS, mat['z_topo'], cmap=cmapPS, levels=levsPS, zorder=2, norm=norm)\n locator = MaxNLocator(6) # if you want no more than 10 contours\n locator.create_dummy_axis()\n locator.set_bounds(lonlimsPS[0], lonlimsPS[1])\n pars = locator()\n locator = MaxNLocator(6) # if you want no more than 10 contours\n locator.create_dummy_axis()\n locator.set_bounds(latlimsPS[0], latlimsPS[1])\n mers = locator()\n basemapPS.drawparallels(mers, dashes=(1, 1), linewidth=0.15, labels=[1,0,0,0], ax=axPS)#, zorder=3)\n basemapPS.drawmeridians(pars, dashes=(1, 1), linewidth=0.15, labels=[0,0,0,1], ax=axPS)#, zorder=3)\n cbPS = fig.colorbar(mappablePS, pad=0.015, aspect=35)\n cbPS.set_label('Height/depth [m]')\n # Label\n axPS.text(0.8, 0.025, 'Puget Sound', transform=axPS.transAxes, color='0.15')\n\n # Inset magnified plot of Admiralty Inlet\n locAI = 3\n axAI = zoomed_inset_axes(axPS, 4, loc=locAI)\n basemapPS.drawcoastlines(ax=axAI)\n basemapPS.fillcontinents(cland, ax=axAI)\n mappableAI = axAI.contourf(xPS, yPS, mat['z_topo'], cmap=cmapAI, levels=levsAI)\n # add data points\n for key, value in zip(data.keys(), data.values()):\n xtemp, ytemp = data[key]['x'], data[key]['y']\n axAI.plot(xtemp, ytemp, **data[key]['plot'])\n axAI.set_xlim(xlimsAI)\n axAI.set_ylim(ylimsAI)\n # Inlaid colorbar\n if locAI == 1:\n caxAI = fig.add_axes([0.581, 0.665, 0.011, 0.1])\n elif locAI == 3:\n caxAI = fig.add_axes([0.18, 0.12, 0.011, 0.1])\n cbAI = plt.colorbar(mappableAI, cax=caxAI, orientation='vertical')\n cbAI.ax.tick_params(labelsize=12)\n # draw a bbox of the region of the inset axes in the parent axes and\n # connecting lines between the bbox and the inset axes area\n mark_inset(axPS, axAI, loc1=2, loc2=4, fc=\"none\", ec=\"0.3\", lw=1.5, zorder=5)\n # Label\n axAI.text(0.41, 0.83, 'Admiralty\\n Inlet', transform=axAI.transAxes, color='0.15', fontsize=16)\n\n # Inset magnified plot of Admiralty Head\n if doAH:\n axAH = zoomed_inset_axes(axPS, 9, loc=1)\n basemapPS.drawcoastlines(ax=axAH)\n basemapPS.fillcontinents(cland, ax=axAH)\n mappableAH = axAH.contourf(xPS, yPS, mat['z_topo'], cmap=cmapAH, levels=levsAH)\n axAH.set_xlim(xlimsAH)\n axAH.set_ylim(ylimsAH)\n\n if plt.is_numlike(lont):\n # add points if you have some\n xt, yt = basemapPS(lont, latt)\n axAH.plot(xt, yt, 'k', lw=3)\n\n # Inlaid colorbar\n caxAH = fig.add_axes([0.398, 0.116, 0.012, 0.15])\n cbAH = plt.colorbar(mappableAH, cax=caxAH, orientation='vertical')\n cbAH.ax.tick_params(labelsize=12)\n # draw a bbox of the region of the inset axes in the parent axes and\n # connecting lines between the bbox and the inset axes area\n mark_inset(axPS, axAH, loc1=2, loc2=4, fc=\"none\", ec=\"0.3\", lw=1.5, zorder=5)\n # Label\n axAH.text(0.47, 0.92, 'Admiralty Head', transform=axAH.transAxes, color='0.15', fontsize=16)\n\n # pdb.set_trace()\n\n if plt.is_numlike(lont):\n # Add axes to plot transect depths\n axdepths = fig.add_axes([0.28, 0.39, 0.14, 0.075], zorder=11)\n axdepths.plot((np.arange(lont.size)*dd)/1000., -ht, '0.2', lw=2, zorder=12)\n axdepths.tick_params(axis='both', colors='0.1', top='off', right='off', width=2, length=4, labelsize=12, labelcolor='0.1')\n axdepths.spines['bottom'].set_color('none')\n axdepths.spines['top'].set_color('none')\n axdepths.spines['left'].set_color('none')\n axdepths.spines['right'].set_color('none')\n axdepths.set_xlabel('Distance along transect [km]', fontsize=14, color='0.1')\n 
axdepths.set_ylabel('Transect depth [m]', fontsize=14, color='0.1')\n axdepths.patch.set_alpha(0.0) # make bg transparent\n fig.show()\n\n\n # Save figure\n if dosave:\n fig.savefig(fname + '.png', bbox_inches='tight', dpi=120)\n fig.savefig(fname + '_highres.png', bbox_inches='tight', dpi=300)\n fig.show()\n","repo_name":"kthyng/ai65","sub_path":"plot_domain.py","file_name":"plot_domain.py","file_ext":"py","file_size_in_byte":10906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26180949823","text":"class LinkedList:\n \"\"\"\n Implement the LinkedList data structure. The Node class below is an \n inner class. An inner class means that its real name is related to \n the outer class. To create a Node object, we will need to \n specify LinkedList.Node\n \"\"\"\n\n class Node:\n \"\"\"\n Each node of the linked list will have data and links to the \n previous and next node. \n \"\"\"\n\n def __init__(self, data):\n \"\"\" \n Initialize the node to the data provided. Initially\n the links are unknown so they are set to None.\n \"\"\"\n self.data = data\n self.next = None\n self.prev = None\n\n def __init__(self):\n \"\"\"\n Initialize an empty linked list.\n \"\"\"\n self.head = None\n self.tail = None\n\n def insert_head(self, value):\n \"\"\"\n Insert a new node at the front (i.e. the head) of the\n linked list.\n \"\"\"\n # Create the new node\n new_node = LinkedList.Node(value) \n \n # If the list is empty, then point both head and tail\n # to the new node.\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n # If the list is not empty, then only self.head will be\n # affected.\n else:\n new_node.next = self.head # Connect new node to the previous head\n self.head.prev = new_node # Connect the previous head to the new node\n self.head = new_node # Update the head to point to the new node\n\n def insert_tail(self, value):\n \"\"\"\n Insert a new node at the back (i.e. 
the tail) of the \n linked list.\n \"\"\"\n new_node = LinkedList.Node(value)\n\n # If the list is empty, then point both head and tail\n # to the new node \n if self.head is None:\n self.head = new_node\n self.tail = new_node \n # If the list is not empty, then only the self.tail will be\n # affected.\n else:\n new_node.prev = self.tail # Connect new node to the previous tail\n self.tail.next = new_node # Connect the previous tail to the new node\n self.tail = new_node # Update the tail to point to the new node\n\n #################\n # Start Problem #\n #################\n\n def insert_after(self, value, new_value):\n \"\"\"\n Insert 'new_value' after the first occurance of 'value' in\n the linked list.\n \"\"\"\n # Search for the node that matches 'value' by starting at the \n # head of the list.\n curr = self.head\n while curr is not None:\n if curr.data == value:\n # If the location of 'value' is at the end of the list,\n # then we can call insert_tail to add 'new_value'\n if curr == self.tail:\n self.insert_tail(new_value)\n # For any other location of 'value', need to create a \n # new node and reconnect the links to insert.\n else:\n new_node = LinkedList.Node(new_value)\n new_node.prev = curr # Connect new node to the node containing 'value'\n new_node.next = curr.next # Connect new node to the node after 'value'\n curr.next.prev = new_node # Connect node after 'value' to the new node\n curr.next = new_node # Connect the node containing 'value' to the new node\n return # We can exit the function after we insert\n curr = curr.next # Go to the next node to search for 'value'\n\n ###############\n # End Problem #\n ###############\n\n ","repo_name":"KMausisa/cse-212-final-project","sub_path":"final-project-draft1/python_files/linked-list.py","file_name":"linked-list.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31346075478","text":"from django.urls import path\nfrom .middlewares.auth import auth_middleware\nfrom .views import Index, OrderView, Signup, Login, logout, Cart, Checkout,Order\n\nurlpatterns = [\n path('', Index.as_view(), name='index'),\n path('signup', Signup.as_view(), name='signup'),\n path('login', Login.as_view(), name='login'),\n path('logout', logout, name='logout'),\n path('cart', auth_middleware(Cart.as_view()), name='cart'),\n path('checkout', Checkout.as_view(), name='checkout'),\n path('orders', OrderView.as_view(), name='orders'),\n\n]\n","repo_name":"RobertHalam/Ecom","sub_path":"ecomapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16697763704","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 11/9/21 2:41 PM\n# @Author : Jiyuan Wang\n# @File : loss_user.py\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nimport seaborn as sns\nimport os\nfrom sklearn.model_selection import learning_curve, train_test_split, GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\n#使用PCA方法对数据进行降维\nfrom sklearn.decomposition import PCA\n#导入管道机制进行流水作业\nfrom sklearn.pipeline import Pipeline\n#导入自带的评估模型准确率的函数\nfrom sklearn.metrics import accuracy_score\n#导入分类算法SVC, 其它还有NuSVC,和LinearSVC 。另一类是回归算法库,包括SVR, NuSVR,和LinearSVR\nfrom sklearn.svm import SVC\n#决策树分类器\nfrom sklearn.tree import DecisionTreeClassifier\n#随机森林分类器\nfrom sklearn.ensemble import RandomForestClassifier\n#KNN分类器\nfrom 
sklearn.neighbors import KNeighborsClassifier\n#Adaboost分类器\nfrom sklearn.ensemble import AdaBoostClassifier\n\ndef load_data():\n script_dir = os.path.dirname(__file__)\n customerDf = pd.read_csv(os.path.join(script_dir, 'data/WA_Fn-UseC_-Telco-Customer-Churn.csv'))\n #customerDf[['TotalCharges']].astype(float)\n customerDf['TotalCharges'] = pd.to_numeric(customerDf['TotalCharges'], errors=\"coerce\")\n\n customerDf.loc[:, 'TotalCharges'].fillna(customerDf['TotalCharges'].mean(), inplace=True)\n customerDf.loc[:, 'tenure'].replace(to_replace=0, value=1, inplace=True)\n\n customerID = customerDf['customerID']\n customerDf.drop(['customerID'], axis=1, inplace=True)\n\n cateCols = [c for c in customerDf.columns if customerDf[c].dtype == 'object' or c == 'SeniorCitizen']\n global dfCate\n dfCate = customerDf[cateCols].copy()\n dfCate.head(3)\n\n for col in cateCols:\n if dfCate[col].nunique() == 2:\n dfCate[col] = pd.factorize(dfCate[col])[0]\n else:\n dfCate = pd.get_dummies(dfCate, columns=[col])\n dfCate['tenure'] = customerDf[['tenure']]\n dfCate['MonthlyCharges'] = customerDf[['MonthlyCharges']]\n dfCate['TotalCharges'] = customerDf[['TotalCharges']]\n\n dropFea = ['gender', 'PhoneService',\n 'OnlineSecurity_No internet service', 'OnlineBackup_No internet service',\n 'DeviceProtection_No internet service', 'TechSupport_No internet service',\n 'StreamingTV_No internet service', 'StreamingMovies_No internet service',\n 'OnlineSecurity_No', 'OnlineBackup_No',\n 'DeviceProtection_No','TechSupport_No',\n 'StreamingTV_No', 'StreamingMovies_No',\n 'MultipleLines_No', 'Contract_Month-to-month',\n 'StreamingMovies_Yes', 'StreamingTV_Yes',\n 'PaymentMethod_Credit card (automatic)','PaymentMethod_Electronic check',\n 'InternetService_Fiber optic','MultipleLines_No phone service',\n 'PaymentMethod_Mailed check', 'PaymentMethod_Bank transfer (automatic)',\n 'OnlineSecurity_Yes', 'InternetService_DSL','InternetService_No',\n 'OnlineBackup_Yes','DeviceProtection_Yes','PaperlessBilling'\n ]\n dfCate.drop(dropFea, inplace=True, axis=1)\n # 最后一列是作为标识\n target = dfCate['Churn'].values\n # 列表:特征和1个标识\n columns = dfCate.columns.tolist()\n\n # 列表:特征\n columns.remove('Churn')\n # 含有特征的DataFrame\n features = dfCate[columns].values\n # 30% 作为测试集,其余作为训练集\n # random_state = 1表示重复试验随机得到的数据集始终不变\n # stratify = target 表示按标识的类别,作为训练数据集、测试数据集内部的分配比例\n train_x, test_x, train_y, test_y = train_test_split(features, target, test_size=0.30, stratify=target,\n random_state=1)\n\n return train_x, test_x, train_y, test_y\n\ndef train_model(train_x, train_y):\n classifiers = [\n SVC(random_state=1, kernel='rbf'),\n DecisionTreeClassifier(random_state=1, criterion='gini'),\n RandomForestClassifier(random_state=1, criterion='gini'),\n KNeighborsClassifier(metric='minkowski'),\n AdaBoostClassifier(random_state=1),\n ]\n # 分类器名称\n classifier_names = [\n 'svc',\n 'decisiontreeclassifier',\n 'randomforestclassifier',\n 'kneighborsclassifier',\n 'adaboostclassifier',\n ]\n # 分类器参数\n # 注意分类器的参数,字典键的格式,GridSearchCV对调优的参数格式是\"分类器名\"+\"__\"+\"参数名\"\n classifier_param_grid = [\n {'svc__C': [0.1], 'svc__gamma': [0.01]},\n {'decisiontreeclassifier__max_depth': [6, 9, 11]},\n {'randomforestclassifier__n_estimators': range(1, 11)},\n {'kneighborsclassifier__n_neighbors': [4, 6, 8]},\n {'adaboostclassifier__n_estimators': [70, 80, 90]}\n ]\n\n global s_model\n s_model = AdaBoostClassifier(n_estimators=80)\n s_model.fit(train_x, train_y)\n\n return s_model\n\ndef run_model(pred_x):\n pred_y = s_model.predict(pred_x)\n predDf = 
pd.DataFrame({'Churn': pred_y})\n\ndef set_up():\n train_x, test_x, train_y, test_y = load_data()\n model = train_model(train_x,train_y)\n return (test_x,test_y,model)\n\ndef build_test_input(SeniorCitizen,Partner,Dependents,MultipleLines_Yes,TechSupport_Yes,\n Contract_Oneyear,Contract_Twoyear,tenure,MonthlyCharges,TotalCharges):\n test_input = pd.DataFrame(\n columns=['SeniorCitizen','Partner','Dependents','MultipleLines_Yes','TechSupport_Yes',\n 'Contract_Oneyear','Contract_Twoyear','tenure','MonthlyCharges','TotalCharges'])\n test_input.loc[0] = [SeniorCitizen,Partner,Dependents,MultipleLines_Yes,TechSupport_Yes,\n Contract_Oneyear,Contract_Twoyear,tenure,MonthlyCharges,TotalCharges]\n return test_input\n\nif __name__ == '__main__':\n test_x, test_y, model = set_up()\n pd.set_option('max_colwidth',100)\n pd.set_option('display.max_columns',None)\n\n pred_x = dfCate.drop(['Churn'],axis=1).tail(10)\n print(pred_x)\n run_model(pred_x)\n\n\n\n\n\n","repo_name":"fabriceyhc/pyfuzz","sub_path":"src/examples/lossuser/loss_user.py","file_name":"loss_user.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"43444175635","text":"\n# coding: utf-8\n\n# In[20]:\n\n\nimport numpy as np\n\nDataset =[3, 5, 7, 2, 8, 10, 11, 65, 72, 81, 99, 100, 150]\n\ndef movingaverage(values,window):\n weights=np.repeat(values,window)/window\n smas=np.convolve(values,weights,'valid')\n return smas\n\nprint(movingaverage(Dataset,3))\n\n","repo_name":"Rosmej/Numpy-2","sub_path":"Numpy-2.py","file_name":"Numpy-2.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34665664429","text":"import cv2\n\ncarregarAlgoritmo = cv2.CascadeClassifier('haarcascade/haarcascade_frontalface_default.xml')\n\nimg = cv2.cvtColor(cv2.imread(\"imagens/time_corinthians.jpg\"), cv2.COLOR_BGR2RGB) # abro a imagem com imread que faza leiturada imagem\n\n# transformando a imagem em cinza\n# a documentação do opencv recomenda que se utilze a imagem na tonalidade de cinza, pois a porcentagem de acerto é maior\nimagem_cinza = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nfaces = carregarAlgoritmo.detectMultiScale(imagem_cinza)\n\nprint(faces)\n\nfor (x, y, l, a) in faces:\n cv2.rectangle(img, (x, y), (x + l, x + a), (0,255,0), 2)\ncv2.imshow(\"Faces\", img)\ncv2.waitKey()\n","repo_name":"diogolimalucasdev/Estudando-Sobre-OpenCv","sub_path":"reconhecimento.py","file_name":"reconhecimento.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72166391399","text":"import pytest\nimport os\nimport time\nimport contextlib\nimport io\nfrom .._parallelize import _get_n_jobs, parallel_loop\n\n\ndef example_function(a):\n time.sleep(0.5)\n return None\n\n\ndef test_get_n_jobs():\n max_jobs = os.cpu_count()\n other_n_jobs = 2 if max_jobs >= 2 else 1\n\n assert _get_n_jobs(None) == 1\n assert _get_n_jobs(other_n_jobs) == other_n_jobs\n assert _get_n_jobs(-1) == max_jobs\n\n with pytest.raises(RuntimeError):\n _get_n_jobs(max_jobs + 1)\n\n\ndef test_parallel_loop():\n # Get number of jobs to set iterable range\n n_jobs = os.cpu_count()\n\n # Check if parallelization is happening\n start = time.time()\n parallel_loop(example_function, range(n_jobs), n_jobs=-1)\n exec_time = time.time() - start\n\n exp_single_job = n_jobs * 0.5 if n_jobs != 1 else 1\n assert 
exec_time < exp_single_job\n\n\ndef test_progress_bar():\n # Get number of jobs to set iterable range\n n_jobs = os.cpu_count()\n description = \"Test descr.\"\n\n # Check if parallelization is happening\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n parallel_loop(\n function=example_function,\n iterable=range(n_jobs),\n n_jobs=-1,\n progress_bar=True,\n description=description,\n )\n\n output = f.getvalue()\n assert output.startswith(description)\n","repo_name":"joaopfonseca/ml-research","sub_path":"mlresearch/utils/tests/test_parallelize.py","file_name":"test_parallelize.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"1097541792","text":"# Input\r\nimport numpy as np\r\nm = int(input(\"Nhập m = \")) # dòng\r\nn = int(input(\"Nhập n = \")) # cột\r\na = []\r\nb = []\r\nfor i in range(0, m):\r\n a.append([])\r\n for j in range(0, n):\r\n x = int(input(\"Nhập phần tử a[%s][%s]: \" % (i+1, j+1)))\r\n b.append(x)\r\n a[i].append(0)\r\nb.sort(reverse=True)\r\n\r\n# Tạo biến\r\nhang1 = -1\r\nhang2 = hang1\r\nkhong = 0\r\nkhong1 = 0\r\nmot = 1\r\nhai = 0\r\nhang3 = -1\r\n\r\n# Output\r\nwhile b!=[]:\r\n for i in range(khong1,n-khong): # phải sang trái\r\n a[hang1][hang3] = b[-1]\r\n b.remove(b[-1])\r\n hang3 -= 1\r\n hang3 = hang1-1\r\n if b == []: \r\n break\r\n for i in range(khong,m-mot): # duới lên trên\r\n hang2 -= 1\r\n a[hang2][khong] = b[-1]\r\n b.remove(b[-1])\r\n if b == []: \r\n break\r\n for i in range(khong+1,n-hai): # trái sang phải\r\n a[khong][i] = b[-1]\r\n b.remove(b[-1])\r\n if b == []: # nhanh nhanh\r\n break\r\n for i in range(mot,m-mot): # trên xuống dưới\r\n a[i][hang1] = b[-1]\r\n b.remove(b[-1])\r\n hang1 -= 1 \r\n hang2 = hang1\r\n khong += 1\r\n khong1+=1\r\n mot += 1\r\n hai += 1\r\nprint(np.array(a))","repo_name":"trandinhtrung3082007/HSG-Tin","sub_path":"bai3_mang2chieu.py","file_name":"bai3_mang2chieu.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11001005612","text":"class ListNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\nclass Solution:\r\n def addInList(self , head1 , head2 ):\r\n # write code here\r\n # 翻转链表\r\n def reverse(head):\r\n if not head:\r\n return head\r\n # 链表翻转\r\n pre = ListNode(-1)\r\n # 头插法链表翻转\r\n while head:\r\n # 记录下一个节点\r\n cur = head.next\r\n # 记录当前头节点的下一个节点\r\n nxt = pre.next\r\n # 断开节点\r\n pre.next = head\r\n head.next = nxt\r\n head = cur\r\n return pre.next\r\n head1 = reverse(head1)\r\n head2 = reverse(head2)\r\n dummy = ListNode(-1)\r\n newnode = dummy\r\n flag = 0\r\n # 由高位向低位相加和\r\n while head1 and head2:\r\n addnum = head1.val + head2.val + flag\r\n flag = addnum//10\r\n now = addnum%10\r\n newnode.next = ListNode(now)\r\n head1 = head1.next\r\n head2 = head2.next\r\n newnode = newnode.next\r\n # 如果仍然包含进位 依次加和 head1 和head2\r\n while head1:\r\n addnum = head1.val+ flag\r\n flag = addnum//10\r\n now = addnum%10\r\n newnode.next = ListNode(now)\r\n head1 = head1.next\r\n newnode = newnode.next\r\n while head2:\r\n addnum = head2.val+ flag\r\n flag = addnum//10\r\n now = addnum%10\r\n newnode.next = ListNode(now)\r\n head2 = head2.next\r\n newnode = newnode.next\r\n # 最后一位进位是否为1\r\n if flag == 1:\r\n newnode.next = ListNode(flag)\r\n # 求和结果进行翻转依次输出\r\n return reverse(dummy.next)","repo_name":"dunkle/leetcode_block","sub_path":"剑指offer/NC40 两个链表生成相加链表.py","file_name":"NC40 
两个链表生成相加链表.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27158320974","text":"import os\nimport sys\nfrom bench import time_func\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom amcl import commitments\n\nx_hex = \"40576370e36018f6bfaffc4c66780303a361f0c5f4a18a86a74fb179ca0fcf22\"\nr_hex = \"296f910bde4530efe3533ed3b74475d6022364db2e57773207734b6daf547ac8\"\n\nif __name__ == \"__main__\":\n x = bytes.fromhex(x_hex)\n r = bytes.fromhex(r_hex)\n\n # Generate quantities for benchmark\n r, c = commitments.nm_commit(None, x, r)\n\n assert commitments.nm_decommit(x, r, c) == commitments.OK\n\n # Run benchmark\n fncall = lambda: commitments.nm_commit(None, x, r)\n time_func(\"nm_commit \", fncall, unit=\"us\")\n\n fncall = lambda: commitments.nm_decommit(x, r, c)\n time_func(\"nm_decommit\", fncall, unit=\"us\")\n","repo_name":"ep-infosec/33_apache_incubator-milagro-MPC","sub_path":"python/benchmark/bench_nm_commit.py","file_name":"bench_nm_commit.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29201101591","text":"from cms.toolbar.utils import get_toolbar_from_request\nfrom cms.utils.conf import get_cms_setting\nfrom menus.menu_pool import MenuRenderer\n\n\ndef menu_renderer_cache_key(self):\n prefix = get_cms_setting('CACHE_PREFIX')\n\n key = '%smenu_nodes_%s_%s' % (prefix, self.request_language, self.site.pk)\n\n if self.request.user.is_authenticated:\n key += '_%s_user' % self.request.user.pk\n\n request_toolbar = get_toolbar_from_request(self.request)\n\n if request_toolbar.edit_mode_active or request_toolbar.preview_mode_active:\n key += ':draft'\n else:\n key += ':public'\n return key\nMenuRenderer.cache_key = property(menu_renderer_cache_key) # noqa: E305\n","repo_name":"wahello/djangocms-versioning","sub_path":"djangocms_versioning/monkeypatch/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"34024946778","text":"def readMatrix(n, elementsType=int):\n M = []\n for i in range(n):\n row = [elementsType(el) for el in input().split()]\n M.append(row)\n\n return M\n\ndef printMatrix(M, elementView=\"%4d\", endElem=\"\"):\n for row in M:\n for el in row:\n print(elementView % el, end=endElem)\n print()\n\ndef writeMatrix(M, elementsType=int):\n # n = len(M) # кількість рядків матриці\n # m = len(M[0]) # кількість стовпчиків матриці\n if elementsType == int:\n str_elem = \"%4d\"\n elif elementsType == float:\n str_elem = \"%7.2f\"\n else:\n str_elem = \"%5s\"\n\n for row in M:\n # print(*row)\n for el in row:\n print(str_elem % el, end= \"\")\n print()\n\n\n","repo_name":"krenevych/Prog2021","sub_path":"Stat12/L10/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"21390614034","text":"#! 
/usr/bin/env python\n\nimport ctypes\n\ndef libc():\n lib_utils = ctypes.CDLL('utils.so')\n lib_utils.strstr2.argstype = [ctypes.c_char_p, ctypes.c_char_p]\n lib_utils.strstr2.restype = ctypes.c_char_p\n res = lib_utils.strstr2(b\"ababac\", b\"baba\")\n print(res)\n\n\ndef libcpp():\n lib_utils = ctypes.CDLL('libutilscpp.so')\n lib_utils.int2str.argstype = [ctypes.c_int]\n lib_utils.int2str.restype = ctypes.c_void_p\n\n lib_utils.free_memory.argstype = [ctypes.c_void_p]\n lib_utils.free_memory.restype = None\n\n res_str_p = lib_utils.int2str(100500)\n print(type(res_str_p))\n print(ctypes.c_char_p(res_str_p).value)\n lib_utils.free_memory(ctypes.c_char_p(res_str_p))\n\n lib_utils.fibonacci.argstype = [ctypes.c_int]\n lib_utils.fibonacci.restype = ctypes.c_int\n n = 10\n print(f\"Fibonacci {n}-th number is: {lib_utils.fibonacci(n)}\")\n\ndef main():\n print(\"=== libc ===\")\n libc()\n print(\"=== libcpp ===\")\n libcpp()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlexG888/MADE_python","sub_path":"lessons_and_hometasks/advance-04/src/ctypes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4498936577","text":"import csv\nimport statistics\nimport numpy\nfrom prettytable import PrettyTable\n\nfile = open('CovidCases.csv')\ncsvreader = csv.reader(file)\n\n\ndata_by_states = {}\nfor row in csvreader:\n\tif row[1] in data_by_states.keys() and int(row[5]) >= 0:\n\t\tdata_by_states[row[1]].append(int(row[5]))\n\t\t#if (row[2] != '0'):\n\t\t\t#data_by_states[row[1]].append(row[2])\n\telse:\n\t\tdata_by_states[row[1]] = []\n\n\n\ndata_Arizona = data_by_states['AZ']\ndata_Florida = data_by_states['FL']\ndata_Georgia = data_by_states['GA']\ndata_SouthCarolina = data_by_states['SC']\ndata_SouthDakota = data_by_states['SD']\n\ndata_NewYork = data_by_states['NY']\ndata_Massachusetts = data_by_states['MA']\ndata_Pennsylvania = data_by_states['PA']\ndata_Ohio = data_by_states['OH']\ndata_Utah = data_by_states['UT']\n\ninnerTLeft = PrettyTable()\ninnerTRight = PrettyTable()\n\n\ninnerTLeft.field_names = [\"State\", \"Mean\", \"St Dev\", \"Min\", \"Max\"]\ninnerTRight.field_names = [\"State\", \"Mean\", \"St Dev\", \"Min\", \"Max\"]\n\n\ninnerTLeft.title = 'State without mask mandate new cases per capita'\ninnerTLeft.add_row(['AZ', statistics.mean(data_Arizona) /7151502, statistics.stdev(data_Arizona)/7151502, min(data_Arizona)/7151502, max(data_Arizona)/7151502])\ninnerTLeft.add_row(['FL', statistics.mean(data_Florida) / 21538187, statistics.stdev(data_Florida)/ 21538187, min(data_Florida)/ 21538187, max(data_Florida)/ 21538187])\ninnerTLeft.add_row(['GA', statistics.mean(data_Georgia) /10711908, statistics.stdev(data_Georgia)/10711908, min(data_Georgia)/10711908, max(data_Georgia)/10711908])\ninnerTLeft.add_row(['SC', statistics.mean(data_SouthCarolina)/5118425, statistics.stdev(data_SouthCarolina)/5118425, min(data_SouthCarolina)/5118425, max(data_SouthCarolina)/5118425])\ninnerTLeft.add_row(['SD', statistics.mean(data_SouthDakota)/886667, statistics.stdev(data_SouthDakota)/886667, min(data_SouthDakota)/886667, max(data_SouthDakota)/886667])\n\n\ninnerTRight.title = 'State with mask mandate new cases per capita'\ninnerTRight.add_row(['NY', statistics.mean(data_NewYork) / 8177025, statistics.stdev(data_NewYork) / 8177025, min(data_NewYork) / 8177025, max(data_NewYork)/ 8177025])\ninnerTRight.add_row(['MA', statistics.mean(data_Massachusetts) / 7029917, 
statistics.stdev(data_Massachusetts) / 7029917, min(data_Massachusetts) / 7029917, max(data_Massachusetts) / 7029917])\ninnerTRight.add_row(['PA', statistics.mean(data_Pennsylvania) / 13002700, statistics.stdev(data_Pennsylvania)/ 13002700, min(data_Pennsylvania)/ 13002700, max(data_Pennsylvania)/ 13002700])\ninnerTRight.add_row(['OH', statistics.mean(data_Ohio) / 11799448, statistics.stdev(data_Ohio)/ 11799448, min(data_Ohio)/ 11799448, max(data_Ohio)/ 11799448])\ninnerTRight.add_row(['UT', statistics.mean(data_Utah) /3271616, statistics.stdev(data_Utah)/3271616, min(data_Utah)/3271616, max(data_Utah)/3271616])\n\n\n\noutput = PrettyTable()\noutput.field_names = ['State without mask mandate', 'State with mask mandate']\noutput.add_row([innerTLeft, innerTRight])\n\n\nfinal = PrettyTable()\nfinal.field_names = ['New Cases Per Day Per Capita']\nfinal.add_row([output])\n\nprint (innerTLeft)\nprint (\"\")\nprint (innerTRight)","repo_name":"joshkirsh720/ISYEfinal","sub_path":"Descriptive Analysis Table Code.py","file_name":"Descriptive Analysis Table Code.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72976745319","text":"class Solution:\n def maximumPopulation(self, logs: List[List[int]]) -> int:\n count = [0] * 101\n for birth, death in logs:\n for i in range(birth, death):\n count[i-1950] += 1\n \n ind = 0\n max_population = count[0]\n for i in range(1,len(count)):\n if count[i] > max_population:\n ind = i\n max_population = count[i]\n \n return ind + 1950","repo_name":"benj35/competitve-programming-2","sub_path":"1854-maximum-population-year/1854-maximum-population-year.py","file_name":"1854-maximum-population-year.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71134056040","text":"import streamlit as st\r\nimport PyPDF2\r\nimport openai\r\nimport requests\r\nimport json\r\nimport math\r\nimport pandas as pd\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport time\r\n\r\ndef extract_text_from_pdf(pdf_file):\r\n text = \"\"\r\n pdf_reader = PyPDF2.PdfReader(pdf_file)\r\n for page_num in range(len(pdf_reader.pages)):\r\n page = pdf_reader.pages[page_num]\r\n text += page.extract_text()\r\n return text\r\n\r\n\r\ndef extract_info(text):\r\n api_key = \"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\"\r\n openai.api_key = api_key\r\n prompt = \"\"\"Provide a a summary report, strengths and weaknesses in the\r\n resume, and on what areas the user can work to improve their resume\r\n of the based on the domain of the user : \"\"\" + text\r\n response = openai.Completion.create(\r\n model='gpt-3.5-turbo-instruct',\r\n prompt=prompt,\r\n max_tokens=2000,\r\n top_p=0.2\r\n )\r\n return response.choices[0].text\r\n\r\n\r\ndef main():\r\n st.title(\"Resume Analyzer\")\r\n st.write(\"Upload a PDF resume for analysis.\")\r\n\r\n uploaded_file = st.file_uploader(\"Upload a PDF file\", type=[\"pdf\"])\r\n\r\n if uploaded_file is not None:\r\n st.write(\"Analyzing the uploaded resume...\")\r\n\r\n resume_text = extract_text_from_pdf(uploaded_file)\r\n\r\n info = extract_info(resume_text)\r\n\r\n st.subheader(\"Extracted Text:\")\r\n st.markdown(f\"
{info}
\", unsafe_allow_html=True)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"MukulDevanath/Resume-Analyzer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22959529561","text":"import json\nfrom random import randint\n\n\nclass WorkHours:\n\n def work_hours(self, month, year):\n return randint(20, 23)*8 \n\nclass perHourIncome:\n\n def per_hour_income(salary, month, year):\n hour_income = salary/WorkHours().work_hours(month,year)\n return round(hour_income, 2)\n\n\ndef main():\n\n with open('input.json', 'r') as inp:\n data = json.load(inp)\n salary, month, year = data.get(\"salary\"), data.get('month'), data.get('year')\n perHourSalary = perHourIncome.per_hour_income(salary, month, year)\n\n with open('output.json', 'w') as out:\n data[\"hour_income\"] = perHourSalary\n out.write(json.dumps(data, ensure_ascii=False))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"alexeikotov/Practice_python","sub_path":"Task5/salary.py","file_name":"salary.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70924412519","text":"import logging\nimport paho.mqtt.client as mqtt\n\n# self-defined modules\nfrom vem.interpreter import Interpreter\n\n\nclass VaillantMessage():\n msg = None\n \n def __init__(self):\n self.mqtt = mqtt.Client()\n self.mqtt.on_connect = self._on_connect\n self.mqtt.connect(\"localhost\")\n self.mqtt.loop_start()\n\n def __str__(self):\n if self.msg is not None:\n return str(self.msg)\n else:\n return \"\"\n\n def _on_connect(self, client, userdata, flags, rc):\n '''callback for when the client receives a CONNACK response from the MQTT server.'''\n logging.info(\"Connected to MQTT broker with result code \"+str(rc))\n\n\n def interpret_msg(self, msg):\n # we rely on valid messages, with the protocoll already parsed\n assert type(msg) is Interpreter\n self.msg = msg\n # interpret message content\n msg_known = self._interpret_command()\n return msg_known\n \n\n def _interpret_command(self):\n # determine primary command\n if self.msg.cmd >> 8 == 0xb5:\n return self._interpret_cmd_vendor()\n else:\n logging.info(\"unknown message: \" + str(self))\n return False\n \n def _interpret_cmd_vendor(self):\n if self.msg.cmd & 0xff == 0x04:\n # 0xb5 0x04: Get Data Block\n assert len(self.msg.data) == 1\n # ensure presense of slave data\n if self.msg.slave_data is None or len(self.msg.slave_data) == 0:\n logging.warning(\"no slave data in 0xb5 0x04 block\")\n return False\n\n if self.msg.data[0] == 0:\n # status data block: timestamp and outside temperature\n assert len(self.msg.slave_data) == 0x0a\n # timestamp\n # the timestamp received here seems to be invalid; date is only 0xff\n # seconds count correctly but the absolute value is just totaly wrong\n # it seems the burner unit does not hold a valid time\n logging.debug(\"status data block 0: {}\".format(\":\".join(\"{:02x}\".format(c) for c in self.msg.slave_data)))\n # outside temperature\n outside_temp = (self.msg.slave_data[8] | (self.msg.slave_data[9]<<8) ) / 256\n self.mqtt.publish(\"vem/temp/outside\", outside_temp)\n \n elif self.msg.data[0] == 1:\n assert len(self.msg.slave_data) == 0x09\n logging.info(\"set temperatures received\")\n elif self.msg.data[0] == 2:\n assert len(self.msg.slave_data) == 0x07\n logging.info(\"set time windows received\")\n elif 
self.msg.data[0] == 9:\n assert len(self.msg.slave_data) == 0x0a\n logging.info(\"set heater parameters received\")\n elif self.msg.data[0] == 0x0d:\n assert len(self.msg.slave_data) == 0x05\n logging.info(\"set water parameters received\")\n else:\n logging.warning(\"unknown get data block received\")\n return False\n\n elif self.msg.cmd & 0xff == 0x05:\n # 0xb5 0x05: SetOperationMode\n # this is always 10:fe:b5:05:02:29:00:2c (in summer)\n # this message is sent once each hour\n logging.debug(\"unknown SetOperationMode message\" + str(self))\n #return False\n\n elif self.msg.cmd & 0xff == 0x10:\n # 0xb5 0x10: Operational Data from Room Controller to Burner Control Unit\n assert len(self.msg.data) == 9\n heatingwater_temp = self.msg.data[2]/2\n water_temp = self.msg.data[3]/2\n heating_enabled = (self.msg.data[6] & 0x01) == 0\n water_enabled = (self.msg.data[6] & 0x04) == 0\n # skip interpretation of slave data\n \n logging.debug(\"set: heating enabled: {}; water enabled: {}; heating temp: {}; water temp: {}\"\n .format(heating_enabled, water_enabled, heatingwater_temp, water_temp))\n\n elif self.msg.cmd & 0xff == 0x11:\n # 0xb5 0x11: Operational Data of Burner Control Unit to Room Control Unit \n assert len(self.msg.data) == 1\n # ensure presense of slave data\n if self.msg.slave_data is None or len(self.msg.slave_data) == 0:\n logging.warning(\"no slave data in 0xb5 0x11 block\")\n return False\n\n if self.msg.data[0] == 0x01:\n assert len(self.msg.slave_data) == 9\n lead_heatingwater_temp = self.msg.slave_data[0] / 2\n return_heatingwater_temp = self.msg.slave_data[1] / 2\n outside_temp = (self.msg.slave_data[2] | (self.msg.slave_data[3]<<8) ) / 256\n water_temp = self.msg.slave_data[4] / 2\n storage_water_temp = self.msg.slave_data[5] / 2\n heating_enabled = (self.msg.slave_data[6] & 0x01) != 0\n water_enabled = (self.msg.slave_data[6] & 0x02) != 0\n logging.debug(\"state: heating enabled: {}; \".format(heating_enabled)+\n \"water enabled: {}; \".format(water_enabled) +\n \"lead heating temp: {}; \".format(lead_heatingwater_temp) +\n \"return heating temp: {}; \".format(return_heatingwater_temp) +\n \"outside temp: {}; \".format(outside_temp) +\n \"water temp: {}; \".format(water_temp) +\n \"storage temp: {}\".format(storage_water_temp))\n self.mqtt.publish(\"vem/heating/enabled\", heating_enabled)\n self.mqtt.publish(\"vem/heating/water_temp_lead\", lead_heatingwater_temp)\n self.mqtt.publish(\"vem/heating/water_temp_return\", return_heatingwater_temp)\n self.mqtt.publish(\"vem/water/enabled\", water_enabled)\n self.mqtt.publish(\"vem/water/temp\", water_temp)\n self.mqtt.publish(\"vem/water/storage_temp\", storage_water_temp)\n self.mqtt.publish(\"vem/temp/outside\", outside_temp)\n \n elif self.msg.data[0] == 0x02:\n assert len(self.msg.slave_data) == 5\n water_target_temp = self.msg.slave_data[4] / 2\n logging.debug(\"water target temperature: {}\".format(water_target_temp))\n self.mqtt.publish(\"vem/water/target_temp\", water_target_temp)\n \n else:\n logging.warning(\"unknown 0xb5 0x11 block\")\n return False\n \n elif self.msg.cmd & 0xff == 0x12:\n # 0xb5 0x12: Various commands\n assert len(self.msg.data) == 0x02\n logging.debug(\"pump commands received: Nachladeverzögerung: {:02x} {:02x}\"\n .format(self.msg.data[0], self.msg.data[1]))\n \n elif self.msg.cmd & 0xff == 0x16:\n # 0xb5 0x16: Broadcast Service\n assert len(self.msg.data) > 0\n if self.msg.data[0] == 0x00:\n # Broadcast Date/Time\n assert len(self.msg.data) == 8\n sec = (self.msg.data[1]>>4)*10 + 
(self.msg.data[1]&0x0f)\n min = (self.msg.data[2]>>4)*10 + (self.msg.data[2]&0x0f)\n hour = (self.msg.data[3]>>4)*10 + (self.msg.data[3]&0x0f)\n day = (self.msg.data[4]>>4)*10 + (self.msg.data[4]&0x0f)\n month = (self.msg.data[5]>>4)*10 + (self.msg.data[5]&0x0f)\n weekday = (self.msg.data[6]>>4)*10 + (self.msg.data[6]&0x0f)\n year = 2000+(self.msg.data[7]>>4)*10 + (self.msg.data[7]&0x0f)\n timestamp = \"{:02}.{:02}.{} {:02}:{:02}:{:02}\".format(day, month, year, hour, min, sec)\n logging.debug(\"timestamp: {}\".format(timestamp))\n self.mqtt.publish(\"vem/misc/timestamp\", timestamp)\n \n elif self.msg.data[0] == 0x01:\n # Broadcast outside temp\n assert len(self.msg.data) == 3\n outside_temp = (self.msg.data[1] | (self.msg.data[2]<<8) ) / 256\n logging.debug(\"outside temperature: {}\".format(outside_temp))\n # this is already published from 0xb5 0x11 message\n #self.mqtt.publish(\"vem/temp/outside\", outside_temp)\n else:\n \n logging.warning(\"unknown vendor broadcast\")\n return False\n else:\n logging.warning(\"unknown vendor message\")\n return False\n \n # successfully interpreted data\n return True\n","repo_name":"embyt/vem","sub_path":"vem/vaillant.py","file_name":"vaillant.py","file_ext":"py","file_size_in_byte":8661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23046257930","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport random\n\nroot = tk.Tk()\n\n# ------------------------------ LISTADO DE METODOS ------------------------------ #\n\n\ndef JugadoresElegidos(numJugadores):\n\n Inicio.destroy()\n InicioBotones.destroy()\n\n def Ganador(numJugadores):\n\n ListadoFinal = []\n participante = 0\n\n while participante < numJugadores:\n\n exec(\"ListadoFinal.append(getJugador{}.get())\".format(participante + 1))\n participante += 1\n\n Jugadores.destroy()\n\n ganadorFinal = str(random.choice(ListadoFinal))\n\n textoParaGanador = tk.Label(MostrarGanador, text = \"El ganador (o perdedor) es...\", font = ('Verdana', 22), bg = \"white\")\n textoParaGanador.grid(row = 0, column = 0)\n \n elGanador = tk.Label(MostrarGanador, text = ganadorFinal, font = ('Verdana', 28), fg = \"green\", bg = \"white\")\n elGanador.grid(row = 1, column = 0)\n\n for i in ListadoFinal:\n if i.lower() == \"el desarrollador\":\n\n sorpresa = tk.Label(MostrarGanador, text = \"El desarrolador siempre gana :)\", font = ('Verdana', 28), fg = 'blue', bg = 'white')\n sorpresa.grid(row = 0, column = 0)\n\n textoParaGanador.destroy()\n elGanador.destroy()\n \n rr = 0\n cc = 0\n elem = 1\n\n while elem <= numJugadores:\n\n exec('labelJugador{}.grid(row = {}, column = {}, pady = 4)'.format(elem, rr, cc))\n\n cc += 1\n\n exec('entryJugador{}.grid(row = {}, column = {}, pady = 4)'.format(elem, rr, cc))\n\n rr += 1\n cc = 0\n elem += 1\n\n botonGanador = tk.Button(Jugadores, text = 'Mostrar al Ganador', command = lambda:Ganador(numJugadores))\n botonGanador.config(bg = \"green\",fg = \"white\", font = (None, 12), relief = \"flat\", overrelief = \"flat\", activebackground = \"green\")\n botonGanador.grid(row = rr, column = cc, columnspan = 2, pady = 20)\n\n\n# ------------------------------ CONFIGURACION ROOT ------------------------------ #\n\n\nroot.title(\"Hola Suerte!\")\nroot.iconbitmap(\"../hola_suerte/HolaSuerte/icono.ico\")\n\nbarra_menu = tk.Menu(root)\nroot.config(menu = barra_menu, bg = 'white')\nroot.geometry('900x600')\n\nroot.rowconfigure(0, weight=1)\nroot.columnconfigure(0, weight=1)\n\n\n# ------------------------------ BARRA 
SUPERIOR ------------------------------ #\n\n\ndef Info_desarrollador():\n messagebox.showinfo(\"Información del Desarrollador\", \"Email: alvarez.fing@gmail.com\\nGitHub: Mateoac12\")\n\ndef Info_aplicacion():\n messagebox.showinfo(\"Información de la Aplicación\", \"Pensada para reuniones de amigos o debates en el hogar. Por ejemplo \" + \n \"debatir quien va a pagar en una reunión o quien lavará los platos esta noche. Devuelve un participante de forma aleatoria, salvo que te llames 'el Desarrollador'...\")\n\n\nmenu_opciones = tk.Menu(barra_menu, tearoff = 0)\nmenu_opciones.add_command(label = 'Sobre el Desarrollador', command = lambda:Info_desarrollador())\nmenu_opciones.add_command(label = 'Sobre la Aplicación', command = lambda:Info_aplicacion())\n\nbarra_menu.add_cascade(label = 'Información', menu = menu_opciones)\n\n\n# ------------------------------ LISTADO DE FRAMES ------------------------------ #\n\n\nInicio = tk.Frame(root)\nInicio.grid(row = 0, column = 0)\nInicio.config(bg = 'white')\n\n\n\nInicioBotones = tk.Frame(Inicio)\nInicioBotones.grid(row = 2, column = 0)\nInicioBotones.config(bg = 'white')\n\n\n\nJugadores = tk.Frame(root)\nJugadores.grid(row = 0, column = 0, pady = 40)\nJugadores.config(bg = 'white')\n\n\n\nMostrarGanador = tk.Frame(root)\nMostrarGanador.grid(row = 0, column = 0)\nMostrarGanador.config(bg = 'white')\n\n\n# ------------------------------ CONSTRUCCION DE LOS WIDGETS ------------------------------ #\n\nlogotipo = tk.PhotoImage(file = \"../hola_suerte/HolaSuerte/HolaSuerteLogo.png\")\n\nmiLogo = tk.Label(Inicio, image=logotipo, bg = 'white')\nmiLogo.grid(row = 0, column = 0, padx = 20, pady = 20)\n\ninfo = '¡Haz Click en el número de participantes y que comiencen los nervios!'\ninfoLabel = tk.Label(Inicio, text = info, bg = 'white', font = ('Verdana', 12))\ninfoLabel.grid(row = 1, column = 0, pady = 20, padx = 20)\n\nnumJugador = 1\njugadoresTotales = 13\n\nwhile numJugador <= jugadoresTotales:\n\n exec('getJugador{} = tk.StringVar()'.format(numJugador))\n \n exec('labelJugador{} = tk.Label(Jugadores, text = \"Integrante {}:\", justify = tk.CENTER, bg = \"white\", font = (\"Verdana\", 12))'.format(numJugador, numJugador))\n\n exec('entryJugador{} = tk.Entry(Jugadores, textvariable = getJugador{}, font = (\"Verdana\", 12))'.format(numJugador, numJugador))\n\n numJugador += 1\n\nr = 2\nc = 0\nboton = 2\nbotonesTotales = jugadoresTotales\n\nwhile boton <= botonesTotales:\n\n exec('boton{} = tk.Button(InicioBotones, text = \"{}\", command = lambda:JugadoresElegidos({}))'.format(boton, boton, boton))\n\n exec('boton{}.config(bg = \"lightgrey\", font = (None, 20), relief = \"flat\", overrelief = \"flat\", activebackground = \"lightgrey\", width = 2)'.format(boton))\n\n if c == 6:\n\n r += 1\n c = 0\n\n exec('boton{}.grid(row = {}, column = {}, padx = 10, pady = 10)'.format(boton, r, c))\n\n boton += 1\n c += 1\n\n\nroot.mainloop()","repo_name":"maadeval/hola_suerte-python","sub_path":"HolaSuerte/Hola_Suerte.py","file_name":"Hola_Suerte.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71981281639","text":"from config import *\nfrom utils import *\n\n\n@bot.message_handler(regexp=\"^🛍 Orders\")\ndef store(msg):\n \"\"\"\n Reply when the order Category is selected\n \"\"\"\n\n user = get_user(msg)\n print(user)\n\n bot.send_message(\n msg.from_user.id,\n \"What can we help you do today? 
Please explain it to us here ...\",\n )\n\n bot.reply_to(\n msg,\n f\"You just successfully added a new user to the sqlalchemy database with the id, {user.id}\",\n\n )\n","repo_name":"bouncei/telegram-outlet-bot","sub_path":"main/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38023637330","text":"import sys\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\n\r\n# contributor: Menna & Abdullah\r\n\r\ndef main(argv):\r\n #file = 'Empty1.jpg'\r\n file = 'Empty2.jpg'\r\n #file = 'Empty3.png'\r\n #file = 'empty_lot.jpg'\r\n\r\n # load the input image\r\n img = cv.imread(file, cv.IMREAD_GRAYSCALE)\r\n\r\n # Check if image is loaded fine\r\n if img is None:\r\n print('Error opening image!')\r\n print('Usage: hough_lines.py [image_name -- default ' + file + '] \\n')\r\n return -1\r\n\r\n dst = cv.Canny(img, 250, 350, None, 3)\r\n # here i can change the value of the min and max gradients depending on the brightness of the image\r\n cv.imshow (\"canny\", dst)\r\n # Copy edges to the images that will display the results in BGR\r\n new_dst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)\r\n dstP = np.copy(new_dst)\r\n # using HoughLinesP function to get the lines detected with both the start and the end points\r\n linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)\r\n\r\n\r\n if linesP is not None:\r\n for i in range(0, len(linesP)):\r\n l = linesP[i][0]\r\n cv.line(dstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 6, cv.LINE_AA)\r\n #print(l[0], \" \", l[1], \" \", l[2], \" \", l[3])\r\n # cv.LINE_AA >> gives anti-aliased line which looks great for curves.\r\n\r\n # show input image and output image\r\n cv.imshow(\"Source\", img)\r\n cv.imshow(\"Detected Lines (in red) - Probabilistic Line Transform\", dstP)\r\n\r\n # save the output image (used to record the output only - and - not a must in the main algorithm)\r\n #cv.imwrite(\"Empty1_r.jpg\", dstP)\r\n #cv.imwrite(\"Empty2_r.jpg\", dstP)\r\n #cv.imwrite(\"Empty3_r.jpg\", dstP)\r\n #cv.imwrite(\"empty_lot_r.jpg\", dstP)\r\n\r\n cv.waitKey()\r\n return 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n","repo_name":"AhmedSayedSk/ParkingLot","sub_path":"Different trials/Hough Detection/line detection.py","file_name":"line detection.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"880832871","text":"def arrondisementNote(list = [12,62,45,60,67,52,91,73,60]):\n \n for i in range(0,len(list)):\n if (list[i] >= 40 and list[i] < 100):\n for J in range(1,3):\n if( (list[i] + J) % 5 == 0):\n list[i] = list[i]+J\n \n return list\n\nprint(arrondisementNote())\n# arrondisementNote()","repo_name":"hassani-nassur/runtrack-python","sub_path":"jour05/job06.py","file_name":"job06.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11619960989","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n#David Dorado\r\n#Date:April, 24\r\n# So from the data it seems that people are more likely to click on the red based on the data. 
Which is very different from the null hypothesis.\r\n\r\nimport pandas as pd\r\nimport numpy as np \r\n\r\npopulation= ['red', 'blue']\r\nweight=[0.9,0.1]\r\n\r\nnumber_list=[]\r\nfor i in range(100):\r\n sample_array= np.random.choice(population, p=weight, size=3840)\r\n sample= pd.Series(sample_array)\r\n counts= sample.value_counts()\r\n number= counts['blue']\r\n number_list.append(number)\r\n \r\nnumber_series= pd.Series(number_list)\r\nnumber_series.plot.hist(bins=20) \r\n ","repo_name":"dave-sys/Data-Science-Assignments","sub_path":"Assign36.py","file_name":"Assign36.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15505052845","text":"import os\n\nfrom libraries.provision.ansible_runner import AnsibleRunner\n\nfrom keywords.utils import log_info\n\n\ndef push_cbcollect_info_supportal(cluster_config):\n \"\"\"\n 1. Runs cbcollect_info on one of the couchbase server nodes\n 2. Pushes to supportal.couchbase.com\n \"\"\"\n ansible_runner = AnsibleRunner(config=cluster_config)\n status = ansible_runner.run_ansible_playbook(\"push-cbcollect-info-supportal.yml\")\n assert status == 0, \"Failed to push cbcollect info\"\n\n\nif __name__ == \"__main__\":\n\n try:\n cluster_conf = os.environ[\"CLUSTER_CONFIG\"]\n except KeyError as ke:\n log_info(\"Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision\")\n raise KeyError(\"CLUSTER_CONFIG not defined. Unable to provision cluster.\")\n\n push_cbcollect_info_supportal(cluster_conf)\n","repo_name":"couchbaselabs/mobile-testkit","sub_path":"libraries/utilities/push_cbcollect_info_supportal.py","file_name":"push_cbcollect_info_supportal.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"18"} +{"seq_id":"10790486117","text":"\n#############################################################################\n# ocr.py\n#\n# Authors: Anurag\n#\n# Performs OCR given a file name\n#############################################################################\n\n# Do not touch the next 3 lines\n\nimport sys\n\nsys.path.insert(1, \"../../Compiler\")\nsys.path.insert(1, \"../../OCR\")\n\n# Do not move the above around or change the order otherwise the other import statements will break\n# Add any future import statements below this line\n\nimport os\nfrom PIL import Image\nfrom ocr_wrapper import ocr_wrapper\nfrom compiler_wrapper import compiler_wrapper\n\n# ocr_wrapper\n# Author: Anurag\n# Return value: compiler's output\n# Parameters:\n#\t1. 
fname - file name\n# This function is a wrapper for the entire OCR Process\n# Calls functions in:\n#\tBackend/OCR/ocr_wrapper.py\n#\tBackend/Compiler/compiler_wrapper.py\n\ndef ocr(fname):\n\timage = Image.open(fname)\n\tos.chdir(\"../../OCR\")\n\tocr_out = ocr_wrapper(image)\n\tos.chdir(\"../Compiler\")\n\tcompiler_out = compiler_wrapper(ocr_out)\n\tos.chdir(\"../WhiteBoardBackEnd/WhiteBoardBackEnd\")\n\treturn compiler_out\n\n\nif __name__ == \"__main__\":\n\t# Testing function for pipeline\n\ttest_im_path = \"../../OCR/images/tesseract_tests/\"\n\ttest_im = \"test2\"\n\timsuffix = \".png\"\n\tprint(ocr(test_im_path + test_im + imsuffix))","repo_name":"Anurag-Shah/WhiteBoard","sub_path":"Backend/WhiteBoardBackEnd/API/OCRAPI.py","file_name":"OCRAPI.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"28600583377","text":"from django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Project, Cooperation, Organization, Review\n\nfrom projects.serializers import ReviewSerializer\n\nREVIEW_URL = reverse('projects:review-list')\n\n\ndef sample_user(email='test@xemob.com', password='testpass'):\n \"\"\"Create a sample user\"\"\"\n return get_user_model().objects.create_user(email, password)\n\n\nclass PublicReviewApiTests(TestCase):\n \"\"\"Test the publicly available reviews API\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n\n self.user = sample_user()\n self.user2 = sample_user(email='other@xemob.com')\n self.organization = Organization.objects.create(\n user=self.user, name='Sample Ngo', country='Spain')\n self.project = Project.objects.create(\n user=self.user,\n name='Test project',\n organization=self.organization,\n description='Project description'\n )\n self.cooperation_name = f\"\"\"\n Cooperation between {self.user.name} and {self.user2.name},\n for the project {self.project.name}\"\"\"\n self.cooperation = Cooperation.objects.create(\n name=self.cooperation_name[:255],\n project=self.project,\n user=self.user,\n voluntary=self.user2\n )\n\n def test_login_not_required(self):\n \"\"\"Test that login is not required to access the endpoint\"\"\"\n res = self.client.get(REVIEW_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n def test_retrieve_reviews_list(self):\n \"\"\"Test retrieving a list of reviews items\"\"\"\n Review.objects.create(\n name='Title of sample review',\n cooperation=self.cooperation,\n user=self.user,\n reviewed=self.user2,\n review='This is a sample review'\n )\n Review.objects.create(\n name='Title of sample review 2',\n cooperation=self.cooperation,\n user=self.user2,\n reviewed=self.user,\n review='This is a sample review 2'\n )\n res = self.client.get(REVIEW_URL)\n\n reviews_items = Review.objects.all().order_by('-id')\n serializer = ReviewSerializer(reviews_items, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n\n\nclass PrivateReviewApiTests(TestCase):\n \"\"\"Test the Review API for authenticated users\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n self.user = get_user_model().objects.create_user(\n 'test@xemob.com',\n 'testpass'\n )\n self.client.force_authenticate(self.user)\n self.user2 = get_user_model().objects.create_user(\n 'other@xemob.com',\n 'testpass'\n )\n\n def 
test_create_review_successful(self):\n \"\"\"Test create a review with valid payload\"\"\"\n sample_org = Organization.objects.create(user=self.user,\n name='Sample ngo',\n country='spain')\n sample_project = Project.objects.create(user=self.user,\n organization=sample_org,\n name='Sample Project')\n sample_cooperation = Cooperation.objects.create(\n name='Sample cooperation',\n project=sample_project)\n\n payload = {'name': 'This is a sample review',\n 'cooperation': sample_cooperation.id,\n 'user': self.user.id,\n 'reviewed': self.user2.id,\n 'review': 'This is a sample review body'\n }\n self.client.post(REVIEW_URL, payload)\n\n exists = Review.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)\n\n def test_create_review_invalid(self):\n \"\"\"Test creating review with invalid payload fails\"\"\"\n sample_org = Organization.objects.create(user=self.user,\n name='Sample ngo',\n country='spain')\n sample_project = Project.objects.create(user=self.user,\n organization=sample_org,\n name='Sample Project')\n sample_cooperation = Cooperation.objects.create(\n name='Sample cooperation',\n project=sample_project)\n\n payload = {'name': '',\n 'cooperation': sample_cooperation,\n 'user': self.user,\n 'reviewed': self.user2,\n 'review': ''\n }\n res = self.client.post(REVIEW_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n","repo_name":"nestor-san/cooperation-fit","sub_path":"app/projects/tests/test_review_api.py","file_name":"test_review_api.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11595406744","text":"from PyQt5.QtWidgets import QVBoxLayout, QWidget, QGridLayout, QSizePolicy\n\nfrom src.menus.brush_menus.BrushTypePicker import BrushTypePicker\nfrom src.menus.brush_menus.ColorPicker import ColorPicker\nfrom src.menus.brush_menus.SizePicker import SizePicker\n\n\nclass DrawMenu(QWidget):\n \"\"\"\n Combines all the brush menus in a widget and layouts them\n \"\"\"\n\n def __init__(self, window, brush):\n super().__init__(window)\n\n self.brush = brush\n self.layout = QVBoxLayout()\n self.color_picker = ColorPicker(brush)\n self.size_picker = SizePicker(brush)\n self.brush_type_picker = BrushTypePicker(brush)\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n\n self.__layout()\n\n def __layout(self):\n self.layout = QGridLayout()\n self.layout.addWidget(self.color_picker, 0, 0)\n self.layout.addWidget(self.size_picker, 1, 0)\n self.layout.addLayout(self.brush_type_picker, 0, 1, 2, 1)\n self.setLayout(self.layout)\n self.setFixedHeight(150)\n","repo_name":"yannikinniger/paint-app","sub_path":"src/menus/DrawMenu.py","file_name":"DrawMenu.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2814967086","text":"# Opening file \"um-server-01.txt\" and setting its contents into log_file\nlog_file = open(\"um-server-01.txt\")\n\n# Creating function sales_reports that takes the variable log_file that holds\n# our files data \ndef sales_reports(log_file):\n # For loop that loops over each line in the file ('um-server-01.txt')\n for line in log_file:\n # gets rid of the trailing characters in this space its white space.\n line = line.rstrip()\n # sets variable day to the line of slice start at character 0 end at \n # character 3\n day = line[0:3]\n # if the first 3 characters == \"mon\" then do some 
functionality\n if day == \"Mon\":\n # prints the line.\n print(line)\n\n# calling the function with our argument(log_file)\nsales_reports(log_file)\n","repo_name":"ItsDlylan/Week-4-Assessment","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72957262759","text":"__author__ = \"Benjamin Feder\"\n\n# A. match_ends\n# Given a list of strings, return the count of the number of\n# strings where the string length is 2 or more and the first\n# and last chars of the string are the same.\n# Note: python does not have a ++ operator, but += works.\n\n\ndef match_ends(words):\n count = 0\n for string in words:\n if len(string) >= 2 and string[0] == string[-1]:\n count += 1\n return count\n\n\n# B. front_x\n# Given a list of strings, return a list with the strings in\n# sorted order, except group all the strings that begin with\n# 'x' first.\n# Example:\n# ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields\n# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']\n# Hint: this can be done by making 2 lists and sorting each\n# of them before combining them.\n\n\ndef front_x(words):\n\n x_words = []\n non_x_words = []\n\n for word in words:\n if word[0] == \"x\":\n x_words.append(word)\n else:\n non_x_words.append(word)\n\n sorted_x_words = sorted(x_words)\n sorted_non_x_words = sorted(non_x_words)\n\n sorted_x_words.extend(sorted_non_x_words)\n\n return sorted_x_words\n\n\n# C. sort_last\n# Given a list of non-empty tuples, return a list sorted in\n# increasing order by the last element in each tuple.\n# Example\n# [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields\n# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]\n# Hint: use a custom key= function to extract the last element from each tuple.\n\n\ndef sort_last(tuples):\n new_tuples = sorted(tuples, key=lambda tup: tup[-1])\n return new_tuples\n\n\n# D. remove_adjacent\n# Given a list of numbers, return a list where all adjacent\n# equal elements have been reduced to a single element.\n# Example:\n# [1, 2, 2, 3] -> [1, 2, 3]\n# You may create a new list or modify the passed in list.\n# Hint: Don't use set()\n\n\ndef remove_adjacent(nums):\n new_nums = []\n for i, num in enumerate(nums):\n if i == 0 or (i > 0 and num != nums[i-1]):\n new_nums.append(num)\n return new_nums\n\n\n# E. zip_merge\n# Given two lists, combine the values from their corresponding\n# indices into a single list.\n# list1 = [\"M\", \"na\", \"i\", \"Ke\"]\n# list2 = [\"y\", \"me\", \"s\", \"lly\"]\n# result = ['My', 'name', 'is', 'Kelly']\n# Hint: Think of it as \"zipping\" two lists together. Is there\n# a built-in function in python that will do this?\n\n\ndef zip_merge(list1, list2):\n \"\"\"\n result = []\n for char in range(len(list1)):\n result.append(list1[char] + list2[char])\n return result\n # this works too\n \"\"\"\n\n return [\"\".join(elem) for elem in zip(list1, list2)]\n\n\n# F. empty_filter\n# Given a single list containing strings, empty strings, and\n# None values: Return a new list with the same elements, but\n# strip out (filter) the empty strings and None values away.\n# example: list1 = [\"Mike\", \"\", \"Emma\", None, \"Kelly\", \"\", \"Brad\", None]\n# result: [\"Mike\", \"Emma\", \"Kelly\", \"Brad\"]\n# Hint: There is a Python idiom for doing this. Can you find it?\n\n\ndef empty_filter(list1):\n new_list = []\n for string in list1:\n if string:\n new_list.append(string)\n return new_list\n\n\n# G. 
linear_merge\n# Given two lists sorted in increasing order, create and\n# return a merged list of all the elements in sorted order.\n# You may modify the passed in lists.\n# The solution should work in \"linear\" time, making a single\n# pass of both lists.\n# Hint: Don't use `sort` or `sorted` -- they are not O(n)\n# linear time and the two lists are already provided in\n# ascending sorted order.\n\n\ndef linear_merge(list1, list2):\n new_list = []\n while list1 and list2:\n if list1[0] < list2[0]:\n new_list.append(list1.pop(0))\n else:\n new_list.append(list2.pop(0))\n new_list.extend(list1)\n new_list.extend(list2)\n return new_list\n","repo_name":"BenFeder/kenzie-se-q3-lists-BenFeder","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11112809910","text":"from datetime import datetime\nimport wx\n\n\nclass LogDialog(wx.Dialog):\n def __init__(self, parent):\n super().__init__(parent, \n title=\"MQTT Message Log\", \n style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n \n self.log_textctrl = wx.TextCtrl(self, style=wx.TE_MULTILINE | wx.TE_READONLY)\n\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.sizer.Add(self.log_textctrl, 1, flag=wx.EXPAND)\n\n self.SetSizer(self.sizer)\n\n def log_message(self, msg):\n timestamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.log_textctrl.AppendText(f'[{timestamp}] {msg}\\n')","repo_name":"datajango/self2023_iot","sub_path":"wx_mqtt_control/log_dialog.py","file_name":"log_dialog.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12787985893","text":"def solution(N, stages):\n answer = []\n stages.sort()\n\n failure = [0] * (N+1)\n\n for item in stages:\n for i in range(0, item):\n failure[i] += 1\n\n failure_rate = []\n for i in range(0, len(failure)-1):\n try:\n failure_rate.append((failure[i] - failure[i+1])/failure[i])\n except ZeroDivisionError:\n failure_rate.append(0)\n\n f_list = []\n for i in range(0, len(failure_rate)):\n f_list.append([i+1, failure_rate[i]])\n\n f_list.sort(key=lambda x: x[1], reverse=True)\n for stage in f_list:\n answer.append(stage[0])\n return answer\n\n\nif __name__ == \"__main__\":\n arr_N = [5, 4, 5]\n arr_stages = [[2, 1, 2, 6, 2, 4, 3, 3], [4, 4, 4, 4, 4], [2,1,2,4,2,4,3,3]]\n for N, stages in zip(arr_N, arr_stages):\n print(solution(N, stages))\n","repo_name":"dydfuf/Coding-Test","sub_path":"Programmers/Level1/failure_rate.py","file_name":"failure_rate.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10455457376","text":"from google.cloud import firestore\nimport json\n\ndef download_document(doc_id='all'):\n db = firestore.Client(project='hakoona-matata-298704')\n users_ref = db.collection(u'users')\n docs = users_ref.stream()\n for doc in docs:\n if doc_id in (str(doc.id),'all'):\n with open('download/'+str(doc.id)+\".json\",'w') as file_obj:\n data = doc.to_dict()\n json.dump(data,file_obj,indent=2)\n file_obj.close()\n print(\"file downloaded\")\ndef upload_document(doc_id='all'):\n db = firestore.Client(project='hakoona-matata-298704')\n collection_ref = db.collection('users')\n with open('download/user1.json','r') as file_obj:\n data=json.load(file_obj)\n doc_ref = collection_ref.document('python_test')\n doc_ref.set(data)\n 
file_obj.close()\n","repo_name":"manireddy1997/GCP_dataengineering","sub_path":"python/firestore_client.py","file_name":"firestore_client.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4820141936","text":"#!/usr/bin/env python3\n\nimport sys\nimport json\nimport numpy as np\n\nimport common.runner as runner\nimport common.params as params\n\nout_file = os.path.abspath(sys.argv[1])\nscenarios_dir = os.path.abspath(sys.argv[2])\n\nresult = {}\nfor scenario in params.scenarios:\n print(f\"Running {scenario.human_name} in Callgrind\")\n\n centroid_instrss = []\n starid_instrss = []\n centroid_memorys = []\n starid_memorys = []\n with runner.LostDatabase(scenario.lost_database_params) as db:\n for img_path in scenario.image_paths(params.comprehensive_num_callgrinds, scenarios_dir):\n all_params = scenario.lost_params + ['--png', img_path, '--database', db]\n\n callgrind_results = runner.run_callgrind_on_lost(all_params)\n centroid_instrss.append(callgrind_results[scenario.lost_centroid_function_name])\n # sometimes will be zero, because there weren't enough stars in the image.\n starid_instrss.append(callgrind_results.get(scenario.lost_starid_function_name, 0))\n\n massif_results = runner.run_massif_on_lost(all_params)\n centroid_memorys.append(massif_results[scenario.lost_centroid_function_name])\n starid_memorys.append(massif_results.get(scenario.lost_starid_function_name, 0))\n\n centroid_avg_instrs = int(np.mean(centroid_instrss))\n starid_avg_instrs = int(np.mean(starid_instrss))\n centroid_avg_memory_kib = int(np.mean(centroid_memorys)) // 1024\n starid_avg_memory_kib = int(np.mean(starid_memorys)) // 1024\n result[scenario.machine_name] = {\n 'lost_centroid_avg_instrs': centroid_avg_instrs,\n 'lost_starid_avg_instrs': starid_avg_instrs,\n 'lost_total_avg_instrs': centroid_avg_instrs+starid_avg_instrs,\n 'lost_centroid_avg_memory_kib': centroid_avg_memory_kib,\n 'lost_starid_avg_memory_kib': starid_avg_memory_kib,\n }\n \nwith open(out_file, 'w') as f:\n json.dump(result, f, indent=4)\n","repo_name":"UWCubeSat/lost-evals","sub_path":"comprehensive/callgrind.json.py","file_name":"callgrind.json.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"4035097646","text":"def solution(N, M, coin_type):\n answer = 0\n INF = 9876543210\n dp = [INF] * (M + 1)\n for coin in coin_type:\n if coin <= M:\n dp[coin] = 1\n for i in range(1, M + 1):\n if dp[i] != INF:\n continue\n for coin in coin_type:\n if i - coin >= 0 and dp[i - coin] != INF:\n dp[i] = min(dp[i], dp[i - coin] + 1)\n\n if dp[M] == INF:\n return -1\n else:\n return dp[M]\n\n\nprint(solution(2, 15, [2, 3]))\nprint(solution(3, 4, [3, 5, 7]))\n\n","repo_name":"kakaocloudschool/dangicodingtest","sub_path":"007_DP/001_이코테/005_효율적인화폐구성.py","file_name":"005_효율적인화폐구성.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"72095278185","text":"'''\nDescription: S7Comm Parameter Function: 0xf0, setup communication, no additional parameter input\nAutor: Jechin\nUsage: ./astf-sim -f ~/s7/astf/s7_ASTF_Setup_Comm.py --full -o ~/s7/astf/pcap/s7_ASTF_Setup_Comm.pcap -t config=\"config_Setup_Communication.json\"\n'''\n\nfrom trex.astf.api import *\nimport argparse\nimport os, sys, json\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + 
\"/../\")\nfrom s7module.S7 import *\n\n# def S7_HEADER(rosctr, protocol_data_unit_reference, parameter_length, data_length, reversed=0x0000, error_para=False):\n# if error_para:\n# return b'\\x32' + rosctr.to_bytes(1, byteorder='big') + reversed.to_bytes(2, byteorder='big') + \\\n# protocol_data_unit_reference.to_bytes(2, byteorder='big') + parameter_length.to_bytes(2, byteorder='big') + data_length.to_bytes(2, byteorder='big') + b'\\x00\\x00'\n# return b'\\x32' + rosctr.to_bytes(1, byteorder='big') + reversed.to_bytes(2, byteorder='big') + \\\n# protocol_data_unit_reference.to_bytes(2, byteorder='big') + parameter_length.to_bytes(2, byteorder='big') + data_length.to_bytes(2, byteorder='big')\n\n# def S7_PARAMETER_JOB_0xF0(function, calling, called, pdu_length, reversed=0x00):\n# return function.to_bytes(1, byteorder='big') + reversed.to_bytes(1, byteorder='big') + calling.to_bytes(2, byteorder='big') + called.to_bytes(2, byteorder='big') + pdu_length.to_bytes(2, byteorder='big')\n\n# # TPKT protocol header without length\n# tpkt = b'\\x03\\x00\\x00\\x16'\n# # COPT Connect request\n# copt_cr = b'\\x11\\xe0\\x00\\x00\\x00\\x06\\x00\\xc1\\x02\\x01\\x00\\xc2\\x02\\x01\\x02\\xc0\\x01\\x0a'\n# # COTP Connection Comfirm\n# copt_cc = b'\\x11\\xd0\\x00\\x06\\x00\\x03\\x00\\xc0\\x01\\x0a\\xc1\\x02\\x01\\x00\\xc2\\x02\\x01\\x02'\n\n# S7_connect_request = tpkt + copt_cr\n# S7_connect_confirm = tpkt + copt_cc\n\n# # S7Comm datagram with Rosctr: 0x01, job and Fuction: 0xf0, setup communication\n# s7_job_f0_parameter = S7_PARAMETER_JOB_0xF0(function=0xf0, calling=1, called=1, pdu_length=480)\n# s7_header = S7_HEADER(rosctr=0x01, protocol_data_unit_reference=512, parameter_length=len(s7_job_f0_parameter), data_length=0)\n# s7_job_f0 = s7_header + s7_job_f0_parameter\n# copt_header = b'\\x02\\xf0\\x80'\n# tpkt_header = b'\\x03\\x00' + (len(copt_header + s7_job_f0) + 4).to_bytes(2, byteorder='big')\n# S7_job_f0 = tpkt_header + copt_header + s7_job_f0\n\n# # S7Comm datagram with Rosctr: 0x03, ACK DATA and Fuction: 0xf0, setup communication\n# s7_job_f0_ack_parameter = S7_PARAMETER_JOB_0xF0(function=0xf0, calling=1, called=1, pdu_length=240)\n# s7_header = S7_HEADER(rosctr=0x03, protocol_data_unit_reference=512, parameter_length=len(s7_job_f0_ack_parameter), data_length=0, error_para=True)\n# s7_job_f0_ack = s7_header + s7_job_f0_ack_parameter\n# copt_header = b'\\x02\\xf0\\x80'\n# tpkt_header = b'\\x03\\x00' + (len(copt_header + s7_job_f0_ack) + 4).to_bytes(2, byteorder='big')\n# S7_job_f0_ack = tpkt_header + copt_header + s7_job_f0_ack\n\ndef load_config(config_file):\n if not os.path.isabs(config_file):\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), config_file)\n \n if not os.path.exists(config_file):\n colormsg(title_with_color=\"[Error]\", msg=\"Config file not found: {}\".format(config_file), color=\"red\")\n colormsg(title_with_color=\"[Warn]\", msg=\"Use default config file: config_Setup_Communication.json\", color=\"yellow\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config_Setup_Communication.json\")\n \n try:\n with open(config_file, \"r\") as f:\n config = json.load(f)\n except Exception as e:\n colormsg(title_with_color=\"[Error]\", msg=\"Load config file failed: {}\".format(e), color=\"red\")\n sys.exit(1)\n\n check_config(config)\n return config\n\ndef check_config_connect(config_connect: dict):\n if \"sip\" not in config_connect.keys():\n colormsg(title_with_color=\"[Error]\", msg=\"Missing key: sip\", color=\"red\")\n sys.exit(1)\n 
if \"dip\" not in config_connect.keys():\n colormsg(title_with_color=\"[Error]\", msg=\"Missing key: dip\", color=\"red\")\n sys.exit(1)\n\ndef check_config_function(config_function: dict):\n if \"calling\" not in config_function.keys():\n colormsg(title_with_color=\"[Error]\", msg=\"Missing key: calling\", color=\"red\")\n sys.exit(1)\n if \"called\" not in config_function.keys():\n colormsg(title_with_color=\"[Error]\", msg=\"Missing key: called\", color=\"red\")\n sys.exit(1)\n if \"pdu_size\" not in config_function.keys():\n colormsg(title_with_color=\"[Error]\", msg=\"Missing key: pdu_size\", color=\"red\")\n sys.exit(1)\n\ndef check_config(config: dict):\n if \"Connect\" not in config.keys():\n colormsg(title_with_color=\"[Error]\", msg=\"Missing key: Connect\", color=\"red\")\n sys.exit(1)\n check_config_connect(config[\"Connect\"])\n\n if \"Setup_Communication\" not in config.keys():\n colormsg(title_with_color=\"[Error]\", msg=\"Missing key: Setup_Communication\", color=\"red\")\n sys.exit(1)\n check_config_function(config[\"Setup_Communication\"])\n\nclass Prof1():\n def __init__(self):\n pass # tunables\n\n def create_profile(self, config):\n # generate s7 protocol packets\n s7 = S7()\n [S7_connect_request, S7_connect_confirm] = s7.CR_and_CC()\n S7_Job_setup_communication = s7.Job_Determine_Function(function=JobFunction.SETUP_COMMUNICATION).generate_byte(\n max_amq_calling=config[\"Setup_Communication\"][\"calling\"],\n max_amq_called=config[\"Setup_Communication\"][\"called\"],\n pdu_size=config[\"Setup_Communication\"][\"pdu_size\"]\n )\n S7_Ack_setup_communication = s7.ACK_Data_Determine_Function(function=JobFunction.SETUP_COMMUNICATION).generate_byte(\n max_amq_calling=config[\"Setup_Communication\"][\"calling\"],\n max_amq_called=config[\"Setup_Communication\"][\"called\"],\n pdu_size=config[\"Setup_Communication\"][\"pdu_size\"]\n )\n\n # client commands\n prog_c = ASTFProgram()\n prog_c.delay(1000)\n prog_c.send(S7_connect_request)\n prog_c.recv(len(S7_connect_confirm))\n # prog_c.delay(1000)\n prog_c.send(S7_Job_setup_communication)\n prog_c.recv(len(S7_Ack_setup_communication))\n # prog_c.delay(1000)\n \n\n prog_s = ASTFProgram()\n prog_s.accept()\n prog_s.recv(len(S7_connect_request))\n # prog_s.delay(1000)\n prog_s.send(S7_connect_confirm)\n # prog_s.delay(1000)\n prog_s.recv(len(S7_Job_setup_communication))\n # prog_s.delay(1000)\n prog_s.send(S7_Ack_setup_communication)\n # prog_s.delay(1000)\n \n\n assoc=ASTFAssociationRule(port=81)\n # ip generator\n # ip_gen_c = ASTFIPGenDist(ip_range=[\"16.0.0.0\", \"16.0.0.255\"], distribution=\"seq\")\n # ip_gen_s = ASTFIPGenDist(ip_range=[\"48.0.0.0\", \"48.0.255.255\"], distribution=\"seq\")\n ip_gen_c = ASTFIPGenDist(ip_range=[config[\"Connect\"][\"sip\"], config[\"Connect\"][\"sip\"]], distribution=\"seq\")\n ip_gen_s = ASTFIPGenDist(ip_range=[config[\"Connect\"][\"dip\"], config[\"Connect\"][\"dip\"]], distribution=\"seq\")\n ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset=\"1.0.0.0\"),\n dist_client=ip_gen_c,\n dist_server=ip_gen_s)\n\n\n # template\n temp_c = ASTFTCPClientTemplate(port=102,program=prog_c,ip_gen=ip_gen,limit=1)\n temp_s = ASTFTCPServerTemplate(program=prog_s,assoc=ASTFAssociationRule(102)) # using default association\n template = ASTFTemplate(client_template=temp_c, server_template=temp_s)\n\n # profile\n profile = ASTFProfile(default_ip_gen=ip_gen, templates=template)\n return profile\n\n def get_profile(self, tunables, **kwargs):\n parser = argparse.ArgumentParser(description='Argparser for 
{}'.format(os.path.basename(__file__)), \n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--config', type=str, default=\"config_copt.json\", help='path of config file')\n\n args = parser.parse_args(tunables)\n config = load_config(args.config)\n return self.create_profile(config)\n\n\ndef register():\n return Prof1()\n","repo_name":"Jechin/S7-Protocol-Traffic-Generation","sub_path":"astf/s7_ASTF_Setup_Comm.py","file_name":"s7_ASTF_Setup_Comm.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"70146482343","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Anand Choudhary\n\"\"\"\nimport numpy as np\nimport math\nfrom component import *\nfrom photon import *\nfrom photon_enc import *\nfrom quantum_state import *\n\nclass Detector():\n \n \"\"\"\n Models a Single Photon Detector\n \n References: \n 1. R. H. Hadfield, \"Single-photon detectors for optical quantum information applications,\" Nature Photonics, vol. 3, pp. 696–705, 2009\n 2. M. Lasota and P. Kolenderski, \"Optimal photon pairs for quantum communication protocols,\" Nature Scientific Reports, vol. 10, no. 20810, 2020\n \n Attributes:\n uID (str) = Unique ID\n env (simpy.Environment) = Simpy Environment for Simulation\n gen (numpy.random.Generator) = Random Number Generator\n dead_time (float) = Dead Time\n xi (float) = Multiplicative Factor for deciding the Width of the Time Window for Single Photon Detection\n coupling_eff (float) = Coupling Efficiency \n dark_count_rate (float) = Rate of generation of Dark Counts\n jitter (float) = Standard Deviation in the Time Interval between the absorption of a Photon and the generation of an Output Electrical Signal from the Detector\n mean_response_time (float) = Average Time taken to generate an Output Electrical Signal after the absorption of a Photon\n det_eff (float) = Detection Efficiency [Probability of registering a Count if a Photon arrives at the Detector]\n next_detection_time (float) = Time at which the next detection event may take place\n next_dark_count_time (float) = Time at which the next dark count is registered \n detect_dark (bool) = True when a dark count is to be registered; False otherwise\n photon_count (int) = Number of photons which have been successfully detected\n func_call_tracker (int) = Tracks whether the detector's receive function has been called or not, i.e., whether the detector is active or not (A value of zero indicates that the detector is not active)\n \"\"\"\n\n def __init__(self,uID,env,dead_time,xi,coupling_eff,dark_count_rate,jitter,mean_response_time):\n \n \"\"\"\n Constructor for the Detector class\n \n Arguments:\n uID (str) = Unique ID\n env (simpy.Environment) = Simpy Environment for Simulation\n dead_time (float) = Dead Time\n xi (float) = Multiplicative Factor for deciding the Width of the Time Window for Single Photon Detection\n coupling_eff (float) = Coupling Efficiency \n dark_count_rate (float) = Rate of generation of Dark Counts\n jitter (float) = Standard Deviation in the Time Interval between the absorption of a Photon and the generation of an Output Electrical Signal from the Detector\n mean_response_time (float) = Average Time taken to generate an Output Electrical Signal after the absorption of a Photon\n \"\"\"\n \n Component.__init__(self,uID,env)\n self.dead_time = dead_time\n self.xi = xi\n self.coupling_eff = coupling_eff\n self.dark_count_rate = dark_count_rate\n self.jitter = jitter \n 
self.mean_response_time = mean_response_time\n self.det_eff = math.erf(xi/(2*math.sqrt(2)))\n self.next_detection_time = self.env.now\n self.next_dark_count_time = self.env.now\n self.detect_dark = False\n self.photon_count = 0 \n self.func_call_tracker = 0 \n\n def schedule_dark_count(self):\n \n \"\"\"\n Instance method for scheduling a dark count\n \n Details:\n The time intervals for dark counts follow an exponential distribution\n \"\"\"\n\n \n dark_count_time_interval = self.gen.exponential(1/self.dark_count_rate)\n self.next_dark_count_time = self.next_dark_count_time + dark_count_time_interval\n \n\n def receive(self,p,mbasis,follow_up_function = None):\n \n \"\"\"\n Instance method for receiving a photon for detection and detecting it if the appropriate conditions are satisfied\n \n Arguments:\n p (photon) = Photon for detection\n mbasis (numpy.array(list[list[complex]])) = Measurement Basis \n follow_up_function (function) = An optionally specified and defined function which maybe called post the detection of a photon \n \"\"\"\n \n # Determine the instant(s) of time at which dark count(s) is(are) to be registered \n if self.dark_count_rate > 0:\n if (self.func_call_tracker == 0) or (self.env.now > self.next_dark_count_time):\n self.schedule_dark_count()\n \n self.func_call_tracker += 1\n \n while (self.env.now > self.next_dark_count_time):\n self.schedule_dark_count()\n self.detect_dark = True\n self.photon_count += 1\n \n # Set detect_dark = True if the current simulation time is approx. equal to the time for registering the next dark count. Otherwise, set detect_dark = False\n if abs(self.env.now - self.next_dark_count_time) <= 1e-10:\n self.detect_dark = True\n else:\n self.detect_dark = False\n \n else:\n self.detect_dark = False\n \n rn1 = self.gen.random()\n # Check if the probability of the photon being coupled into the detector is less than the coupling efficiency and if that is the case, couple it into the detector\n if rn1 < self.coupling_eff:\n # Check if the photon arrives at the detector at/after the next detection time and if at that arrival time, no dark count is scheduled. 
If that is the case, the photon may be detected.\n if (self.env.now >= self.next_detection_time) and not(self.detect_dark):\n rn2 = self.gen.random()\n # Check if the probability of the photon triggering a detection count is less than the detection efficiency of the detector and if that is the case, register a photon count\n if rn2 < self.det_eff:\n self.photon_count += 1\n # Measure the quantum state of the photon\n self.detect(p,mbasis)\n # Call any follow up function (if specified as an argument) \n if follow_up_function is not None:\n follow_up_function(p,self.photon_count)\n # Set the next instant of time at which a photon can possibly be detected (Here, the detector's response function has been assumed to be Gaussian)\n self.next_detection_time = self.env.now + (self.mean_response_time + self.jitter*self.gen.standard_normal()) + self.dead_time \n \n elif self.detect_dark:\n # Register a dark count\n self.photon_count += 1\n \n \n def detect(self,p,mbasis):\n \n \"\"\"\n Instance method for measuring the quantum state of a photon upon its successful detection\n \n p (photon) = Successfully detected Photon\n mbasis (numpy.array(list[list[complex]])) = Measurement Basis\n \"\"\"\n\n if p.qs.coeffs.size == 2:\n p.qs.measure_single_qubit_basis(mbasis)\n else:\n p.qs.measure_multiple_qubit_basis_scheme1(mbasis)\n ","repo_name":"qns-github/qns","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":7399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"9905390561","text":"#!/bin/env python3\nimport click\nfrom org_email.convert import Converter\nfrom org_email.generator import Generator\nimport sys\n\n\n@click.command()\n@click.option(\"--input\", help=\"the input file\")\n@click.option(\"--output\", help=\"the output file\")\ndef main(input: str, output: str):\n doc: str = \"\"\n with open(input, \"r\") as f:\n lines = f.readlines()\n converter = Converter(lines)\n org_lines = converter.convert()\n generator = Generator(org_lines)\n doc = generator.generate()\n\n with open(output, \"w\") as f:\n f.write(doc)\n\n\nif __name__ == \"__main__\":\n main()\n print(sys.version)\n","repo_name":"wgjak47/org_gtd_email","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"27705811986","text":"import os\r\nimport json\r\nimport xlwt\r\n\r\ndef make_dict():\r\n file_path = '/raid/yq/UNITER/pretrain/vocab.txt'\r\n with open(file_path, 'r') as f:\r\n lines = f.readlines()\r\n w2i = {}\r\n for i in range(0, len(lines)):\r\n w2i[lines[i].split('\\n')[0]] = i\r\n sp_path = '/raid/yq/UNITER/pretrain/txt_db/vcr_val.db/special_tokens.json'\r\n with open(sp_path, 'r') as f:\r\n data = f.readlines()\r\n sp = json.loads(data[0])\r\n for i in range(len(lines),len(lines)+len(sp)):\r\n w2i[sp[i-len(lines)]] = i\r\n i2w = dict(zip(w2i.values(), w2i.keys()))\r\n return w2i,i2w\r\n\r\ndef add_sp():\r\n file_path = './pretrain/vocab.txt'\r\n sp_path='/raid/yq/UNITER/pretrain/txt_db/vcr_val.db/special_tokens.json'\r\n with open(sp_path,'r') as f:\r\n data=f.readlines()\r\n sp=json.loads(data[0])\r\n file_path = './pretrain/vocab.txt'\r\n with open(file_path, 'r') as f:\r\n lines = f.readlines()\r\n\r\ndef count_len():\r\n with open('/raid/yq/UNITER/vcr1annots/train.jsonl', 'r') as load_f:\r\n data = load_f.readlines()\r\n len_dic={}\r\n for line in data:\r\n load_dict = json.loads(line)\r\n 
for i in range(4):\r\n lenth=len(load_dict['rationale_choices'][i])\r\n if lenth not in len_dic.keys():\r\n len_dic[lenth] =1\r\n else:\r\n len_dic[lenth]+=1\r\n workbook = xlwt.Workbook()\r\n sheet=workbook.add_sheet('rs_len_count')\r\n i=0\r\n for key, value in len_dic.items():\r\n sheet.write(i,0,key)\r\n sheet.write(i, 1, value)\r\n i+=1\r\n workbook.save('rs_len_count.xls')\r\n\r\nif __name__ == '__main__':\r\n #count_len()\r\n #add_sp()\r\n make_dict()","repo_name":"HITsz-TMG/ExplainableVisualEntailment","sub_path":"Data/utils/make_dict.py","file_name":"make_dict.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"} +{"seq_id":"17471141929","text":"import torch, progressbar, sys, os\nimport torch.optim as optim\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom network import network\nfrom data import dataset\n\nmodel_protocol = {\"state\": network.model_state, \"image\": network.model_image}\ndataset_protocol = {\"state\": dataset.state_dataset, \"image\": dataset.image_dataset}\n\n\nclass Trainer:\n def __init__(self, config):\n\n ### somethings\n self.cfg = config\n self.dataset = dataset_protocol[config.data.protocol](config)\n self.dataloader = torch.utils.data.DataLoader(\n self.dataset,\n batch_size=config.data.batch_size,\n num_workers=config.framework.num_thread,\n )\n widgets = [\n \"Training phase [\",\n progressbar.SimpleProgress(),\n \"] [\",\n progressbar.Percentage(),\n \"] \",\n progressbar.Bar(marker=\"█\"),\n \" (\",\n progressbar.Timer(),\n \" \",\n progressbar.ETA(),\n \") \",\n ]\n self.bar = progressbar.ProgressBar(\n max_value=config.train.num_epoch, widgets=widgets, term_width=100\n )\n self.best_loss = sys.maxsize\n\n ### logging\n self.logger = SummaryWriter(\"{}/runs_{}\".format(config.base_dir, config.mode))\n\n ### model\n self.model = model_protocol[config.model.protocol](config)\n if config.framework.num_gpu > 0:\n self.model.to(device=0)\n\n self.optimizer = optim.Adam(self.model.parameters(), lr=config.train.lr)\n self.scheduler = optim.lr_scheduler.MultiStepLR(\n self.optimizer, milestones=config.train.lr_ms, gamma=0.1\n )\n\n def save_checkpoints(self, losses, avg_period=5):\n if not os.path.isdir(self.cfg.checkpoint_dir):\n os.makedirs(self.cfg.checkpoint_dir)\n\n sd = {\"parameters\": self.model.state_dict(), \"epoch\": len(losses)}\n checkpoint_dir = \"{}/{:05d}.pt\".format(self.cfg.checkpoint_dir, len(losses))\n torch.save(sd, checkpoint_dir)\n\n loss = np.mean(losses[-avg_period:])\n if loss < self.best_loss:\n checkpoint_dir = \"{}/best_model.pt\".format(self.cfg.checkpoint_dir)\n torch.save(sd, checkpoint_dir)\n self.best_loss = loss\n\n def load_checkpoints(self, config, model):\n sd = torch.load(\n \"{}/{}.pt\".format(config.checkpoint_dir, config.checkpoint_file),\n map_location=torch.device(\"cpu\"),\n )\n model.load_state_dict(sd[\"parameters\"])\n\n def run(self):\n raise NotImplementedError\n\n\nclass Trainer_policy(Trainer):\n def __init__(self, configs):\n super(Trainer_policy, self).__init__(configs)\n\n ### define loss functions\n self.criterion = nn.L1Loss()\n\n def run(self):\n losses = []\n for epoch in range(1, self.cfg.train.num_epoch + 1):\n for idx, (imgs, s, x, y) in enumerate(self.dataloader):\n if self.cfg.framework.num_gpu > 0:\n imgs, s, x, y = (\n imgs.to(device=0),\n s.to(device=0),\n x.to(device=0),\n y.to(device=0),\n )\n if isinstance(self.dataset, dataset.image_dataset):\n 
imgs = imgs.to(device=0)\n y_action = x[:, :, -1]\n x = x[:, :, :-1]\n\n # forward\n if isinstance(self.dataset, dataset.image_dataset):\n p, _ = self.model(imgs)\n else:\n p, _ = self.model(x)\n p = p.view_as(y_action)\n\n # loss\n l = self.criterion(p, y_action)\n\n # backprop\n self.model.zero_grad()\n l.backward()\n self.optimizer.step()\n\n # log\n self.logger.add_scalar(\n \"{}/loss\".format(self.cfg.mode),\n l.data,\n idx\n + (\n self.cfg.data.num_datapoints_per_epoch\n / self.cfg.data.batch_size\n )\n * epoch,\n )\n losses.append(l.detach().cpu().numpy())\n\n if epoch % self.cfg.train.save_iter == 0:\n self.save_checkpoints(losses)\n self.scheduler.step()\n self.bar.update(epoch)\n print(\"finish!\")\n\n\nclass Trainer_dynamic_model(Trainer):\n def __init__(self, configs):\n super(Trainer_dynamic_model, self).__init__(configs)\n\n ### define loss functions\n self.criterion = nn.L1Loss()\n\n def run(self):\n losses = []\n for epoch in range(1, self.cfg.train.num_epoch + 1):\n for idx, (imgs, s, x, y) in enumerate(self.dataloader):\n if self.cfg.framework.num_gpu > 0:\n s, x, y = s.to(device=0), x.to(device=0), y.to(device=0)\n if isinstance(self.dataset, dataset.image_dataset):\n imgs = imgs.to(device=0)\n y_action = x[:, :, -1]\n\n # forward\n if isinstance(self.dataset, dataset.image_dataset):\n p, _ = self.model(imgs, y_action)\n else:\n p, _ = self.model(x)\n p = p.view_as(y)\n\n # loss\n l = self.criterion(p, y)\n\n # backprop\n self.model.zero_grad()\n l.backward()\n self.optimizer.step()\n\n # log\n self.logger.add_scalar(\n \"{}/loss\".format(self.cfg.mode),\n l.data,\n idx\n + (\n self.cfg.data.num_datapoints_per_epoch\n / self.cfg.data.batch_size\n )\n * epoch,\n )\n losses.append(l.detach().cpu().numpy())\n if epoch % self.cfg.train.save_iter == 0:\n self.save_checkpoints(losses)\n self.scheduler.step()\n self.bar.update(epoch)\n print(\"finish!\")\n\n\nclass Trainer_model_predictive_policy_learning(Trainer):\n def __init__(self, configs):\n super(Trainer_model_predictive_policy_learning, self).__init__(configs)\n\n self.dm_model = model_protocol[configs.dm_model.model.protocol](\n configs.dm_model\n )\n self.load_checkpoints(configs.dm_model, self.dm_model)\n if configs.framework.num_gpu > 0:\n self.dm_model.to(device=0)\n\n self.criterion = nn.L1Loss()\n\n def augmented_state(self, state):\n \"\"\"\n :param state: cartpole state\n :param action: action applied to state\n :return: an augmented state for training GP dynamics\n \"\"\"\n dtheta, dx, theta, x = (\n state[:, :, 0],\n state[:, :, 1],\n state[:, :, 2],\n state[:, :, 3],\n )\n return torch.cat(\n [\n x.unsqueeze(2),\n dx.unsqueeze(2),\n dtheta.unsqueeze(2),\n torch.sin(theta).unsqueeze(2),\n torch.cos(theta).unsqueeze(2),\n ],\n dim=2,\n )\n\n def cov(self, m):\n mean = torch.mean(m, dim=0)\n m = m - mean\n cov = m.transpose(0, 1).mm(m)\n return cov\n\n def run(self):\n losses = []\n for epoch in range(1, self.cfg.train.num_epoch + 1):\n for idx, (imgs, s, x, y) in enumerate(self.dataloader):\n if self.cfg.framework.num_gpu > 0:\n s, x, y = s.to(device=0), x.to(device=0), y.to(device=0)\n\n y_action = x[:, :, -1]\n x = x[:, :, :-1]\n\n # forward\n if isinstance(self.dataset, dataset.image_dataset):\n p, _ = self.model(imgs)\n else:\n p, _ = self.model(x)\n pred_action = p.view_as(y_action)\n\n # loss\n loss_policy = self.criterion(pred_action, y_action)\n\n delta_states = []\n for n in range(self.cfg.dm_model.data.num_traj_samples):\n dm_state = torch.cat([x, p], dim=2)\n delta_state, _ = 
self.dm_model(dm_state)\n delta_states.append(delta_state.unsqueeze(0))\n\n delta_states = torch.cat(delta_states, dim=0)\n delta_states = delta_states.view(\n self.cfg.dm_model.data.num_traj_samples, -1\n )\n cov = self.cov(delta_states)\n loss_uncertainty = cov.trace() / (\n self.cfg.data.horizon\n * self.cfg.data.batch_size\n * self.cfg.dm_model.data.output_dim\n * self.cfg.dm_model.data.num_traj_samples\n )\n loss_policy = loss_policy / (self.cfg.data.horizon)\n l = loss_policy + self.cfg.train.LAMBDA * loss_uncertainty\n\n # backprop\n self.model.zero_grad()\n l.backward()\n self.optimizer.step()\n\n # log\n self.logger.add_scalar(\n \"{}/loss\".format(self.cfg.mode),\n l.data,\n idx\n + (\n self.cfg.data.num_datapoints_per_epoch\n / self.cfg.data.batch_size\n )\n * epoch,\n )\n self.logger.add_scalar(\n \"{}/loss_policy\".format(self.cfg.mode),\n loss_policy.data,\n idx\n + (\n self.cfg.data.num_datapoints_per_epoch\n / self.cfg.data.batch_size\n )\n * epoch,\n )\n self.logger.add_scalar(\n \"{}/loss_uncertainty\".format(self.cfg.mode),\n loss_uncertainty.data,\n idx\n + (\n self.cfg.data.num_datapoints_per_epoch\n / self.cfg.data.batch_size\n )\n * epoch,\n )\n losses.append(l.detach().cpu().numpy())\n if epoch % self.cfg.train.save_iter == 0:\n self.save_checkpoints(losses)\n self.scheduler.step()\n self.bar.update(epoch)\n print(\"finish!\")\n","repo_name":"KuoHaoZeng/cartpole_model_based_control","sub_path":"scripts/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":10685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"73581442665","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport sktime\r\nfrom sktime import datasets\r\nfrom sktime.utils.plotting.forecasting import plot_ys\r\nfrom sktime.forecasting.model_selection import temporal_train_test_split\r\nfrom sktime.forecasting.naive import NaiveForecaster\r\nfrom sktime.forecasting.arima import AutoARIMA \r\nfrom sktime.forecasting.exp_smoothing import ExponentialSmoothing\r\nfrom sktime.forecasting.theta import ThetaForecaster\r\nfrom sktime.forecasting.compose import EnsembleForecaster\r\nfrom sktime.performance_metrics.forecasting import smape_loss\r\nfrom sktime.performance_metrics.forecasting import mase_loss\r\n\r\ndef main():\r\n df = datasets.load_airline() #Univariate, monthly records from 1949 to 60 (144 records)\r\n y_train, y_test = temporal_train_test_split(df, test_size=36) #36 months for testing\r\n\r\n forecaster = NaiveForecaster(strategy='seasonal_last',sp=12) #model strategy: last, mean, seasonal_last. 
sp=12months (yearly season)\r\n forecaster.fit(y_train) #fit\r\n fh = np.arange(1,len(y_test)+1) #forecast horizon: array with the same lenght of y_test\r\n y_pred = forecaster.predict(fh) #pred\r\n\r\n \r\n forecaster2 = AutoARIMA(sp=12, suppress_warnings=True, trace=1)\r\n forecaster2.fit(y_train)\r\n y_pred2 = forecaster2.predict(fh)\r\n \r\n forecaster3 = ExponentialSmoothing(trend='add', damped='True', seasonal='multiplicative', sp=12)\r\n forecaster3.fit(y_train)\r\n y_pred3 = forecaster3.predict(fh)\r\n\r\n forecaster4 = ThetaForecaster(sp=12)\r\n forecaster4.fit(y_train)\r\n y_pred4 = forecaster4.predict(fh)\r\n\r\n forecaster5 = EnsembleForecaster([\r\n ('NaiveForecaster', NaiveForecaster(strategy='seasonal_last',sp=12)), \r\n ('AutoARIMA', AutoARIMA(sp=12, suppress_warnings=True)),\r\n ('Exp Smoothing', ExponentialSmoothing(trend='add', damped='True', seasonal='multiplicative', sp=12)),\r\n ('Theta', ThetaForecaster(sp=12))])\r\n forecaster5.fit(y_train)\r\n y_pred5 = forecaster5.predict(fh)\r\n \r\n\r\n plot_ys(y_train, y_test, y_pred, y_pred2, y_pred3, y_pred4, y_pred5, labels=['Train','Test','Naive Forecaster','AutoARIMA','Exp Smoothing','Theta', 'Ensemble'])\r\n plt.xlabel('Months')\r\n plt.ylabel('Number of flights')\r\n plt.title('Time series of the number of international flights in function of time')\r\n plt.show()\r\n\r\n print('SMAPE Error for NaiveForecaster is:', 100*round(smape_loss(y_test, y_pred),3), '%')\r\n print('SMAPE Error for AutoARIMA is:', 100*round(smape_loss(y_test, y_pred2),3), '%')\r\n print('SMAPE Error for Exp Smoothing is:', 100*round(smape_loss(y_test, y_pred3),3), '%')\r\n print('SMAPE Error for Theta is:', 100*round(smape_loss(y_test, y_pred4),3), '%')\r\n print('SMAPE Error for Ensemble is:', 100*round(smape_loss(y_test, y_pred5),3), '%')\r\n\r\nif __name__== '__main__':\r\n main()\r\n","repo_name":"eccornelsen/Blog","sub_path":"101_airline.py","file_name":"101_airline.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34977465555","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom typing import List, Tuple\nimport os, sys\nimport numpy as np\nimport pandas as pd\nimport sklearn\nimport matplotlib.pyplot as plt\n\nimport librosa\nimport librosa.display\nimport IPython.display\n\nget_ipython().run_line_magic('load_ext', 'autoreload')\nget_ipython().run_line_magic('autoreload', '2')\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[ ]:\n\n\nsys.path.append('..')\n\n\n# In[ ]:\n\n\nimport torch\nimport torch.backends.cudnn as cudnn\n\nfrom utils import get_model, get_loss_fn\nfrom common.hparams import create_hparams\n\nfrom common.device_funcs import to_device\nfrom utils import prepare_val_dataloader, save_batch_results\n\n\n# In[ ]:\n\n\nEXP_NAME='style_token_diff_loss'\nEXTRA_HP='''\nmodel: ModelST\nloss_fn: StyleDiffLoss\ncode_size: 128\nn_class: 10\ntoken_num: 5\n'''\nconfig = create_hparams(yaml_hparams_string=EXTRA_HP, allow_add=True)\n\n\n# In[ ]:\n\n\nimport matplotlib.font_manager as fm # to create font\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\n\nimport random\n\n# 随机字母:\ndef rndChar():\n return chr(66) # chr(random.randint(65, 90))\n\n# 随机颜色1:\ndef rndColor():\n return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))\n\n# 随机颜色2:\ndef rndColor2():\n return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))\n\n# 240 x 60:\nwidth = 60 * 
4\nheight = 60\nimage = Image.new('RGB', (width, height), (255, 255, 255))\n# 创建Font对象:\n# font = ImageFont.truetype('Arial.ttf', 36)\nfontsize = 36\nfont = ImageFont.truetype(fm.findfont(fm.FontProperties(family='DejaVu Sans')),fontsize)\n# 创建Draw对象:\ndraw = ImageDraw.Draw(image)\n# 填充每个像素:\nfor x in range(width):\n for y in range(height):\n draw.point((x, y), fill=rndColor())\n# 输出文字:\nfor t in range(4):\n draw.text((60 * t + 10, 10), rndChar(), font=font, fill=(0,0,0)) # \n# 模糊:\n# image = image.filter(ImageFilter.BLUR)\nimage.save('code.jpg', 'jpeg')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nimport matplotlib.font_manager as fm # to create font\nfrom PIL import Image,ImageFont,ImageDraw\nimpath = '../exp/style_token_mi_beta1em2_notused-2/result_48k_random/Batch_0_rec_image.png'\ntextpath = '../exp/style_token_mi_beta1em2_notused-2/result_48k_random/Batch_0_labels.npy'\n\n\n# In[ ]:\n\n\nim = Image.open(impath)\nim\n\n\n# In[ ]:\n\n\ntext = np.load(textpath)\ntext\n\n\n# In[ ]:\n\n\n# 创建Font对象:\n# font = ImageFont.truetype('Arial.ttf', 36)\nfontsize = 10\nfont = ImageFont.truetype(fm.findfont(fm.FontProperties(family='DejaVu Sans')),fontsize)\n\nshift = im.size[0]//8\ndraw = ImageDraw.Draw(im)\nfor i in range(8):\n for j in range(8):\n # print(text[i*8+j].item())\n draw.text(\n (i*shift+2, j*shift+1), \n str(text[i+j*8].item()), \n font=font, \n fill=(256,128,128)\n )\n\n\n# In[ ]:\n\n\nim\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\ndef test_dict(config={}):\n config['a'] = 1\n print(config)\n \ntest_dict()\n\n\n# In[ ]:\n\n\ntest_dict({'b':2})\ntest_dict()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"hellolzc/DisentangleMNIST","sub_path":"notebooks/RandomText.py","file_name":"RandomText.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"71625087783","text":"# GitHub: SFhectorj\n# Skills Used: if loops, logical operators, counter variables, modulo\n# Description: This program will loop through the numbers 1-100 and will replace numbers\n# divisible by 3 with \"Fizz\" and numbers divisible by 5 with \"Buzz\". When a\n# number is divisible by both 3 and 5, it will print \"FizzBuzz\".\n\nnum_count = 0\n# Create a variable to keep count of loops happening.\nfor i in range(0, 100):\n num_count += 1\n# Set a range for the program to loop through. 
In the loop a counter will track the\n# loops completed in the variable count.\n if num_count % 3 == 0 and num_count % 5 == 0:\n print(\"FizzBuzz\")\n# Begin with a logical operator to make sure the program finds numbers divisible by both\n# 3 and 5 first.\n elif num_count % 3 == 0:\n print(\"Fizz\")\n elif num_count % 5 == 0:\n print(\"Buzz\")\n# If the number is not divisible by both, then it searches for each individually\n else:\n print(num_count)\n \n","repo_name":"SFhectorj/FizzBuzz-Problem","sub_path":"FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"3845302080","text":"from tkinter import *\nimport requests\nimport time\nfrom PIL import Image, ImageTk as imtk\n\nroot = Tk()\nroot.geometry(\"500x300\")\nroot.title(\"Weather App\")\nroot.wm_iconbitmap(\"C:\\\\Users\\\\PC\\\\Desktop\\\\Coding Tutorial\\\\Project\\\\Weather App\\\\photo\\\\icon.ico\")\nroot.resizable(False, False)\n\n\ndef cel(far) : \n b = (far - 32) * 5/9\n return int(b)\n\ndef req() : \n global img1, img1lab\n query = place_name.get()\n timee = time.strftime(\"%y-%m-%d\")\n timeee = time.strftime(\"%H:%M:%S\")\n \n if query.lower() == \"delhi\" : \n query = \"New Delhi\"\n url = f\"https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{query}/20{timee}T{timeee}?key=H5976K629HDQX4BGJFTVZQRCN\"\n r = requests.get(url)\n \n if int(r.status_code) == 404 : \n today.set(\"Invalid Place\")\n\n temperatureF = r.json()[\"days\"][0][\"temp\"]\n maxtempF = r.json()[\"days\"][0][\"tempmax\"]\n mintempF = r.json()[\"days\"][0][\"tempmin\"]\n temperatureC = cel(int(temperatureF))\n mintempC = cel(int(mintempF))\n maxtempC = cel(int(maxtempF))\n precipitation = r.json()[\"days\"][0][\"precipprob\"]\n cloudcover = r.json()[\"days\"][0][\"cloudcover\"]\n \n today.set(f\"{temperatureF}°F / {temperatureC}°C \")\n maxval.set(f\"{maxtempF}°F / {maxtempC}°C \")\n minval.set(f\"{mintempF}°F / {mintempC}°C \")\n precpit.set(f\"{precipitation}%\")\n cloud.set(f\"{cloudcover}%\")\n \n \ndef reset() : \n place_name.set(\"\")\n today.set(\"Enter Place first\")\n maxval.set(\"\")\n minval.set(\"\")\n cloud.set(\"\")\n precpit.set(\"\")\n\n\nplace_name = StringVar()\ntoday = StringVar()\nmaxval = StringVar()\nminval = StringVar()\nprecpit = StringVar()\ncloud = StringVar()\ntoday.set(\"Enter Place first\")\n\n\nplace_ask = Label(root, text = \"Enter Place Name : \", font = \"arial 12 bold\")\nresult_lab = Label(root, text = \"Current Weather : \", font = \"arial 12 bold\")\nmaxx = Label(root, text=\"Maximum Temperature : \", font = \"arial 12 bold\")\nminn = Label(root, text=\"Minimum Temperature : \", font = \"arial 12 bold\")\nprec = Label(root, text=\"Precipation Chances : \", font = \"arial 12 bold\")\ncloudd = Label(root, text=\"Cloud Percentage : \", font = \"arial 12 bold\")\n\nmaxnum = Entry(root, textvariable = maxval, state=DISABLED, font=\"comicssansms 12\", justify=CENTER)\nminum = Entry(root, textvariable = minval, state=DISABLED, font=\"comicssansms 12\", justify=CENTER)\nplace = Entry(root, textvariable = place_name, font=\"comicssansms 12\", justify=CENTER)\nresult = Entry(root, textvariable = today, state=DISABLED, font=\"comicssansms 12\", justify=CENTER)\nprecipitate = Entry(root, textvariable=precpit, state=DISABLED, font=\"comicssansms 12\", justify=CENTER)\ncloudcov = Entry(root, textvariable=cloud, state=DISABLED, font=\"comicssansms 12\", 
justify=CENTER)\n\ncon = Button(root, text = \"Confirm\", command = req, justify=CENTER, width=8, bg=\"#ccffcc\", cursor = \"spider\", relief=RAISED)\nres = Button(root, text = \"Reset\", command = reset, justify=CENTER, width=7, bg=\"#ff6666\", cursor=\"pirate\", relief=RAISED)\n\n\nplace_ask.grid(row=0, column=0, pady=40, sticky=W)\nplace.grid(row=0, column=1, pady=40, sticky=W)\nresult_lab.grid(row=1, column=0, sticky=W)\nresult.grid(row=1, column=1, sticky=W)\nmaxx.grid(row=2, column=0, sticky=W)\nmaxnum.grid(row=2, column=1, sticky=W)\nminn.grid(row=3, column=0, sticky=W)\nminum.grid(row=3, column=1, sticky=W)\nprec.grid(row=4, column=0, sticky=W)\nprecipitate.grid(row=4, column=1, sticky=W)\ncloudd.grid(row=5, column=0, sticky=W)\ncloudcov.grid(row=5, column=1, sticky=W)\n\ncon.grid(row=6, column=0, pady = 20)\nres.grid(row=6, column=1, pady = 20)\n\n\n\nroot.mainloop()","repo_name":"Kshav005/Projects","sub_path":"Weather App/Weather_script.py","file_name":"Weather_script.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"24788791969","text":"import sys\nfrom collections import deque\n\ntornado_y = [0, 1, 0, -1] # 서 남 동 북\ntornado_x = [-1, 0, 1, 0]\n\ndir_y = [-1, 1, 0, 0] # 북 남 서 동\ndir_x = [0, 0, -1, 1]\n\n\nN, M = list(map(int, sys.stdin.readline().strip().split()))\n\nblizard_map = [[0] * N for _ in range(N)]\n\nmagic = []\nfor i in range(N):\n blizard_map[i] = list(map(int, sys.stdin.readline().strip().split()))\n\nfor i in range(M):\n magic.append(list(map(int, sys.stdin.readline().strip().split())))\n\n\n# 4번 대각선방향으로만 물 복사할때만 사용\ndef out_of_range(y, x):\n return y < 0 or x < 0 or x >= N or y >= N\n\n\ndef blizard_magic(d, s):\n r, c = N // 2, N // 2\n for distance in range(1, s+1):\n n_r, n_s = r + (dir_y[d]) * distance, c + (dir_x[d]) * distance\n blizard_map[n_r][n_s] = 0\n\n\ndef remove_zero(extended: deque):\n new_list = deque()\n\n\n while extended:\n num = extended.popleft()\n if num != 0:\n new_list.append(num)\n return new_list\n\ndef delete_countinuous(extended: deque):\n start = 0\n end = 0\n is_continuous = False\n\n score = 0\n while start <= end and end < len(extended) and extended[end] != 0:\n if extended[start] == extended[end]:\n end += 1\n if end - start >= 4:\n is_continuous = True\n else:\n if is_continuous:\n score += ((end - start) * extended[start])\n for i in range(start, end):\n extended[i] = 0\n\n start = end\n end += 1\n is_continuous = False\n\n if end - start >= 4:\n score += ((end - start) * extended[start])\n for i in range(start, end):\n extended[i] = 0\n\n return score\n\n\ndef to_group(extended: deque):\n result = deque()\n start = 0\n end = 1\n while start <= end and end < len(extended) and extended[end] != 0 and extended[start] != 0:\n\n if extended[start] == extended[end]: # grouping\n end += 1\n else:\n result.append(end - start)\n result.append(extended[start])\n start = end\n end += 1\n\n result.append(end - start)\n result.append(extended[start])\n return result\n\n\ndef extended_to_blizard_map(extended):\n r, c = N // 2, N // 2\n distance = 1\n move_count, direction = 0, 0\n result = [[0] * N for _ in range(N)]\n while True:\n move_count += 1\n for _ in range(distance):\n next_r, next_c = r + tornado_y[direction], c + tornado_x[direction]\n if (next_r, next_c) == (0, -1):\n return result\n if extended:\n result[next_r][next_c] = extended.popleft()\n else:\n result[next_r][next_c] = 0\n\n r, c = next_r, next_c\n direction = (direction 
+ 1) % 4\n if move_count == 2:\n distance += 1\n move_count = 0\n\n return result\ndef make_extended_list():\n r, c = N // 2, N // 2\n distance = 1\n move_count, count, direction = 0, 0, 0\n result = deque()\n while True:\n move_count += 1\n for _ in range(distance):\n next_r, next_c = r + tornado_y[direction], c + tornado_x[direction]\n\n if (next_r, next_c) == (0, -1):\n return result\n\n if blizard_map[next_r][next_c] != 0:\n result.append(blizard_map[next_r][next_c])\n\n r, c = next_r, next_c\n direction = (direction + 1) % 4\n if move_count == 2:\n distance += 1\n move_count = 0\n\n\ndef solve():\n global blizard_map\n result = 0\n for d, s in magic:\n blizard_magic(d - 1, s)\n extended_tornado = make_extended_list()\n while True:\n score = delete_countinuous(extended_tornado) # 붐\n if score == 0:\n break\n else:\n result += score\n\n extended_tornado = remove_zero(extended_tornado)\n if not extended_tornado:\n break\n grouped = to_group(extended_tornado)\n blizard_map = extended_to_blizard_map(grouped)\n print(result)\nsolve()\n\"\"\"\n7 1\n0 0 0 0 0 0 0\n3 2 1 3 2 3 0\n2 1 2 1 2 1 0\n2 1 1 0 2 1 1\n3 3 2 3 2 1 2\n3 3 3 1 3 3 2\n2 3 2 2 3 2 3\n2 2\n\"\"\"\n\n\"\"\"\n5 1\n0 0 0 0 0\n0 0 1 1 0\n0 1 0 1 0\n0 1 1 1 0\n0 0 0 0 0\n1 2\n\"\"\"","repo_name":"inhyeokJeon/AALGGO","sub_path":"Python/baekjoon/21611_wizard_shark_blizard.py","file_name":"21611_wizard_shark_blizard.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"16845665819","text":"import scrapy\nfrom scrapers.items import Product\n\n\nclass EKOSpider(scrapy.Spider):\n name = \"EKO\"\n allowed_domains = [\"earthkindoriginals.co.uk\"]\n currency_symbol_map = {'£': 'GBP', '$': 'USD', '€': 'EUR'}\n\n def __init__(self, *a, **kw):\n super(EKOSpider, self).__init__(*a, **kw)\n self.base_url = \"https://earthkindoriginals.co.uk\"\n self.urls = [\n {\"value\": \"https://earthkindoriginals.co.uk/product-category/daywear/\", \"gender\": [\"women\"]},\n {\"value\": \"https://earthkindoriginals.co.uk/product-category/casual-dresses/\", \"gender\": [\"women\"]},\n {\"value\": \"https://earthkindoriginals.co.uk/product-category/organic-loungewear/\", \"gender\": [\"women\"]},\n ]\n\n def start_requests(self):\n for url in self.urls:\n yield scrapy.Request(url['value'], callback=self.parse_category_page, meta={'gender': url['gender']})\n\n def parse_category_page(self, response):\n product_urls = response.xpath('//a[contains(@class, \"woocommerce-loop-product__link\")]/@href').extract()\n for url in product_urls:\n yield scrapy.Request(url, callback=self.parse_product_page, meta={'gender': response.meta['gender']})\n\n def parse_product_page(self, response):\n p = Product()\n p['name'] = self.get_name(response)\n p['description'] = self.get_description(response)\n p['image_url'] = self.get_image_urls(response)\n p['brand'] = self.name\n p['url'] = response.url\n p['gender'] = response.meta['gender']\n p['price'] = self.get_price(response)\n p['original_price'] = self.get_original_price(response)\n p['currency'] = self.get_currency(response)\n yield p\n\n def get_name(self, response):\n name = response.xpath('//meta[@property=\"og:title\"]/@content').extract_first()\n if 'bundle' in name.lower():\n return\n if name.endswith('by EKO Womenswear'):\n return name[:-18]\n return name\n\n def get_image_urls(self, response):\n urls = response.xpath('//div[@class=\"woocommerce-product-gallery__image\"]/a/@href').extract()\n return urls\n\n def 
get_description(self, response):\n description = response.xpath('//meta[@property=\"og:description\"]/@content').extract_first()\n return description\n\n def get_price(self, response):\n price = response.xpath('//p[contains(@class, \"price\")]/ins/span[contains(@class, \"amount\")]/text()').extract_first()\n if not price:\n price = response.xpath('//p[contains(@class, \"price\")]/span[@class=\"amount\"]/text()').extract_first()\n if price:\n price = price[1:]\n if not price:\n price = response.xpath('//span[contains(@class, \"woocommerce-Price-amount\")]/text()').extract_first()\n if not price:\n return\n return float(price.replace(',', '.'))\n\n def get_original_price(self, response):\n price = response.xpath('//p[contains(@class, \"price\")]/del/span[contains(@class, \"amount\")]/text()').extract_first()\n if not price:\n return\n return float(price.replace(',', '.'))\n\n def get_currency(self, response):\n currency = response.xpath('//span[@class=\"woocommerce-Price-currencySymbol\"]/text()').extract_first()\n currency = self.currency_symbol_map.get(currency)\n return currency\n","repo_name":"pbnsilva/cerebel","sub_path":"jobs/scrapers/scrapers/spiders/eko.py","file_name":"eko.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"43812227343","text":"#AHK returns hexadecimal values whereas I'd recorded all the constants as RGB values, so conversion necessary\nfrom ahk import AHK\n\nahk = AHK()\n\ndef toRGB(coord):\n x, y = coord[0], coord[1]\n ahk.pixel_get_color(x, y)\n h = ahk.pixel_get_color(x, y)\n h = h[2:]\n return tuple(int(h[i:i+2], 16) for i in (0, 2, 4))\n\n","repo_name":"emanuelhuffman/yuumi-bot","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"30998081379","text":"#!/usr/bin/python3\nimport sys\nimport telepot\nfrom telepot.delegate import per_chat_id, create_open, pave_event_space\nimport traceback\n\n\"\"\"\n$ python2.7 counter.py \nCounts number of messages a user has sent. 
Starts over if silent for 10 seconds.\nIllustrates the basic usage of `DelegateBot` and `ChatHandler`.\n\"\"\"\n\nclass MessageCounter(telepot.helper.ChatHandler):\n def __init__(self, *args, **kwargs):\n print(\"init\")\n super(MessageCounter, self).__init__(*args, **kwargs)\n self._count = 0\n\n def on_chat_message(self, msg):\n try:\n print(msg)\n self._count += 1\n self.sender.sendMessage(self._count)\n except Exception as e:\n traceback.print_exc()\n \n\n\nTOKEN = sys.argv[1] # get token from command-line\n\nbot = telepot.DelegatorBot(TOKEN, [\n pave_event_space()(\n per_chat_id(), create_open, MessageCounter, timeout=10\n ),\n])\nbot.message_loop(run_forever='Listening ...')","repo_name":"petr-kalinin/progrobot","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"36"} +{"seq_id":"42265942616","text":"from flask import Blueprint, jsonify, session, request\nfrom itsdangerous import json\nfrom app.models import User, Post, db\nfrom flask_login import current_user, login_required\n\nlike_routes = Blueprint('likes', __name__)\n\n\n@like_routes.route('/p//likes')\ndef getLike(postId):\n current_post = Post.query.get(postId)\n\n res = {}\n for user in current_post.like:\n res[user.id] = user.to_dict()\n\n return jsonify(res)\n\n\n@like_routes.route('/p//likes', methods=[\"POST\"])\n@login_required\ndef newLike(postId):\n target_post = Post.query.get(postId)\n\n target_post.like.append(current_user)\n db.session.commit()\n\n res = {}\n for user in target_post.like:\n res[user.id] = user.to_dict()\n\n return jsonify(res)\n\n\n@like_routes.route('/p//likes/', methods=[\"DELETE\"])\n@login_required\ndef deleteLike(postId, userId):\n target_post = Post.query.get(postId)\n target_user = User.query.get(userId)\n\n target_post.like.remove(target_user)\n db.session.commit()\n\n res = {}\n for user in target_post.like:\n res[user.id] = user.to_dict()\n\n return jsonify(res)\n","repo_name":"jinnie96/Instagram-Clone","sub_path":"app/api/likes_routes.py","file_name":"likes_routes.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"42173667481","text":"# -*- encoding : utf-8 -*-\n\"\"\"\n@File : __init__.py.py\n@Time :2021/5/11 10:35\n@Author :kuang congxian\n@Contact :kuangcx@inspur.com\n@Description : null\n\"\"\"\nimport time\nfrom datetime import datetime, timedelta\n\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nrun_time = None\n\n\ndef count():\n print(1)\n time.sleep(5)\n print(2)\n time.sleep(5)\n print(3)\n time.sleep(5)\n print(4)\n time.sleep(5)\n print(5)\n time.sleep(5)\n print(6)\n time.sleep(5)\n\n\nif __name__ == '__main__':\n sche = BackgroundScheduler()\n sche.add_job(count, \"interval\", seconds=5, id=\"count\")\n sche.start()\n\n time.sleep(6)\n print(\"休眠截止\")\n sche.pause_job(\"count\")\n time.sleep(60)\n # while True:\n #\n # now = datetime.now()\n # if run_time is not None and now - timedelta(seconds=9) < run_time:\n # job = sche.get_job(\"count\")\n # job.pause()\n # print(\"休眠开始\")\n # time.sleep(30)\n # print(\"休眠截止\")\n # 
job.resume()\n","repo_name":"kcx2366425574/python-mall","sub_path":"mall/test/apschedule_test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"35328403021","text":"from cards import *\nfrom adt_ import *\nfrom time import sleep\n\n# Made by Jordan Williams and Diandra Whittick\n\n\ndef showGreeting():\n print ('********************************************************')\n print ('* Created by Jordan Williams and Diandra Whittick *') \n print ('* *')\n print ('* WELCOME to Strip Me *')\n print ('* *')\n print ('* Rules of the game: Check the docs *')\n print ('* *')\n print ('* Game Play: *')\n print ('* - player 0: the computer *')\n print ('* - player 1: you *')\n print ('* - enter: play the top card and place on discard pile *')\n print ('* - q: quit i.e. stop playing the game * ')\n print ('* *')\n print ('* Enjoy! *')\n print ('********************************************************')\n\n\n# a dictionary showing the value of pay cards\npayCards = {'A':4,'K':3,'Q':2,'J':1} \n\n# test if it is a pay card or not\ndef isPayCard(card):\n return card[1] in payCards.keys()\n\n# gets the rate of the pay card or return if not\ndef getCardRate(card): \n if isPayCard(card):\n return payCards[card[1]]\n else:\n return 0\n\ndef fillHand(q,lst):\n for x in lst:\n enqueue(q,x)\n return q\n\nhand = new_Queue() \nhand1 = new_Queue()\n\ndef prepPlayers():\n made_new_deck = new_Deck()\n shuffled = shuffle(5,made_new_deck)\n dealed_deck = deal(shuffled,26,2) \n\n player0 = dealed_deck[0][0]\n player1 = dealed_deck[0][1]\n\n player0 = fillHand(hand,player0)\n player1 = fillHand(hand1,player1) \n\n return player0,player1\n\n# this function removes the top element of a players deck and adds it to the discard pile\ndef placeCard(A_players_Hand,stack): \n temp_front = queue_front(A_players_Hand) \n dequeue(A_players_Hand)\n push(stack,temp_front)\n\n return A_players_Hand,stack\n\n# \ndef playCard(curr_player,the_player_hand_of_cards,disc_pile): # the disc piles is the same stack used in the above program\n \n new = placeCard(the_player_hand_of_cards,disc_pile)\n a = the_player_hand_of_cards\n b = disc_pile\n print('')\n print ('Player %d ,' % curr_player)\n tup= new[1][1][0]\n print ('played the ',end='')\n \n print (tup[1]+getSuitIcon(tup[0]))\n print('\\n \\n')\n \n \n return a,b\n\n\ndef takePayment(playerHand,disc_pile):\n \n \n temp =new_Stack() \n \n test = len(disc_pile[1])\n for x in range( test):\n t = stack_Top(disc_pile)\n pop(disc_pile)\n push(temp,t)\n\n \n t = stack_Top(temp)\n enqueue(playerHand,t)\n pop(temp)\n \n for x in range(len(stack_Contents(temp))):\n t = stack_Top(temp)\n pop(temp)\n push(disc_pile,t)\n\n return playerHand,disc_pile \n\ndef strip_me():\n showGreeting()\n \n prep = prepPlayers() # creates a tuple of player 1 and 2 decks(shuffled)\n\n player1,player2 = prep # seperates the tuple to get player 1 and 2 \n\n s1 = new_Stack() #initialise of a stack for discard pile for player 1\n s2 = new_Stack() #initialise of a stack for discard pile for player 2\n \n \n # turn is used to manuplaulate who to play next\n # cont is varibale to store the choice of the player\n # rate1 represents the payment number for a card played by player1\n # rate 2 represents the payment number for a card played by player 2\n \n def game(turn,player1,player2,cont,rate1,rate2): \n if empty_queue(player2): # shows the winner if player 1 wins\n print ('player 1 
wins')\n return 'player 1 wins'\n\n elif empty_queue(player1): # shows the winner if player 2 wins\n print ('player 2 wins')\n return 'player 2 wins'\n\n\n elif cont == 'q': # exits the game\n print ('the game has ended')\n return 'the game has ended'\n\n elif cont.isalpha()== True: # if the user enters something he is not suppose to\n print ('incorrecnt input')\n return game(-1,player1,player2,'',rate1,rate2) \n\n # if player 1 plays a pay card \n elif rate1 > 0 and rate2 == 0 and turn == 1:\n sleep(1) \n b = playCard(2,player2,s2)\n card_for_2 = b[1][1][0]\n cardRate2 = getCardRate(card_for_2)\n\n # if it happens that player 2 plays a paycard while he is paying it will break out and go back to the recursion\n if isPayCard(card_for_2): \n return game(-1,player1,player2,'',0,cardRate2)\n\n here=len(s2[1]) \n len_of_s1 = len(s1[1]) \n\n rate1 -= 1 # this is to make sure the player 2 completes his payement\n\n # this is when the rate is fully paid it will give player 1 both player 2 discard pile and his own and print out who collected the discard pile\n if rate1 == 0:\n for x in range(here):\n takePayment(player1,s2)\n for x in range(len_of_s1):\n takePayment(player1,s1) \n \n print('\\nThe full payment was made. Player 1 claimed the discard pile') \n return game(-1,player1,player2,'',rate1,rate2) \n \n return game(1,player1,player2,'',rate1,cardRate2) \n\n # if player 2 plays a pay card\n elif rate1 == 0 and rate2 > 0 and turn == -1:\n sleep(1) \n a = playCard(1,player1,s1)\n card_for_1 = a[1][1][0]\n cardRate1 = getCardRate(card_for_1)\n\n # if it happens that player 1 plays a paycard while he is paying it will break out and go back to the recursion\n if isPayCard(card_for_1):\n return game(1,player1,player2,'',cardRate1,0)\n\n there = len(s1[1])\n len_of_s2 = len(s2[1]) \n rate2 -= 1\n # this is when the rate is fully paid it will give player 2 both player 1 discard pile and his own and print out who collected the discard pile\n if rate2 == 0:\n for x in range(there):\n takePayment(player2,s1)\n for x in range(len_of_s2):\n takePayment(player2,s2) \n \n print('\\n The full payment was made. 
Player 2 claimed the discard pile') \n return game(1,player1,player2,'',rate1,rate2) \n \n return game(-1,player1,player2,'',cardRate1,rate2)\n \n \n # this will run for player 2 if they were no pay cards played\n elif turn == 1 and rate1 ==0 and rate2 == 0: \n b = playCard(2,player2,s2)\n card_for_2 = b[1][1][0]\n cardRate2 = getCardRate(card_for_2)\n \n return game(-1,player1,player2,'',rate2,cardRate2)\n\n # this will run for player 1 if they were no pay cards played\n elif turn == -1 and rate1 ==0 and rate2 == 0: \n print('play(Enter); quit(q,then enter) ')\n cont = input ('Place your input: ')\n if cont == 'q':\n return game(-1,player1,player2,'q',rate1,rate2) \n \n a = playCard(1,player1,s1)\n card_for_1 = a[1][1][0]\n cardRate1 = getCardRate(card_for_1)\n \n return game(1,player1,player2,'',cardRate1,rate2)\n else:\n print ('something went wrong')\n return ''\n \n\n # -1 represents the turn which when started will make player 1 play\n # player 1 is the hand of player 1\n # player 2 is the hand of player 2\n # '' which is a empty string is for cont\n # 0 represents the rate for player 1 and 2\n \n game(-1,player1,player2,'',0,0) #This calls the game function to run\n\nstrip_me() \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n","repo_name":"arguement/Strip_me","sub_path":"Strip_me.py","file_name":"Strip_me.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"24635214654","text":"\ndef mostrarMenuInicial():\n\n print(\"-----------------------------------\")\n print(\"BIENVENIDO AL JUEGO DE LAS LUCES !!\")\n print(\"-----------------------------------\")\n print(\"\")\n print(\"A continuacion elija el su modo de juego ingresando 1 , 2 o 3 \")\n print(\"\")\n print(\"1) Jugar en Modo aletorio\")\n print(\"2) Jugar en Modo predeterminado\")\n print(\"3) Salir\")\n\n opcion = input(\"\")\n return opcion\n\n\ndef opcionDelMenuValida(opcionElegida):\n\n opcionesDelJuego = (\"1\", \"2\",\"3\")\n\n if opcionElegida in opcionesDelJuego:\n return True\n\n return False\n\n\ndef mostrarTablero(tablero):\n\n\n letras = (\"A\",\"B\",\"C\",\"D\",\"E\")\n filaLetras = \" \"\n for letra in letras:\n filaLetras = filaLetras + \" \" + letra\n print(filaLetras)\n\n\n for indice, elemento in enumerate (tablero):\n filaEntera = \"\"\n\n for subElemento in (elemento):\n filaEntera = filaEntera + \" \" + subElemento\n\n filaFinal = str(indice + 1) + \" |\" + filaEntera\n print(filaFinal)\n\n\n","repo_name":"bobbryan/CFP34TecnicasProgramacionBob","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"29754394846","text":"\"\"\"\nDescription:\n Object handles command line arguments, stores them together for reference\n\"\"\"\n\nimport glob, os, argparse\n\nclass Arguments:\n def __init__(self):\n # Handles command line options/arguments\n args = argparse.ArgumentParser(description=\"Program searches given folder for video files and trims a specified number of seconds from the start or the end of the video. The file is then saved in a specified location.\")\n\n # command line option for trim amount\n args.add_argument(\n \"-t\",\n \"--trim_amount\",\n type=int,\n help=\"Trim Amount (Seconds): Int -> The number of seconds you would like to trim. 
Default: 5 seconds\"\n )\n # command line option for trim direction\n args.add_argument(\n \"-d\",\n \"--trim_direction\",\n type=str,\n help=\"Trim Direction [s/e]: String -> Determines if the trim will be applied to the [s]tart of the video or the [e]nd of the video. Default: Start of the video\"\n )\n\n # Parse the command-line arguments\n self.args = args.parse_args()\n\n # Time user wants to trim off videos\n self.trim_amount = 5\n # False -> Trim start of video\n # True -> Trim end of video\n self.trim_direction = False\n # Video format to search for to trim\n self.file_type = \".mp4\"\n # Identifies the directory containing files user wants to trim\n self.directory = \"./\"\n # Collects all files inside of the given directory to trim\n self.input_files = glob.glob(f\"{self.directory}/*\" + self.file_type)\n # Identifies the location the processed files are saved to\n self.output_dir = \"./output\"\n\n self.assign_args()\n\n\n # Method overwrites default variables with values proveded by users\n def assign_args(self):\n # assigns trim_amount value\n if self.args.trim_amount:\n self.trim_amount = self.args.trim_amount\n\n # assigns trim_direction value\n if self.args.trim_direction:\n if \"e\" in self.args.trim_direction:\n self.trim_direction = True\n\n\n # Method creates output folder to store processed .mp4 files\n def set_output_directory(self):\n # Confirms if the specified output directory already exists\n check = os.path.exists(self.output_dir)\n # If the output directory doesn't already exist, create the output directory\n if not check:\n os.makedirs(self.output_dir)\n","repo_name":"ZTStew/Python-Batch-Trimmer","sub_path":"arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"36516699703","text":"from sqlalchemy.dialects.mysql import INTEGER\n\nfrom sbs.database_utility import db\nfrom sbs.models.BaseModel import BaseModel\n\n\nclass EndogenousJunctionSet(BaseModel):\n __tablename__ = 'ENDOGENOUS_JUNCTION_SET'\n id = db.Column('ID', INTEGER(unsigned=True), primary_key=True, unique=True)\n crop_id = db.Column('CROP_ID', INTEGER(unsigned=True),\n db.ForeignKey('CROP.ID'))\n name = db.Column('NAME', db.String(500), nullable=True)\n\n endogenous_junctions = db.relationship('EndogenousJunction',\n backref='endogenous_junction_set',\n lazy='dynamic')\n\n def __init__(self, crop_id, name):\n self.crop_id = crop_id\n self.name = name\n\n def __repr__(self):\n return (\n \"\").format(\n self.id,\n self.name,\n self.crop_id\n )\n\n def as_dict(self):\n return {\n \"id\": self.id,\n \"crop_id\": self.crop_id,\n \"name\": self.name\n }\n","repo_name":"rohitbs113/DupontSBS","sub_path":"sbs/models/EndogenousJunctionSet.py","file_name":"EndogenousJunctionSet.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"27165358710","text":"import logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport os\n\nfrom data_loading import BasicDataset\nfrom loss import dice_loss, LandmarksMSELoss\nfrom model import multitask_network\n\n\ndef train_net(net, device):\n # Set your data path\n train_data_path = ['', '', '']\n val_data_path = ['', '', '']\n train_dataset = BasicDataset(train_data_path, mode='train')\n val_dataset = 
BasicDataset(val_data_path, mode='val')\n\n loader_args = dict(batch_size=8, num_workers=12, pin_memory=True)\n train_loader = DataLoader(train_dataset, shuffle=True, **loader_args)\n val_loader = DataLoader(val_dataset, shuffle=True, drop_last=True, **loader_args)\n\n optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)\n criterion = LandmarksMSELoss(False)\n criterion_seg = nn.CrossEntropyLoss()\n\n # 5. Begin training\n for epoch in tqdm(range(150), position=0, desc=\"Epoch\", unit='img', leave=True, colour='green', ncols=100):\n net.train()\n for batch in tqdm(train_loader, position=1, desc=\"Batch\", unit='img', leave=False, colour='red', ncols=100):\n images = batch['image']\n true_masks = batch['mask']\n true_heatmap = batch['heatmap']\n landmarks_vis = batch['landmarks_vis']\n\n images = images.to(device=device, dtype=torch.float32)\n true_masks = true_masks.to(device=device, dtype=torch.long)\n true_heatmap = true_heatmap.to(device=device, non_blocking=True)\n landmarks_vis = landmarks_vis.to(device=device, non_blocking=True)\n\n heatmap_pred, masks_pred = net(images)\n lm_loss_temp = criterion(heatmap_pred, true_heatmap, landmarks_vis) \\\n + dice_loss(heatmap_pred.float(), true_heatmap, multiclass=True)\n seg_loss_temp = dice_loss(F.softmax(masks_pred, dim=1).float(),\n F.one_hot(true_masks, 6).permute(0, 3, 1, 2).float(),\n multiclass=True) \\\n + criterion_seg(masks_pred, true_masks)\n\n loss = lm_loss_temp + seg_loss_temp\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Evaluation round\n net.eval()\n num_val_batches = len(val_loader)\n val_seg_loss = 0\n val_lm_loss = 0\n\n for batch in val_loader:\n val_image = batch['image']\n val_true_masks = batch['mask']\n val_true_heatmap = batch['heatmap']\n val_landmarks_vis = batch['landmarks_vis']\n\n val_image = val_image.to(device=device, dtype=torch.float32)\n val_true_masks = val_true_masks.to(device=device, dtype=torch.long)\n val_true_heatmap = val_true_heatmap.to(device=device, non_blocking=True)\n val_landmarks_vis = val_landmarks_vis.to(device=device, non_blocking=True)\n\n with torch.no_grad():\n val_landmarks_pred, val_masks_pred = net(val_image)\n val_lm_loss += criterion(val_landmarks_pred, val_true_heatmap, val_landmarks_vis) \\\n + dice_loss(val_landmarks_pred.float(), val_true_heatmap, multiclass=True)\n val_seg_loss += dice_loss(F.softmax(val_masks_pred, dim=1).float(),\n F.one_hot(val_true_masks, 6).permute(0, 3, 1, 2).float(),\n multiclass=True) \\\n + criterion_seg(val_masks_pred, val_true_masks)\n\n net.train()\n\n val_loss = (val_seg_loss + val_lm_loss) / num_val_batches\n\n scheduler.step()\n\n\nif __name__ == '__main__':\n\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n\n os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n logging.info(f'Using device {device}')\n\n net = multitask_network(n_channels=3, n_seg=6, n_landmark=2)\n\n net = nn.DataParallel(net).to(device=device)\n\n train_net(net=net, device=device)\n\n\n","repo_name":"kaiwenli325/AS-OCT","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"71105977705","text":"#%matplotlib inline\n'''\nMatplotlib 是一个 Python 的 2D绘图库,它以各种硬拷贝格式和跨平台的交互式环境生成出版质量级别的图形 [1] 。\n通过 Matplotlib,开发者可以仅需要几行代码,便可以生成绘图,直方图,功率谱,条形图,错误图,散点图等。 
\n'''\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\msyh.ttc\", size=10)\n\n\ndef runplt():\n plt.figure()\n plt.title('匹萨价格与直径数据',fontproperties=font)\n plt.xlabel('直径(英寸)',fontproperties=font)\n plt.ylabel('价格(美元)',fontproperties=font)\n plt.axis([0, 25, 0, 25])\n plt.grid(True)\n return plt\n\nplt = runplt()\nX = [[6], [8], [10], [14], [18]]\ny = [[7], [9], [13], [17.5], [18]]\nplt.plot(X, y, 'k.')\nplt.show()\n\n\nfrom sklearn.linear_model import LinearRegression\n# 创建并拟合模型\nmodel = LinearRegression()\nmodel.fit(X, y)\n#print('预测一张12英寸匹萨价格:$%.2f' % model.predict([12])[0]\nimport numpy as np \ntemp = [12] #an instance \ntemp = np.array(temp).reshape((1, -1)) \nprint('预测一张12英寸匹萨价格:$%.2f' % model.predict(temp))\n\n\n'''\n上述代码中sklearn.linear_model.LinearRegression类是一个估计器(estimator)。\n估计器依据观测值来预测结果。在scikit-learn里面,所有的估计器都带有fit()和predict()方法。\nfit()用来分析模型参数,predict()是通过fit()算出的模型参数构成的模型,对解释变量进行预测获得的值。\n因为所有的估计器都有这两种方法,所有scikit-learn很容易实验不同的模型。\nLinearRegression类的fit()方法学习下面的一元线性回归模型:\ny = α + βx\ny表示响应变量的预测值,本例指匹萨价格预测值,x是解释变量,本例指匹萨直径。截距α和相关系数β是线性回归模型最关心的事情。\n'''\n\n#一元线性回归拟合模型的参数估计常用方法是普通最小二乘法(ordinary least squares )或线性最小 二乘法(linear least squares)。","repo_name":"yanfeng12/scikit-learn","sub_path":"线性回归/一元线性回归/pizzaPrice.py","file_name":"pizzaPrice.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"41035143875","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Request\nfrom scrapy_splash import SplashRequest\n\nlua_script = '''\nfunction main(splash, args)\n splash:set_viewport_size(1028, 10000)\n splash.images_enabled = false\n splash:go(args.url)\n splash:wait(10)\n splash.scroll_position = {0,5000}\n-- splash:runjs(\"window.scrollTo(0, document.body.scrollHeight)\")\n-- splash:runjs(\"document.getElementsByClassName('page')[0].scrollIntoView(true)\")\n splash:wait(10)\n return splash:html()\nend\n'''\n\n\nclass JdSpider(scrapy.Spider):\n name = 'jd'\n allowed_domains = ['jd.com']\n start_urls = ['https://search.jd.com/Search?keyword=python']\n\n def parse(self, response):\n total_page = int(response.css('.fp-text i::text').extract_first())\n for i in range(10):\n url = '%s&page=%s' % (self.start_urls[0], 2 * i + 1)\n print('下载页数', i + 1)\n yield SplashRequest(url, self.parse_books, endpoint='execute',\n args={'lua_source': lua_script},\n cache_args=['lua_source'])\n\n def parse_books(self, response):\n sels = response.css('.clearfix .gl-item')\n print(response.url)\n print('本页书数量', len(sels))\n for sel in sels:\n yield{\n 'name': sel.css('.p-name').xpath(\n 'string(.//em)').extract_first(),\n 'price': sel.css('.p-price i::text').extract_first(),\n }\n","repo_name":"yugiyx/scrapy_spider","sub_path":"02_splash_jd/splash_jd/spiders/jd.py","file_name":"jd.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"13348345780","text":"from tests.third_party.cupy import testing as cupy_testing\nimport dpnp\nimport numpy\n\nfrom tests import testing\n\n\nnumpy.testing.assert_allclose = testing.assert_allclose\nnumpy.testing.assert_array_equal = testing.assert_array_equal\nnumpy.testing.assert_equal = testing.assert_equal\n\n# patch for shaped_arange func to exclude calls of astype and reshape\n# necessary because new data container does not support these functions yet\n\norig_shaped_arange = 
cupy_testing.shaped_arange\norig_shaped_reverse_arange = cupy_testing.shaped_reverse_arange\n\n\ndef _shaped_arange(shape, xp=dpnp, dtype=dpnp.float64, order='C'):\n res = xp.array(orig_shaped_arange(shape, xp=numpy, dtype=dtype, order=order), dtype=dtype)\n return res\n\n\ndef _shaped_reverse_arange(shape, xp=dpnp, dtype=dpnp.float32):\n res = xp.array(orig_shaped_reverse_arange(shape, xp=numpy, dtype=dtype), dtype=dtype)\n return res\n\n\ncupy_testing.shaped_arange = _shaped_arange\ncupy_testing.shaped_reverse_arange = _shaped_reverse_arange\n","repo_name":"LukichevaPolina/dpnp","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"} +{"seq_id":"35778087571","text":"from osv import osv,fields\n\nclass hr_department(osv.osv):\n _inherit = \"hr.department\"\n _columns = {\n 'department_code' : fields.char('Department Code',size=16),\n 'placement' : fields.selection([('bsp','Kantor Pusat'),\n ('bob','BOB')], 'Placement'),\n 'bagian' : fields.selection([('core','Core'),\n ('subcore','Sub Core'),\n ('support','Support')])\n }\nhr_department()\n\nclass hr_job(osv.osv):\n _inherit = \"hr.job\"\n _columns = {\n 'placement' : fields.selection([('bsp','Kantor Pusat'),\n ('bob','BOB')], 'Placement'),\n 'section_id' : fields.many2one('hr.section','Section'),\n }\nhr_job()\n","repo_name":"aryaadiputra/addons60_ptgbu_2013","sub_path":"ad_hr_bsp/hr_department.py","file_name":"hr_department.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"14047568235","text":"from setuptools import setup, find_packages\nimport sys, os\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nNEWS = open(os.path.join(here, 'NEWS.txt')).read()\n\n\nversion = '0.1'\n\ninstall_requires = [\n # List your project dependencies here.\n # For more details, see:\n # http://packages.python.org/distribute/setuptools.html#declaring-dependencies\n]\n\n\nsetup(name='pyfoxsi',\n version=version,\n description=\"Software for the Focusing Optics X-ray Solar Imager Small Explorer Mission\",\n long_description=README + '\\n\\n' + NEWS,\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n ],\n keywords='x-ray solar satellite science',\n author='Steven D. 
Christe',\n author_email='steven.christe@nasa.gov',\n url='',\n license='',\n packages=find_packages('src'),\n package_dir = {'': 'src'},include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n entry_points={\n 'console_scripts':\n ['pyfoxsi=pyfoxsi:main']\n }\n)\n","repo_name":"foxsi/foxsi-smex","sub_path":"pyfoxsi/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"33739368914","text":"from mongoFuncs import connect2Mongo, disconnectMongo, getShotHashCodeBasedOnId, getAllShotsHashCodeExcludingQueryId\nfrom utils import getResultsNum \n\n\ndef searchByImageQuery(queryId, modality, collection, resultsNum):\n # Mongo information\n print('Mongo info')\n mongoInfo = {}\n mongoInfo['username'] = 'root'\n mongoInfo['password'] = '123'\n mongoInfo['host'] = 'mongodb'\n mongoInfo['port'] = 27017\n mongoInfo['authMechanism'] = 'SCRAM-SHA-1'\n mongoInfo['databaseName'] = 'callisto'\n mongoInfo['authDatabase'] = 'admin'\n mongoInfo['collectionName'] = collection # AU-AIR or CALLISTO\n\n # Connect to Mongo\n print('Connect mongodb')\n client, callistoDB = connect2Mongo(mongoInfo)\n print(client)\n print(callistoDB)\n\n # Get binary vectors\n print('Get hash codes')\n queryCode = getShotHashCodeBasedOnId(callistoDB[collection], queryId, modality)\n print('getShots')\n shotsId, databaseCodes = getAllShotsHashCodeExcludingQueryId(callistoDB[collection], modality, queryId)\n\n # Query to Mongo. Find resultsNum relevant to query documents.\n results = getResultsNum(shotsId, queryCode, databaseCodes, resultsNum)\n\n # Disconnect Mongo\n print('Disconnect mongodb')\n disconnectMongo(client)\n\n return results\n","repo_name":"Anastasios-Gkagkas/CALLISTO-KR9-experiment","sub_path":"code/searchByImageQuery.py","file_name":"searchByImageQuery.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34247125180","text":"from flask import Flask, jsonify, request, Response\nfrom bson import json_util, ObjectId\nfrom pymongo import MongoClient\nimport json\n\n# setup mongodb\nclient = MongoClient(\"localhost\", 27017)\ndb = client.test\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef welcome_screen():\n return '

Welcome

'\n\n\n@app.route('/save', methods=['POST'])\ndef save_data():\n user_data = db.users.insert_one({'name': 'Ganesh', 'surname': 'Avhad'})\n return Response('saved', status=201, mimetype='application/json')\n\n\n@app.route('/get', methods=['GET'])\ndef get_data():\n user_data = db.users.find_one({})\n return Response(json_util.dumps(user_data), status=200, mimetype='application/json')\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"ganyavhad/python_rest","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"9588164247","text":"from plugin import plugin, require, alias\nfrom googletrans import Translator\nfrom googletrans.constants import LANGCODES, LANGUAGES, SPECIAL_CASES\nimport nltk\n\n\n@require(network=True)\n@alias('trans')\n@plugin('translate')\ndef translate(jarvis, s):\n \"\"\"\n Translates from one language to another and allows input to be somewhat natural.\n\n Usage:\n\n 'Jarvis, please translate, from English to French, Hello, how are you?'\n\n OR\n\n 'Jarvis, could you translate Hello, how are you? from English to French for me please?'\n \"\"\"\n\n# Check whether user has entered translate by itself or with extra parameters\n if s != \"\":\n words = nltk.word_tokenize(s.lower())\n currentPos = 0\n finalPos = 0\n srcs = None\n des = None\n\n# Search input string for source language\n for i in range(len(words)):\n word = words[i]\n currentPos = i\n\n# Do not include lang codes in the tests when using full sentence command since words can conflict with them (Eg. hi -> Hindi).\n# This code looks like it includes them, but since the googletrans API is implemented such that the languages are stored in\n# dictionaries, when the \"in\" operator is used, it only checks the keys of the dictionary, not the values. Therefore, the\n# LANG_CODES dictionary must be used to check full language names instead of the LANGUAGES dictionary. For more clarification,\n# have a look at the code on the googletrans github.\n if (word in LANGCODES):\n srcs = word\n break\n\n# Search input string for destination language starting from the word after the source language\n for i in range(currentPos + 1, len(words)):\n word = words[i]\n finalPos = i\n# Do not include LANGCODES in the tests when using full sentence command since words can conflict with them (Eg. hi -> Hindi)\n if (word in LANGCODES):\n des = word\n break\n\n# If both languages found, work out where the text to be translated is in the sentence and perform the translation\n if (des and srcs):\n if(currentPos < 2):\n tex = \" \".join(words[finalPos + 1:])\n else:\n tex = \" \".join(words[:currentPos - 1]) # Discards extra words at the end of the sentence\n performTranslation(srcs, des, tex)\n# Otherwise perform the default method for translation\n else:\n jarvis.say(\"\\nSorry, I couldn't understand your translation request. 
Please enter the request in steps.\")\n default(jarvis)\n else:\n default(jarvis)\n\n\ndef default(jarvis):\n \"\"\"\n Default function that is called when translate is entered alone or\n when input is not understood when translate is entered with additional parameters\n \"\"\"\n# Get source language\n jarvis.say('\\nEnter source language ')\n srcs = jarvis.input().lower().strip()\n# Check source language\n while (\n srcs not in LANGUAGES) and (\n srcs not in SPECIAL_CASES) and (\n srcs not in LANGCODES):\n if srcs in SPECIAL_CASES:\n srcs = SPECIAL_CASES[srcs]\n elif srcs in LANGCODES:\n srcs = LANGCODES[srcs]\n else:\n jarvis.say(\"\\nInvalid source language\\nEnter again\")\n srcs = jarvis.input().lower()\n# Get destination language\n jarvis.say('\\nEnter destination language ')\n des = jarvis.input().lower().strip()\n# Check destination language\n while (\n des not in LANGUAGES) and (\n des not in SPECIAL_CASES) and (\n des not in LANGCODES):\n if des in SPECIAL_CASES:\n des = SPECIAL_CASES[des]\n elif des in LANGCODES:\n des = LANGCODES[des]\n else:\n jarvis.say(\"\\nInvalid destination language\\nEnter again\")\n des = jarvis.input().lower()\n\n jarvis.say('\\nEnter text ')\n tex = jarvis.input()\n\n performTranslation(srcs, des, tex)\n\n\ndef performTranslation(srcs, des, tex):\n \"\"\"\n Function to actually perform the translation of text and print the result\n \"\"\"\n translator = Translator()\n result = translator.translate(tex, dest=des, src=srcs)\n result = u\"\"\"\n[{src}] {original}\n ->\n[{dest}] {text}\n[pron.] {pronunciation}\n \"\"\".strip().format(src=result.src, dest=result.dest, original=result.origin,\n text=result.text, pronunciation=result.pronunciation)\n print(\"\\n\" + result)\n","repo_name":"sukeesh/Jarvis","sub_path":"jarviscli/plugins/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":2765,"dataset":"github-code","pt":"36"} +{"seq_id":"11871406651","text":"# Lists are just like PHP's indexed arrays\nbreakfast = [\"eggs\", \"spam\", \"bacon\", \"pancakes\", \"oatmeal\", \"yogurt\", \"toast\"]\n\n# You can get elements from a Python list just like you did in PHP\nbreakfast[0] # eggs\nbreakfast[2] # bacon\n\n# But lists in Python are *much* more featureful. 
For example, using our\n# breakfast list above:\n\nbreakfast[-1] # toast\nbreakfast[2:5] # [\"bacon\", \"pancakes\", \"oatmeal\"]\nbreakfast[-2:] # [\"yogurt\", \"toast\"]\nbreakfast.append(\"juice\") # Adds \"juice\" to the breakfast list\nbreakfast.pop() # Returns \"juice\", modifying breakfast\n\n# You can even concatenate lists\n\nunited_kingdom = [\"England\", \"Wales\"] + [\"Scotland\", \"Northern Ireland\"]\n# Returns: [\"England\", \"Wales\", \"Scotland\", \"Northern Ireland\"]\n","repo_name":"matks/Python-Django-for-PHP-Nerds","sub_path":"examples/basic/2.lists-tuples-and-arrays/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34904472475","text":"# import math\r\n# import numpy as np\r\nimport time\r\n# import math\r\nimport sys\r\n# import os\r\n# from PyQt5 import QtWidgets, QtGui, QtCore\r\nfrom PyQt5.QtWidgets import QApplication\r\n# from PyQt5.QtWidgets import QTableWidgetItem\r\nfrom PyQt5.QtGui import QPalette, QColor\r\n# from PyQt5.QtCore import Qt\r\n# from xml.etree import ElementTree\r\n# from nest_calculation import CalculateThread\r\n# from fractions import Fraction\r\nfrom window import Window\r\n\r\n# TODO add results outputs such as scrap %\r\n# TODO number or letter each pattern\r\n# TODO add scale to patterns display (possibly behind them), or add person or car to side for size reference\r\n# TODO add explain on hover\r\n# TODO add help documentation to explain nesting algorithms and how to use the application\r\n# TODO add more colors and/or patterns for part display\r\n# TODO allow user to click on a nest pattern to show it in more detail\r\n# TODO implement a xml template for default settings that opens on launch\r\n\r\n# Start timer\r\nstart_time = time.time()\r\n\r\n# Create the application\r\napp = QApplication(sys.argv)\r\n\r\n# Force the style to be the same on all OSs:\r\napp.setStyle(\"Fusion\")\r\n\r\n# Use a palette to switch to dark colors:\r\npalette = QPalette()\r\npalette.setColor(QPalette.Window, QColor(53, 53, 53))\r\npalette.setColor(QPalette.WindowText, QColor(255, 255, 255))\r\npalette.setColor(QPalette.Base, QColor(25, 25, 25))\r\npalette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\r\npalette.setColor(QPalette.ToolTipBase, QColor(255, 255, 255))\r\npalette.setColor(QPalette.ToolTipText, QColor(255, 255, 255))\r\npalette.setColor(QPalette.Text, QColor(255, 255, 255))\r\npalette.setColor(QPalette.Button, QColor(53, 53, 53))\r\npalette.setColor(QPalette.ButtonText, QColor(255, 255, 255))\r\npalette.setColor(QPalette.BrightText, QColor(255, 0, 0))\r\npalette.setColor(QPalette.Link, QColor(42, 130, 218))\r\npalette.setColor(QPalette.Highlight, QColor(42, 130, 218))\r\npalette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))\r\n\r\napp.setPalette(palette)\r\n\r\n\r\ndef main():\r\n # Create window\r\n window = Window()\r\n window.app = app\r\n\r\n # Show window\r\n window.show()\r\n window.t1.update_table_width(window.t1)\r\n\r\n # TODO change this to a default file that is always opened on start up; put file in lengthnestpro path\r\n # Allow user to select xml file to open\r\n # file_path = 0\r\n # if file_path == 0:\r\n # file_path = window.default_path_string + \"/container test.xml\"\r\n # if file_path:\r\n # tree = ElementTree.parse(file_path)\r\n # nesting_job = tree.getroot()\r\n #\r\n # # Create blank list with length equal to the number of parts in the nesting job\r\n # blank_list = []\r\n # 
required_parts = nesting_job.find('requiredParts')\r\n # for i in range(len(required_parts)):\r\n # blank_list.append(\"\")\r\n #\r\n # # Copy blank list to initialize each attribute\r\n # name = blank_list.copy()\r\n # qty = blank_list.copy()\r\n # length = blank_list.copy()\r\n #\r\n # # Clear contents from table\r\n # window.t1.clearContents()\r\n #\r\n # # Extract info for each part and add it to \"required parts\" table\r\n # i = 0\r\n # for part in required_parts:\r\n # name[i] = QTableWidgetItem(part.find('name').text)\r\n # qty[i] = QTableWidgetItem(part.find('qty').text)\r\n # length[i] = QTableWidgetItem(part.find('length').text)\r\n # name[i].setTextAlignment(Qt.AlignCenter)\r\n # qty[i].setTextAlignment(Qt.AlignCenter)\r\n # length[i].setTextAlignment(Qt.AlignCenter)\r\n # window.t1.setItem(i, 0, qty[i])\r\n # window.t1.setItem(i, 1, length[i])\r\n # window.t1.setItem(i, 2, name[i])\r\n # i += 1\r\n #\r\n # # Extract nesting settings and add them to \"nesting settings\" table\r\n # nesting_settings = nesting_job.find('nestingSettings')\r\n #\r\n # if hasattr(nesting_settings.find('stockLength'), 'text'):\r\n # stock_length = QTableWidgetItem(nesting_settings.find('stockLength').text)\r\n # stock_length.setTextAlignment(Qt.AlignCenter)\r\n # window.t2.setItem(0, 0, stock_length)\r\n #\r\n # if hasattr(nesting_settings.find('leftWaste'), 'text'):\r\n # left_waste = QTableWidgetItem(nesting_settings.find('leftWaste').text)\r\n # left_waste.setTextAlignment(Qt.AlignCenter)\r\n # window.t2.setItem(1, 0, left_waste)\r\n #\r\n # if hasattr(nesting_settings.find('rightWaste'), 'text'):\r\n # right_waste = QTableWidgetItem(nesting_settings.find('rightWaste').text)\r\n # right_waste.setTextAlignment(Qt.AlignCenter)\r\n # window.t2.setItem(2, 0, right_waste)\r\n #\r\n # if hasattr(nesting_settings.find('spacing'), 'text'):\r\n # spacing = QTableWidgetItem(nesting_settings.find('spacing').text)\r\n # spacing.setTextAlignment(Qt.AlignCenter)\r\n # window.t2.setItem(3, 0, spacing)\r\n #\r\n # if hasattr(nesting_settings.find('maxPartsPerNest'), 'text'):\r\n # max_parts_per_nest = QTableWidgetItem(nesting_settings.find('maxPartsPerNest').text)\r\n # max_parts_per_nest.setTextAlignment(Qt.AlignCenter)\r\n # window.t2.setItem(4, 0, max_parts_per_nest)\r\n #\r\n # if hasattr(nesting_settings.find('maxContainers'), 'text'):\r\n # max_containers = QTableWidgetItem(nesting_settings.find('maxContainers').text)\r\n # max_containers.setTextAlignment(Qt.AlignCenter)\r\n # window.t2.setItem(5, 0, max_containers)\r\n\r\n sys.exit(app.exec_())\r\n\r\n\r\n# Run Program\r\nmain()\r\n","repo_name":"sweiss93/LengthNestPro","sub_path":"LengthNestPro.py","file_name":"LengthNestPro.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"36"} +{"seq_id":"3955199428","text":"# -*- codind: utf-8 -*-\n\nimport os, sys, random, argparse, time\nimport math\nimport json\nimport codecs\n\nimport math\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\n\nfrom xslu.utils import EarlyStopping, Statistics\nimport xslu.Constants as Constants\n\nclass Trainer(object):\n\n def __init__(self, model, criterion, optimizer, logger, cuda=True, early_stop_mode='max', tag='S2SPointer'):\n self.tag = tag\n self.model = model\n self.criterion = criterion\n self.optimizer = optimizer\n self.logger = logger\n self.early_stop = EarlyStopping(mode=early_stop_mode, min_delta=0.1, 
patience=10)\n self.cuda = cuda\n\n def get_stats(self, loss, probs, target):\n pred = probs.max(1)[1]\n non_padding = target.ne(Constants.PAD)\n num_correct = pred.eq(target).masked_select(non_padding).sum().item()\n return Statistics(loss.item(), non_padding.sum().item(), num_correct)\n\n def train_on_epoch(self, epoch, data_iter):\n\n self.model.train()\n self.logger.info(\"Epoch {:02} {} begins training, {:05} examples ...................\".format(\n epoch, self.tag, len(data_iter))\n )\n\n stats = Statistics()\n\n for (itr, batch) in enumerate(data_iter):\n enc, lengths, extra_zeros, enc_batch_extend_vocab_idx, dec_inp_ids, dec_out_ids = batch\n probs = self.model(enc, lengths, dec_inp_ids, extra_zeros, enc_batch_extend_vocab_idx)\n target = dec_out_ids.transpose(0, 1).contiguous().view(-1)\n loss = self.criterion(probs, target)\n\n # statistics\n loss_data = loss.data.clone()\n batch_stat = self.get_stats(loss_data, probs.data, target.data)\n stats.update(batch_stat)\n\n # grad update\n self.model.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # loss logging\n self.logger.info(\"Epoch {:02} {} accu: {:6.2f}; ppl: {:6.2f}; elapsed_time: {:6.0f}s\".format(\n epoch, self.tag, stats.accuracy(), stats.ppl(), stats.elapsed_time()\n ))\n\n return stats.accuracy()\n\n def valid_on_epoch(self, epoch, data_iter):\n\n self.model.eval()\n self.logger.info(\"Epoch {:02} {} begins validation, {:05} examples ...................\".format(\n epoch, self.tag, len(data_iter))\n )\n\n stats = Statistics()\n\n for (itr, batch) in enumerate(data_iter):\n enc, lengths, extra_zeros, enc_batch_extend_vocab_idx, dec_inp_ids, dec_out_ids = batch\n probs = self.model(enc, lengths, dec_inp_ids, extra_zeros, enc_batch_extend_vocab_idx)\n target = dec_out_ids.transpose(0, 1).contiguous().view(-1)\n loss = self.criterion(probs, target)\n\n # statistics\n loss_data = loss.data.clone()\n batch_stat = self.get_stats(loss_data, probs.data, target.data)\n stats.update(batch_stat)\n\n # loss logging\n self.logger.info(\"Epoch {:02} {} accu: {:6.2f}; ppl: {:6.2f}; elapsed_time: {:6.0f}s\".format(\n epoch, self.tag, stats.accuracy(), stats.ppl(), stats.elapsed_time()\n ))\n\n return stats.accuracy()\n\n def train(self, epochs, train_data, valid_data, chkpt_path):\n\n for epoch in range(1, epochs+1):\n\n _ = self.train_on_epoch(epoch, train_data)\n f = self.valid_on_epoch(epoch, valid_data)\n\n flag = self.early_stop(epoch, f, self.model.state_dict())\n best_epoch = self.early_stop.best_epoch\n best_metric = self.early_stop.best_metric\n best_model_state = self.early_stop.best_model_state\n\n if flag:\n self.logger.info('Early Stopping at epoch {:02}'.format(best_epoch))\n self.logger.info('Best metric is {:6.2f}'.format(best_metric))\n torch.save(best_model_state, chkpt_path)\n self.logger.info('Save the model state at {}'.format(chkpt_path))\n break\n\n if not flag:\n self.logger.info('Finally Stopping at epoch {:02}'.format(best_epoch))\n self.logger.info('Best metric is {:6.2f}'.format(best_metric))\n torch.save(best_model_state, chkpt_path)\n self.logger.info('Save the model state at {}'.format(chkpt_path))\n\n\n","repo_name":"ZiJianZhao/Unaligned-SLU","sub_path":"dual/pointer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"22347143598","text":"import os\nimport re\nfrom enum import Enum\nfrom pathlib import Path\nfrom subprocess import run, CalledProcessError, DEVNULL, 
PIPE\nfrom urllib.parse import urlparse\n\nfrom .exceptions import BodyworkGitError\nfrom .constants import (\n DEFAULT_PROJECT_DIR,\n SSH_DIR_NAME,\n SSH_PRIVATE_KEY_ENV_VAR,\n GITHUB_SSH_FINGERPRINT,\n GITLAB_SSH_FINGERPRINT,\n BITBUCKET_SSH_FINGERPRINT,\n AZURE_SSH_FINGERPRINT,\n GIT_SSH_COMMAND,\n DEFAULT_SSH_FILE,\n)\nfrom .logs import bodywork_log_factory\n\n_log = bodywork_log_factory()\n\n\ndef download_project_code_from_repo(\n url: str,\n branch: str = None,\n destination: Path = DEFAULT_PROJECT_DIR,\n ssh_key_path: str = None,\n) -> None:\n \"\"\"Download Bodywork project code from Git repository,\n\n :param url: Git repository URL.\n :param branch: The Git branch to download, defaults to 'master'.\n :param destination: The name of the directory int which the\n repository will be cloned, defaults to DEFAULT_PROJECT_DIR.\n :param ssh_key_path: SSH key filepath.\n :raises BodyworkGitError: If Git is not available on the system or the\n Git repository cannot be accessed.\n \"\"\"\n try:\n run([\"git\", \"--version\"], check=True, stdout=DEVNULL)\n except CalledProcessError:\n raise BodyworkGitError(\"git is not available\")\n try:\n if get_connection_protocol(url) is ConnectionProtocol.SSH:\n hostname = urlparse(f\"ssh://{url}\").hostname\n if hostname:\n setup_ssh_for_git_host(hostname, ssh_key_path)\n else:\n raise ValueError(\n f\"Unable to derive hostname from URL {url}. Please check \"\n )\n except Exception as e:\n msg = f\"Unable to setup SSH for Git and you are trying to connect via SSH: {e}\"\n raise BodyworkGitError(msg)\n try:\n if branch:\n git_cmd = [\n \"git\",\n \"clone\",\n \"--branch\",\n branch,\n \"--single-branch\",\n url,\n str(destination),\n ]\n else:\n git_cmd = [\"git\", \"clone\", \"--single-branch\", url, str(destination)]\n run(git_cmd, check=True, encoding=\"utf-8\", stdout=DEVNULL, stderr=PIPE)\n except CalledProcessError as e:\n msg = f\"Git clone failed - calling {e.cmd} returned {e.stderr}\"\n raise BodyworkGitError(msg)\n\n\nclass ConnectionProtocol(Enum):\n \"\"\"Connection protocol used to access Git repo.\"\"\"\n\n FILE = \"file\"\n HTTPS = \"https\"\n SSH = \"ssh\"\n\n\ndef get_connection_protocol(connection_string: str) -> ConnectionProtocol:\n \"\"\"Derive connection protocol used to retrieve Git repo.\n\n :param connection_string: The string containing the connection\n details for the remote Git repository - e.g. 
the GitHub URL.\n :raises RuntimeError: if the connection protocol cannot be\n identified or is not supported.\n :return: The connection protocol type.\n \"\"\"\n if re.match(\"^https://\", connection_string):\n return ConnectionProtocol.HTTPS\n elif re.match(\"^git@\", connection_string):\n return ConnectionProtocol.SSH\n elif re.match(\"^file://\", connection_string):\n return ConnectionProtocol.FILE\n else:\n msg = (\n f\"cannot identify connection protocol in {connection_string}\"\n f\"- currently, there is only support for HTTPS and SSH\"\n )\n raise RuntimeError(msg)\n\n\ndef setup_ssh_for_git_host(hostname: str, ssh_key_path: str = None) -> None:\n \"\"\"Setup system for SSH interaction with GitHub.\n\n Using the private key assigned to an environment variable, this\n function creates a new SSH configuration in the working directory\n and then tells Git to use it for SSH by exporting the\n GIT_SSH_COMMAND environment variable.\n\n :param hostname: Hostname to SSH to.\n :param ssh_key_path: SSH key file to use.\n \"\"\"\n ssh_dir = Path.home() / SSH_DIR_NAME\n if SSH_PRIVATE_KEY_ENV_VAR in os.environ:\n _log.info(\"Using SSH key from environment variable.\")\n try:\n private_key = ssh_dir / DEFAULT_SSH_FILE\n ssh_dir.mkdir(mode=0o700, exist_ok=True)\n private_key.touch(0o700, exist_ok=True)\n key = os.environ[SSH_PRIVATE_KEY_ENV_VAR]\n if key[-1] != \"\\n\":\n key = f\"{key}\\n\"\n with Path(private_key).open(mode=\"w\", newline=\"\\n\") as file_handle:\n file_handle.write(key)\n except OSError as e:\n raise RuntimeError(\n f\"Unable to create private key {private_key} from\"\n f\" {SSH_PRIVATE_KEY_ENV_VAR} environment variable.\"\n ) from e\n elif ssh_key_path:\n private_key = Path(ssh_key_path)\n if not private_key.exists():\n msg = f\"Failed to setup SSH for {hostname} - cannot find SSH key {ssh_key_path}\" # noqa\n raise FileNotFoundError(msg)\n else:\n msg = f\"Failed to setup SSH for {hostname} - cannot find SSH keys or {SSH_PRIVATE_KEY_ENV_VAR} environment variable.\" # noqa\n raise RuntimeError(msg)\n\n _configure_known_hosts(hostname, ssh_dir)\n os.environ[GIT_SSH_COMMAND] = f\"ssh -i '{private_key}'\" f\" -o IdentitiesOnly=yes\"\n\n\ndef _configure_known_hosts(hostname, ssh_dir):\n try:\n known_hosts = ssh_dir / \"known_hosts\"\n if not known_hosts.exists():\n known_hosts.touch(0o700, exist_ok=False)\n known_hosts.write_text(get_ssh_public_key_from_domain(hostname))\n elif not known_hosts_contains_domain_key(hostname, known_hosts):\n with known_hosts.open(mode=\"a\") as file_handle:\n file_handle.write(get_ssh_public_key_from_domain(hostname))\n except OSError as e:\n raise RuntimeError(\n f\"Error updating known hosts with public key from {hostname}.\"\n ) from e\n\n\ndef known_hosts_contains_domain_key(hostname: str, known_hosts_filepath: Path) -> bool:\n \"\"\"Checks to see if the host is in the list of keys in the known_hosts file.\n\n :param known_hosts_filepath: path to known_hosts file\n :param hostname: Hostname to check for.\n :return: bool if the hostname is in the file\n \"\"\"\n return hostname in known_hosts_filepath.read_text()\n\n\ndef get_ssh_public_key_from_domain(hostname: str) -> str:\n \"\"\"Gets the public key from the host and checks the fingerprint.\n\n Output from ssh-keyscan is piped into ssh-keygen by setting the input in\n conjunction with the trailing '-' in the command.\n\n :param hostname: Name of host to retrieve the key from e.g. 
Gitlab.com\n :return: The public SSH Key of the host.\n \"\"\"\n fingerprints = {\n \"github.com\": GITHUB_SSH_FINGERPRINT,\n \"gitlab.com\": GITLAB_SSH_FINGERPRINT,\n \"bitbucket.org\": BITBUCKET_SSH_FINGERPRINT,\n \"ssh.dev.azure.com\": AZURE_SSH_FINGERPRINT,\n }\n if hostname in fingerprints:\n try:\n server_key = run(\n [\"ssh-keyscan\", \"-t\", \"rsa\", hostname],\n check=True,\n capture_output=True,\n encoding=\"utf-8\",\n ).stdout\n fingerprint = run(\n [\"ssh-keygen\", \"-l\", \"-f\", \"-\"],\n check=True,\n capture_output=True,\n encoding=\"utf-8\",\n input=server_key,\n ).stdout.strip()\n if fingerprint == fingerprints.get(hostname):\n return server_key\n else:\n raise ConnectionAbortedError(\n f\"SECURITY ALERT! SSH Fingerprint received from server does not \"\n f\"match the fingerprint for {hostname}. Please check and ensure\"\n f\" that {hostname} is not being impersonated\"\n )\n except CalledProcessError as e:\n raise RuntimeError(\n f\"Unable to retrieve public SSH key from {hostname}: {e.stdout} {e.stderr}\" # noqa\n )\n else:\n raise RuntimeError(f\"{hostname} is not supported by Bodywork\")\n\n\ndef get_git_commit_hash(project_path: Path = DEFAULT_PROJECT_DIR) -> str:\n \"\"\"Retrieves the Git commit hash.\n\n :param project_path: Git project path.\n :return: The Git commit hash.\n \"\"\"\n try:\n result = run(\n [\"git\", \"rev-parse\", \"--short\", \"HEAD\"],\n cwd=project_path,\n check=True,\n capture_output=True,\n encoding=\"utf-8\",\n ).stdout.strip()\n return result\n except CalledProcessError as e:\n raise BodyworkGitError(\n f\"Unable to retrieve git commit hash: {e.stdout} {e.stderr}\"\n ) from e\n except OSError as e:\n raise BodyworkGitError(\n f\"Unable to retrieve git commit hash, path: {project_path} is invalid - {e}\"\n ) from e\n","repo_name":"bodywork-ml/bodywork-core","sub_path":"src/bodywork/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","stars":430,"dataset":"github-code","pt":"36"} +{"seq_id":"27740106270","text":"import sys,os\npath2add = os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'page')))\nif (not (path2add in sys.path)) :\n sys.path.append(path2add)\nimport unittest\nfrom data.get_data import GetData\nfrom ddt import ddt,data,unpack\nfrom base_page import opera_click\nfrom time import sleep\nfrom utils.多线程 import Threads\nimport asyncio\n\n@ddt\nclass NLE(unittest.TestCase):\n get_data = GetData(0)\n data_all = get_data.data()\n cloud = opera_click()\n L = []\n # tcp = Tcpclient()\n\n @classmethod\n def setUpClass(cls):\n cls.cloud.dr_get()\n\n @classmethod\n def tearDownClass(cls):\n cls.cloud.dr_quit()\n\n def setUp(self):\n self.cloud.cookie()\n\n @data(*data_all)\n @unpack\n def test_new_device(self,*args):\n if self.cloud.isElementExist('/html/body/header/div/nav/ul/li[5]/a') == True:\n #点击开发者中心按钮\n self.cloud.xpath_click('/html/body/header/div/nav/ul/li[5]/a')\n sleep(1)\n\n\n\n #点击新增项目按钮\n self.cloud.xpath_click('/html/body/div[1]/div/div[1]/div[1]/a')\n #切换到frame框架下\n self.cloud.loc_frame('myModalFrame')\n sleep(1)\n self.cloud.xpath_send_keys('//*[@id=\"Name\"]',args[0])\n #点击\"下一步\"button按钮\n self.cloud.xpath_click('/html/body/div[1]/div/form/div[5]/div/input')\n\n\n #添加设备名称\n self.cloud.xpath_send_keys('//*[@id=\"Name\"]',args[1])\n #添加设备标识符\n self.cloud.xpath_send_keys('//*[@id=\"Tag\"]',args[2])\n #点击\"确认添加设备\",然后返回到\"添加项目\"页面\n self.cloud.xpath_click('/html/body/div/div/form/div[6]/div/input')\n \"\"\"\n # 
#Bug:project_ID元素出现后,再去获取project_ID,对其显式等待\n self.cloud.show_wait(\"//span[@class='tag']\")\n\n \"\"\"\n #页面跳转到项目页面过程中会跳动导致获取不到元素,加上sleep(2)\n sleep(1)\n\n #获取project_ID\n project_ID = self.cloud.get_text(\"//span[@class='tag']\")\n #点击链接进入添加设备页面\n projectid =\"projectid-\" + str(project_ID)\n path = \"//*[@id=\" + \"'\" + projectid + \"'\" + \"]/div/div/a[1]/div\"\n self.cloud.xpath_click(path)\n\n #添加传感器和执行器\n #点击设备名称连接按钮,进入传感器页面\n self.cloud.xpath_click('//*[@id=\"list\"]/tbody/tr/td[3]/a[1]')\n sleep(1)\n #点击\"+\"按钮添加传感器\n self.cloud.xpath_click('/html/body/div[1]/div/div[3]/div[1]/table/tbody/tr/td/a[2]')\n #切换到frame框架下\n self.cloud.loc_frame('myModalFrame')\n \"\"\"\n #点击\"自定义\"标签,为了刷新一下,不然传感器名称可能输入不进去\n self.cloud.xpath_click('/html/body/ul/li[1]/a')\n \"\"\"\n sleep(1)\n #添加传感器名称\n self.cloud.xpath_send_keys('//*[@id=\"Name\"]',args[3])\n #添加传感器标识符\n self.cloud.xpath_send_keys('//*[@id=\"ApiTag\"]',args[4])\n #点击\"确认添加设备\"\n self.cloud.xpath_click('/html/body/div/div/form/div[7]/div/input[1]')\n sleep(1)\n #点击\"+\"按钮添加执行器\n self.cloud.xpath_click('/html/body/div[1]/div/div[3]/div[2]/table/tbody/tr/td/a[2]')\n #切换到frame框架下\n self.cloud.loc_frame('myModalFrame')\n \"\"\"\n #点击\"自定义\"标签,为了刷新一下,不然执行器名称可能输入不进去\n self.cloud.xpath_click('/html/body/ul/li[1]/a')\n \"\"\"\n sleep(1)\n #添加执行器名称\n self.cloud.xpath_send_keys('//*[@id=\"Name\"]',args[5])\n #添加执行器标识符\n self.cloud.xpath_send_keys('//*[@id=\"ApiTag\"]',args[6])\n #选择单选按钮\"开关型\"\n self.cloud.addact_Sin_button(args[7])\n #点击\"确认\"\n self.cloud.xpath_click('/html/body/div/div/form/div[8]/div/input[1]')\n\n #页面跳转过程中会跳动导致获取不到元素,加上sleep(2)\n sleep(1)\n\n #获取SecretKey\n SecretKey = self.cloud.get_text(\"//td[@class='security-key']\")\n #把device_tag,SecretKey,Sensor_tag放在一个元祖内\n T = ()\n T = (args[2],SecretKey,args[3])\n #把不同的元祖放在列表中\n self.L.append(T)\n #print(self.L)\n\n \"\"\"\n #通过TCP连接方式上报数据\n self.tcp.tcpclient1(args[2],SecretKey,args[3])\n \"\"\"\n\n \"\"\"\n #Bug:\"开发者中心\"元素出现后,再去点击\"开发者中心\",对其显式等待\n self.cloud.show_wait('/html/body/header/div[2]/div/ol/li[2]/a')\n \"\"\"\n #点击\"开发者中心\"返回到\"添加项目页面\"\n self.cloud.xpath_click('/html/body/header/div[2]/div/ol/li[2]/a')\n\n\n def test_tcpclient2(self):\n\n threads = Threads()\n threads.create_thread(self.L)\n\n\n\"\"\"\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n\"\"\"\n\n\n\n\n\n\n","repo_name":"songteng2012/Test_Cloud_improve","sub_path":"test/suite/add_device.py","file_name":"add_device.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70573833639","text":"# Default\nimport datetime\n\n# fastapi\nfrom fastapi import FastAPI\n\n# CORE\nfrom wrabbit import rabbit_wrapper\n\n# TASK\n\n# app initialization\napp = FastAPI()\n\n# kafka initialization\n\n\n@app.on_event(\"startup\")\nasync def start_event():\n rabbit_wrapper.init()\n\n\n@app.get(\"/\")\nasync def say_hello():\n return {\"hello\": \"world\"}\n\n\n@app.get(\"/hello\")\nasync def create(scan_id: int):\n rabbit_wrapper.produce(\"start_hello\", {\"scan_id\": scan_id})\n\n\n@app.get(\"/stop_hello\")\nasync def stop_hello(scan_id: int):\n rabbit_wrapper.produce(\"stop_hello\", {\"scan_id\": scan_id})\n\n\n@app.get(\"/start_crawler\")\nasync def start_crawl(scan_id: int):\n rabbit_wrapper.produce(\"start_crawler\", {\n \"host\": \"192.168.7.77\",\n \"target_list\": [\n {\n \"port\": 81,\n \"state\": \"open\",\n \"protocol\": \"tcp\",\n \"service\": \"http\",\n },\n ],\n \"credentials\": [],\n \"scan_id\": 
scan_id,\n \"is_testing\": True,\n \"entry_paths\": [],\n })\n\n\n@app.get(\"/stop_crawler\")\nasync def stop_crawl(scan_id: int):\n rabbit_wrapper.produce(\"stop_crawler\", {\"scan_id\": scan_id})\n\n\n@app.get(\"/start_scanner\")\nasync def start_scanner(scan_id: int):\n rabbit_wrapper.produce(\"start_scanner\", {\n \"host\": \"192.168.7.77\",\n \"scan_id\": scan_id,\n \"port_list\": \"80, 81\",\n })\n\n\n@app.get(\"/stop_scanner\")\nasync def stop_scanner(scan_id: int):\n rabbit_wrapper.produce(\"stop_scanner\", {\n \"scan_id\": scan_id,\n })\n","repo_name":"onaeonae1/rabbit_hunt","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37890796969","text":"def adjust_quote(text, quote):\n if len(quote) == 0:\n return text\n\n if text.find(quote) > -1:\n return text.replace(quote, quote + quote)\n else:\n return text\n\n\ndef build_quoted_string(\n field_type, field_value, field_separator, quote_around_str, quote_if_delimiter\n):\n if field_type == \"str\" and quote_around_str != \"\":\n return (\n quote_around_str\n + adjust_quote(field_value, quote_around_str)\n + quote_around_str\n )\n elif field_value.find(field_separator) >= 0:\n return (\n quote_if_delimiter\n + adjust_quote(field_value, quote_if_delimiter)\n + quote_if_delimiter\n )\n else:\n return field_value\n\n\ndef format_double(\n input_value,\n decimal_mark=\".\",\n digit_grouping_char=\",\",\n digit_grouping_mode=0,\n neg_sign_template=\"-X\",\n):\n # use std conversion\n try:\n value = abs(input_value)\n except TypeError:\n return str(input_value)\n\n tmp = str(value)\n if tmp.find(\".\") < 0:\n tmp = tmp + \".0\"\n\n if decimal_mark == digit_grouping_char and digit_grouping_mode > 0:\n print(\n f\"Decimal mark is set to {decimal_mark} and digit grouping is set to {digit_grouping_char}.\"\n )\n\n tmp1 = tmp[0 : tmp.find(\".\")]\n tmp2 = tmp[tmp.find(\".\") + 1 :]\n # print(value, tmp, tmp1, tmp2)\n\n if digit_grouping_mode == 0: # no grouping\n tmp_out = tmp1\n\n elif digit_grouping_mode == 1:\n tmp_out = separate(tmp1, digit_grouping_char, [3, 3, 3, 3, 3])\n\n elif digit_grouping_mode == 2:\n tmp_out = separate(tmp1, digit_grouping_char, [3, 2, 2, 2, 2, 2])\n\n tmp_out = tmp_out + decimal_mark + tmp2\n\n if input_value < 0:\n tmp_out = neg_sign_template.replace(\"X\", tmp_out)\n\n return tmp_out\n\n\ndef separate(input_str, digit_grouping_char, limits):\n tmp_rem = input_str\n tmp_out = \"\"\n tmp_cnt = 0\n tmp_cnt_limit_arr = limits\n\n if len(tmp_cnt_limit_arr) > 0:\n tmp_cnt_limit = tmp_cnt_limit_arr.pop(0)\n else:\n tmp_cnt_limit = 3\n\n while len(tmp_rem) > 0:\n\n tmp_trail = tmp_rem[len(tmp_rem) - 1 :]\n tmp_rem = tmp_rem[0 : len(tmp_rem) - 1]\n if tmp_cnt == tmp_cnt_limit:\n tmp_out = tmp_trail + digit_grouping_char + tmp_out\n tmp_cnt = 1\n if len(tmp_cnt_limit_arr) > 0:\n tmp_cnt_limit = tmp_cnt_limit_arr.pop(0)\n else:\n tmp_cnt_limit = 3\n else:\n tmp_out = tmp_trail + tmp_out\n tmp_cnt += 1\n return tmp_out\n","repo_name":"SergeLouvetMcKinsey/files_generator","sub_path":"file_generator_library/dataset_functions.py","file_name":"dataset_functions.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9049426661","text":"\"\"\"\n Learning with TKInter\n\"\"\"\n\n# IMPORTS\nimport tkinter as tk\n\n# CONFIG\nFRAME_BG = \"#ABCDEF\"\nW_DIMENSIONS = \"600x600\"\n\n# CLASSES\nclass 
App:\n def __init__(self):\n # Window\n self.window = tk.Tk()\n self.window.geometry(W_DIMENSIONS)\n self.window.title(\"Test TK\")\n \n # Frame\n self.frame = tk.Frame(self.window, bg=FRAME_BG)\n\n self.frame.pack(expand=True, fill=\"both\")\n self.frame.update()\n\n def run(self):\n self.window.mainloop()\n\n# RUNTIME\ndef main():\n app = App()\n\n app.run()\n\nif __name__ == \"__main__\":\n main()","repo_name":"klusik/Python","sub_path":"Bonusy/tkinter_stuff/tkinter_stuff.py","file_name":"tkinter_stuff.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70570547560","text":"# Series calculator\r\n\r\n# Име на сериал - текст\r\n# Брой сезони – цяло число в диапазона [1… 10]\r\n# Брой епизоди – цяло число в диапазона [10… 80]\r\n# Времетраене на обикновен епизод без рекламите – реално число в диапазона [40.0… 65.0]\r\n\r\ntv_series_name = input()\r\nnumber_of_seasons = int(input())\r\nnumber_of_episodes = int(input())\r\nepisode_duration = float(input())\r\n\r\nnormal_duration_sum = number_of_seasons * number_of_episodes * episode_duration\r\nduration_with_ads = normal_duration_sum + (normal_duration_sum * 0.20)\r\nextra_episodes_added_duration = duration_with_ads + (number_of_seasons * 10)\r\n\r\n\r\nprint(f\"Total time needed to watch the {tv_series_name} series \\\r\nis {int(extra_episodes_added_duration)} minutes.\")\r\n","repo_name":"pySin/SoftUni-Software-Engineering","sub_path":"Python-Programming-Basics/First-Steps-in-Python-Coding/series_calculator.py","file_name":"series_calculator.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16122926913","text":"\nclass Source:\n def __init__(self, url, quantity, home, guest, homeQuote, guestQuote):\n self.url = url\n self.home = home\n self.guest = guest\n self.homeQuote = homeQuote\n self.guestQuote = guestQuote\n self.qunatity = quantity\n\n def __str__(self):\n return \"url : %s , home : %s , guest : %s , homeQuote : %s , guestQuote : %s\" % (self.url, self.home, self.guest, self.homeQuote, self.guestQuote)\n","repo_name":"robrados/arbitrage","sub_path":"data/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15770405104","text":"class Solution:\r\n def solveNQueens(self, n: int) -> List[List[str]]:\r\n def findqueen(lutions,n):\r\n solution = []\r\n nums = len(lutions)\r\n for i in range(0,n):\r\n if i not in lutions:\r\n solution.append(i)\r\n for j in range(0,nums):\r\n if nums - j == i - lutions[j] or nums - j == lutions[j] - i:\r\n solution.pop()\r\n break\r\n solutions = []\r\n if nums < n - 1 and solution != []:\r\n for i in solution:\r\n lutions.append(i)\r\n for j in findqueen(lutions,n):solutions.append([i] + j)\r\n else:lutions.pop()\r\n elif nums == n - 1:\r\n for i in solution:solutions.append([i])\r\n return solutions\r\n if n == 1:return [[\"Q\"]]\r\n solutions = []\r\n printsolutions = []\r\n for i in range(0,n):\r\n for j in findqueen([i],n):solutions.append([i] + j)\r\n for i in range(0,len(solutions)):\r\n a = []\r\n for j in range(0,n):\r\n a.append(\".\" * solutions[i][j] + \"Q\" + \".\" * (n - solutions[i][j] - 1))\r\n printsolutions.append(a)\r\n return 
printsolutions","repo_name":"Psyqotato/leetcode","sub_path":"0051.py","file_name":"0051.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35058963956","text":"import pygame\nfrom src.p_server.network import Network\nfrom src.components.button import ButtonDuels\nfrom src.screens.s_duel import DuelGame\nfrom duel import Duel\nfrom src.services.getFont import loadCustomFont\nfrom subprocess import Popen\n\n\nclass Client():\n def __init__(self, game):\n self.game = game\n self.index = 0\n self.base_font = loadCustomFont(25)\n self.ip = \"\"\n self.player, self.n, self.gameUser = None, None, None\n self.ipPos = (self.game.WIDTH/2 - 150, self.game.HEIGHT/2 - 80)\n self.volume = self.game.volume\n self.input_rect = pygame.Rect(\n self.ipPos[0] + 30, self.ipPos[1], 250, 36)\n self.searchButton = ButtonDuels(\n 'Procurar', self.ipPos[0] + 290, self.ipPos[1], (200, 200, 200), 'rooters', 140, 30, 10, 25, textColor=(20, 20, 20))\n self.hostButton = ButtonDuels('Hospedar', self.game.WIDTH/2 - 40, self.game.HEIGHT /\n 2 - 20, (200, 200, 200), 'rooters', 200, 50, 10, 25, textColor=(20, 20, 20))\n self.images = [\n pygame.image.load('assets/images/waiting/waiting1.png'),\n pygame.image.load('assets/images/waiting/waiting2.png'),\n pygame.image.load('assets/images/waiting/waiting3.png'),\n pygame.image.load('assets/images/waiting/waiting4.png'),\n pygame.image.load('assets/images/waiting/waiting5.png'),\n pygame.image.load('assets/images/waiting/waiting6.png'),\n pygame.image.load('assets/images/waiting/waiting7.png'),\n pygame.image.load('assets/images/waiting/waiting8.png'),\n pygame.image.load('assets/images/waiting/waiting9.png'),\n pygame.image.load('assets/images/waiting/waiting10.png'),\n pygame.image.load('assets/images/waiting/waiting11.png'),\n pygame.image.load('assets/images/waiting/waiting12.png')\n ]\n\n def redrawWindow(self, win: pygame.Surface):\n win.blit(self.image, (0, 0))\n self.animateBackground()\n pygame.display.update()\n\n def animateBackground(self):\n self.index += 1\n if self.index >= len(self.images):\n self.index = 0\n self.image = pygame.transform.scale(\n self.images[self.index], (self.game.WIDTH, self.game.HEIGHT))\n pygame.time.delay(60)\n\n def getConnection(self, ):\n n = Network(self.game.userName, self.ip)\n player = int(n.getP()['player'])\n gameUser = self.game.userName + str(player)\n return (n, player, gameUser)\n\n def main(self):\n self.index = 0\n self.image = pygame.transform.scale(\n self.images[self.index], (self.game.WIDTH, self.game.HEIGHT))\n self.run_display = True\n clock = pygame.time.Clock()\n\n while self.run_display:\n clock.tick(30)\n try:\n duel: Duel = self.n.send({'action': 'standby'})\n except Exception as e:\n self.run_display = False\n print(\"Couldn't get game\", e)\n self.game.goToMenuScreen()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game.finish()\n if(duel.connected()):\n self.run_display = False\n self.redrawWindow(self.game.window)\n self.game.duelGame = DuelGame(\n self.game, self.n, self.player, self.gameUser, duel)\n self.game.curr_screen = self.game.duelGame\n self.game.curr_screen.render_self()\n\n def render_self(self):\n clock = pygame.time.Clock()\n self.run = True\n\n color_active = (233, 233, 233)\n color_passive = (150, 150, 150)\n color = color_passive\n self.text_active = False\n ip_surface = self.base_font.render(\n \"ip:\", True, (255, 255, 255))\n while self.run:\n clock.tick(30)\n 
self.check_input()\n color = color_active if self.text_active else color_passive\n self.game.window.fill(self.game.BLACK)\n text_surface = self.base_font.render(\n self.ip, True, (255, 255, 255))\n\n self.game.draw_text(\n \"Busque uma partida(Se o campo ip estiver vazio usara o ip da sua maquina) ou inicie um servidor\", 50, self.game.DISPLAY_W / 2, self.game.DISPLAY_H / 2 - 200)\n\n self.game.window.blit(\n ip_surface, (self.ipPos[0], self.ipPos[1]))\n self.game.window.blit(\n text_surface, (self.ipPos[0] + 32, self.ipPos[1]))\n self.searchButton.draw(self.game.window)\n self.hostButton.draw(self.game.window)\n pygame.draw.rect(self.game.window,\n color, self.input_rect, 2)\n self.game.window.blit(self.game.window, (0, 0))\n pygame.display.flip()\n\n self.main()\n\n def check_input(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.run = False\n self.game.finish\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_presses = pygame.mouse.get_pressed()\n if mouse_presses[0]:\n self.text_active = False\n pos = pygame.mouse.get_pos()\n if self.searchButton.click(pos):\n self.game.sound.playSound('simpleClick'),\n self.run = False\n try:\n self.n, self.player, self.gameUser = self.getConnection()\n except Exception as e:\n return\n return\n elif self.input_rect.collidepoint(pos):\n self.text_active = True\n return\n elif self.hostButton.click(pos):\n self.game.sound.playSound('simpleClick'),\n self.run = False\n self.ip = \"\"\n self.game.server = Popen(\n 'python server.py')\n try:\n self.n, self.player, self.gameUser = self.getConnection()\n except:\n self.run_display = False\n return\n else:\n self.text_active = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.game.goToMenuScreen()\n elif self.text_active:\n if event.key == pygame.K_BACKSPACE:\n self.ip = self.ip[:-1]\n else:\n if len(self.ip) < 15:\n self.ip += event.unicode\n","repo_name":"David-Ackerman/mogs_smaug2_pygame","sub_path":"src/screens/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"28458949423","text":"#! 
/usr/bin/python3\n\nfrom platform import platform\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport os.path\nimport argparse\n\n# plt.rcParams[\"figure.figsize\"] = [4., 3.]\nplt.rcParams[\"figure.autolayout\"] = True\n\ndef validFileLabelTuple(param: str):\n \"\"\"\n Helper function, which transforms and checks for the right structure of the given arguments\n \n param param: the argument to check\n\n return: tuple[str, str]\n \"\"\"\n # check for valid structure of the argument\n try:\n filepath, label = map(str, param.split(','))\n except ValueError:\n filepath = param.split(',')\n if len(filepath) > 1:\n exit(f\"Argument {param} has too many values\")\n elif len(filepath) < 1:\n exit(f\"Argument {param} has too few values\")\n elif len(filepath) == 1:\n filepath = param\n label = \"\"\n except Exception as e: \n print(e)\n raise argparse.ArgumentError(f\"Unsupported argument {param}\")\n\n # ensure the right file extension\n ext = os.path.splitext(filepath)[1]\n if ext.lower() not in ('.csv'):\n raise argparse.ArgumentTypeError(f\"Invalid file type: {param} must be a csv file\")\n \n # ensure that file exists\n if not os.path.exists(filepath):\n raise argparse.ArgumentTypeError(f\"Invalid file {filepath} does not exist\")\n \n return (str(filepath), label)\n \n\n\nparser = argparse.ArgumentParser(\n description=\"Produce plots showing the performance of the simulated system. \\\n It can use several files, or tuples of file and label, one for each simulation run to be compared. \\\n The files containing the simulation dump are CSV files produced by the output method of the simulator.\",\n add_help=True\n)\nparser.add_argument(\n \"--logscale\",\n action=\"store_true\",\n help=\"Plot with logarithmic x-axis\"\n)\nparser.add_argument(\n \"--title\",\n type=str,\n default=\"DCSim\",\n help=\"Plot title hinting on the simulated scenario\"\n)\nparser.add_argument(\n \"--suffix\",\n type=str,\n help=\"Optonal suffix to add to the file-name of the plot.\"\n)\nparser.add_argument(\n \"simoutputs\",\n nargs='+',\n type=validFileLabelTuple,\n help=\"CSV files, or tuples of file and label, containing information \\\n about the simulated jobs produced by the simulator.\"\n)\n\n\nargs = parser.parse_args()\n\ntitle = args.title\nsuffix = args.suffix\nfile_label_pairs = args.simoutputs\n\nfile_label_pairs = dict(\n file_label_pairs\n)\n\n\nevent_fig, event_ax = plt.subplots()\nevent_ax.set_xlabel('time / s', loc='right')\nif args.logscale:\n event_ax.set_xscale('log')\n\nfor file, label in file_label_pairs.items():\n with open(file) as f:\n df = pd.read_csv(f, sep=\",\\s\", engine='python')\n \n starts = event_ax.eventplot(\n positions=df['job.start'].to_numpy(),\n orientation='horizontal',\n lineoffsets=label,\n linewidths=0.1,\n linelengths=0.75,\n colors='black',\n label=\"start\"\n )\n ends = event_ax.eventplot(\n positions=df['job.end'].to_numpy(),\n orientation='horizontal',\n lineoffsets=label,\n linewidths=0.1,\n linelengths=0.75,\n colors='black',\n label=\"end\"\n )\n\n\n machines = df[\"machine.name\"].unique()\n\n\n efficiency_fig, efficiency_ax = plt.subplots()\n efficiency_ax.set_xlabel(\"eff. 
/ %\", loc='right')\n efficiency_ax.set_ylabel(\"jobs\", loc='top')\n efficiency_ax.set_yscale('log')\n\n walltime_fig, walltime_ax = plt.subplots()\n walltime_ax.set_xlabel(\"walltime / s\", loc='right')\n walltime_ax.set_ylabel(\"jobs\", loc='top')\n walltime_ax.set_yscale('log')\n\n machine_efficiencies = {}\n machine_walltimes = {}\n for i,machine in enumerate(machines):\n df_masked = df[df[\"machine.name\"]==machine]\n\n machine_efficiency = df_masked[\"job.computetime\"]/(df_masked[\"job.end\"]-df_masked[\"job.start\"])/100\n machine_efficiencies[machine]=machine_efficiency\n\n machine_walltime = (df_masked[\"job.end\"]-df_masked[\"job.start\"])\n machine_walltimes[machine]=machine_walltime\n\n machine_efficiencies_list = sorted(machine_efficiencies.items(),key=lambda x: x[1].size)\n machine_efficiencies = dict(machine_efficiencies_list)\n machine_walltimes_list = sorted(machine_walltimes.items(), key=lambda x: x[1].size)\n machine_walltimes = dict(machine_walltimes_list)\n\n efficiency_ax.hist(\n list(machine_efficiencies.values()),\n bins=100, range=(0.,100.),\n stacked=True,\n label=list(machine_efficiencies.keys())\n )\n\n walltime_ax.hist(\n list(machine_walltimes.values()),\n bins=100,\n stacked=True,\n label=list(machine_walltimes.keys())\n )\n\n\n efficiency_ax.legend()\n efficiency_ax.set_title(title+\" \"+label, loc='left', fontsize=14, fontweight='bold')\n\n efficiency_fig.savefig(f\"efficiency_{label}_{suffix}.png\")\n efficiency_fig.savefig(f\"efficiency_{label}_{suffix}.pdf\")\n\n\n walltime_ax.legend()\n walltime_ax.set_title(title+\" \"+label, loc='left', fontsize=14, fontweight='bold')\n\n walltime_fig.savefig(f\"walltime_{label}_{suffix}.png\")\n walltime_fig.savefig(f\"walltime_{label}_{suffix}.pdf\")\n\n\nevent_ax.legend(\n handles = (starts[0], ends[0]),\n labels = (\"start\", \"end\")\n)\nevent_ax.set_title(title, loc='left', fontsize=14, fontweight='bold')\n\nevent_fig.savefig(f\"jobevents_{suffix}.png\")\nevent_fig.savefig(f\"jobevents_{suffix}.pdf\")\n\n\n\nplt.show()\n\n","repo_name":"HEPCompSim/DCSim","sub_path":"tools/platformPerformancePlots.py","file_name":"platformPerformancePlots.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"7099235931","text":"import re\nfrom textblobl import TextBlob\n\n\ndef clean_text():\n \"\"\"\n Utility function to clean text by removing links, special characters using\n regex statement.\n\n :return: None\n \"\"\"\n\n return ' '.join(re.sub(\n \"(@[A-Za-z0-9]+) | ([ ^ 0-9A-Za-z \\t]) | (\\w+: \\/\\/\\S+)\",\n \" \", tweet).split())\n\n\ndef get_text_sentiment(text):\n \"\"\"\n Pass text as a parameter, which gets split into paragraphs/sentences and\n passed onto TextBlob for analysis.\n\n :return: tuple(positive, negative, neutral)\n \"\"\"\n\n text = text.split(\"\\n\")\n \n positive = 0\n negative = 0\n neutral = 0\n\n for sentence in text:\n analysis = TextBlob(clean_text(sentence))\n\n if analysis.sentiment.polarity > 0:\n positive += 1\n elif analysis.sentiment.polarity < 0:\n negative += 1\n else:\n neutral += 1\n\n return (positive, negative, neutral)\n\n\ndef display_analysis(positive, negative, neutral):\n \"\"\"\n Interprets the values from TextBlob and displays it on the shell.\n\n :param positive: number of positive results after analysis\n :param negative: number of negative results after analysis\n :param neutral: number of neutral results after analysis\n\n :return: None\n \"\"\"\n total = positive + 
negative + neutral\n\n positive_percentage = (positive / total) * 100\n negative_percentage = (negative / total) * 100\n neutral_percentage = (neutral / total) * 100\n\n print(\"Positive: {}%\\nNegative: {}%\\n Neutral: {}%\".format(\n positive_percentage, negative_percentage, neutral_percentage))\n","repo_name":"amalrajan/quora-analyzer","sub_path":"source/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"8321725263","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport jsonschema\n\nfrom oas.exceptions import UnmarshalError\n\n\ndef test_unmarshal_error():\n error = UnmarshalError(\n [\n jsonschema.ValidationError(\n \"'p1' is a required in 'query' parameter\",\n validator='required',\n path=('path', 'p1'),\n ),\n jsonschema.ValidationError(\n \"'123' is not of type 'integer'\",\n validator='type',\n path=('query', 'p2'),\n ),\n ],\n [\n jsonschema.ValidationError(\n 'Request body is required', validator='required'\n )\n ],\n )\n assert error.to_dict() == {\n 'parameters': [\n {\n 'path': ['path', 'p1'],\n 'validator': 'required',\n 'message': \"'p1' is a required in 'query' parameter\",\n },\n {\n 'path': ['query', 'p2'],\n 'validator': 'type',\n 'message': \"'123' is not of type 'integer'\",\n },\n ],\n 'request_body': [\n {\n 'path': [],\n 'validator': 'required',\n 'message': 'Request body is required',\n }\n ],\n }\n\n\ndef test_no_unmarshal_error():\n error = UnmarshalError()\n assert error.to_dict() == {}\n","repo_name":"grktsh/python-oas","sub_path":"tests/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"39893967340","text":"\"\"\" run decoding of rnn-ext + abs + RL (+ rerank)\"\"\"\nimport argparse\nimport json\nimport os\nfrom os.path import join\nfrom datetime import timedelta\nfrom time import time\nfrom collections import Counter, defaultdict\nfrom itertools import product\nfrom functools import reduce\nimport operator as op\n\nfrom cytoolz import identity, concat, curry\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import multiprocessing as mp\n\nfrom data.batcher import tokenize, preproc\n\nfrom decoding import Abstractor, RLExtractor, DecodeDataset, BeamAbstractor, SCExtractor, DecodeDatasetEntity\nfrom decoding import make_html_safe\nfrom nltk import sent_tokenize\n\n\ndef decode(save_path, model_dir, split, batch_size,\n beam_size, diverse, max_len, cuda, sc, min_len):\n start = time()\n # setup model\n with open(join(model_dir, 'meta.json')) as f:\n meta = json.loads(f.read())\n if meta['net_args']['abstractor'] is None:\n #if not meta['net_args'].__contains__('abstractor'):\n # NOTE: if no abstractor is provided then\n # the whole model would be extractive summarization\n assert beam_size == 1\n abstractor = identity\n else:\n if beam_size == 1:\n abstractor = Abstractor(join(model_dir, 'abstractor'),\n max_len, cuda)\n else:\n abstractor = BeamAbstractor(join(model_dir, 'abstractor'),\n max_len, cuda, min_len)\n\n if sc:\n extractor = SCExtractor(model_dir, cuda=cuda)\n else:\n extractor = RLExtractor(model_dir, cuda=cuda)\n\n # setup loader\n def coll(batch):\n articles = list(filter(bool, 
batch))\n return articles\n dataset = DecodeDataset(split)\n\n n_data = len(dataset)\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=False, num_workers=4,\n collate_fn=coll\n )\n\n # prepare save paths and logs\n os.makedirs(join(save_path, 'output'))\n dec_log = {}\n dec_log['abstractor'] = meta['net_args']['abstractor']\n dec_log['extractor'] = meta['net_args']['extractor']\n dec_log['rl'] = True\n dec_log['split'] = split\n dec_log['beam'] = beam_size\n dec_log['diverse'] = diverse\n with open(join(save_path, 'log.json'), 'w') as f:\n json.dump(dec_log, f, indent=4)\n\n # Decoding\n if sc:\n i = 0\n length = 0\n with torch.no_grad():\n for i_debug, raw_article_batch in enumerate(loader):\n tokenized_article_batch = map(tokenize(None), raw_article_batch)\n ext_arts = []\n ext_inds = []\n for raw_art_sents in tokenized_article_batch:\n ext = extractor(raw_art_sents)[:] # exclude EOE\n if not ext:\n # use top-5 if nothing is extracted\n # in some rare cases rnn-ext does not extract at all\n ext = list(range(5))[:len(raw_art_sents)]\n else:\n ext = [i for i in ext]\n ext_inds += [(len(ext_arts), len(ext))]\n ext_arts += [raw_art_sents[i] for i in ext]\n if beam_size > 1:\n all_beams = abstractor(ext_arts, beam_size, diverse)\n dec_outs = rerank_mp(all_beams, ext_inds)\n else:\n dec_outs = abstractor(ext_arts)\n assert i == batch_size*i_debug\n for j, n in ext_inds:\n decoded_sents = [' '.join(dec) for dec in dec_outs[j:j+n]]\n with open(join(save_path, 'output/{}.dec'.format(i)),\n 'w') as f:\n f.write(make_html_safe('\\n'.join(decoded_sents)))\n i += 1\n print('{}/{} ({:.2f}%) decoded in {} seconds\\r'.format(\n i, n_data, i/n_data*100,\n timedelta(seconds=int(time()-start))\n ), end='')\n length += len(decoded_sents)\n else:\n i = 0\n length = 0\n with torch.no_grad():\n for i_debug, raw_article_batch in enumerate(loader):\n tokenized_article_batch = map(tokenize(None), raw_article_batch)\n ext_arts = []\n ext_inds = []\n for raw_art_sents in tokenized_article_batch:\n ext = extractor(raw_art_sents)[:-1] # exclude EOE\n if not ext:\n # use top-5 if nothing is extracted\n # in some rare cases rnn-ext does not extract at all\n ext = list(range(5))[:len(raw_art_sents)]\n else:\n ext = [i.item() for i in ext]\n ext_inds += [(len(ext_arts), len(ext))]\n ext_arts += [raw_art_sents[i] for i in ext]\n if beam_size > 1:\n all_beams = abstractor(ext_arts, beam_size, diverse)\n dec_outs = rerank_mp(all_beams, ext_inds)\n else:\n dec_outs = abstractor(ext_arts)\n assert i == batch_size*i_debug\n for j, n in ext_inds:\n decoded_sents = [' '.join(dec) for dec in dec_outs[j:j+n]]\n with open(join(save_path, 'output/{}.dec'.format(i)),\n 'w') as f:\n f.write(make_html_safe('\\n'.join(decoded_sents)))\n i += 1\n print('{}/{} ({:.2f}%) decoded in {} seconds\\r'.format(\n i, n_data, i/n_data*100,\n timedelta(seconds=int(time()-start))\n ), end='')\n length += len(decoded_sents)\n print('average summary length:', length / i)\n\ndef decode_entity(save_path, model_dir, split, batch_size,\n beam_size, diverse, max_len, cuda, sc, min_len):\n start = time()\n # setup model\n with open(join(model_dir, 'meta.json')) as f:\n meta = json.loads(f.read())\n if meta['net_args']['abstractor'] is None:\n #if not meta['net_args'].__contains__('abstractor'):\n # NOTE: if no abstractor is provided then\n # the whole model would be extractive summarization\n assert beam_size == 1\n abstractor = identity\n else:\n if beam_size == 1:\n abstractor = Abstractor(join(model_dir, 'abstractor'),\n max_len, cuda)\n 
else:\n abstractor = BeamAbstractor(join(model_dir, 'abstractor'),\n max_len, cuda, min_len=min_len)\n\n if sc:\n extractor = SCExtractor(model_dir, cuda=cuda, entity=True)\n else:\n extractor = RLExtractor(model_dir, cuda=cuda)\n\n # setup loader\n def coll(batch):\n batch = list(filter(bool, batch))\n return batch\n\n if args.key == 1:\n key = 'filtered_rule1_input_mention_cluster'\n elif args.key == 2:\n key = 'filtered_rule23_6_input_mention_cluster'\n else:\n raise Exception\n dataset = DecodeDatasetEntity(split, key)\n\n n_data = len(dataset)\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=False, num_workers=4,\n collate_fn=coll\n )\n\n # prepare save paths and logs\n os.makedirs(join(save_path, 'output'))\n dec_log = {}\n dec_log['abstractor'] = meta['net_args']['abstractor']\n dec_log['extractor'] = meta['net_args']['extractor']\n dec_log['rl'] = True\n dec_log['split'] = split\n dec_log['beam'] = beam_size\n dec_log['diverse'] = diverse\n with open(join(save_path, 'log.json'), 'w') as f:\n json.dump(dec_log, f, indent=4)\n\n # Decoding\n if sc:\n i = 0\n length = 0\n sent_selected = 0\n with torch.no_grad():\n for i_debug, raw_input_batch in enumerate(loader):\n raw_article_batch, clusters = zip(*raw_input_batch)\n tokenized_article_batch = map(tokenize(None), raw_article_batch)\n #processed_clusters = map(preproc(list(tokenized_article_batch), clusters))\n #processed_clusters = list(zip(*processed_clusters))\n ext_arts = []\n ext_inds = []\n pre_abs = []\n beam_inds = []\n for raw_art_sents, raw_cls in zip(tokenized_article_batch, clusters):\n processed_clusters = preproc(raw_art_sents, raw_cls)\n ext = extractor((raw_art_sents, processed_clusters))[:] # exclude EOE\n sent_selected += len(ext)\n if not ext:\n # use top-3 if nothing is extracted\n # in some rare cases rnn-ext does not extract at all\n ext = list(range(3))[:len(raw_art_sents)]\n else:\n ext = [i for i in ext]\n ext_art = list(map(lambda i: raw_art_sents[i], ext))\n pre_abs.append([word for sent in ext_art for word in sent])\n beam_inds += [(len(beam_inds), 1)]\n\n if beam_size > 1:\n # all_beams = abstractor(ext_arts, beam_size, diverse)\n # dec_outs = rerank_mp(all_beams, ext_inds)\n all_beams = abstractor(pre_abs, beam_size, diverse=1.0)\n dec_outs = rerank_mp(all_beams, beam_inds)\n else:\n dec_outs = abstractor(pre_abs)\n for dec_out in dec_outs:\n dec_out = sent_tokenize(' '.join(dec_out))\n ext = [sent.split(' ') for sent in dec_out]\n ext_inds += [(len(ext_arts), len(ext))]\n ext_arts += ext\n\n dec_outs = ext_arts\n assert i == batch_size*i_debug\n for j, n in ext_inds:\n decoded_sents = [' '.join(dec) for dec in dec_outs[j:j+n]]\n with open(join(save_path, 'output/{}.dec'.format(i)),\n 'w') as f:\n f.write(make_html_safe('\\n'.join(decoded_sents)))\n i += 1\n print('{}/{} ({:.2f}%) decoded in {} seconds\\r'.format(\n i, n_data, i/n_data*100,\n timedelta(seconds=int(time()-start))\n ), end='')\n length += len(decoded_sents)\n else:\n i = 0\n length = 0\n with torch.no_grad():\n for i_debug, raw_article_batch in enumerate(loader):\n tokenized_article_batch = map(tokenize(None), raw_article_batch)\n ext_arts = []\n ext_inds = []\n for raw_art_sents in tokenized_article_batch:\n ext = extractor(raw_art_sents)[:-1] # exclude EOE\n if not ext:\n # use top-5 if nothing is extracted\n # in some rare cases rnn-ext does not extract at all\n ext = list(range(5))[:len(raw_art_sents)]\n else:\n ext = [i.item() for i in ext]\n ext_inds += [(len(ext_arts), len(ext))]\n ext_arts += [raw_art_sents[i] for i 
in ext]\n if beam_size > 1:\n all_beams = abstractor(ext_arts, beam_size, diverse)\n dec_outs = rerank_mp(all_beams, ext_inds)\n else:\n dec_outs = abstractor(ext_arts)\n assert i == batch_size*i_debug\n for j, n in ext_inds:\n decoded_sents = [' '.join(dec) for dec in dec_outs[j:j+n]]\n with open(join(save_path, 'output/{}.dec'.format(i)),\n 'w') as f:\n f.write(make_html_safe('\\n'.join(decoded_sents)))\n i += 1\n print('{}/{} ({:.2f}%) decoded in {} seconds\\r'.format(\n i, n_data, i/n_data*100,\n timedelta(seconds=int(time()-start))\n ), end='')\n length += len(decoded_sents)\n print('average summary length:', length / i)\n print('average sentence selected:', sent_selected)\n\n\n\n_PRUNE = defaultdict(\n lambda: 2,\n {1:5, 2:5, 3:5, 4:5, 5:5, 6:4, 7:3, 8:3}\n)\n\ndef rerank(all_beams, ext_inds):\n beam_lists = (all_beams[i: i+n] for i, n in ext_inds if n > 0)\n return list(concat(map(rerank_one, beam_lists)))\n\ndef rerank_mp(all_beams, ext_inds):\n beam_lists = [all_beams[i: i+n] for i, n in ext_inds if n > 0]\n with mp.Pool(8) as pool:\n reranked = pool.map(rerank_one, beam_lists)\n return list(concat(reranked))\n\ndef rerank_one(beams):\n @curry\n def process_beam(beam, n):\n for b in beam[:n]:\n b.gram_cnt = Counter(_make_n_gram(b.sequence))\n return beam[:n]\n beams = map(process_beam(n=_PRUNE[len(beams)]), beams)\n best_hyps = max(product(*beams), key=_compute_score)\n dec_outs = [h.sequence for h in best_hyps]\n return dec_outs\n\ndef _make_n_gram(sequence, n=2):\n return (tuple(sequence[i:i+n]) for i in range(len(sequence)-(n-1)))\n\ndef _make_tri_gram(sequence, n=3):\n return (tuple(sequence[i:i+n]) for i in range(len(sequence)-(n-1)) if '.' not in tuple(sequence[i:i+n]))\n\ndef length_wu(cur_len, alpha=0.):\n \"\"\"GNMT length re-ranking score.\n See \"Google's Neural Machine Translation System\" :cite:`wu2016google`.\n \"\"\"\n return ((5 + cur_len) / 6.0) ** alpha\n\n\ndef coverage_summary(cov, beta=0.):\n \"\"\"Our summary penalty.\"\"\"\n penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(-1)\n penalty -= cov.size(-1)\n return beta * penalty\n\ndef _compute_score(hyps):\n all_cnt = reduce(op.iadd, (h.gram_cnt for h in hyps), Counter())\n # repeat = sum(c-1 for g, c in all_cnt.items() if c > 1)\n # try:\n # lp = sum(h.logprob for h in hyps) / sum(len(h.sequence) for h in hyps)\n # except ZeroDivisionError:\n # lp = -1e5\n for h in hyps:\n if h.coverage is None:\n print(h.sequence)\n try:\n lp = sum(h.logprob for h in hyps) / sum(length_wu(len(h.sequence)+1, alpha=0.9) - coverage_summary(h.coverage, beta=5) for h in hyps)\n except ZeroDivisionError:\n lp = -1e5\n # for h in hyps:\n # print(h.sequence)\n # tri_grams = _make_tri_gram(h.sequence)\n # cnt = Counter(tri_grams)\n # if not all((cnt[g] <= 1 for g in cnt)):\n # lp = lp - 1e5\n\n # length = sum([len(h.sequence) for h in hyps]) + 1 # include EOS\n # len_pen = length_wu(length, alpha=0.9)\n # try:\n # lp = lp / len_pen - sum(coverage_summary(h.coverage, beta=5) for h in hyps)\n # except:\n # print(hyps[0].sequence)\n\n # return (-repeat, lp)\n return lp\n\n\nif __name__ == '__main__':\n torch.cuda.set_device(0)\n parser = argparse.ArgumentParser(\n description='run decoding of the full model (RL)')\n parser.add_argument('--path', required=True, help='path to store/eval')\n parser.add_argument('--model_dir', help='root of the full model')\n parser.add_argument('--sc', action='store_true', help='self critical')\n parser.add_argument('--entity', action= 'store_true', help='entity model')\n parser.add_argument('--key', 
type=int, default=2, help='use which cluster type')\n\n # dataset split\n data = parser.add_mutually_exclusive_group(required=True)\n data.add_argument('--val', action='store_true', help='use validation set')\n data.add_argument('--test', action='store_true', help='use test set')\n\n # decode options\n parser.add_argument('--batch', type=int, action='store', default=32,\n help='batch size of faster decoding')\n parser.add_argument('--beam', type=int, action='store', default=5,\n help='beam size for beam-search (reranking included)')\n parser.add_argument('--div', type=float, action='store', default=1.0,\n help='diverse ratio for the diverse beam-search')\n parser.add_argument('--max_dec_word', type=int, action='store', default=100,\n help='maximun words to be decoded for the abstractor')\n parser.add_argument('--min_dec_word', type=int, action='store', default=0,\n help='maximun words to be decoded for the abstractor')\n\n parser.add_argument('--no-cuda', action='store_true',\n help='disable GPU training')\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available() and not args.no_cuda\n\n data_split = 'test' if args.test else 'val'\n if args.entity:\n decode_entity(args.path, args.model_dir,\n data_split, args.batch, args.beam, args.div,\n args.max_dec_word, args.cuda, args.sc, args.min_dec_word)\n else:\n decode(args.path, args.model_dir,\n data_split, args.batch, args.beam, args.div,\n args.max_dec_word, args.cuda, args.sc, args.min_dec_word)\n","repo_name":"luyang-huang96/EntityDrivenSumm","sub_path":"decode_full_model.py","file_name":"decode_full_model.py","file_ext":"py","file_size_in_byte":17085,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"18"} +{"seq_id":"36453626732","text":"import pandas as pd\nimport numpy as np\n\ndf = pd.read_excel('ml\\is_valid\\output.xlsx')\n\ndel df['Unnamed: 0']\ndf = np.array(df)\n\ndf_1 = pd.DataFrame(columns=['href', 'data', 'is_valid'], data=[i for i in df if i[-1]==1])\ndf_1.to_excel('ml/aaa/output.xlsx')\n\n\n","repo_name":"alekstarat/bitcoin-cost-predict-AI","sub_path":"ml/correlate_course/dataParse.py","file_name":"dataParse.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40347283450","text":"isp = {'*': 2, '/': 2, '+': 1, '-': 1,'(': 0 }\nicp = {'*': 2, '/': 2, '+': 1, '-': 1,'(': 3 }\n# (6+5*(2-8)/2)\n# '2+3*4/5' >> 234*5/+\n# 2 3 4 5 / * +\ndata = input()\nstack = []\n#피연산자는 그냥 출력\n#연산자는 우선순위에 따라서 stack넣거나, 빼고 넣기\n# stack의 top의 우선순위보다 token의 우선순위가 높으면 그냥 stack에 넣어주기\n# 아니면...높거나 같은애들은 다빼고 낮은애가 나오면 push\n# 닫히는 괄호나오면 여는 괄호나올때 까지 pop 하면서 출력하기\nfor i in range(len(data)):\n # data[i]\n if data[i] in '0123456789':\n print(data[i],end='')\n else: # 연산자\n if data[i] == ')':\n #여는 괄호가 나올때까지 pop하면서 연산자 출력\n while stack[-1] != '(':\n print(stack.pop(),end='')\n #여는 괄호 버리기\n stack.pop()\n elif not stack: #스택이 비어있으면\n stack.append(data[i])\n else: # 스택이 비어있지 않으면 우선순위를 따집시다.\n if isp[stack[-1]]< icp[data[i]]:\n stack.append(data[i])\n else:\n # 나보다 작은애가 나올때 까지 pop하면서 출력 + 스택이 비어있지 않으면서\n while stack and isp[stack[-1]] >= icp[data[i]]:\n print(stack.pop(),end='')\n stack.append(data[i])\n#수식을 다 읽었을 때 stack에 연산자가 남아 있으면 pop() 하면서 출력\nwhile stack:\n 
print(stack.pop(),end='')\nprint()\n","repo_name":"OneDayOneAlgorithm/algorithm-project","sub_path":"0215/후위표기식.py","file_name":"후위표기식.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4115208176","text":"st = open('24_66.txt').readline().strip()\n# st = st.replace('KOT', '*').replace('K', ' ').replace('O', ' ').replace('T', ' ').split()\n# print(max(len(i) for i in st))\nmaxx = float('-inf')\n\nfor j in 0, 1, 2:\n count = 0\n for i in range(j, len(st) - 2, 3):\n if st[i : i + 3] == 'KOT':\n count += 1\n maxx = max(maxx, count)\n else:\n count = 0\nprint(maxx)","repo_name":"Propolisss/home_code-python-","sub_path":"kege/24/subs/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31861385239","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport ray\nfrom ray.rllib.evaluation.postprocessing import compute_advantages\nfrom ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph, \\\n LearningRateSchedule\nfrom ray.rllib.models.catalog import ModelCatalog\nfrom ray.rllib.utils.explained_variance import explained_variance\n\n\nclass PPOLoss(object):\n def __init__(self,\n action_space,\n value_targets,\n advantages,\n actions,\n logits,\n vf_preds,\n curr_action_dist,\n value_fn,\n cur_kl_coeff,\n valid_mask,\n entropy_coeff=0,\n clip_param=0.1,\n vf_clip_param=0.1,\n vf_loss_coeff=1.0,\n use_gae=True):\n \"\"\"Constructs the loss for Proximal Policy Objective.\n\n Arguments:\n action_space: Environment observation space specification.\n value_targets (Placeholder): Placeholder for target values; used\n for GAE.\n actions (Placeholder): Placeholder for actions taken\n from previous model evaluation.\n advantages (Placeholder): Placeholder for calculated advantages\n from previous model evaluation.\n logits (Placeholder): Placeholder for logits output from\n previous model evaluation.\n vf_preds (Placeholder): Placeholder for value function output\n from previous model evaluation.\n curr_action_dist (ActionDistribution): ActionDistribution\n of the current model.\n value_fn (Tensor): Current value function output Tensor.\n cur_kl_coeff (Variable): Variable holding the current PPO KL\n coefficient.\n valid_mask (Tensor): A bool mask of valid input elements (#2992).\n entropy_coeff (float): Coefficient of the entropy regularizer.\n clip_param (float): Clip parameter\n vf_clip_param (float): Clip parameter for the value function\n vf_loss_coeff (float): Coefficient of the value function loss\n use_gae (bool): If true, use the Generalized Advantage Estimator.\n \"\"\"\n\n def reduce_mean_valid(t):\n return tf.reduce_mean(tf.boolean_mask(t, valid_mask))\n\n dist_cls, _ = ModelCatalog.get_action_dist(action_space, {})\n prev_dist = dist_cls(logits)\n # Make loss functions.\n logp_ratio = tf.exp(\n curr_action_dist.logp(actions) - prev_dist.logp(actions))\n action_kl = prev_dist.kl(curr_action_dist)\n self.mean_kl = reduce_mean_valid(action_kl)\n\n curr_entropy = curr_action_dist.entropy()\n self.mean_entropy = reduce_mean_valid(curr_entropy)\n\n surrogate_loss = tf.minimum(\n advantages * logp_ratio,\n advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,\n 1 + clip_param))\n self.mean_policy_loss = reduce_mean_valid(-surrogate_loss)\n\n if use_gae:\n vf_loss1 = 
tf.square(value_fn - value_targets)\n vf_clipped = vf_preds + tf.clip_by_value(\n value_fn - vf_preds, -vf_clip_param, vf_clip_param)\n vf_loss2 = tf.square(vf_clipped - value_targets)\n vf_loss = tf.maximum(vf_loss1, vf_loss2)\n self.mean_vf_loss = reduce_mean_valid(vf_loss)\n loss = reduce_mean_valid(\n -surrogate_loss + cur_kl_coeff * action_kl +\n vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy)\n else:\n self.mean_vf_loss = tf.constant(0.0)\n loss = reduce_mean_valid(-surrogate_loss +\n cur_kl_coeff * action_kl -\n entropy_coeff * curr_entropy)\n self.loss = loss\n\n\nclass PPOPolicyGraph(LearningRateSchedule, TFPolicyGraph):\n def __init__(self,\n observation_space,\n action_space,\n config,\n existing_inputs=None):\n \"\"\"\n Arguments:\n observation_space: Environment observation space specification.\n action_space: Environment action space specification.\n config (dict): Configuration values for PPO graph.\n existing_inputs (list): Optional list of tuples that specify the\n placeholders upon which the graph should be built upon.\n \"\"\"\n config = dict(ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, **config)\n self.sess = tf.get_default_session()\n self.action_space = action_space\n self.config = config\n self.kl_coeff_val = self.config[\"kl_coeff\"]\n self.kl_target = self.config[\"kl_target\"]\n dist_cls, logit_dim = ModelCatalog.get_action_dist(\n action_space, self.config[\"model\"])\n\n if existing_inputs:\n obs_ph, value_targets_ph, adv_ph, act_ph, \\\n logits_ph, vf_preds_ph, prev_actions_ph, prev_rewards_ph = \\\n existing_inputs[:8]\n existing_state_in = existing_inputs[8:-1]\n existing_seq_lens = existing_inputs[-1]\n else:\n obs_ph = tf.placeholder(\n tf.float32,\n name=\"obs\",\n shape=(None, ) + observation_space.shape)\n adv_ph = tf.placeholder(\n tf.float32, name=\"advantages\", shape=(None, ))\n act_ph = ModelCatalog.get_action_placeholder(action_space)\n logits_ph = tf.placeholder(\n tf.float32, name=\"logits\", shape=(None, logit_dim))\n vf_preds_ph = tf.placeholder(\n tf.float32, name=\"vf_preds\", shape=(None, ))\n value_targets_ph = tf.placeholder(\n tf.float32, name=\"value_targets\", shape=(None, ))\n prev_actions_ph = ModelCatalog.get_action_placeholder(action_space)\n prev_rewards_ph = tf.placeholder(\n tf.float32, [None], name=\"prev_reward\")\n existing_state_in = None\n existing_seq_lens = None\n self.observations = obs_ph\n\n self.loss_in = [\n (\"obs\", obs_ph),\n (\"value_targets\", value_targets_ph),\n (\"advantages\", adv_ph),\n (\"actions\", act_ph),\n (\"logits\", logits_ph),\n (\"vf_preds\", vf_preds_ph),\n (\"prev_actions\", prev_actions_ph),\n (\"prev_rewards\", prev_rewards_ph),\n ]\n self.model = ModelCatalog.get_model(\n {\n \"obs\": obs_ph,\n \"prev_actions\": prev_actions_ph,\n \"prev_rewards\": prev_rewards_ph\n },\n observation_space,\n logit_dim,\n self.config[\"model\"],\n state_in=existing_state_in,\n seq_lens=existing_seq_lens)\n\n # KL Coefficient\n self.kl_coeff = tf.get_variable(\n initializer=tf.constant_initializer(self.kl_coeff_val),\n name=\"kl_coeff\",\n shape=(),\n trainable=False,\n dtype=tf.float32)\n\n self.logits = self.model.outputs\n curr_action_dist = dist_cls(self.logits)\n self.sampler = curr_action_dist.sample()\n if self.config[\"use_gae\"]:\n if self.config[\"vf_share_layers\"]:\n self.value_function = self.model.value_function()\n else:\n vf_config = self.config[\"model\"].copy()\n # Do not split the last layer of the value function into\n # mean parameters and standard deviation parameters and\n # do not 
make the standard deviations free variables.\n vf_config[\"free_log_std\"] = False\n vf_config[\"use_lstm\"] = False\n with tf.variable_scope(\"value_function\"):\n self.value_function = ModelCatalog.get_model({\n \"obs\": obs_ph,\n \"prev_actions\": prev_actions_ph,\n \"prev_rewards\": prev_rewards_ph\n }, observation_space, 1, vf_config).outputs\n self.value_function = tf.reshape(self.value_function, [-1])\n else:\n self.value_function = tf.zeros(shape=tf.shape(obs_ph)[:1])\n\n if self.model.state_in:\n max_seq_len = tf.reduce_max(self.model.seq_lens)\n mask = tf.sequence_mask(self.model.seq_lens, max_seq_len)\n mask = tf.reshape(mask, [-1])\n else:\n mask = tf.ones_like(adv_ph)\n\n self.loss_obj = PPOLoss(\n action_space,\n value_targets_ph,\n adv_ph,\n act_ph,\n logits_ph,\n vf_preds_ph,\n curr_action_dist,\n self.value_function,\n self.kl_coeff,\n mask,\n entropy_coeff=self.config[\"entropy_coeff\"],\n clip_param=self.config[\"clip_param\"],\n vf_clip_param=self.config[\"vf_clip_param\"],\n vf_loss_coeff=self.config[\"vf_loss_coeff\"],\n use_gae=self.config[\"use_gae\"])\n\n LearningRateSchedule.__init__(self, self.config[\"lr\"],\n self.config[\"lr_schedule\"])\n TFPolicyGraph.__init__(\n self,\n observation_space,\n action_space,\n self.sess,\n obs_input=obs_ph,\n action_sampler=self.sampler,\n loss=self.loss_obj.loss,\n loss_inputs=self.loss_in,\n state_inputs=self.model.state_in,\n state_outputs=self.model.state_out,\n prev_action_input=prev_actions_ph,\n prev_reward_input=prev_rewards_ph,\n seq_lens=self.model.seq_lens,\n max_seq_len=config[\"model\"][\"max_seq_len\"])\n\n self.sess.run(tf.global_variables_initializer())\n self.explained_variance = explained_variance(value_targets_ph,\n self.value_function)\n self.stats_fetches = {\n \"cur_lr\": tf.cast(self.cur_lr, tf.float64),\n \"total_loss\": self.loss_obj.loss,\n \"policy_loss\": self.loss_obj.mean_policy_loss,\n \"vf_loss\": self.loss_obj.mean_vf_loss,\n \"vf_explained_var\": self.explained_variance,\n \"kl\": self.loss_obj.mean_kl,\n \"entropy\": self.loss_obj.mean_entropy\n }\n\n def copy(self, existing_inputs):\n \"\"\"Creates a copy of self using existing input placeholders.\"\"\"\n return PPOPolicyGraph(\n self.observation_space,\n self.action_space,\n self.config,\n existing_inputs=existing_inputs)\n\n def extra_compute_action_fetches(self):\n return {\"vf_preds\": self.value_function, \"logits\": self.logits}\n\n def extra_compute_grad_fetches(self):\n return self.stats_fetches\n\n def update_kl(self, sampled_kl):\n if sampled_kl > 2.0 * self.kl_target:\n self.kl_coeff_val *= 1.5\n elif sampled_kl < 0.5 * self.kl_target:\n self.kl_coeff_val *= 0.5\n self.kl_coeff.load(self.kl_coeff_val, session=self.sess)\n return self.kl_coeff_val\n\n def value(self, ob, *args):\n feed_dict = {self.observations: [ob], self.model.seq_lens: [1]}\n assert len(args) == len(self.model.state_in), \\\n (args, self.model.state_in)\n for k, v in zip(self.model.state_in, args):\n feed_dict[k] = v\n vf = self.sess.run(self.value_function, feed_dict)\n return vf[0]\n\n def postprocess_trajectory(self,\n sample_batch,\n other_agent_batches=None,\n episode=None):\n completed = sample_batch[\"dones\"][-1]\n if completed:\n last_r = 0.0\n else:\n next_state = []\n for i in range(len(self.model.state_in)):\n next_state.append([sample_batch[\"state_out_{}\".format(i)][-1]])\n last_r = self.value(sample_batch[\"new_obs\"][-1], *next_state)\n batch = compute_advantages(\n sample_batch,\n last_r,\n self.config[\"gamma\"],\n self.config[\"lambda\"],\n 
use_gae=self.config[\"use_gae\"])\n return batch\n\n def gradients(self, optimizer):\n return optimizer.compute_gradients(\n self._loss, colocate_gradients_with_ops=True)\n\n def get_initial_state(self):\n return self.model.state_init\n","repo_name":"llan-ml/tesp","sub_path":"ray/rllib/agents/ppo/ppo_policy_graph.py","file_name":"ppo_policy_graph.py","file_ext":"py","file_size_in_byte":12624,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"18"} +{"seq_id":"39838453809","text":"from django.shortcuts import get_object_or_404\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view,permission_classes\nfrom rest_framework.response import Response\nfrom .models import Room,Booking\n#, Booking, CheckIn\nfrom .serializer import RoomSerializer, BookingSerializer\n#, CheckinSerializer\nfrom rest_framework.permissions import AllowAny, IsAuthenticated, IsAdminUser\nfrom rest_framework import status\nfrom datetime import datetime\n\n@api_view(['GET'])\ndef RoomView(request):\n rooms=Room.objects.all()\n serializer=RoomSerializer(rooms,many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef RoomDetailView(request,pk):\n room=Room.objects.get(pk=pk)\n serializer=RoomSerializer(room,context={\"request\":request})\n return Response(serializer.data)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef BookingCreateApiView(request):\n user = request.user\n data = request.data\n roomname=data['room']['name']\n booking = Booking.objects.create(\n customer=user,\n bookingname=data['bookingname'],\n room=roomname,\n checking_date=data['checking_date'],\n checkout_date=data['checkout_date'],\n phone_number=data['phone_number'],\n email=data['email'],\n totalPrice=data['totalprice'],\n )\n serializer=BookingSerializer(booking,many=False)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getBookingByIdView(request, pk):\n\n user = request.user\n\n try:\n booking = Booking.objects.get(id=pk)\n if user.is_staff or booking.customer == user:\n serializer = BookingSerializer(booking, many=False)\n return Response(serializer.data)\n else:\n Response({'detail': 'Not authorized to view this booking'},\n status=status.HTTP_400_BAD_REQUEST)\n except:\n return Response({'detail': 'Booking does not exist'}, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getMyBookingsView(request):\n user = request.user\n bookings = user.booking_set.all()\n serializer = BookingSerializer(bookings, many=True)\n return Response(serializer.data)\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef updateBookingToPaidView(request, pk):\n booking = Booking.objects.get(id=pk)\n\n booking.isPaid = True\n booking.paidAt = datetime.now()\n booking.save()\n\n return Response('Booking was paid')","repo_name":"VRAJPATEL100/HOTEL-ROOM-BOOKING-WEBSITE-WITH-BACKEND","sub_path":"backend/hotel_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15895169994","text":"# Python\n#\n# This module implements tests for TableOfContents class.\n#\n# This file is part of mdutils. 
https://github.com/didix21/mdutils\n#\n# MIT License: (C) 2018 Dídac Coll\n\nfrom unittest import TestCase\nfrom mdutils.tools.TableOfContents import TableOfContents\n\n__author__ = \"didix21\"\n__project__ = \"MdUtils\"\n\n\nclass TestTableOfContents(TestCase):\n def test_create_table_of_contents(self):\n array_of_contents = [\n \"Results Tests\",\n [],\n \"Test Details\",\n [\"Test 1\", [], \"Test 2\", [], \"Test 3\", [], \"Test 4\", []],\n ]\n expects = (\n \"\\n* [Results Tests](#results-tests)\\n\"\n \"* [Test Details](#test-details)\\n\\t\"\n \"* [Test 1](#test-1)\\n\\t\"\n \"* [Test 2](#test-2)\\n\\t\"\n \"* [Test 3](#test-3)\\n\\t\"\n \"* [Test 4](#test-4)\\n\"\n )\n\n table_of_contents = TableOfContents()\n self.assertEqual(\n table_of_contents.create_table_of_contents(array_of_contents, depth=2),\n expects,\n )\n\n def test_table_of_contents_with_colon(self):\n array_of_contents = [\"My header: 1\"]\n expects = \"\\n* [My header: 1](#my-header-1)\\n\"\n\n self.assertEqual(\n TableOfContents().create_table_of_contents(array_of_contents), expects\n )\n\n def test_table_of_contents_with_dot(self):\n array_of_contents = [\"My.header 1.1\"]\n expects = \"\\n* [My.header 1.1](#myheader-11)\\n\"\n\n self.assertEqual(\n TableOfContents().create_table_of_contents(array_of_contents), expects\n )\n\n def test_table_of_contents_with_back_slash(self):\n array_of_contents = [\"My\\header 1\"]\n expects = \"\\n* [My\\header 1](#myheader-1)\\n\"\n\n self.assertEqual(\n TableOfContents().create_table_of_contents(array_of_contents), expects\n )\n\n def test_table_of_contents_with_hyphen(self):\n array_of_contents = [\"My-header-1 pop\"]\n expects = \"\\n* [My-header-1 pop](#my-header-1-pop)\\n\"\n\n self.assertEqual(\n TableOfContents().create_table_of_contents(array_of_contents), expects\n )\n","repo_name":"didix21/mdutils","sub_path":"tests/test_tools/test_table_of_contents.py","file_name":"test_table_of_contents.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"18"} +{"seq_id":"70911640040","text":"import bpy, os, platform, subprocess\n\nclass TLM_Optix_Denoise:\n\n image_array = []\n\n image_output_destination = \"\"\n\n denoised_array = []\n\n def __init__(self, optixProperties, img_array, dirpath):\n\n self.optixProperties = optixProperties\n\n self.image_array = img_array\n\n self.image_output_destination = dirpath\n\n self.check_binary()\n\n def check_binary(self):\n\n optixPath = self.optixProperties.tlm_optix_path\n\n if optixPath != \"\":\n\n file = optixPath\n filename, file_extension = os.path.splitext(file)\n\n if(file_extension == \".exe\"):\n\n #if file exists optixDenoise or denoise\n\n pass\n\n else:\n\n #if file exists optixDenoise or denoise\n\n self.optixProperties.tlm_optix_path = os.path.join(self.optixProperties.tlm_optix_path,\"Denoiser.exe\")\n\n else:\n if bpy.context.scene.TLM_SceneProperties.tlm_verbose:\n print(\"Please provide Optix path\")\n\n def denoise(self):\n\n if bpy.context.scene.TLM_SceneProperties.tlm_verbose:\n print(\"Optix: Denoising\")\n for image in self.image_array:\n\n if image not in self.denoised_array:\n\n image_path = os.path.join(self.image_output_destination, image)\n\n denoise_output_destination = image_path[:-10] + \"_denoised.hdr\"\n\n if platform.system() == 'Windows':\n optixPath = bpy.path.abspath(self.optixProperties.tlm_optix_path)\n pipePath = [optixPath, '-i', image_path, '-o', denoise_output_destination]\n elif platform.system() == 'Darwin':\n 
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:\n print(\"Mac for Optix is still unsupported\") \n else:\n if bpy.context.scene.TLM_SceneProperties.tlm_verbose:\n print(\"Linux for Optix is still unsupported\")\n\n if self.optixProperties.tlm_optix_verbose:\n denoisePipe = subprocess.Popen(pipePath, shell=True)\n else:\n denoisePipe = subprocess.Popen(pipePath, stdout=subprocess.PIPE, stderr=None, shell=True)\n\n denoisePipe.communicate()[0]\n \n image = bpy.data.images.load(image_path, check_existing=False)\n bpy.data.images[image.name].filepath_raw = bpy.data.images[image.name].filepath_raw[:-4] + \"_denoised.hdr\"\n bpy.data.images[image.name].reload()\n\n def clean(self):\n\n self.denoised_array.clear()\n self.image_array.clear()\n\n for file in self.image_output_destination:\n if file.endswith(\"_baked.hdr\"):\n baked_image_array.append(file)\n\n #self.image_output_destination\n\n #Clean temporary files here..\n #...pfm\n #...denoised.hdr","repo_name":"armory3d/armory","sub_path":"blender/arm/lightmapper/utility/denoiser/optix.py","file_name":"optix.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":2916,"dataset":"github-code","pt":"18"} +{"seq_id":"9770728735","text":"#! /usr/bin/env python3\n\nimport datetime\nimport xml.etree.cElementTree as et\nimport logging as lg\nimport pandas as pd\nimport sys\n\nfrom pathlib import Path\n\nlg.basicConfig(level=lg.DEBUG)\n\ndef main():\n \"\"\" Bundles the various methods \"\"\"\n\n # parsing file\n if len(sys.argv) != 2:\n lg.error('Usage: $ setup ')\n else:\n try:\n filepath = Path(sys.argv[1]).resolve()\n except FileNotFoundError:\n lg.error(f\"File {filepath} not found.\")\n else:\n lg.info(f\"File {filepath} found\")\n\n # initialize DataFrame\n currency_cols = [\"id\", \"name\", \"symbol\", \"trigram\"]\n currency_df = pd.DataFrame(columns=currency_cols)\n account_cols = [\"id\", \"name\", \"currency\", \"initial_balance\"]\n account_df = pd.DataFrame(columns=account_cols)\n # NB: we didn't create payment_df\n transaction_cols = [\"id\", \"account\", \"date\", \"value_date\", \"currency\", \"amount\", \"exchange_rate\", \"exchange_fee\", \"category\", \"subcategory\", \"budgetary\", \"subbudgetary\", \"corresponding_transaction\", \"transaction_id\", \"payee\", \"exb\", \"split\", \"note\", \"pn\", \"pc\", \"ma\", \"ar\", \"au\", \"re\", \"fi\", \"vo\", \"ba\", \"mo\"]\n transaction_df = pd.DataFrame(columns=transaction_cols)\n party_cols = [\"id\", \"name\"]\n party_df = pd.DataFrame(columns=party_cols)\n category_cols = [\"id\", \"name\"]\n category_df = pd.DataFrame(columns=category_cols)\n subcategory_cols = [\"id\", \"parent\", \"scatg_id\", \"name\"]\n subcategory_df = pd.DataFrame(columns=subcategory_cols)\n budgetary_cols = [\"id\", \"name\"]\n budgetary_df = pd.DataFrame(columns=budgetary_cols)\n subbudgetary_cols = [\"id\", \"parent\", \"sbudg_id\", \"name\"]\n subbudgetary_df = pd.DataFrame(columns=subbudgetary_cols)\n currency_link_cols = [\"id\", \"from\", \"to\", \"value\"]\n currency_link_df = pd.DataFrame(columns=currency_link_cols)\n\n # parse file\n with open(filepath, 'r') as gsb_file:\n # retreive root of the xml file\n tree = et.parse(gsb_file)\n tree_root = tree.getroot()\n\n # compute number of nodes and initialize node parsed counter\n nb_child = len(tree_root.getchildren())\n current_child = 0\n\n lg.info(f\"Start parsing {filepath}, that has {nb_child} nodes.\")\n\n for child in tree_root:\n current_child +=1\n lg.debug(f\"Parsing node 
{current_child}/{nb_child}.\")\n\n # check children tag and parse its attributes into relevant df accordingly\n if child.tag == \"Currency\":\n # read data from child node\n id_number = child.attrib.get(\"Nb\")\n name = child.attrib.get(\"Na\")\n symbol = child.attrib.get(\"Co\")\n trigram = child.attrib.get(\"Ico\")\n\n # append data as a new line of the df\n currency_df = currency_df.append(\n pd.Series([id_number, name, symbol, trigram], index=currency_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Account\":\n \"id\", \"name\", \"currency\", \"initial_balance\"\n id_number = child.attrib.get(\"Number\")\n name = child.attrib.get(\"Name\")\n currency = child.attrib.get(\"Currency\")\n initial_balance = float(child.attrib.get(\"Initial_balance\"))\n\n account_df = account_df.append(\n pd.Series([id_number, name, currency, initial_balance], index=account_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Transaction\":\n id_number = child.attrib.get(\"Nb\")\n account = child.attrib.get(\"Ac\")\n # the \"Dt\" attribute is formatted \"12/31/1999\" (gasp!) alike\n _date_string = child.attrib.get(\"Dt\")\n # if no date, assume it's tomorrow\n if _date_string == '':\n date = datetime.date.today() + datetime.timedelta(days=1)\n else:\n date = datetime.datetime.strptime(_date_string, \"%m/%d/%Y\")\n _value_date_string = child.attrib.get(\"Dv\")\n # if no date, assume it's tomorrow\n if _value_date_string == '':\n value_date = datetime.date.today() + datetime.timedelta(days=1)\n else:\n value_date = datetime.datetime.strptime(_value_date_string, \"%m/%d/%Y\")\n currency = child.attrib.get(\"Cu\")\n amount = float(child.attrib.get(\"Am\"))\n exchange_rate = float(child.attrib.get(\"Exr\"))\n exchange_fee = float(child.attrib.get(\"Exf\"))\n category = child.attrib.get(\"Ca\")\n subcategory = child.attrib.get(\"Sca\")\n budgetary = child.attrib.get(\"Bu\")\n subbudgetary = child.attrib.get(\"Sbu\")\n corresponding_transaction = child.attrib.get(\"Trt\")\n transaction_id = child.attrib.get(\"Id\")\n payee = child.attrib.get(\"Pa\")\n exb = child.attrib.get(\"Exb\")\n split = bool(int(child.attrib.get(\"Br\")))\n note = child.attrib.get(\"No\")\n pn = child.attrib.get(\"Pn\")\n pc = child.attrib.get(\"Pc\")\n ma = child.attrib.get(\"Ma\")\n ar = child.attrib.get(\"Ar\")\n au = child.attrib.get(\"Au\")\n re = child.attrib.get(\"Re\")\n fi = child.attrib.get(\"Fi\")\n vo = child.attrib.get(\"Vo\")\n ba = child.attrib.get(\"Ba\")\n mo = child.attrib.get(\"Mo\")\n\n transaction_df = transaction_df.append(\n pd.Series([id_number, account, date, value_date, currency, amount, exchange_rate, exchange_fee, category, subcategory, budgetary, subbudgetary, corresponding_transaction, transaction_id, payee, exb, split, note, pn, pc, ma, ar, au, re, fi, vo, ba, mo], index=transaction_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Party\":\n id_number = child.attrib.get(\"Nb\")\n name = child.attrib.get(\"Na\")\n\n party_df = party_df.append(\n pd.Series([id_number, name], index=party_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Category\":\n id_number = child.attrib.get(\"Nb\")\n name = child.attrib.get(\"Na\")\n\n category_df = category_df.append(\n pd.Series([id_number, name], index=category_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Sub_category\":\n id_number = child.attrib.get(\"Nbc\") + \"_\" + child.attrib.get(\"Nb\")\n parent = child.attrib.get(\"Nbc\")\n scatg_id = child.attrib.get(\"Nb\")\n name = child.attrib.get(\"Na\")\n\n subcategory_df = 
subcategory_df.append(\n pd.Series([id_number, parent, scatg_id, name], index=subcategory_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Budgetary\":\n id_number = child.attrib.get(\"Nb\")\n name = child.attrib.get(\"Na\")\n\n budgetary_df = budgetary_df.append(\n pd.Series([id_number, name], index=budgetary_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Sub_budgetary\":\n id_number = child.attrib.get(\"Nbb\") + \"_\" + child.attrib.get(\"Nb\")\n parent = child.attrib.get(\"Nbb\")\n sbudg_id = child.attrib.get(\"Nb\")\n name = child.attrib.get(\"Na\")\n\n subbudgetary_df = subbudgetary_df.append(\n pd.Series([id_number, parent, sbudg_id, name], index=subbudgetary_cols),\n ignore_index=True\n )\n\n elif child.tag == \"Currency_link\":\n id_number = child.attrib.get(\"Nb\")\n from_curr = child.attrib.get(\"Cu1\")\n to_curr = child.attrib.get(\"Cu2\")\n value = float(child.attrib.get(\"Ex\"))\n\n currency_link_df = currency_link_df.append(\n pd.Series([id_number, from_curr, to_curr, value], index=currency_link_cols),\n ignore_index=True\n )\n\n else:\n continue\n\n lg.info(\"Parsing done.\")\n\n # create a global DataFrame\n\n # export DataFrame\n\nif __name__ == '__main__':\n main()\n","repo_name":"EBoisseauSierra/grisbi-Rgraph","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":9782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73929437479","text":"import tkinter as tk\nfrom tkinter.ttk import *\n\nfrom PresentationLayer.Entities.Person import Person\nfrom LogicLayer.LL_tblFine import tblFine_LogicLayer\n\nfrom PIL import Image, ImageTk\n\nclass Window:\n def __init__(self, master, person = Person()) -> None:\n infoFrame = Frame(master)\n infoFrame.pack(side='left', anchor='nw',padx=5,pady=5)\n\n self.fileName = person.Id\n self.fileExtention = person.PhotoExtention\n\n self.pictureFrame = Frame(infoFrame)\n self.pictureFrame.pack(side='top', anchor='nw',padx=5,pady=5)\n\n self.imageLabel =Label(self.pictureFrame)\n self.imageLabel.pack(side='left', anchor='nw',padx=5,pady=5)\n \n\n personInfoFrame = Frame(infoFrame)\n personInfoFrame.pack(side='top', anchor='nw',padx=5,pady=5)\n \n label = Label(personInfoFrame, text=\"کد ملی:\")\n label.grid(row = 0 , column= 0,ipadx=20,ipady=10)\n\n label = Label(personInfoFrame, text=person.Id)\n label.grid(row = 0 , column= 1,ipadx=20,ipady=10)\n\n lable = Label(personInfoFrame, text=\"نام:\")\n lable.grid(row = 1 , column= 0,ipadx=20,ipady=10)\n \n lable = Label(personInfoFrame, text=person.FirstName)\n lable.grid(row = 1 , column= 1,ipadx=20,ipady=10)\n\n lable = Label(personInfoFrame, text=\"نام خانوادگی:\")\n lable.grid(row = 2 , column= 0,ipadx=20,ipady=10)\n\n lable = Label(personInfoFrame, text=person.LastName)\n lable.grid(row = 2 , column= 1,ipadx=20,ipady=10)\n\n self.LoadImage(person.Photo, person)\n\n fineInfoFrame = Frame(master)\n fineInfoFrame.pack(side='left', anchor='nw',padx=5,pady=5)\n\n cols = ('plate','cost')\n self.treeview = Treeview(fineInfoFrame, columns=cols,selectmode='browse', show= 'headings')\n self.treeview.pack(side = 'left', anchor='nw',padx=5, pady=5, fill = 'both')\n\n self.treeview.column('plate',anchor='center', stretch=False, width=80)\n self.treeview.heading('plate', text=\"شماره پلاک\")\n self.treeview.column('cost',anchor='center', stretch=False, width=100)\n self.treeview.heading('cost', text=\"مجموع جریمه\")\n\n self.LoadTable(person.Id)\n\n def LoadImage(self, image, person = Person()):\n path = 
save_image(image, person.Id, person.PhotoExtention)\n self.newImage = Image.open(path)\n self.newImage = self.newImage.resize((150,200), Image.ANTIALIAS)\n self.python_newImage = ImageTk.PhotoImage(master = self.pictureFrame,image= self.newImage)\n self.imageLabel.configure(image=self.python_newImage)\n\n def LoadTable(self, id):\n llfine = tblFine_LogicLayer()\n data = llfine.identify(id)\n self.treeview.delete(*self.treeview.get_children())\n for index , row in enumerate(data):\n self.treeview.insert('', tk.END, text = row[0], values=(row[1],row[2]))\n\ndef save_image(image, file_name, file_Extention):\n with open(f\"Resources/temp/{file_name}.{file_Extention}\", 'wb') as file:\n file.write(image) \n return f\"Resources/temp/{file_name}.{file_Extention}\"\n \nclass MainWindow:\n def __init__(self, person = None) -> None:\n self.root = tk.Tk()\n self.window = Window(self.root, person)\n\n\n def configStyle(self):\n self.style = Style(self.root)\n\n self.style.configure(\n \"Treeview\",\n rowheight = 40,\n font = ('Arial',10))\n self.style.map(\"Treeview\")\n\n self.style.configure(\n 'btnStyle.TButton',\n font =('Arial', 13))\n \n self.style.configure(\n 'TLabel',\n font =('Arial', 13)\n )\n\n def startWindow(self):\n self.configStyle()\n self.root.title('Person Profile')\n self.root.geometry(\"800x500\")\n self.root.mainloop()\n","repo_name":"alirezaf2001/DatabaseProject","sub_path":"PresentationLayer/PersonProfileWindow.py","file_name":"PersonProfileWindow.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20694697514","text":"import argparse\ndef process(log_dir,sub_dirs):\n res={'avg':[],\\\n '50th':[],\\\n '60th':[],\\\n '70th':[],\\\n '80th':[],\\\n '90th':[],\\\n '99th':[],\\\n 'avg_latency_to_eb':[],\\\n 'avg_latency_from_eb':[]}\n for sub_dir in sub_dirs:\n latency_file='%s/%s/summary/overall_performance.csv'%(log_dir,sub_dir)\n with open(latency_file) as f:\n #skip header\n next(f)\n line=next(f)\n avg_latency,min_latency,max_latency,\\\n latency_50_percentile,latency_60_percentile,\\\n latency_70_percentile,latency_80_percentile,\\\n latency_90_percentile,latency_99_percentile,\\\n latency_99_99_percentile,latency_99_9999_percentile,\\\n avg_latency_to_eb,avg_latency_from_eb= line.split(',')\n res['avg'].append(float(avg_latency))\n res['50th'].append(float(latency_50_percentile))\n res['60th'].append(float(latency_60_percentile))\n res['70th'].append(float(latency_70_percentile))\n res['80th'].append(float(latency_80_percentile))\n res['90th'].append(float(latency_90_percentile))\n res['99th'].append(float(latency_99_percentile))\n res['avg_latency_to_eb'].append(float(avg_latency_to_eb))\n res['avg_latency_from_eb'].append(float(avg_latency_from_eb))\n\n measurements=['avg','50th','60th','70th','80th','90th','99th','avg_latency_to_eb','avg_latency_from_eb']\n with open('%s/overall_performance.csv'%(log_dir),'w') as f:\n f.write('xticks:%s\\n'%(','.join(sub_dir for sub_dir in sub_dirs)))\n for measurement in measurements:\n f.write('%s,%s\\n'%(measurement,','.join(str(val) for val in res[measurement])))\n\nif __name__== \"__main__\":\n #parse cmd line args\n parser=argparse.ArgumentParser(description='script for collating results across test iterations')\n parser.add_argument('-log_dir',help='path to log directory',required=True)\n parser.add_argument('-sub_dirs',nargs='*',required=True)\n args=parser.parse_args()\n \n 
process(args.log_dir,args.sub_dirs)\n","repo_name":"doc-vu/edgent","sub_path":"scripts/experiment/src/plots/summarize/collate_overall_latency.py","file_name":"collate_overall_latency.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"10126637081","text":"''' Contains the Reducer service implementation '''\n\nfrom smcity.logging.logger import Logger\n\nlogger = Logger(__name__)\n\nclass Reducer:\n ''' Consumers results from the reduce queue and pushes them into NoSQL. '''\n \n def __init__(self, job_factory, reduce_queue):\n '''\n Constructor.\n\n @param job_factory Used to retrieve jobs whose results have posted\n @paramType JobFactory\n @param reduce_queue Used to retrieve results from the reduce queue\n @paramType ReduceQueue\n @returns n/a\n '''\n assert job_factory is not None\n assert reduce_queue is not None\n \n self.is_shutting_down = False\n self.job_factory = job_factory\n self.reduce_queue = reduce_queue\n\n def reduce_results(self):\n '''\n Continuously pulls results from the queue and adds the to the associated job in the database.\n\n @returns n/a\n '''\n while not self.is_shutting_down:\n try:\n result = self.reduce_queue.get_result()\n \n if result is None: # If there are currently no results available\n continue\n\n logger.debug(\"Found result for job %s. Posting result...\" % result['job_id'])\n job = self.job_factory.get_job(result['job_id']) # Update the jobs state\n job.add_result(result['coordinate_box'], result['result'])\n job.save_changes()\n\n self.reduce_queue.finish_result(result) # Remove the result message from the queue\n except:\n logger.exception() \n \n def shutdown(self):\n '''\n Cleanly shutdowns down the reducer.\n \n @returns n/a\n '''\n self.is_shutting_down = True\n","repo_name":"mcmonster/smcity","sub_path":"smcity/analytics/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35523884717","text":"from numpy import *\r\nfrom sklearn import datasets\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport matplotlib.pyplot as plt\r\n\r\n# 加载数据\r\niris = datasets.load_iris()\r\nX = iris[\"data\"][:,3:]\r\ny = (iris[\"target\"]==2).astype(int)\r\n\r\n# 分类器\r\nclassifier = LogisticRegression() # 导入模型\r\nclassifier.fit(X,y) # 训练模型\r\nwidth = linspace(0,3,100) # 预测\r\nprobability = classifier.predict_proba(reshape(width,[len(width),1]))\r\n# probability = classifier.predict(reshape(width,[len(width),1])) # 预测类别\r\n\r\n# 可视化\r\nl1, = plt.plot(width,probability[:,0],linewidth=3)\r\nl2, = plt.plot(width,probability[:,1],linewidth=3)\r\n# l1, = plt.plot(width,probability,linewidth=3)\r\nplt.legend(handles=[l1,l2],labels=[\"Not Iris-Vir\",\"Iris-Vir\"],loc='best')\r\n# plt.legend(handles=[l1],labels=[\"Iris-Vir\"],loc='best')\r\nplt.xlabel('Width/cm')\r\nplt.ylabel('Probability')\r\n# plt.ylabel('Category')\r\nplt.title(\"Logistic Regression\")\r\nplt.show()","repo_name":"JaceyHuang/ISEE-zju","sub_path":"大三下/人工智能实验/lab2/src/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"41467533867","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 15 12:03:50 2020\n\n@author: lhf\n\"\"\"\n\nfrom sklearn.externals import joblib\nimport csv\nimport matplotlib.pyplot as plt\nimport 
numpy as np\n#%% define the cost function\ndef costFunction(y_pred, y):\n cost = 0\n for j in range(len(y)):\n cost += (y_pred[j]-y[j])**2\n return (1/2/len(y))*cost\n#load model and scaler\nmodelname = '../../Model/nnmodelFP_v7'\nnn_model = joblib.load(modelname)\nscalername = '../../Model/FindPscaler_v7'\nscaler = joblib.load(scalername)\n#%% load test data\nfname = '../../Data/P_testset_pro.csv'\nprint('Loading data ...\\n')\nX = []\ny = []\nwith open(fname) as csvfile:\n csv_reader = csv.reader(csvfile)\n header = next(csv_reader)\n for row in csv_reader:\n X.append(row[3:9])\n y.append(row[9])\n \nX = [[float(i) for i in row] for row in X]\ny = [float(j) for j in y]\n\n#%% test the model\nTest_X = np.array(X[:])\nTest_y = np.array(y[:])\nTest_X = scaler.transform(Test_X)\n\navg = []\nfor m in range(10):\n pred_y = nn_model[m].predict(Test_X)\n avg.append(list(pred_y))\n \navg = np.array(avg)\npred = np.mean(avg,0)\n\n#pred = nn_model.predict(Test_X)\nTest_error = []\nfor i in range(len(Test_y)):\n Test_error.append(100*abs(pred[i]-Test_y[i])/Test_y[i])\n\ntemp = Test_error[:]\ntemp.sort()\ntemp.reverse()\nfor i in range(5):\n print(i+1)\n print('error: %f\\n' %temp[i])\n print('index: %d\\n' %Test_error.index(temp[i]))\n print(X[Test_error.index(temp[i])])\n print('\\n')\n\nprint('\\tidx \\tpred_y \\tTest_y \\terror')\nfor i in range(100):\n print('\\t{} \\t{} \\t{} \\t{}'.format(i+2, pred[i], Test_y[i], Test_error[i]))\nprint('\\n')\nprint('Average Cost:')\nprint(costFunction(pred, Test_y))\nprint('Mean error:{}'.format(np.mean(Test_error)))\nprint('Standard deviation Error:{}'.format(np.std(Test_error)))\nplt.figure()\nplt.plot(np.linspace(1,100,100), Test_error)\nplt.title('NN test error plot')\nplt.xlabel('Index of test samples')\nplt.ylabel('relative error %')\nplt.show()\n\n","repo_name":"LuHaofan/SRTP","sub_path":"Code/ML predict P/testP.py","file_name":"testP.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14455169886","text":"import os\n\nfrom Crypto.Cipher import PKCS1_OAEP, AES\nfrom Crypto.PublicKey import RSA\n\nfrom FileServerApp.config import SECRET_CODE, RSA_PROTECTION, CERT_EXTENSION, FILE_EXTENSION\nfrom FileServerApp.crypto import AESCipher\n\n\nclass RSACipher(AESCipher):\n \"\"\" RSA Cipher class \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.private_key = None\n self.private_key_path = None\n\n self.public_key = None\n self.public_key_path = None\n\n self.generate_public_and_private_keys()\n\n def encrypt(self, data):\n cipher_text, tag, nonce, session_key = super().encrypt(data)\n cipher_rsa = PKCS1_OAEP.new(self.public_key)\n encrypted_session_key = cipher_rsa.encrypt(session_key)\n\n return cipher_text, tag, nonce, encrypted_session_key\n\n def decrypt(self, i_file, key_filename):\n cipher_rsa = PKCS1_OAEP.new(self.private_key)\n\n nonce, tag, cipher_text, session_key = super(AESCipher, self).decrypt(i_file, key_filename)\n\n session_key = cipher_rsa.decrypt(session_key)\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\n return cipher_aes.decrypt_and_verify(cipher_text, tag).decode(\"utf8\")\n\n def write_chiper_text(self, data, o_file, filename):\n cipher_text, tag, nonce, session_key = self.encrypt(data)\n key_filename = f\"AES_{filename}\"\n session_key_path = os.path.join(self.KEY_DIR, key_filename + FILE_EXTENSION)\n\n if not os.path.isfile(session_key_path):\n with open(session_key_path, \"wb\") as s_key_file:\n 
s_key_file.write(session_key)\n\n o_file.write(nonce + tag + cipher_text)\n\n return key_filename\n\n def generate_public_and_private_keys(self):\n key = RSA.generate(2048)\n encrypted_key = key.exportKey(passphrase=SECRET_CODE, pkcs=8, protection=RSA_PROTECTION)\n\n self.private_key_path = os.path.join(self.KEY_DIR, \"private_rsa_key\" + CERT_EXTENSION)\n self.public_key_path = os.path.join(self.KEY_DIR, \"public_rsa_key\" + CERT_EXTENSION)\n\n self.private_key = RSA.importKey(encrypted_key, passphrase=SECRET_CODE)\n if not os.path.isfile(self.private_key_path):\n with open(self.private_key_path, \"wb\") as pr_key_file:\n pr_key_file.write(encrypted_key)\n\n pub_key = key.public_key().exportKey()\n self.public_key = RSA.importKey(pub_key)\n if not os.path.isfile(self.public_key_path):\n with open(self.public_key_path, \"wb\") as pub_key_file:\n pub_key_file.write(pub_key)\n","repo_name":"mihail12101/FileServerApp","sub_path":"FileServerApp/crypto/RSACipher.py","file_name":"RSACipher.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16982957582","text":"class Solution:\n def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:\n table = {}\n sorted_nums = sorted(nums, reverse=True)\n count = len(nums) - 1\n \n for i, val in enumerate(sorted_nums):\n table[val] = count\n count -= 1\n \n res = []\n for val in nums:\n res.append(table[val])\n \n return res\n ","repo_name":"dyhliang/Leetcode","sub_path":"1365-how-many-numbers-are-smaller-than-the-current-number/1365-how-many-numbers-are-smaller-than-the-current-number.py","file_name":"1365-how-many-numbers-are-smaller-than-the-current-number.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"2198007343","text":"\"\"\"\nThis module is used to train and test the ML models used on the dataset\n\nIt saves trained models to local storage. 
Thus, as the dataset expands (for real life situations), the models can be\nretrained by running this script to account for data drift.\n\"\"\"\nimport sys\nimport os\nsys.path.append(f\"{os.getcwd()}\")\nfrom sklearn.metrics import matthews_corrcoef, f1_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport joblib\nimport pandas as pd\nfrom utils.preprocess import preprocess_train_input\nfrom utils.pipeline_log_config import pipeline as logger\n\n# List of models for frontend use\nmodels_dict = {\n 1: 'Logistic Regression',\n 2: 'Support Vector Machine',\n 3: 'Neural Network',\n 4: 'Random Forest'\n}\n\ndef train_models(models: dict, X_train, y_train):\n \"\"\"\n Takes dictionary of initialized models, training features and outcomes as input and saves the trained models.\n\n Within a for loop, each model is fitted to X-train and y_train, with the resulting model being saved to memory\n Args:\n models:\n X_train:\n y_train:\n\n Returns:\n\n \"\"\"\n\n for model, name in models.items():\n model.fit(X_train, y_train)\n joblib_file = f\"./models/{name}.pkl\"\n joblib.dump(model, joblib_file)\n logger.info(f\"{name} trained and saved\")\n logger.info(\"All models trained and saved successfully\")\n\n\ndef test_models(models: dict, X_test, y_test):\n \"\"\"\n Takes dictionary of initialized models, test features and outcomes as input and prints prediction scores to console.\n\n - Loads saved models using models dictionary for reference purpose\n - Prints test score for each model to console\n\n Args:\n models:\n X_test:\n y_test:\n\n Returns:\n\n \"\"\"\n # Test models\n for mdl, name in models.items():\n model = joblib.load(f'./models/{name}.pkl')\n y_true = y_test.copy()\n y_pred = model.predict(X_test)\n accuracy = model.score(X_test, y_test)\n MCC = matthews_corrcoef(y_true, y_pred)\n F1_SCORE = f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None,\n zero_division='warn')\n logger.info(f\"{name}: \\naccuracy --> {accuracy} \\nMCC --> {MCC} \\nf1_score --> {F1_SCORE}\\n\")\n\n\nif __name__==\"__main__\":\n # Load the data\n data = pd.read_csv(r\"./data/UCI_Credit_Card.csv\")\n\n # train-test split\n X_train_scaled, X_test_scaled, y_train, y_test = preprocess_train_input(data)\n\n # # models dictionary\n # models_dict = {\n # LogisticRegression(solver='liblinear',\n # C=21): 'Logistic_Regression',\n # SVC(kernel='rbf',\n # gamma='auto',\n # C=51): 'C_Support_Vector_Classification',\n # MLPClassifier(solver='adam',\n # max_iter=950,\n # hidden_layer_sizes=900,\n # activation='tanh'): 'Neural_Network_(Multi_layer_Perceptron_classifier)',\n # RandomForestClassifier(max_features='log2',\n # criterion='gini'): 'Random_Forest'\n # }\n\n # models dictionary\n models_dict = {\n LogisticRegression(solver='liblinear', C=21): 'Logistic_Regression',\n SVC(): 'C_Support_Vector_Classification',\n MLPClassifier(): 'Neural_Network_(Multi_layer_Perceptron_classifier)',\n RandomForestClassifier(): 'Random_Forest'\n }\n\n # Train models\n train_models(models=models_dict, X_train=X_train_scaled, y_train=y_train)\n\n # Test models\n test_models(models=models_dict, X_test=X_test_scaled, 
y_test=y_test)","repo_name":"theabrahamaudu/credit_card_default_predictor","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43395295548","text":"import sys\nimport random\n\n\"\"\"\nRandom Chunking approach. In this methodology,\nwe chunk each gene sequence into random-length \"words\"\n\nFor now, we're using \"text-document like\" files, which means\nthat we're putting everything into plain text files with\nspace-separated chunks\n\"\"\"\n\n# minimum and maximum chunk sizes\nCHUNK_MIN = 1\nCHUNK_MAX = 18\n\n\ndef chunk_random(input_gene, chunk_size=CHUNK_MIN):\n chunks = []\n current = 0\n while True:\n try:\n next_chunk_size = random.randint(CHUNK_MIN, CHUNK_MAX)\n chunks.append(input_gene[current:current + next_chunk_size])\n current += next_chunk_size\n except Exception:\n # We've hit the end of the gene\n chunks.append(input_gene[current:])\n break\n\n return chunks\n\n\ndef read_from_file(species):\n pass\n\n\ndef create_chunked_file(species, output_dir=\".\"):\n unchunked_data = read_from_file(species)\n chunked_data = chunk_random(unchunked_data)\n with open(f\"{output_dir}/{species}.txt\") as fh:\n for chunk in chunked_data:\n fh.write(f\"{chunk} \")\n\n\nif __name__ == '__main__':\n if not (2 < len(sys.argv) < 3):\n print(\"Usage: python3 random_chunking.py []\")\n exit(1)\n\n output_dir = \".\" if len(sys.argv == 2) else sys.argv[2]\n species = sys.argv[1]\n create_chunked_file(species, output_dir)\n","repo_name":"benhg/phylo-text-mining","sub_path":"data_processing/random_chunk/random_chunking.py","file_name":"random_chunking.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"35283436549","text":"# Imports the Google Cloud client library\nfrom google.cloud import language_v1\n#import service account credentials\nfrom google.oauth2 import service_account\n\n# Instantiates a client\n#provide credentials\n\ncredentials = service_account.Credentials.from_service_account_file(\"gsheetwriter-378419-d50229d7ebf2.json\")\nclient = language_v1.LanguageServiceClient(credentials=credentials)\n\n# The text to analyze\ntext = \"I flew to London from Pittsburgh and then took an early flight to Edinburgh where I arrived at about 11a.m.. 
After that, it is time for lunch, so I went to the Royal Mile.\"\ndocument = language_v1.Document(\n content=text, type_=language_v1.Document.Type.PLAIN_TEXT\n)\n\n# parse the entities in the text\nentities = client.analyze_entities(request={'document': document}).entities\n\n#find out location entities and put them in a list\nlocation_entities = []\nfor entity in entities:\n if entity.type_ == language_v1.Entity.Type.LOCATION:\n location_entities.append(entity.name)\n\n#find out the transportation entities and put them in a list\ntransportation_entities = []\nfor entity in entities:\n if entity.type_ == language_v1.Entity.Type.EVENT:\n transportation_entities.append(entity.name)\n\n\n#print the list\nprint(location_entities)\nprint(transportation_entities)\n\n\n#print out all entities and their type names\nfor entity in entities:\n print(u\"Representative name for the entity: {}\".format(entity.name))\n print(u\"Entity type: {}\".format(language_v1.Entity.Type(entity.type_)))\n\n\n#get the google maps coordinates for the locations\n\n","repo_name":"PineappleTech1213/GoogleMaps4Travel","sub_path":"GoogleTextParser.py","file_name":"GoogleTextParser.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74846507241","text":"from init import db\nfrom flask import Blueprint, request\nfrom models.book import Book, book_schema, books_schema\nfrom models.user import User\nfrom flask_jwt_extended import get_jwt_identity, jwt_required\n\ndef authorise_admin():\n '''Function to authorise whether or not a specific user has admin permissions'''\n user_id = get_jwt_identity()\n stmt = db.select(User).filter_by(id=user_id)\n user = db.session.scalar(stmt)\n return user.is_admin\n\nbooks_bp = Blueprint('books', __name__, url_prefix='/books')\n\n@books_bp.route('/') # route for viewing all books in database\ndef get_all_books():\n '''/books GET route that will display a full list of all the books in the database'''\n stmt = db.select(Book).order_by(Book.id.desc()) # select all books in database and display them in descending order\n books = db.session.scalars(stmt) # makes books displayable\n return books_schema.dump(books) # returns books to users\n\n@books_bp.route('/') # route for viewing singular book in database\ndef get_one_book(id):\n '''/books/id GET route that will display a book corresponding to the ID in the URL'''\n stmt = db.select(Book).filter_by(id=id) # search the database with a filter being that of the ID in the URL\n book = db.session.scalar(stmt) # convert book to viewable format\n if book:\n return book_schema.dump(book) # return book to user\n else:\n return {'error': f'A book with the id {id} does not exist.'}, 404 # if book not found, return this error message\n \n@books_bp.route('/', methods=['POST'])\n@jwt_required()\ndef create_book():\n '''/books POST route that will receive raw JSON with fields {title, genre_id, page_count, format_id and collection_id} before being validated and then added to database'''\n json_data = book_schema.load(request.get_json()) # gets the JSON from user\n book = Book( # creates new instance of book\n title=json_data.get('title'),\n genre_id=json_data.get('genre_id'),\n page_count=json_data.get('page_count'),\n format_id=json_data.get('format_id'),\n collection_id=json_data.get('collection_id'),\n user_id=get_jwt_identity() # attaches jwt identity to book for verification\n )\n db.session.add(book) # adds book to session\n db.session.commit() # commits 
book to database\n return book_schema.dump(book), 201 # returns book to user\n\n@books_bp.route('/', methods=['DELETE'])\n@jwt_required()\ndef delete_one_book(id):\n '''/books/id DELETE route that will use the ID in the URL to locate the book, verify the user as admin and then remove the book from the database'''\n admin_status = authorise_admin() # authorise_admin function checks whether user has admin permissions\n if not admin_status:\n return {'error': 'You must have admin permissions to delete books.'} # if not admin return this error message\n stmt = db.select(Book).filter_by(id=id) # filter books by the id to find specific book\n book = db.session.scalar(stmt)\n if book:\n db.session.delete(book)\n db.session.commit()\n return {'message': f'Book {book.title} has been deleted succesfully.'} # if book has been found and user has admin permissions, drop book from database\n else:\n return {'error': f'A book with the id {id} does not exist.'}, 404 # if book not found return this error message\n\n@books_bp.route('/', methods=['PUT', 'PATCH'])\n@jwt_required()\ndef update_one_book(id):\n '''/books/id PUT/PATCH route that will use the ID in the URL in order to find the book, verify the user as the owner of the book, and then allow the user to update details of the book'''\n json_data = book_schema.load(request.get_json(), partial=True) # loads schema for book, uses partial tag as not all details may be updated\n stmt = db.select(Book).filter_by(id=id) # filter books by the id to find specific book\n book = db.session.scalar(stmt)\n if book:\n if str(book.user_id) != get_jwt_identity():\n return {'error': 'You must be the owner of the book in order to edit it.'}, 403 # if book is found and user ID does not match the jwt, return this error message\n book.title = json_data.get('title') or book.title\n book.genre = json_data.get('genre') or book.genre\n book.page_count = json_data.get('page_count') or book.page_count\n book.format = json_data.get('format') or book.format\n return book_schema.dump(book) # return edited book to user\n else:\n return {'error': f'A book with the id {id} does not exist.'}, 404 # if book is not found return this error message","repo_name":"zakaryjs/CA-T2A2-Webserver_API","sub_path":"controllers/book_controller.py","file_name":"book_controller.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8612627072","text":"array = [1,4,5,8,2,9,3,7,6,0]\nprint(f'Current unstored array: {array}')\nprint('The working of selection sort: ')\nfor index in range(len(array)):\n selection = index\n for compare in range(index,len(array),1):\n if array[selection] > array[compare]:\n selection = compare\n #this is swapping\n temp = array[index]\n array[index] = array[selection]\n array[selection] = temp\n #this is steps per iteration\n print(array)\nprint('\\nThe Result For Selected Array:')\nprint(array)","repo_name":"KZTanvir/code_sol","sub_path":"algorithms/selection sort.py","file_name":"selection sort.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42876311085","text":"import torch\nfrom utils import misc as ksd\nfrom core.autoencoders import ConvAutoEncoderCIFAR10\n\n\nif __name__ == '__main__':\n\n loss_fn = torch.nn.MSELoss()\n lr = 1e-3\n torch.manual_seed(0)\n d = 4\n epochs = 502\n train_loader, test_loader = ksd.load_data_cifar_10(batch_size=64)\n net = 
ConvAutoEncoderCIFAR10()\n # ksd.train_autoencoder_and_save(net, train_loader, test_loader, epochs, lr, noise_factor=0.1, device=ksd.try_gpu(), model_name='../models/autoencoders/conv_autoencoder_cifar10.pth')\n # load the checkpoint written by train_autoencoder_and_save (path from the commented call above)\n net.load_state_dict(torch.load('../models/autoencoders/conv_autoencoder_cifar10.pth'))\n for X, y in test_loader:\n ksd.show_images_ae_cifar10(net(X.to(\"cuda\")).to('cpu'), X)\n break\n\n","repo_name":"SduWangyu/Adversarial_Defence_KSD","sub_path":"train_autoencoder/train_cae_cifar10.py","file_name":"train_cae_cifar10.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9049200701","text":"\"\"\"\n Vytvořte funkci, která vypíše součet dvou čísel\n\"\"\"\n\ndef funkce_na_cisla(cislo_1, cislo_2): \n print(cislo_1 + cislo_2)\n return\n\ncislo1 = int(\"-4\")\ncislo2 = int(input(\"Zadejte druhe cislo: \"))\n\nprint(funkce_na_cisla(cislo1, cislo2))","repo_name":"klusik/Python","sub_path":"Bonusy/Pyladies_221011_funkce/something.py","file_name":"something.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14451040807","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib.auth.models import Permission\n\nfrom rest_framework import serializers\nfrom rest_framework.authtoken.models import Token\n\n\nUser = get_user_model()\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_permissions = serializers.SerializerMethodField()\n\n def get_user_permissions(self, obj):\n return obj.get_user_permissions()\n\n class Meta:\n model = User\n fields = [\"id\", \"username\", \"email\", \"first_name\", \"last_name\", \"url\",\n \"mobile\", \"is_active\", \"is_approved\", \"address\", \"user_permissions\"]\n extra_kwargs = {\n \"url\": {\"view_name\": \"api:user-detail\", \"lookup_field\": \"username\"}\n }\n\n\n\nclass UserInfoSerializer(serializers.ModelSerializer):\n user_permissions = serializers.SerializerMethodField()\n\n def get_user_permissions(self, obj):\n return obj.get_user_permissions()\n\n class Meta:\n model = User\n fields = [\"id\", \"username\", \"email\", \"first_name\", \"last_name\",\n \"is_staff\", \"is_superuser\", \"is_active\", 'is_approved', \"user_permissions\"]\n\n\nclass TokenSerializer(serializers.ModelSerializer):\n user = UserInfoSerializer(read_only=True)\n\n class Meta:\n model = Token\n fields = ('key', 'user')\n\n\n\n","repo_name":"TejsinghDhaosriya/Contact-Book","sub_path":"workex/users/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39901967318","text":"#!/usr/bin/env python3\nimport sys\n\nfrom src.lib.logger import LogicalDocLogger\nfrom src.lib.variables import PathVariables, CLICommands\nfrom src.operations.base import BasicOperations\n\n\nclass Backup(BasicOperations):\n\n def __init__(self, logger: LogicalDocLogger):\n \"\"\"\n Backup constructor\n :param logger: logger object\n \"\"\"\n super().__init__(logger)\n self.backup = self.cwd.joinpath(PathVariables.SRC_BACKUP)\n self.log.info(\"Back up is running and will be stored at %s\" % self.backup)\n self.log.debug(\"cwd: %s\" % self.cwd)\n self.tar_archive = self._get_tarfile_object('w')\n\n def run(self):\n \"\"\"\n Method runs all backup-operations and offers the only access to this class.\n :return: None\n \"\"\"\n if 
self._is_logicaldoc_running():\n out = self.run_linux_command(CLICommands.LOGICALDOC_STOP)\n self.log.debug(\"response from %s: %s\" % (CLICommands.LOGICALDOC_STOP, out))\n\n self.__backup_datafiles()\n out = self.run_linux_command(CLICommands.LOGICALDOC_START.__str__())\n self.log.debug(\"response from %s: %s\" % (CLICommands.LOGICALDOC_START, out))\n\n def __backup_datafiles(self):\n \"\"\"\n Method checks if folders which are backed up are available and creates a sql export file from mysql.\n :return: None\n \"\"\"\n for x in [self.logicaldoc_conf, self.logicaldoc_doc, self.logicaldoc_index]:\n if not x.exists():\n self.log.debug(\"%s is not available for backing up. Backup up aborted\" % x)\n sys.exit()\n sql_dump_path = self.cwd.joinpath(PathVariables.SRC__DUMP)\n self.log.debug(\"dumpfile: %s\" % sql_dump_path)\n\n try:\n out = self.run_linux_command(self.__get_sql_dump_cmd())\n\n self.log.debug(\"output sql dump: %s\" % out)\n # with open(str(sql_dump_path), 'w') as sql:\n # sql.write(out.get(CLICommands.STDOUT).decode(\"utf-8\"))\n except Exception as e:\n self.log.debug(\"sql dump could not be executed. Backup aborted: %s\" % e)\n sys.exit()\n\n self.tar_archive.add(str(sql_dump_path))\n self.tar_archive.add(str(self.logicaldoc_conf))\n self.tar_archive.add(str(self.logicaldoc_doc))\n self.tar_archive.add(str(self.logicaldoc_index))\n self.tar_archive.close()\n\n def __get_sql_dump_cmd(self) -> str:\n \"\"\"\n Method creates sqldump - command.\n :return: command\n \"\"\"\n self.cfg.run()\n # return \"mysqldump -u%s -p%s --add-drop-database %s >\" % (self.cfg.get_username(), self.cfg.get_password(), self.cfg.get_database())\n return 'mysqldump -u%s -p%s --add-drop-database %s > %s' % (self.cfg.get_username(), self.cfg.get_password(), self.cfg.get_database(), PathVariables.SRC__DUMP)\n","repo_name":"siless/backup_logicaldoc","sub_path":"src/operations/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20346705664","text":"#!/usr/bin/python2\n# -*- coding: utf-8 -*-\n\nimport subprocess,time,os\n# stime = time.strftime('%Y%m%d%H%M%S',time.localtime())\nwork_dir = '/home/kilox/monitor'\n\n# cnt = 0\n# for f in os.listdir(work_dir):\n# name = int(os.path.split(f)[0]) +1\n# cnt = name%10\ncnt = 0\ncnt_file = os.path.join(work_dir,'cnt_file')\nif os.path.exists(cnt_file):\n with open(cnt_file,'r') as f:\n cnt = int(f.readline()) + 1\n\nwith open(cnt_file,'w') as f:\n f.write('{} \\n'.format(cnt))\n\n\nos.chdir(work_dir)\n\nwrite_name = os.path.join(work_dir,'{}.log'.format(cnt))\nif os.path.exists('latest.log'):\n os.remove('latest.log')\nif os.path.exists(write_name):\n os.remove(write_name)\n\n\nwhile True:\n # logfile = '{}_{}.log'.format(stime,cnt)\n subprocess.Popen('./monitor.sh >> {}'.format(write_name),shell=True)\n time.sleep(60)\n subprocess.Popen('ln -s {} latest.log'.format(write_name),shell=True)\n ","repo_name":"haduoken/python_tools","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38325713475","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'api'\n\nurlpatterns = [\n path(r'', views.ansible_index,name='ansible_index'),\n path(r'result/', views.ansible_api,name='ansible_api'),\n path(r'listhost/', views.display,name='display'),\n]\n","repo_name":"quanloveshui/cmdb","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40386931353","text":"#!/usr/bin/env python3\n# prints ENU coords, temp for testing\nimport rospy\nimport std_msgs\nfrom sensor_msgs.msg import Image, NavSatFix\n\nimport pymap3d as pm\nimport numpy as np\n\n\n\ndef main():\n rospy.init_node('enuPrinter', anonymous=True)\n printer=enuPrinter()\n rospy.spin()\n\n\nclass enuPrinter():\n def __init__(self):\n\n ## **** GPS ORIGIN ****\n self.lat0 = 30.632913\n self.lon0 = -96.481894 # deg\n self.h0 = 54.3 # meters\n rospy.Subscriber('/piksi/navsatfix_best_fix', NavSatFix,self.enuConverter)\n\n def enuConverter(self,data):\n tempVal=pm.geodetic2enu(data.latitude,data.longitude,data.altitude, self.lat0, self.lon0, self.h0)\n print(tempVal)\n \nif __name__=='__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(\"Shutting down\")","repo_name":"VegaVK/CACC_TAMU","sub_path":"v_cacc/scripts/temp_enu_print.py","file_name":"temp_enu_print.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"71183640041","text":"#coding: utf-8\n\n\"\"\"\nСкрипт парсит все страницы с сайта rp5.ru, посвящённые какой-либо стране\nи формирует _1_ xml-файл с информацией по стране.\nИерархия: страна -> регион -> населённый пункт\nНас интересует следующая информация:\n* С населённых пунктов:\n - название\n - id\n - регион\n\"\"\"\n\n###########\n# Imports #\n###########\nimport lxml.html as LH\nfrom lxml import etree\nimport re\nimport logging\nimport sys\nfrom os import chdir\nimport urllib\nfrom time import sleep\n\n##############\n# Переменные #\n##############\nCOUNTRY_PAGE = u'http://rp5.ru/Погода_в_Эстонии'\nDEST_DIR = '/home/wind/GAE/traceyourself-hrd/rp5/'\nCOUNTRY = u'Эстония'\nchdir(DEST_DIR)\n\n# Настройки журналирования\ninfo_logger = logging.getLogger('info')\ninfo_logger.addHandler(logging.StreamHandler(sys.stdout))\ninfo_logger.setLevel(logging.INFO)\nerror_logger = logging.getLogger('errors')\nerror_logger.addHandler(logging.FileHandler('./errors.log', mode='w'))\nerror_logger.setLevel(logging.ERROR)\n\n# Исключения\nclass URLTraversed(Exception):\n \"\"\"Исключение означает, что проход по данному URL уже осуществлялся\"\"\"\n pass\n\nclass URLMismatch(Exception):\n \"\"\"Исключение означает, что url, по которому происходил переход,\n не соответствует url конечной страницы.\"\"\"\n pass\n\n# Временная обрезка списка регионов (для опытов)\n# all_links = all_links[:1]\n\nurls_traversed = set([COUNTRY_PAGE])\n\ndef parse(url):\n if url not in urls_traversed:\n try_count = 1 # номер попытки скачать страницу\n max_try_count = 5\n # Режим молотка\n while True:\n try:\n P = LH.parse(url)\n break\n except IOError: # Не получилось прочитать url\n if try_count == max_try_count:\n raise\n sleep_seconds = try_count**2\n info_logger.warning(\"Couldn't read url %s. Try number %d. 
Sleeping %d sec\" % (url, try_count, sleep_seconds))\n sleep(sleep_seconds)\n try_count += 1\n\n urls_traversed.add(url)\n if P.docinfo.URL != urllib.quote(url.encode('utf-8'), safe='/:()'):\n raise URLMismatch\n else:\n return LH.parse(url)\n else:\n raise URLTraversed\n\n\ndef traverse_links(countryMap, country):\n global count\n for link in countryMap.iterlinks():\n url = link[2]\n el = link[0]\n try:\n P = parse(url)\n except URLTraversed:\n pass\n except URLMismatch:\n count += 1\n error_logger.error(\"URLMismatch: %s, url=%s\" % (el.text, url))\n else:\n forecast = P.xpath('//div[@id=\"content\"]//table[@id=\"forecastTable\"]')\n if forecast: # лист (населённый пункт)\n # \"Путь\" к населённому пункту\n point_path = P.xpath('//div[@class=\"intoLeftNavi\"]/span[@class=\"verticalBottom\"]')[0]\n path_parts = [a.text_content() for a in point_path.iterchildren('a')]\n region_name = path_parts[2]\n try:\n district_name = path_parts[3] # на случай повторов\n except IndexError:\n district_name = ''\n # Проверка страны (всегда д.б. COUNTRY)\n country_name = path_parts[1]\n if country_name != COUNTRY:\n error_logger.error(\"Country mismatch: %s != %s, url=%s\" % (country_name, COUNTRY, P.docinfo.URL))\n count += 1\n return\n # выясняем id пункта\n appMenu = P.find('//div[@id=\"appMenu\"]')\n onclick = appMenu.xpath('.//li[text()=\"XML\"]')[0].get('onclick')\n id = id_rgx.search(onclick).group(1)\n\n point = etree.SubElement(country, \"point\")\n etree.SubElement(point, \"name\").text = el.text\n etree.SubElement(point, \"region\").text = region_name\n etree.SubElement(point, \"district\").text = district_name\n etree.SubElement(point, \"id\").text = id\n count += 1\n if count%10 == 0:\n info_logger.info(\"%d. %s (%s) (id=%s)\" % (count, el.text, region_name, id))\n else: # ветка\n try:\n countryMap = P.xpath('//table[@class=\"countryMap\"]')[0]\n except IndexError: # Нет списка городов и/или регионов -- пропускаем\n continue\n else:\n countryMap.make_links_absolute()\n traverse_links(countryMap, country)\n return country\n\n\n############\n# Действие #\n############\ncountry_page = LH.parse(COUNTRY_PAGE)\ncountryMap = country_page.xpath('//table[@class=\"countryMap\"]')[0]\ncountryMap.make_links_absolute()\nall_links = countryMap.xpath('.//a')\n\n# Регулярные выражения\ncount_rgx = re.compile(r\"\\b[\\d']+\\b\")\nid_rgx = re.compile(r'id=(\\d+)')\n\n# Информация о стране\ntitle = country_page.xpath('//div[@id=\"content\"]//h1')[0].text # Заголовок \"Погода в Эстонии в 64 населённых пунктах\"\ntotal_count = int(count_rgx.findall(title)[-1].replace(\"'\", \"\")) # Количество населённых пунктов в стране (64)\n\ncount = 0 # Счётчик пройденных населённых пунктов\ncountry = etree.Element(\"country\", name=COUNTRY)\n\ntraverse_links(countryMap, country)\n\nif count != total_count:\n info_logger.warning(u\"ВНИМАНИЕ! 
Количество населённых пунктов в заголовке не совпадает с \"\n u\"количеством пройденных населённых пунктов: %d != %d\" % (total_count, count))\n\ncountry.attrib['count'] = str(count)\nF = open(\"%s.xml\" % COUNTRY, \"w\")\ndoc = etree.ElementTree(country)\ndoc.write(F, encoding=\"utf-8\", pretty_print=True)\nF.close()\n","repo_name":"andbar-ru/traceyourself.appspot.com","sub_path":"scripts/rp5_country_to_regions.py","file_name":"rp5_country_to_regions.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3368248802","text":"from keras.layers import *\r\nfrom keras.models import *\r\nfrom keras import backend as K\r\nfrom keras.layers import Dropout, Activation\r\nfrom keras.callbacks import Callback, ModelCheckpoint\r\nfrom keras.models import Model\r\nfrom sklearn import metrics\r\nfrom keras.callbacks import EarlyStopping\r\nfrom sklearn.model_selection import StratifiedKFold,KFold\r\nfrom sklearn.metrics import confusion_matrix,roc_auc_score,roc_curve,auc\r\nfrom keras import utils\r\nimport numpy as np\r\nfrom keras import layers, optimizers\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import roc_auc_score,average_precision_score, f1_score,accuracy_score,roc_curve,precision_recall_curve,auc\r\nimport pandas as pd\r\nK.set_image_data_format('channels_last')\r\nfrom layer import CapsuleLayer, PrimaryCap, Length\r\n\r\n\r\ndef margin_loss(y_true, y_pred):\r\n \"\"\"\r\n Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. Not test it.\r\n :param y_true: [None, n_classes]\r\n :param y_pred: [None, num_capsule]\r\n :return: a scalar loss value.\r\n \"\"\"\r\n L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \\\r\n 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))\r\n\r\n return K.mean(K.sum(L, 1))\r\n\r\ndef open_fa(file):\r\n record = []\r\n f = open(file, 'r')\r\n for item in f:\r\n if '>' not in item:\r\n record.append(item[0:-1])\r\n f.close()\r\n return record\r\n\r\ndef onehot(sequence):\r\n data = []\r\n for seq in sequence:\r\n num= []\r\n for pp in seq:\r\n if pp == 'A':\r\n num.append([1, 0, 0, 0])\r\n if pp == 'C':\r\n num.append([0, 1, 0, 0])\r\n if pp == 'G':\r\n num.append([0, 0, 1, 0])\r\n if pp == 'T':\r\n num.append([0, 0, 0, 1])\r\n data.append(num)\r\n return data\r\n\r\nauc_savepath = 'D:/PycharmProjects/pythonProject/CapsNetYY1/weight/HCT116.png'\r\ncoloer_list = ['b','orange','fuchsia','green','dimgray','khaki','mediumpurple','c','pink','rosybrown']\r\n\r\nkfold = KFold(n_splits=10, shuffle=True, random_state=7)\r\nnames = ['HCT116', 'K562']\r\nname=names[0]\r\nseq1 = open_fa('D:/PycharmProjects/pythonProject/CapsNetYY1/one-hot/HCT116seq1train.txt')\r\nseq1_onehot = onehot(seq1)\r\nX0_1 = np.array(seq1_onehot)\r\nX_1 =X0_1\r\n#X_1 =X0_1 .reshape(len(X0_1),46,44)\r\nseq2 = open_fa('D:/PycharmProjects/pythonProject/CapsNetYY1/one-hot/HCT116seq2train.txt')\r\nseq2_onehot = onehot(seq2)\r\nX0_2 = np.array(seq2_onehot)\r\nX_2 =X0_2\r\ny_tra_1= np.loadtxt('D:/PycharmProjects/pythonProject/CapsNetYY1/one-hot/HCT116labeltrain.txt')\r\ny_tra = utils.to_categorical(y_tra_1, 2)\r\nfpr_list = []\r\ntpr_list = []\r\nplt.figure(figsize=(8,8))\r\nacc_score = []\r\nauc_score = []\r\n\r\nfor i,(train, test) in enumerate(kfold.split(y_tra)):\r\n print('\\n\\n%d'%i)\r\n #print(i,(train,test))\r\n path = 'D:/PycharmProjects/pythonProject/CapsNetYY1/weight/%sModel%d.h5' % (name, i)\r\n checkpoint = 
ModelCheckpoint(filepath=path,monitor='val_loss', verbose=1, save_best_only=True,\r\n save_weights_only=True, mode='auto')\r\n\r\n def get_model():\r\n X1 = Input(shape=(506, 4))\r\n x1_1 = Conv1D(filters=32, kernel_size=7, strides=2, padding='same', activation='relu')(X1)\r\n x1_2 = Conv1D(filters=32, kernel_size=5, strides=2, padding='same', activation='relu')(X1)\r\n x1_3 = Conv1D(filters=32, kernel_size=3, strides=2, padding='same', activation='relu')(X1)\r\n x1 = Concatenate(axis=-1)([x1_1, x1_2, x1_3])\r\n x1 = Conv1D(filters=32, kernel_size=5, strides=2, padding='valid', activation='relu')(x1)\r\n X2 = Input(shape=(506, 4))\r\n x2_1 = Conv1D(filters=32, kernel_size=7, strides=2, padding='same', activation='relu')(X2)\r\n x2_2 = Conv1D(filters=32, kernel_size=5, strides=2, padding='same', activation='relu')(X2)\r\n x2_3 = Conv1D(filters=32, kernel_size=3, strides=2, padding='same', activation='relu')(X2)\r\n x2 = Concatenate(axis=-1)([x2_1, x2_2, x2_3])\r\n x2 = Conv1D(filters=32, kernel_size=5, strides=2, padding='valid', activation='relu')(x2)\r\n merge_layer = Concatenate(axis=1)([x1, x2])\r\n dt = Dropout(0.5)(merge_layer)\r\n dt = Bidirectional(GRU(32, return_sequences=True))(dt)\r\n primarycaps = PrimaryCap(dt, dim_vector=8, n_channels=16, kernel_size=9, strides=2, padding='valid')\r\n digitcaps = CapsuleLayer(num_capsule=2, dim_vector=8, num_routing=3, name='digitcaps')(primarycaps)\r\n out_caps = Length()(digitcaps)\r\n model = Model([X1, X2], out_caps)\r\n model.compile(optimizer=optimizers.Adam(lr=0.0005), loss=[margin_loss], metrics=['accuracy'])\r\n return model\r\n\r\n print('Train...')\r\n callbacks_list = checkpoint\r\n back = EarlyStopping(monitor='val_loss', patience=10, verbose=2, mode='auto')\r\n model = None\r\n model = get_model()\r\n model.summary()\r\n history = model.fit([X_1[train], X_2[train]], y_tra[train], validation_data=([X_1[test], X_2[test]], y_tra[test]), epochs=200,batch_size=64,callbacks=[checkpoint, back], verbose=2)\r\n model1 = get_model()\r\n model1.load_weights(path)\r\n '''for i in prd_acc:\r\n pre_acc2.append(i[0])'''\r\n\r\n pre_test_y = model1.predict([X_1[test], X_2[test]])\r\n y_pred1 = pre_test_y.argmax(axis=1)\r\n #print(pre_test_y)\r\n '''prd_acc = model.predict([X_en_tra[test], X_pr_tra[test]])\r\n pre_acc2 = []'''\r\n test_auc = metrics.roc_auc_score(y_tra[test], pre_test_y)\r\n auc_score.append(test_auc)\r\n\r\n fpr, tpr, threshold = roc_curve(y_tra[test][:, -1], pre_test_y[:, -1])\r\n fpr_list.append(fpr)\r\n tpr_list.append(tpr)\r\n auc_roc = metrics.roc_auc_score(y_tra[test], pre_test_y)\r\n print('auc_roc', auc_roc, 'len(fpr)', len(fpr), 'tpr', tpr)\r\n font = {'family': 'Times New Roman',\r\n 'weight': 'normal',\r\n 'size': 22,\r\n }\r\n lw = 1.5\r\n\r\n plt.rcParams['font.sans-serif'] = ['SimHei']\r\n plt.plot(fpr, tpr, color=coloer_list[i], lw=lw, label='ROC fold' + str(i + 1) + '(AUC=%0.4f)' % auc_roc)\r\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n plt.xlim([-0.01, 1.01])\r\n plt.ylim([-0.01, 1.01])\r\n plt.tick_params(labelsize=20)\r\n plt.xlabel('1-Specificity', font)\r\n plt.ylabel('Sensitivity', font)\r\n plt.title('HCT116', font)\r\n plt.legend(loc=\"lower right\")\r\n # plotROC(y_tra[test].argmax(axis=1),pre_test_y.argmax(axis=1),auc_path)\r\n score = model1.evaluate([X_1[test], X_2[test]], y_tra[test])\r\n\r\n acc =accuracy_score(y_tra_1[test],y_pred1)\r\n acc_score.append(acc)\r\n print('Test accuracy:', acc)\r\n print(\"test_auc: \", test_auc)\r\nfpr_mean_list = []\r\ntpr_mean_list = 
[]\r\nfpr_tpr_len = []\r\n# print('fpr_list',fpr_list,'/n','fpr_list[0]',fpr_list[0])\r\nfor j in range(0, 10, 1):\r\n len_j = len(fpr_list[j])\r\n fpr_tpr_len.append(len_j)\r\nlength1 = np.min(np.array(fpr_tpr_len))\r\nprint('length1', length1)\r\nfor i in range(length1):\r\n fpr_mean = np.mean(np.array((fpr_list[0][i], fpr_list[1][i], fpr_list[2][i], fpr_list[3][i], fpr_list[4][i],\r\n fpr_list[5][i], fpr_list[6][i], fpr_list[7][i], fpr_list[8][i], fpr_list[9][i])))\r\n fpr_mean_list.append(fpr_mean)\r\nfor i in range(length1):\r\n tpr_mean = np.mean(np.array((tpr_list[0][i], tpr_list[1][i], tpr_list[2][i], tpr_list[3][i], tpr_list[4][i],\r\n tpr_list[5][i], tpr_list[6][i], tpr_list[7][i], tpr_list[8][i], tpr_list[9][i])))\r\n tpr_mean_list.append(tpr_mean)\r\nfpr_mean = np.array(fpr_mean_list)\r\ntpr_mean = np.array(tpr_mean_list)\r\nauc_mean = np.mean(np.array(auc_score))\r\n\r\nplt.plot(fpr_mean, tpr_mean, color='r', lw=lw, label='Mean ROC(AUC=%0.4f)' % auc_mean)\r\nplt.legend(loc=\"lower right\")\r\nplt.savefig(auc_savepath, dpi=350)\r\nplt.show()\r\n\r\nprint('***********************print final result*****************************')\r\nprint(auc_score)\r\nmean_acc = np.mean(acc_score)\r\nmean_auc = np.mean(auc_score)\r\n# line = 'acc\\tauc:\\n%.2f\\t%.4f' % (100 * mean_acc, mean_auc)\r\nline = 'acc\\tauc:\\n%.2f\\t%.4f' % (100 * mean_acc, mean_auc)\r\nprint('10-fold result:\\n' + line)\r\n","repo_name":"zhangzhimin1108/CapsNetYY1","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35771528984","text":"#!/usr/bin/env python3\nimport json\nimport datetime\n\nfrom flask import Flask, render_template, request, escape, flash, redirect\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///database.sqlite3\"\napp.config[\"DEBUG\"] = True\n\ndb = SQLAlchemy(app)\n\n\n# A discord server\nclass Server(db.Model):\n id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n name = db.Column(db.String(128), unique=True, nullable=False)\n\n channels = db.relationship(\"Channel\", back_populates=\"server\")\n messages = db.relationship(\"Message\", back_populates=\"server\")\n\n def __init__(self, name):\n self.name = name\n\n\n# A user in a chat room team\nclass User(db.Model):\n id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n discord_id = db.Column(db.String(128), unique=True, nullable=False)\n name = db.Column(db.String(128))\n\n messages = db.relationship(\"Message\", back_populates=\"user\")\n\n def __init__(self, discord_id, name):\n self.discord_id = discord_id\n self.name = name\n\n def permalink(self):\n return f\"/user/{self.id}\"\n\n def message_count(self):\n return Message.query.filter_by(user=self).count()\n\n\n# A channel\nclass Channel(db.Model):\n id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n discord_id = db.Column(db.String(128), unique=True, nullable=False)\n name = db.Column(db.String(128))\n\n messages = db.relationship(\"Message\", back_populates=\"channel\")\n server_id = db.Column(db.Integer, db.ForeignKey(\"server.id\"))\n server = db.relationship(\"Server\", back_populates=\"channels\")\n\n def __init__(self, server, discord_id, name):\n self.server = server\n self.discord_id = discord_id\n self.name = name\n\n def permalink(self):\n return f\"/channel/{self.id}\"\n\n def message_count(self):\n return 
Message.query.filter_by(channel=self).count()\n\n\n# A message posted in a channel\nclass Message(db.Model):\n id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n discord_id = db.Column(db.String(128), unique=True, nullable=False)\n timestamp = db.Column(db.DateTime)\n message = db.Column(db.String(4096))\n attachments_json = db.Column(db.String(4096))\n\n user_id = db.Column(db.Integer, db.ForeignKey(\"user.id\"))\n user = db.relationship(\"User\", back_populates=\"messages\")\n\n channel_id = db.Column(db.Integer, db.ForeignKey(\"channel.id\"))\n channel = db.relationship(\"Channel\", back_populates=\"messages\")\n\n server_id = db.Column(db.Integer, db.ForeignKey(\"server.id\"))\n server = db.relationship(\"Server\", back_populates=\"messages\")\n\n def __init__(\n self,\n server,\n discord_id,\n timestamp,\n message,\n user,\n channel,\n attachments_json=None,\n ):\n self.server = server\n self.discord_id = discord_id\n self.timestamp = datetime.datetime.fromtimestamp(timestamp / 1000)\n self.message = message\n self.user = user\n self.channel = channel\n if attachments_json:\n self.attachments_json = attachments_json\n\n def formatted_timestamp(self):\n return self.timestamp.strftime(\"%b %d, %Y %I:%M:%S %p\")\n\n def permalink(self):\n return f\"/view/{self.id}\"\n\n def highlight(self, query):\n # Make sure to escape the message here, and replace newslines with line breaks\n m = str(escape(self.message)).replace(\"\\n\", \"
\\n\")\n\n # If there isn't a query, return the original message\n if not query:\n return m\n\n new_m = \"\"\n index = 0\n while True:\n new_index = m.lower().find(query.lower(), index)\n if new_index > 0:\n # Found\n new_m += m[index:new_index]\n new_m += f\"{m[new_index : new_index + len(query)]}\"\n index = new_index + len(query)\n else:\n # Not found\n new_m += m[index:]\n break\n\n return new_m\n\n def attachments(self):\n if not self.attachments_json:\n return []\n\n return json.loads(self.attachments_json)\n\n\ndata = None\n\n\n# Get the page and per_page args from query string, as ints\ndef get_pagination_args():\n page = request.args.get(\"page\", 1)\n per_page = request.args.get(\"per_page\", 2000)\n return (int(page), int(per_page))\n\n\n@app.route(\"/\")\ndef index():\n servers = Server.query.all()\n return render_template(\"index.html\", servers=servers)\n\n\n@app.route(\"/search\")\ndef search():\n q = request.args.get(\"q\")\n s = request.args.get(\"s\", 0)\n if s == \"\":\n s = 0\n page, per_page = get_pagination_args()\n\n server = Server.query.filter_by(id=s).first()\n\n messages = Message.query\n if server:\n messages = messages.filter_by(server=server)\n pagination = (\n messages.filter(Message.message.like(f\"%{q}%\"))\n .order_by(Message.timestamp)\n .paginate(page=page, per_page=per_page)\n )\n\n if server:\n description = f\"Search {server.name}: {q}\"\n else:\n description = f\"Search: {q}\"\n\n servers = Server.query.all()\n pagination_link = f\"/search?q={q}&s={s}\"\n return render_template(\n \"results.html\",\n q=q,\n s=int(s),\n servers=servers,\n pagination=pagination,\n pagination_link=pagination_link,\n description=description,\n )\n\n\n@app.route(\"/view/\")\ndef view(message_id):\n q = request.args.get(\"q\")\n\n # Look up the Message\n message = Message.query.filter_by(id=message_id).first()\n if not message:\n flash(\"Invalid message\")\n return redirect(\"/\")\n\n # Find messages before and after this one\n prev_messages = (\n Message.query.filter_by(channel=message.channel)\n .filter(Message.timestamp < message.timestamp)\n .order_by(Message.timestamp.desc())\n .limit(20)\n .all()\n )\n prev_messages.reverse()\n next_messages = (\n Message.query.filter_by(channel=message.channel)\n .filter(Message.timestamp > message.timestamp)\n .order_by(Message.timestamp)\n .limit(20)\n .all()\n )\n\n # Create a description\n description = f\"Message by {message.user.name}, in {message.server.name}, #{message.channel.name}\"\n\n servers = Server.query.all()\n return render_template(\n \"view.html\",\n q=q,\n s=message.server.id,\n channel=message.channel,\n servers=servers,\n description=description,\n active_message_id=message.id,\n message=message,\n prev_messages=prev_messages,\n next_messages=next_messages,\n )\n\n\n@app.route(\"/channel/\")\ndef channel(channel_id):\n page, per_page = get_pagination_args()\n\n # Look up the Channel\n channel = Channel.query.filter_by(id=channel_id).first()\n if not channel:\n flash(\"Invalid channel\")\n return redirect(\"/\")\n\n # Look up messages\n pagination = (\n Message.query.filter_by(channel=channel)\n .order_by(Message.timestamp)\n .paginate(page=page, per_page=per_page)\n )\n\n # Description\n description = f\"Messages in {channel.server.name}, #{channel.name}\"\n\n servers = Server.query.all()\n pagination_link = f\"/channel/{channel_id}?\"\n return render_template(\n \"results.html\",\n s=channel.server.id,\n channel=channel,\n servers=servers,\n pagination=pagination,\n pagination_link=pagination_link,\n 
description=description,\n )\n\n\n@app.route(\"/users\")\ndef user_list():\n users = User.query.all()\n servers = Server.query.all()\n return render_template(\"user_list.html\", servers=servers, users=users)\n\n\n@app.route(\"/user/\")\ndef user(user_id):\n page, per_page = get_pagination_args()\n\n # Look up the User\n user = User.query.filter_by(id=user_id).first()\n if not user:\n flash(\"Invalid user\")\n return redirect(\"/\")\n\n # Look up messages\n pagination = (\n Message.query.filter_by(user=user)\n .order_by(Message.timestamp)\n .paginate(page=page, per_page=per_page)\n )\n\n # Description\n description = f\"Messages from @{user.name}\"\n\n servers = Server.query.all()\n pagination_link = f\"/user/{user_id}?\"\n return render_template(\n \"results.html\",\n servers=servers,\n pagination=pagination,\n pagination_link=pagination_link,\n description=description,\n )\n\n\ndef main():\n app.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"micahflee/hacks-leaks-and-revelations","sub_path":"chapter-14/discord-analysis/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8615,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"18"} +{"seq_id":"71879214760","text":"from django.shortcuts import render, HttpResponse,redirect\nfrom django.contrib import messages\nfrom .models import Registration, Trips\n\n\ndef home(request):\n if 'id' in request.session:\n\n context = {\n \"all_trips\": Trips.objects.all(),\n \"logged_user\": Registration.objects.get(id = request.session['id']),\n }\n return render(request,\"home.html\", context)\n \n else:\n return redirect(\"/\")\n\ndef addTrip(request):\n return render(request,\"addTrip.html\")\n\ndef createTrip(request):\n \n errors = Trips.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key,value in errors.items():\n messages.error(request, value)\n return redirect(\"/content/addTrip\")\n else:\n destination_from_form = request.POST['destination']\n plan_from_form = request.POST['plan']\n start_date_from_form = request.POST['start_date']\n end_date_from_form = request.POST['end_date']\n user_id = int(request.POST['user_id'])\n\n logged_user = Registration.objects.get(id = user_id)\n\n Trips.objects.create(destination = destination_from_form, plan = plan_from_form, user = logged_user, start_date = start_date_from_form, end_date = end_date_from_form)\n\n return redirect('/content/home')\n\ndef editTrip(request,id):\n\n context= {\n \"trip_id\": int(id),\n \"trip_details\": Trips.objects.get(id = int(id)),\n }\n \n return render(request,\"editTrip.html\", context)\n\n\n\ndef updateTrip(request,id):\n errors = Trips.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key,value in errors.items():\n messages.error(request, value)\n return redirect(f\"/content/{id}/edit\")\n else:\n trip_id = int(id)\n new_destination_from_form = request.POST['destination']\n new_start_date_from_form = request.POST['start_date']\n new_end_date_from_form = request.POST['end_date']\n new_plan_from_form = request.POST['plan']\n \n trip_to_edit = Trips.objects.get(id=trip_id)\n\n trip_to_edit.destination = f\"{new_destination_from_form}\"\n trip_to_edit.start_date = f\"{new_start_date_from_form}\" \n trip_to_edit.end_date = f\"{new_end_date_from_form}\" \n trip_to_edit.plan = f\"{new_plan_from_form}\"\n\n trip_to_edit.save()\n\n return redirect(\"/content/home\")\n\n# def grant(request, id):\n\n# wish_id = int(id)\n# wish_to_grant = Wishes.objects.get(id = wish_id)\n# 
wish_to_grant.granted = \"True\"\n# wish_to_grant.save()\n\n\n# return redirect('/content/home')\n\n \n# # user_id = int(request.POST['user_id'])\n# # logged_user = Registration.objects.get(id = user_id)\n# # all_wishes = Wishes.objects.all()\n\n\n# # return render(request,\"grantedWishes.html\")\n\n# def stats(request,id):\n \n# user_id = int(id)\n# logged_user = Registration.objects.get(id = user_id)\n\n# user_wishes_granted = 0\n# for wishes in Wishes.objects.filter(user = logged_user, granted= 'True'):\n# user_wishes_granted += 1\n \n# user_wishes_ungranted = 0\n# for wishes in Wishes.objects.filter(user = logged_user, granted= 'False'):\n# user_wishes_ungranted += 1\n \n# user_total_wishes = user_wishes_granted + user_wishes_ungranted\n\n\n# total_wishes = 0\n# for wishes in Wishes.objects.filter(granted = 'True'):\n# total_wishes += 1\n\n \n\n# context = {\n# \"user_wishes_granted\": user_wishes_granted,\n# \"user_wishes_ungranted\": user_wishes_ungranted,\n# \"user_total_wishes\": user_total_wishes,\n# \"total_wishes\": total_wishes\n# }\n\n# return render(request,\"stats.html\", context)\ndef removeJoin(request,id,userid):\n trip_id = int(id)\n trip_to_unjoin = Trips.objects.get(id = trip_id)\n user_id = int(userid)\n logged_user = Registration.objects.get(id = user_id)\n\n trip_to_unjoin.join.remove(logged_user)\n \n return redirect('/content/home')\n\ndef joinTrip(request,id,userid):\n trip_id = int(id)\n trip_to_join = Trips.objects.get(id = trip_id)\n user_id = int(userid)\n logged_user = Registration.objects.get(id = user_id)\n\n trip_to_join.join.add(logged_user)\n \n return redirect('/content/home')\n\ndef tripDetails(request,id):\n\n context = {\n \"trip_details\": Trips.objects.get(id=int(id))\n }\n\n return render(request,\"tripDetails.html\",context)\n\n\ndef destroyTrip(request,id):\n trip_to_delete = Trips.objects.get(id =int(id))\n trip_to_delete.delete()\n return redirect(\"/content/home\")\n\n\n\n","repo_name":"kstratton1209/Trip","sub_path":"apps/content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37230208379","text":"from copy import deepcopy\nfrom math import floor\nfrom typing import List\n\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\n\nfrom constants import *\n\npd.options.display.max_rows = 100\n\nclass data_utils_class():\n\n def __init__(self, time_column_name : str = 'localminute', time_frequency : str = 'S', metadata_time_prefix : str = 'egauge_1s_') -> None:\n r\"\"\"\n Data utils class.\n\n Parameters\n ----------\n time_column_name : `str`, default `\"localminute\"`)\n Datetime column name in data files. Valid values can be `\"localminute\"` or `\"local_15min\"`.\n time_frequency : `str`, default `'S'`\n Time frequency the data should have.\n metadata_time_prefix : `str`, default `\"egauge_1s_\"`\n Metadata column prefix name. Can be `\"egauge_1s_\"` or `\"egauge_1min_\"`. 
\n \"\"\"\n \n self.time_column_name = time_column_name\n self.time_frequency = time_frequency\n self.metadata_time_prefix = metadata_time_prefix\n \n self.sensors_excluding_grid = [sensor for sensor in SENSOR_NAMES if sensor != \"grid\"]\n\n self.metadata_dtype = {'dataid': int, 'active_record': bool, 'building_type': str, 'city': str, 'state': str, metadata_time_prefix+'min_time': object, metadata_time_prefix + 'max_time': object}\n self.data_dtype = {sensor : float for sensor in SENSOR_NAMES}\n self.data_dtype['dataid'] = int\n self.data_dtype[time_column_name] = object\n \n availability_converter = lambda val: 0 if val == '' else int(val.strip('%'))\n\n self.metadata_converters = {metadata_time_prefix+'data_availability': availability_converter, metadata_time_prefix + 'data_availability': availability_converter}\n self.metadata_converters.update({sensor: (lambda val: True if val == \"yes\" else False) for sensor in SENSOR_NAMES})\n\n self.hierarchy = None\n\n def process_data(self, files: str, metadata_files:str, save_path: str, from_time: pd.Timestamp, end_time: pd.Timestamp, sensors: List[str] = None):\n r\"\"\"\n Used to process the original unpacked data. Missing rows (seconds) are filled with the last value of the 5 seconds that came before.\n If there is no pre-existing value within those 5 seconds, the value will be set to 0.\n Additionally values that represent over 1MW are removed and treated as if they were empty from the start.\n \n\n Paramters\n ---------\n files : `str`\n The globstring (\"path/to/files/*.csv\") to the orignal unpacked csv files\n metadata_files : `str`\n The path to the metadata file made by `generate_metadata`\n save_path : `str`\n The first location that can be used for storing intermediate results\n from_time : `pd.Timestamp`\n Time after which you want the 1-second data to start\n end_time : `pd.Timestamp`\n Time after which you want the 1-second data to end\n sensors : `List[str]`, default `None`\n List of sensors that will be in the final result. 
If the value is `None` all sensors are included.\n \"\"\"\n sensors_excluding_grid = sensors if sensors is not None else self.sensors_excluding_grid\n columns = ['dataid', self.time_column_name] + sensors_excluding_grid\n data_dtype = {sensor : float for sensor in sensors_excluding_grid}\n data_dtype['dataid'] = int\n data_dtype[self.time_column_name] = object\n \n ddf : dd.DataFrame = dd.read_csv(files, dtype = data_dtype, blocksize=10e7, usecols=columns)\n original_metadata : dd.DataFrame = dd.read_csv(metadata_files, dtype = self.metadata_dtype, blocksize=10e7, usecols=['dataid', 'state', 'delta_year'])\n original_metadata['delta_year'] = dd.to_timedelta(original_metadata['delta_year'])\n \n # Filter dataids not in metadata\n ddf = ddf.merge(original_metadata[['dataid', 'delta_year']], how=\"inner\", on=[\"dataid\"])\n \n # Align the recorded datetimes of the data to the same year\n ddf[self.time_column_name] = dd.to_datetime(ddf[self.time_column_name], utc=True)\n ddf[self.time_column_name] = ddf[self.time_column_name] + ddf['delta_year']\n del ddf['delta_year']\n\n dd.to_parquet(ddf.set_index('dataid').repartition(partition_size=\"100MB\").reset_index(), save_path+\"/temp/time-adjusted\", write_index=False, partition_on=[\"dataid\"], name_function=lambda x: f\"data-{x}.parquet\", schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'dataid': pa.int32()}, overwrite=True)\n dataids: List[int] = original_metadata['dataid'].compute().values.tolist()\n del original_metadata\n \n # Create DataFrame with 100% of the timestamps and store it\n date_range = dd.from_pandas(pd.date_range(start=from_time, end=end_time, freq=self.time_frequency).to_frame(name=self.time_column_name), sort=False, npartitions=1)\n date_range['index'] = 1\n date_range['index'] = date_range['index'].cumsum()\n dd.to_parquet(date_range.repartition(partition_size=\"100MB\"), save_path+\"/temp/date-range\", write_index=False, name_function=lambda x: f\"data-{x}.parquet\", schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC')}, overwrite=True)\n del date_range\n \n total = len(dataids)\n \n for progress, dataid in enumerate(dataids):\n print(\"\\n-------------------------------\", f\"| Starting with dataid={dataid}. progress: {progress}/{total} |\", \"--------------------------------\\n\")\n \n dataid_ddf = dd.read_parquet(save_path+\"/temp/time-adjusted\",\n filters=[('dataid', '==', dataid)],\n columns=sensors_excluding_grid+[self.time_column_name])\n date_range = dd.read_parquet(save_path+\"/temp/date-range\")\n merged = date_range.merge(dataid_ddf, how='left', on=[self.time_column_name]).assign(dataid=dataid)\n dd.to_parquet(merged.repartition(partition_size=\"100MB\"), save_path+\"/temp/rows-filled\",\n write_index=False, partition_on=[\"dataid\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'dataid': pa.int32()},\n append=True)\n \n print(\"\\n--------------------------------\", f\"| Done with dataid={dataid} |\", \"--------------------------------\\n\")\n \n print(\"\\n--------------------------------\", f\"| Outlier detection and missing value filling started. 
|\", \"--------------------------------\\n\")\n \n # If a sensor reports 1MW of charging we interperet it as an error and remove the datapoint\n # Fill in small gaps of 5 seconds with the previously encountered value and fill the rest up with 0's\n for progress, dataid in enumerate(dataids):\n print(\"\\n-------------------------------\", f\"| Starting with dataid={dataid}. progress: {progress}/{total} |\", \"--------------------------------\\n\")\n \n dataid_ddf = dd.read_parquet(save_path+\"/temp/rows-filled\",\n filters=[('dataid', '==', dataid)],\n columns=sensors_excluding_grid+[\"dataid\", self.time_column_name, \"index\"]).set_index(self.time_column_name)\n mean = dataid_ddf[sensors_excluding_grid].mean(axis=1)\n std = dataid_ddf[sensors_excluding_grid].std(axis=1)\n dataid_ddf[sensors_excluding_grid] = dataid_ddf[sensors_excluding_grid].mask(abs((dataid_ddf[sensors_excluding_grid] - mean) / std) > 3, np.nan).fillna(method=\"ffill\", limit=5).fillna(0)\n \n dataid_ddf['dataid'] = dataid_ddf['dataid'].cat.as_ordered()\n \n dd.to_parquet(self.timestamp_feature_extraction(dataid_ddf.reset_index()), save_path+\"/temp/timestamp_extracted\",\n write_index=False, partition_on=[\"dataid\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'dataid': pa.int32()},\n append=True)\n \n print(\"\\n--------------------------------\", f\"| Done with dataid={dataid} |\", \"--------------------------------\\n\")\n \n print(\"\\n--------------------------------\", f\"| Outlier detection and missing value filling done. |\", \"--------------------------------\\n\")\n\n \n print(\"\\n--------------------------------\", f\"| Repartitioning Appliance data started. |\", \"--------------------------------\\n\")\n \n # Repartition appliance data\n dataid_ddf = dd.read_parquet(save_path+\"/temp/timestamp_extracted\")\n dd.to_parquet(dataid_ddf.repartition(partition_size=\"100MB\"), save_path+\"/final_appliance\",\n write_index=False, partition_on=[\"dataid\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'dataid': pa.int32()})\n print(\"\\n--------------------------------\", f\"| Repartitioning Appliance data done. |\", \"--------------------------------\\n\")\n\n def generate_metadata(self, data_path: str, save_path: str, metadata_files: str, extra_metadata_cols : List[str] = [], community_size : int = 5):\n \"\"\"\n Generates the correct metadata for all the 1-second csv data files found with globstring `data_path`. 
If the complete metadata is provided,\n the columns listed in `extra_metadata_cols` will be added to the generated metadata.\n\n Parameters:\n -----------\n data_path : `str`\n Globstring for data files.\n save_path : `str`\n Where to save the generated metadata.\n metadata_files : `str`, default `None`\n Globstring for complete metadata files.\n extra_metadata_cols : `List[str]`, default `[]`\n Extra columns from complete metadata to be added to the generated metadata next to `['city', 'state']`.\n \"\"\"\n all_present = dd.Aggregation('all_present',\n chunk=lambda x: x.aggregate(lambda x: x.notna().any()),\n agg=lambda x: x.aggregate(lambda x: x.any()),\n finalize=lambda x: x.replace({True: \"yes\", False: \"\"}))\n\n columns = ['dataid', self.time_column_name] + SENSOR_NAMES\n \n ddf : dd.DataFrame = dd.read_csv(data_path, dtype = self.data_dtype, blocksize=10e7, usecols=columns)\n \n reorder_columns = list(ddf.columns)\n reorder_columns.insert(1, 'aggregation_abs_error')\n ddf = ddf.assign(aggregation_abs_error=(ddf[[x for x in SENSOR_NAMES if x not in [\"grid\", \"solar\", \"solar2\", \"battery1\"]]].sum(axis=1) - ddf[[\"solar\", \"solar2\", \"battery1\", \"grid\"]].sum(axis=1)).abs().where(cond=ddf[\"grid\"].notnull(), other=np.nan))[reorder_columns]\n\n agg_dict = {self.time_column_name: {self.metadata_time_prefix + 'min_time': 'min', self.metadata_time_prefix + 'max_time': 'max', self.metadata_time_prefix + 'data_availability': 'count'}, 'aggregation_abs_error': {'aggregation_abs_error_max' : 'max', 'aggregation_abs_error_mean': 'mean'}}\n agg_dict.update({sensor: {sensor: all_present} for sensor in SENSOR_NAMES})\n \n ddf = ddf.groupby('dataid', sort=False).agg(agg_dict)\n ddf.columns = ddf.columns.get_level_values(1)\n \n ddf[self.metadata_time_prefix + 'min_time'] = dd.to_datetime(ddf[self.metadata_time_prefix + 'min_time'], utc=True)\n ddf[self.metadata_time_prefix + 'max_time'] = dd.to_datetime(ddf[self.metadata_time_prefix + 'max_time'], utc=True)\n\n ddf[self.metadata_time_prefix + 'data_availability'] = (ddf[self.metadata_time_prefix + 'data_availability'] / ((ddf[self.metadata_time_prefix + 'max_time'] - ddf[self.metadata_time_prefix + 'min_time'] + pd.Timedelta(self.time_frequency)) / pd.Timedelta(self.time_frequency))).apply(lambda x: str(floor(x*10000)/100)+\"%\", meta=(self.metadata_time_prefix + 'data_availability', str))\n ddf['aggregation_abs_error_max'] = ddf['aggregation_abs_error_max'].apply(lambda x: str(floor(x*100)/100), meta=('aggregation_abs_error_max', str))\n ddf['aggregation_abs_error_mean'] = ddf['aggregation_abs_error_mean'].apply(lambda x: str(floor(x*100)/100), meta=('aggregation_abs_error_mean', str))\n \n merged_columns = ['city', 'state'] + extra_metadata_cols + list(ddf.columns)[0:]\n\n original_metadata : dd.DataFrame = dd.read_csv(metadata_files, skiprows=[1], dtype = self.metadata_dtype, blocksize=10e7, usecols=['dataid', 'city', 'state']+extra_metadata_cols)\n original_metadata = original_metadata.set_index('dataid')\n \n ddf : dd.DataFrame = ddf.merge(original_metadata, how='left', left_index=True, right_index=True)[merged_columns]\n \n # Drop dataids that don't have data within the from and end dates while ignoring years.\n ddf['delta_year'] = (END_TIME.year - ddf[self.metadata_time_prefix+'max_time'].dt.year).astype('timedelta64[Y]')\n ddf['delta_year'] = ddf['delta_year'].where(ddf[self.metadata_time_prefix+'max_time'] + ddf['delta_year'] >= END_TIME, ddf['delta_year'] + np.timedelta64(1, 'Y'))\n ddf = 
ddf[ddf[self.metadata_time_prefix+'min_time'] + ddf['delta_year'] <= FROM_TIME]\n \n # Assign communities and drop smaller communities\n ddf = ddf.sort_values(['city', 'dataid'])\n ddf['community'] = (ddf.groupby('city').cumcount() % community_size == 0).replace({True: 1, False: 0})\n ddf['community'] = ddf['community'].cumsum()\n community_sizes = ddf['community'].value_counts().reset_index()\n community_filter = community_sizes[community_sizes['count'] == community_size]['community'].compute()\n ddf = ddf[ddf['community'].isin(community_filter)]\n \n ddf.to_csv(save_path, single_file = True)\n \n \n def generate_aggregated_data(self, data_path: str, metadata: str):\n r\"\"\"\n Aggregates the data to a houshold, community, and city level and stores it to the `data_path` folder.\n\n Parameters:\n ----------\n data_path : `str`\n The path to the root folder of the data.\n metadata : `str`\n The path to the generated metadata file.\n \"\"\"\n ddf: dd.DataFrame = dd.read_parquet(data_path+\"/final_appliance\")\n\n ddf_household = ddf[['dataid', self.time_column_name, \"index\"]+FEATURE_COLUMNS].assign(\n total=ddf[[x for x in SENSOR_NAMES if x not in [\"solar\", \"solar2\", \"battery1\", \"grid\"]]].sum(axis=1) - ddf[[\"solar\", \"solar2\", \"battery1\"]].sum(axis=1))\n dd.to_parquet(ddf_household.repartition(partition_size=\"100MB\"), data_path+\"/final_household\",\n write_index=False, partition_on=[\"dataid\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'dataid': pa.int32()})\n \n meta_ddf: dd.DataFrame = dd.read_csv(metadata, dtype = self.metadata_dtype, blocksize=10e7, usecols=['dataid', 'community', 'city'])\n ddf_household_merged = ddf_household.merge(meta_ddf, how='left', on=[\"dataid\"])\n del ddf\n del meta_ddf\n del ddf_household\n \n total_community = ddf_household_merged.groupby(['community', self.time_column_name])['total'].sum()\n ddf_community = (ddf_household_merged.drop(columns=['total', 'dataid', 'city']).drop_duplicates(subset=['community', self.time_column_name])\n .join(total_community, on=['community', self.time_column_name]))\n dd.to_parquet(ddf_community.repartition(partition_size=\"100MB\"), data_path+\"/final_community\",\n write_index=False, partition_on=[\"community\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'community': pa.int32()})\n del total_community\n del ddf_community\n \n total_city = ddf_household_merged.groupby(['city', self.time_column_name])['total'].sum()\n ddf_city = ddf_household_merged.drop(columns=['total', 'dataid', 'community']).drop_duplicates(subset=['city', self.time_column_name]).join(total_city, on=['city', self.time_column_name])\n dd.to_parquet(ddf_city.repartition(partition_size=\"100MB\"), data_path+\"/final_city\",\n write_index=False, partition_on=[\"city\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'city': pa.string()})\n \n def normalize_data(self, data_path: str):\n ddf: dd.DataFrame = dd.read_parquet(data_path+\"/final_appliance\")\n mean = ddf[self.sensors_excluding_grid].mean(axis=0)\n std = ddf[self.sensors_excluding_grid].std(axis=0)\n ddf[self.sensors_excluding_grid] = (ddf[self.sensors_excluding_grid] - mean) / std\n ddf = self.normalize_timestamp_features(ddf)\n dd.to_parquet(ddf.repartition(partition_size=\"100MB\"), data_path+\"/normalized/final_appliance\",\n write_index=False, 
partition_on=[\"dataid\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'dataid': pa.int32()})\n \n ddf: dd.DataFrame = dd.read_parquet(data_path+\"/final_household\")\n mean = ddf[\"total\"].mean()\n std = ddf[\"total\"].std()\n ddf[\"total\"] = (ddf[\"total\"] - mean) / std\n ddf = self.normalize_timestamp_features(ddf)\n dd.to_parquet(ddf.repartition(partition_size=\"100MB\"), data_path+\"/normalized/final_household\",\n write_index=False, partition_on=[\"dataid\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'dataid': pa.int32()})\n \n ddf: dd.DataFrame = dd.read_parquet(data_path+\"/final_community\")\n mean = ddf[\"total\"].mean()\n std = ddf[\"total\"].std()\n ddf[\"total\"] = (ddf[\"total\"] - mean) / std\n ddf = self.normalize_timestamp_features(ddf)\n dd.to_parquet(ddf.repartition(partition_size=\"100MB\"), data_path+\"/normalized/final_community\",\n write_index=False, partition_on=[\"community\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'community': pa.int32()})\n \n ddf: dd.DataFrame = dd.read_parquet(data_path+\"/final_city\")\n mean = ddf[\"total\"].mean()\n std = ddf[\"total\"].std()\n ddf[\"total\"] = (ddf[\"total\"] - mean) / std\n ddf = self.normalize_timestamp_features(ddf)\n dd.to_parquet(ddf.repartition(partition_size=\"100MB\"), data_path+\"/normalized/final_city\",\n write_index=False, partition_on=[\"city\"],\n name_function=lambda x: f\"data-{x}.parquet\",\n schema={self.time_column_name: pa.timestamp(unit='s', tz='UTC'), 'city': pa.string()})\n \n # Normalize timestamp features with MinMax normalization\n def normalize_timestamp_features(self, ddf : dd.DataFrame) -> dd.DataFrame:\n ddf[\"dayofyear\"] = (ddf[\"dayofyear\"] - 1) / 365\n ddf[\"season\"] = (ddf[\"season\"] - 1) / 3\n ddf[\"month\"] = (ddf[\"month\"] - 1) / 11\n ddf[\"dayofweek\"] = (ddf[\"dayofweek\"] - 1) / 6\n ddf[\"hourofday\"] = ddf[\"hourofday\"] / 23\n ddf[\"minuteofhour\"] = ddf[\"minuteofhour\"] / 59\n return ddf\n \n def timestamp_feature_extraction(self, ddf : dd.DataFrame) -> dd.DataFrame:\n ddf['dayofyear'] = ddf[self.time_column_name].dt.dayofyear\n ddf['season'] = ddf[self.time_column_name].dt.quarter\n ddf['month'] = ddf[self.time_column_name].dt.month\n ddf['dayofweek'] = ddf[self.time_column_name].dt.dayofweek\n ddf['hourofday'] = ddf[self.time_column_name].dt.hour\n ddf['minuteofhour'] = ddf[self.time_column_name].dt.minute\n ddf['typeofday'] = 1 # 1=Weekend, 0=Weekday\n ddf['typeofday'] = ddf['typeofday'].where(ddf['dayofweek'].isin([5,6]), -1)\n return ddf\n \n def get_appliances(self, metadata_files : str, household : int) -> List[str]:\n original_metadata : dd.DataFrame = dd.read_csv(metadata_files, dtype = self.metadata_dtype, blocksize=10e7, usecols=['dataid']+self.sensors_excluding_grid, converters=self.metadata_converters)\n available = original_metadata[original_metadata['dataid'] == household].drop(columns=['dataid']).any()\n return available[available].compute().index.tolist()\n \n def get_households(self, metadata_files : str) -> List[int]:\n original_metadata : dd.DataFrame = dd.read_csv(metadata_files, dtype = self.metadata_dtype, blocksize=10e7, usecols=['dataid'])\n return original_metadata['dataid'].values.compute()\n \n def get_communities(self, metadata_files : str) -> List[int]:\n original_metadata : dd.DataFrame = dd.read_csv(metadata_files, dtype 
= self.metadata_dtype, blocksize=10e7, usecols=['community'])\n return original_metadata.drop_duplicates(subset=['community'])['community'].values.compute()\n \n def get_cities(self, metadata_files : str) -> List[str]:\n original_metadata : dd.DataFrame = dd.read_csv(metadata_files, dtype = self.metadata_dtype, blocksize=10e7, usecols=['city'])\n return original_metadata.drop_duplicates(subset=['city'])['city'].values.compute()\n \n def get_hierarchy_dict(self, metadata_files : str) -> dict:\n if self.hierarchy == None:\n self.hierarchy = {}\n original_metadata : pd.DataFrame = dd.read_csv(metadata_files, dtype = self.metadata_dtype, blocksize=10e7, usecols=['dataid', 'city', 'community']+self.sensors_excluding_grid, converters=self.metadata_converters)\n for city in original_metadata['city'].drop_duplicates().values.compute():\n self.hierarchy[city] = {}\n for community in original_metadata[original_metadata['city'] == city]['community'].drop_duplicates().values.compute():\n self.hierarchy[city][community] = {}\n for household in original_metadata[original_metadata['community'] == community]['dataid'].values.compute():\n self.hierarchy[city][community][household] = {}\n ddf = original_metadata[original_metadata['dataid'] == household][self.sensors_excluding_grid].any()\n for appliance in ddf[ddf].compute().index.tolist():\n self.hierarchy[city][community][household][appliance] = None\n \n return deepcopy(self.hierarchy)\n\n\n# Generators for model input and expected output\n################################################################################################################\n\ndef get_appliance_ec_input(data_path: str, dataid: int, sensor: str, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n if type(sensor) == bytes:\n sensor = sensor.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/normalized/final_appliance/dataid=\"+str(dataid), columns=[sensor, 'index'] + FEATURE_COLUMNS)\n for window in windows:\n indexes = [*range(window, window + INPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)][[sensor] + FEATURE_COLUMNS].compute().to_numpy(dtype=np.float64)\n yield np_array\n\ndef get_appliance_ec_output(data_path: str, dataid: int, sensor: str, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n if type(sensor) == bytes:\n sensor = sensor.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/final_appliance/dataid=\"+str(dataid), columns=[sensor, 'index'] + FEATURE_COLUMNS)\n for window in windows:\n indexes = [*range(window + INPUT_SIZE, window + INPUT_SIZE + OUTPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)][sensor].compute().to_numpy(dtype=np.float64)\n yield np_array\n \ndef get_household_ec_input(data_path: str, dataid: int, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/normalized/final_household/dataid=\"+str(dataid), columns=['index', 'total'] + FEATURE_COLUMNS)\n for window in windows:\n indexes = [*range(window, window + INPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)][['total'] + FEATURE_COLUMNS].compute().to_numpy(dtype=np.float64)\n yield np_array\n\ndef get_household_ec_output(data_path: str, dataid: int, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/final_household/dataid=\"+str(dataid), columns=['index', 'total'] + FEATURE_COLUMNS)\n 
for window in windows:\n indexes = [*range(window + INPUT_SIZE, window + INPUT_SIZE + OUTPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)]['total'].compute().to_numpy(dtype=np.float64)\n yield np_array\n \ndef get_community_ec_input(data_path: str, community: int, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/normalized/final_community/community=\"+str(community), columns=['index', 'total'] + FEATURE_COLUMNS)\n for window in windows:\n indexes = [*range(window, window + INPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)][['total'] + FEATURE_COLUMNS].compute().to_numpy(dtype=np.float64)\n yield np_array\n\ndef get_community_ec_output(data_path: str, community: int, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/final_community/community=\"+str(community), columns=['index', 'total'] + FEATURE_COLUMNS)\n for window in windows:\n indexes = [*range(window + INPUT_SIZE, window + INPUT_SIZE + OUTPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)]['total'].compute().to_numpy(dtype=np.float64)\n yield np_array\n \ndef get_city_ec_input(data_path: str, city: str, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n if type(city) == bytes:\n city = city.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/normalized/final_city/city=\"+city, columns=['index', 'total'] + FEATURE_COLUMNS)\n for window in windows:\n indexes = [*range(window, window + INPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)][['total'] + FEATURE_COLUMNS].compute().to_numpy(dtype=np.float64)\n yield np_array\n\ndef get_city_ec_output(data_path: str, city: str, windows: List[int]):\n if type(data_path) == bytes:\n data_path = data_path.decode('utf-8')\n if type(city) == bytes:\n city = city.decode('utf-8')\n ddf : dd.DataFrame = dd.read_parquet(data_path+\"/final_city/city=\"+city, columns=['index', 'total'] + FEATURE_COLUMNS)\n for window in windows:\n indexes = [*range(window + INPUT_SIZE, window + INPUT_SIZE + OUTPUT_SIZE)]\n np_array = ddf[ddf['index'].isin(indexes)]['total'].compute().to_numpy(dtype=np.float64)\n yield np_array\n","repo_name":"TwanBorst/aggregation-of-ec-forecasts-accross-spatial-levels","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":27906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1845727712","text":"from optparse import OptionParser\nfrom collections import OrderedDict\nimport multiprocessing\nimport sys\nimport time\n\nimport h5py\nimport joblib\nimport numpy as np\nimport pyBigWig\nimport pysam\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\nimport tensorflow as tf\n\nimport basenji\n\n################################################################################\n# basenji_scent.py\n#\n# Train an autoencoder to project the full functional profiles defined by a set\n# of Bigwig files into a lower dimension latent space that simultaneously\n# smooths the signal using cross-dataset correlations and compresses the space\n# required to store it. 
Win-win.\n################################################################################\n\n\n################################################################################\n# main\n################################################################################\ndef main():\n usage = ('usage: %prog [options] '\n '')\n parser = OptionParser(usage)\n parser.add_option(\n '-g',\n dest='gaps_file',\n help='Genome assembly gaps BED [Default: %default]')\n parser.add_option(\n '-l',\n dest='load_targets_file',\n help='Load the sampled target set from disk')\n parser.add_option('-m', dest='params_file', help='Model parameters')\n parser.add_option(\n '-p',\n dest='processes',\n default=1,\n type='int',\n help='Number parallel processes to load data [Default: %default]')\n parser.add_option(\n '-n',\n dest='num_samples',\n type='int',\n default=1000,\n help='Genomic positions to sample for training data [Default: %default]')\n parser.add_option(\n '-r',\n dest='reconstruct_out_pre',\n help='Save the recontructed validation targets to disk')\n parser.add_option(\n '-s',\n dest='save_targets_file',\n help='Save the sampled target set to disk')\n parser.add_option(\n '-v',\n dest='valid_pct',\n type='float',\n default=0.1,\n help='Proportion of the data for validation [Default: %default]')\n parser.add_option(\n '-w',\n dest='whiten',\n default=False,\n action='store_true',\n help='Whiten functional data [Default: %default]')\n (options, args) = parser.parse_args()\n\n if len(args) != 3:\n parser.error(\n 'Must provide genome file, sample Wig/BigWig labels and paths, and '\n 'model output file')\n else:\n genome_file = args[0]\n sample_wigs_file = args[1]\n model_out_file = args[2]\n\n if options.load_targets_file is not None:\n targets = np.load(options.load_targets_file)\n\n else:\n #######################################################\n # sample genome\n #######################################################\n chrom_segments = basenji.genome.load_chromosomes(genome_file)\n if options.gaps_file:\n chrom_segments = basenji.genome.split_contigs(chrom_segments,\n options.gaps_file)\n\n # determine how frequently to sample\n genome_sum = 0\n for chrom in chrom_segments:\n for seg_start, seg_end in chrom_segments[chrom]:\n genome_sum += (seg_end - seg_start)\n\n sample_every = genome_sum // options.num_samples\n\n # sample positions\n chrom_samples = {}\n for chrom in chrom_segments:\n chrom_samples[chrom] = []\n sample_counter = 0\n for seg_start, seg_end in chrom_segments[chrom]:\n seg_i = seg_start\n while seg_i + (sample_every - sample_counter) < seg_end:\n seg_i += (sample_every - sample_counter)\n chrom_samples[chrom].append(seg_i)\n sample_counter = 0\n\n sample_counter += seg_end - seg_i\n\n #######################################################\n # read from bigwigs\n #######################################################\n # get wig files and labels\n target_wigs = OrderedDict()\n for line in open(sample_wigs_file):\n a = line.split()\n target_wigs[a[0]] = a[1]\n\n print('Loading from BigWigs')\n sys.stdout.flush()\n t0 = time.time()\n\n pool = multiprocessing.Pool(options.processes)\n targets_t = pool.starmap(bigwig_read,\n [(wig_file, chrom_samples)\n for wig_file in target_wigs.values()])\n\n # convert and transpose\n targets = np.array(targets_t).T\n\n # shuffle\n np.random.shuffle(targets)\n\n if options.save_targets_file is not None:\n np.save(options.save_targets_file, targets)\n\n print('%ds' % (time.time() - t0))\n\n print('\\nSampled dataset', targets.shape, '\\n')\n\n # 
pre-process\n targets = np.nan_to_num(targets)\n if options.whiten:\n targets = preprocessing.scale(targets)\n\n # divide train and valid\n tv_line = int(options.valid_pct * targets.shape[0])\n\n #######################################################\n # model parameters and placeholders\n #######################################################\n # read parameters\n job = basenji.dna_io.read_job_params(options.params_file)\n\n job['num_targets'] = targets.shape[1]\n\n # construct model\n print('Constructing model')\n sys.stdout.flush()\n\n if job.get('model', 'autoencoder') == 'pca':\n # construct\n model = PCA(n_components=job['latent_dim'])\n\n # train\n model.fit(targets[tv_line:])\n\n # validate\n latent_valid = model.transform(targets[:tv_line])\n recon_valid = model.inverse_transform(latent_valid)\n valid_var = targets[:tv_line].var()\n recon_var = (targets[:tv_line] - recon_valid).var()\n r2 = 1.0 - np.divide(recon_var, valid_var)\n print('Valid R2: %7.5f' % r2.mean())\n\n # save\n joblib.dump(model, model_out_file)\n\n if options.reconstruct_out_pre:\n np.save(options.reconstruct_out_pre, recon_valid)\n\n else:\n model = basenji.autoencoder.AE(job)\n\n #######################################################\n # train\n #######################################################\n # initialize batcher\n batcher_train = basenji.batcher.BatcherT(\n targets[tv_line:], model.batch_size, shuffle=True)\n batcher_valid = basenji.batcher.BatcherT(targets[:tv_line],\n model.batch_size)\n\n # checkpoints\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n t0 = time.time()\n\n # initialize variables\n sess.run(tf.initialize_all_variables())\n\n train_loss = None\n best_r2 = -1000\n early_stop_i = 0\n\n for epoch in range(1000):\n if early_stop_i < model.early_stop:\n t0 = time.time()\n\n # save previous\n train_loss_last = train_loss\n\n # train\n train_loss = model.train_epoch(sess, batcher_train)\n\n # validate\n valid_loss, valid_r2 = model.test(sess, batcher_valid)\n\n best_str = ''\n if valid_r2 > best_r2:\n best_r2 = valid_r2\n best_str = 'best!'\n early_stop_i = 0\n saver.save(sess, model_out_file)\n else:\n early_stop_i += 1\n\n # measure time\n et = time.time() - t0\n if et < 600:\n time_str = '%3ds' % et\n elif et < 6000:\n time_str = '%3dm' % (et / 60)\n else:\n time_str = '%3.1fh' % (et / 3600)\n\n # print update\n print('Epoch %3d: Train loss: %7.5f, Valid loss: %7.5f, Valid R2: '\n '%7.5f, Time: %s %s' % (epoch + 1, train_loss, valid_loss,\n valid_r2, time_str, best_str))\n sys.stdout.flush()\n\n if options.reconstruct_out_pre:\n preds = model.predict(sess, batcher_valid)\n np.save(options.reconstruct_out_pre, preds)\n\n\ndef bigwig_read(wig_file, chrom_samples):\n print(' %s' % wig_file)\n sys.stdout.flush()\n\n # initialize target values\n targets = []\n\n # open wig\n wig_in = pyBigWig.open(wig_file)\n\n # read position values\n for chrom in chrom_samples:\n for pos in chrom_samples[chrom]:\n try:\n pos_val = wig_in.values(chrom, pos, pos + 1)[0]\n except:\n print(chrom, pos)\n exit(1)\n targets.append(pos_val)\n\n return targets\n\n\n################################################################################\n# __main__\n################################################################################\nif __name__ == '__main__':\n 
main()\n","repo_name":"seyuboglu/genome-attention","sub_path":"bin/basenji_scent.py","file_name":"basenji_scent.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"75419626920","text":"import numpy as np\nimport random\n\nclass QLearningModel:\n \"\"\"\n Defines online Q-learning setup and update rule for a given gridworld\n \"\"\"\n \n def __init__(self, g, alpha):\n self.gamma = g.gamma\n Q_dims = [g.gridworld_length, g.gridworld_width, g.num_orientations,\n 3, 3, 3, \n g.getNumActions()]\n self.Q = np.zeros(Q_dims)\n self.alpha = alpha\n self.actions = g.actions[0]\n self.g = g\n \n def lookahead(self, s, a):\n g = self.g\n s = g.state_to_ind(s)\n a = g.action_to_ind(a)\n Q_index = s + [a]\n return self.Q[tuple(Q_index)]\n \n def update(self, s, a, r, s_prime):\n g = self.g\n s = g.state_to_ind(s)\n a = g.action_to_ind(a)\n s_prime = g.state_to_ind(s_prime)\n Q_index = s + [a]\n self.Q[tuple(Q_index)] += self.alpha * (r + self.gamma * max(self.Q[tuple(s_prime)]) - self.Q[tuple(Q_index)])\n\n\nclass ValueIterationModel:\n \"\"\"\n Defines an offline policy based on value iteration\n \"\"\"\n\n def __init__(self, g, residual = 0.001, maxIter = 1000):\n self.res = residual\n self.maxIter = maxIter\n self.converged = False\n self.g = g\n self.actions = g.actions[0]\n self.gamma = g.gamma\n self.U_dims = [g.gridworld_length, g.gridworld_width, g.num_orientations]\n self.U = np.zeros(self.U_dims)\n self.policy = np.zeros(self.U_dims) \n\n def value_update(self):\n counter = 0\n while not self.converged:\n counter += 1\n maxRes = 0 \n printer = 0 \n A = self.g.actions[0]\n for i in range(self.U_dims[0]):\n for j in range(self.U_dims[1]):\n for o in range(self.U_dims[2]):\n self.g.state[:3] = [i,j,o]\n self.g.state[3:] = self.g.getSurroundingMarkers()\n old_U = self.U[i,j,o]\n old_state = self.g.state\n #Now you have self.g.state iterating through every x,y,o and the associated surrounding markers\n #The U and policy would be a list of length 4 for each x,y\n\n #I have replaced val with self.U[i,j,o], is that correct?\n self.U[i,j,o] = max((self.g.takeAction(a, old_state) + self.gamma*(self.g.failChance*self.U[i,j,o] + (1-self.g.failChance)*self.U[tuple(self.g.state[:3])])) for a in A)\n \n if maxRes <= abs(self.U[i,j,o] - old_U):\n maxRes = abs(self.U[i,j,o] - old_U)\n \n\n #print(maxRes)\n\n \"\"\"\n for s_ind, val in np.ndenumerate(self.U):\n self.g.state = list(s_ind)\n old_U = self.U[s_ind]\n self.U[s_ind] = max((self.g.takeAction(a, self.g.state) + \n self.gamma*(self.g.failChance*val + (1-self.g.failChance)*self.U[tuple(self.g.state)])) for a in A)\n if maxRes <= abs(self.U[s_ind] - old_U):\n maxRes = abs(self.U[s_ind] - old_U)\n print(maxRes)\n \"\"\"\n if maxRes <= self.res:\n self.converged = True\n if counter == self.maxIter:\n break\n return self.converged\n\n def lookahead(self, s, a):\n self.g.state = s\n value = self.g.takeAction(a) + self.gamma*(self.g.failChance*self.U[tuple(s[:3])] + (1-self.g.failChance)*self.U[tuple(self.g.state[:3])])\n return value\n\n\nclass Policy:\n \"\"\"\n Defines a policy for a given model\n \"\"\"\n \n def __init__(self, g):\n self.g = g\n self.isModelUpdate = False\n \n def next_action(self, s):\n raise NotImplementedError(\"You must implement this method\")\n \n def greedy_action(self, model, s):\n list_temp = []\n for a_i in range(len(self.g.actions[0])):\n a = self.g.actions[0][a_i]\n list_temp.append([model.lookahead(s, a) for a in 
self.g.actions[0]])\n max_val = max(list_temp)\n # choose randomly from the best possible actions in the event of a tie\n candidate_actions = [self.g.actions[0][a_i] for a_i in range(len(self.g.actions[0])) if list_temp[a_i] == max_val]\n next_a = random.choice(candidate_actions)\n \n return next_a\n\n\nclass RandomPolicy(Policy):\n \"\"\"\n Defines a random policy\n \"\"\"\n \n def __init__(self, g):\n Policy.__init__(self, g)\n \n def next_action(self, model, s):\n return random.choice(self.g.actions[0])\n\nclass GreedyPolicy(Policy):\n \"\"\"\n Helper class to apply a learned policy\n \"\"\"\n def __init__(self, g):\n Policy.__init__(self, g)\n \n def next_action(self, model, s):\n return self.greedy_action(model, s)\n \n \nclass EpsilonGreedyExploration(Policy):\n \"\"\"\n Defines exploration policy with specified parameter epsilon and decay rate alpha\n \"\"\"\n\n def __init__(self, g, epsilon, alpha=1):\n Policy.__init__(self, g)\n self.isModelUpdate = True\n self.epsilon = epsilon\n self.alpha = alpha\n \n def next_action(self, model, s):\n A = model.actions\n if np.random.uniform() < self.epsilon:\n next_a = random.choice(model.actions)\n else:\n next_a = self.greedy_action(model, s)\n self.epsilon *= self.alpha\n return next_a\n \n","repo_name":"anujshetty/SearchAndResQ","sub_path":"Learning.py","file_name":"Learning.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7455558711","text":"import random\nfrom sys import stderr\nimport requests\n\nURL_BASE = 'https://game.maj-soul.com/1/'\nMAX_ATTEMPT_PER_SERVER = 2\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.88 Safari/537.36\"\nHEADERS = {\n \"User-Agent\": USER_AGENT,\n \"If-Modified-Since\": \"0\",\n \"Referer\": URL_BASE,\n \"sec-ch-ua\": '\"Chromium\";v=\"100\", \"Google Chrome\";v=\"100\"',\n \"sec-ch-ua-platform\": \"Windows\",\n}\n\nclass Majsoul:\n def __init__(self) -> None:\n self.session = requests.Session()\n self.set_requests_session()\n\n def set_requests_session(self):\n self.session.headers= HEADERS\n\n # Expected to be hard-coded according to mahjong soul api (2022-07-14)\n # notify the author if this part throws Error\n def get_majsoul_resource(self, path: str):\n url = URL_BASE + path\n return self.session.get(url).json()\n\n def connect(self):\n # Parts below are hard-coded according to mahjong soul api (2022-07-14)\n # notify the author if this part throws KeyError\n try:\n ws_scheme = 'wss'\n version = self.get_majsoul_resource(\"version.json\")\n resversion = self.get_majsoul_resource('resversion{}.json'.format(version['version']))\n protobuf_version = resversion['res'][\"res/proto/liqi.json\"]['prefix']\n protobuf_schema = self.get_majsoul_resource('{}/res/proto/liqi.json'.format(protobuf_version))\n config = self.get_majsoul_resource('{}/config.json'.format(resversion['res']['config.json']['prefix']))\n ip_config = [x for x in config['ip'] if x['name'] == 'player'][0]\n # a list of urls where we can request for game server information\n request_game_server_url_list = ip_config['region_urls']\n except KeyError as e:\n print(e, stderr)\n print(\"Majsoul api may have changed. 
Please notify the author.\")\n\n last_error: Exception = None\n\n for attempt in range(len(request_game_server_url_list) * MAX_ATTEMPT_PER_SERVER):\n request_game_server_url = request_game_server_url_list[attempt // MAX_ATTEMPT_PER_SERVER]['url']\n # the final url where we request for game server info\n # javascript float type has 17 digits after the dot.\n request_game_server_url \\\n += \"?service=ws-gateway&protocol=ws&ssl=true&rv=\" \\\n + str(random.random())[2:].ljust(17, '0');\n\n try:\n game_server_info = self.session.get(request_game_server_url).json()\n print(game_server_info)\n # check maintenance\n if 'maintenance' in game_server_info:\n print(\"Majsoul is in maintenance\")\n return\n\n game_server_url = random.choice(game_server_info[\"servers\"])\n\n if 'maj-soul' not in game_server_url:\n game_server_url += '/gateway'\n print(game_server_url)\n\n break\n except Exception as e:\n last_error = e\n print(e, stderr)\n continue\n # failed to fetch game servers\n if last_error:\n print(e, stderr)\n return last_error\n\n\nif __name__ == '__main__':\n majsoul = Majsoul()\n majsoul.connect()","repo_name":"makersmelx/mahjong-bot","sub_path":"mahjong_bot/majsoul.py","file_name":"majsoul.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"34158050843","text":"import datetime\nimport requests\nfrom flask import Flask\nfrom flask_restful import Resource, Api\nfrom requests.compat import urljoin, quote_plus\n\napp = Flask(__name__)\napi = Api(app)\n\nbase_url = 'https://api.wmata.com/Incidents.svc/json/Incidents'\n\n\nclass Train(Resource):\n def get(self):\n # Request headers\n headers = {'api_key': '88c04b279955416f8605d0b76ebc8974'}\n\n response = requests.get(base_url, headers=headers)\n response = response.json()\n\n return(response)\n\n\napi.add_resource(Train, '/')\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80, debug=True)\n","repo_name":"OrlandoSoto/my-docker","sub_path":"incidents/incident.py","file_name":"incident.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71426785640","text":"import os\n\nimport numpy as np\n\nimport pandas as pd\n#definition d'un dataframe\n\ndf_train = pd.read_csv(\"../input/train_users_2.csv\")\n\ndf_train.sample(n=5) \n\n#df_train.head(n=5) \n#charge les data de test\n\n#pr traitement simult des data de tests et de train\n\ndf_test = pd.read_csv(\"../input/test_users.csv\")\n\ndf_test.sample(n=5) \ndf_all = pd.concat((df_train,df_test),axis=0,ignore_index=True)\n\ndf_all.head(n=5)\ndf_all.drop('date_first_booking',axis=1,inplace=True)# on supprime la colonne\n\ndf_all.sample(n=5)\n#clean format des dates\n\ndf_all['date_account_created'] = pd.to_datetime(df_all['date_account_created'], format='%Y-%m-%d')\n\ndf_all.sample(n=5)\n#format du time stamp\n\ndf_all['timestamp_first_active'] = pd.to_datetime(df_all['timestamp_first_active'], format='%Y%m%d%H%M%S')\n\ndf_all.sample(n=5)\n#suppression des data outliers (résa entre 0 et 15 ans par exemple) (façon simple sans se compliquer la vie)\n\ndef remove_age_outliers(x, min_value=15, max_value=90):\n\n if np.logical_or(x<=min_value, x>=max_value):\n\n return np.nan\n\n else:\n\n return x\n\n \n#df_all['age'].apply(lambda x: remove_age_outliers(x)) #crash en python 2, on peut comparer un nan avec un num\n\ndf_all['age']=df_all['age'].apply(lambda x: remove_age_outliers(x) if(not 
np.isnan(x))else x)\n\ndf_all['age'].fillna(-1, inplace=True) #fonctionne ici, mais pas forcément pour un autre projet\n\ndf_all.sample(n=5)\n#conversion age en entier\n\ndf_all.age = df_all.age.astype(int) #equivalent à df_all['age']\n\ndf_all.sample(n=5)\ndef check_Nan_Values_in_df(df):\n\n for col in df:\n\n nan_count = df[col].isnull().sum()\n\n \n\n if nan_count != 0:\n\n print(col + \"=>\"+str(nan_count)+ \" Nan Values\")\ncheck_Nan_Values_in_df(df_all)\n#pas normal d'avoir des Nan sur first affiliate tracked\n\ndf_all['first_affiliate_tracked'].fillna(-1, inplace=True)\n\ncheck_Nan_Values_in_df(df_all)\n\ndf_all.sample(n=5)\n#on dégage le time stamp car redondant avec la date et heure min sec inutile (redondance 99% du temps)\n\ndf_all.drop('timestamp_first_active',axis=1, inplace=True)\n\n#on dégage la langue, pour essayer\n\ndf_all.drop('language',axis=1, inplace=True)\n\ndf_all.sample(n=5)\n#on dégage ceux avant février 2013 (retrait des early outliers)\n\n#on pourrait laisser janvier, potentiellement pour capter les effets saisonnniers\n\ndf_all = df_all[df_all['date_account_created']>'2013-02-01']\n\ndf_all.sample(n=5)\n\n#enregistrement du nouveau csv propre\n\nif not os.path.exists(\"output\"):\n\n os.makedirs(\"output\")\n\n \n\ndf_all.to_csv(\"output/cleaned.csv\", sep=',', index=False)","repo_name":"aorursy/new-nb-3","sub_path":"gblondet_test-kaggle.py","file_name":"gblondet_test-kaggle.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23136262642","text":"from xml.etree.ElementInclude import include\nimport os\nfrom tqdm import tqdm\nimport argparse\nfrom torch.nn import DataParallel\nimport torch\nimport json\nfrom transformers import BertTokenizer\nfrom utils import extract_relevant_idx, saving_category_vocabulary_file, making_abb_list, remove_slash, remove_suid\nfrom preprocessing import PreproSUTranscript, preprocess_su_name\nfrom model import SUClassModel\nfrom dataset import SUdataset\nfrom filtering import Filtering\n\n\ndef preprocess_transcript(args):\n study_unit_list_path = os.path.join(args.temp_dir, 'study_unit_list')\n with open(study_unit_list_path, 'r') as f:\n file_list = json.load(f)\n #create abbrevation\n abb, non_unique_abb = making_abb_list()\n\n #save prerprocessed data\n for name in tqdm(file_list):\n remove_su_id = remove_suid(name)\n file_name = remove_slash(name)\n if os.path.exists(f'{args.transcript_path}/{file_name}.source') == False:\n print(f'{file_name}')\n PreproSUTranscript(raw_transcript_path=args.raw_transcript_path, raw_doc_transcript_path=args.raw_doc_transcript_path, file_name = file_name,\n remove_su_id=remove_su_id, abb=abb, non_unique_abb=non_unique_abb).save_processed_transcript(args.transcript_path)\n\ndef run(args):\n study_unit_list_path = os.path.join(args.temp_dir, 'study_unit_list')\n with open(study_unit_list_path,'r') as f:\n file_list = json.load(f)\n #create abbrevation\n abb,_ = making_abb_list()\n #define model\n model = SUClassModel.from_pretrained(args.pretrained_lm, output_attentions=False, output_hidden_states=False, num_labels=2)\n if torch.cuda.is_available():\n model = DataParallel(model.cuda())\n model.eval()\n tokenizer = BertTokenizer.from_pretrained(args.pretrained_lm, do_lower_case=True)\n #filtering\n for name in tqdm(file_list):\n remove_su_id = remove_suid(name)\n file_name = remove_slash(name)\n if os.path.exists(f'{args.temp_dir}/category_vocabulary/{file_name}_category_vocab.pt') == False:\n 
print(f'{file_name}')\n included_abb, _, _, _, preprocessed_label_name = preprocess_su_name(remove_su_id,abb)\n print('words that consist label :', preprocessed_label_name)\n #create dataset\n data = SUdataset(args.temp_dir, args.transcript_path, file_name, preprocessed_label_name, tokenizer, args.truncated_len)\n #filtering\n results = Filtering(data,args.temp_dir, included_abb,\n args.category_vocab_size).making_catevoca_and_classification(model, top_pred_num=args.top_pred_num,\n match_threshold=args.match_threshold, doc_weight=args.doc_weight)\n if results == None:\n print(f'There is no transcript which includes label : {file_name}')\n continue\n #saving results\n extract_relevant_idx(args.out_path, args.transcript_path, file_name, results['label_words'], results['relevant_idx'], results['cal_freq'], included_abb, num_word_threshold=args.num_word_threshold, low_frequency=args.low_frequency)\n if args.saving_category_vocab_file == True:\n saving_category_vocabulary_file(args.temp_dir, file_name, results['category_vocab'], results['only_manually_cate_vocab'], results['only_youtube_cate_vocab'])\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='run', formatter_class=argparse.ArgumentDefaultsHelpFormatter) \n parser.add_argument('--temp_dir', default=None, help='path of saving the results')\n parser.add_argument('--raw_transcript_path',default= None,help='path of raw youtube transcripts')\n parser.add_argument('--raw_doc_transcript_path',default= None, help='path of raw manually collected document transcripts')\n parser.add_argument('--transcript_path',default=None,help='path of the preprocessed transcripts')\n parser.add_argument('--pretrained_lm',default='allenai/scibert_scivocab_uncased',help='pretrained model')\n parser.add_argument('--top_pred_num', default=50, type=int, help='language model MLM top prediction cutoff')\n parser.add_argument('--category_vocab_size',default=50, type=int, help='size of category vocabulary for each study unit')\n parser.add_argument('--match_threshold',default=30, type=int, help='matching threshold whether each transcript is relevant to study unit')\n parser.add_argument('--low_frequency',default=0.001, type=float, help='criteria for filtering out lower-frequency')\n parser.add_argument('--doc_weight', default=2, type=int, help='define how much affect manually collected document to create category vocabulary')\n parser.add_argument('--truncated_len',default=100, type=int, help='length that transcripts are padded/truncated to, one unit means 512 length of tokens, 100 -> 512*100')\n parser.add_argument('--num_word_threshold',default=3, type=int, help='how many words are related to classify whether transcript is relevant')\n parser.add_argument('--saving_category_vocab_file',default=True,help='save category vocabulary file')\n parser.add_argument('--out_path',default=None,help='path of saving filter-in content index')\n args = parser.parse_args()\n \n print(args)\n run(args)\n","repo_name":"brianimpact/air4all","sub_path":"confiltering/src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"26517789005","text":"from acme import types\nfrom acme.adders import reverb as reverb_adders\nfrom acme.utils import reverb_utils\nimport numpy as np\nimport reverb\nimport tree\n\nfrom absl.testing import absltest\n\n\nclass ReverbUtilsTest(absltest.TestCase):\n\n def 
test_make_replay_table_preserves_table_info(self):\n limiter = reverb.rate_limiters.SampleToInsertRatio(\n samples_per_insert=1, min_size_to_sample=2, error_buffer=(0, 10))\n table = reverb.Table(\n name='test',\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n max_size=10,\n rate_limiter=limiter)\n new_table = reverb_utils.make_replay_table_from_info(table.info)\n new_info = new_table.info\n\n # table_worker_time is not set by the above utility since this is meant to\n # be monitoring information about any given table. So instead we copy this\n # so that the assertion below checks that everything else matches.\n\n new_info.table_worker_time.sleeping_ms = (\n table.info.table_worker_time.sleeping_ms)\n\n self.assertEqual(new_info, table.info)\n\n _EMPTY_INFO = reverb.SampleInfo(*[() for _ in reverb.SampleInfo.tf_dtypes()])\n _DUMMY_OBS = np.array([[[0], [1], [2]]])\n _DUMMY_ACTION = np.array([[[3], [4], [5]]])\n _DUMMY_REWARD = np.array([[6, 7, 8]])\n _DUMMY_DISCOUNT = np.array([[.99, .99, .99]])\n _DUMMY_NEXT_OBS = np.array([[[1], [2], [0]]])\n _DUMMY_RETURN = np.array([[20.77, 14.92, 8.]])\n\n def _create_dummy_steps(self):\n return reverb_adders.Step(\n observation=self._DUMMY_OBS,\n action=self._DUMMY_ACTION,\n reward=self._DUMMY_REWARD,\n discount=self._DUMMY_DISCOUNT,\n start_of_episode=True,\n extras={'return': self._DUMMY_RETURN})\n\n def _create_dummy_transitions(self):\n return types.Transition(\n observation=self._DUMMY_OBS,\n action=self._DUMMY_ACTION,\n reward=self._DUMMY_REWARD,\n discount=self._DUMMY_DISCOUNT,\n next_observation=self._DUMMY_NEXT_OBS,\n extras={'return': self._DUMMY_RETURN})\n\n def test_replay_sample_to_sars_transition_is_sequence(self):\n fake_sample = reverb.ReplaySample(\n info=self._EMPTY_INFO, data=self._create_dummy_steps())\n fake_transition = self._create_dummy_transitions()\n transition_from_sample = reverb_utils.replay_sample_to_sars_transition(\n fake_sample, is_sequence=True)\n tree.map_structure(np.testing.assert_array_equal, transition_from_sample,\n fake_transition)\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"deepmind/acme","sub_path":"acme/utils/reverb_utils_test.py","file_name":"reverb_utils_test.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":3100,"dataset":"github-code","pt":"18"} +{"seq_id":"15656088173","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport arrow\nimport torch\nimport numpy as np\nimport torch.optim as optim\n\n\n\nclass DeepNoise2Fourier(torch.nn.Module):\n \"\"\"\n Deep Fourier Feature Generator\n \"\"\"\n def __init__(self, nsize, fsize):\n \"\"\"\n Args:\n - nsize: size of input noise\n - fsize: size of output fourier feature\n \"\"\"\n super(DeepNoise2Fourier, self).__init__()\n self.nsize = nsize\n self.n2f = torch.nn.Sequential(\n torch.nn.Linear(nsize, 100), # [ nf, 100 ]\n torch.nn.Tanh(), \n torch.nn.Linear(100, fsize), # [ nf, fsize ]\n torch.nn.Tanh())\n\n def forward(self, nf, mean=0, std=10):\n \"\"\"\n custom forward function returning fourier features with number of nf.\n \"\"\"\n noise = torch.FloatTensor(nf, self.nsize).normal_(mean=mean,std=std)\n fourier = self.n2f(noise)\n return fourier\n\n\n\nclass FourierPointProcess(torch.nn.Module):\n \"\"\"\n Point Process with Deep Fourier Triggering Kernel\n \"\"\"\n def __init__(self, nsize, fsize, dsize):\n \"\"\"\n Args:\n - fsize: size of fourier feature dimension\n - dsize: size of data dimension\n \"\"\"\n super(FourierPointProcess, 
self).__init__()\n # sub model\n self.n2f = DeepNoise2Fourier(nsize, fsize)\n # model parameters\n self.W = torch.nn.Parameter(torch.FloatTensor(fsize, dsize).uniform_(0, 1))\n self.alpha = torch.nn.Parameter(torch.ones(1))\n\n def forward(self, X, nf=100, T=10.):\n \"\"\"\n custom forward function returning conditional intensities and corresponding log-likelihood\n \"\"\"\n # pre-computed fourier features for fast log-likelihood calculation\n # fourier [ nf, fsize ] x W [ fsize, dsize ] = Womg.t [ nf, dsize ]\n self.Womg = torch.matmul(self.n2f(nf), self.W).transpose(0, 1) # [ dsize, nf ]\n # return conditional intensities and corresponding log-likelihood\n return self._log_likelihood(X, T)\n\n def _fourier_kernel(self, xi, xj, nf=None):\n \"\"\"\n return the kernel value evaluated at xi and xj. i.e., \n kernel K(xi,xj) = sum(cos(x1*omega)*cos(x2*omega))\n\n Args:\n - xi, xj: evaluation point [ batch_size, dsize ]\n - nf: (optional) if nf is not None, then recalculate the Womg\n\n Return:\n - kij : kernel value [ batch_size, 1 ]\n \"\"\"\n Womg = self.Womg \\\n if nf is None else \\\n torch.matmul(self.n2f(nf), self.W).transpose(0, 1)\n cos1 = torch.cos(torch.matmul(xi, Womg)) # [ batch_size, nf ]\n cos2 = torch.cos(torch.matmul(xj, Womg)) # [ batch_size, nf ]\n sin1 = torch.sin(torch.matmul(xi, Womg)) # [ batch_size, nf ]\n sin2 = torch.sin(torch.matmul(xj, Womg)) # [ batch_size, nf ]\n kij = torch.mean(cos1 * cos2 + sin1 * sin2, dim=1) # [ batch_size ]\n kij = self.alpha * kij.unsqueeze_(1) # [ batch_size ]\n return kij\n \n def _lambda(self, xi, ht, nf=None):\n \"\"\"\n return conditional intensity given x\n\n Args:\n - xi: current ith point [ batch_size, dsize ]\n - ht: history points [ batch_size, seq_len, dsize ]\n - nf: number of sampled fourier features\n\n Return:\n - lami: ith lambda [ batch_size ]\n \"\"\"\n batch_size, seq_len, dsize = ht.shape\n if seq_len > 0:\n xi = xi.unsqueeze_(1).repeat(1, seq_len, 1).reshape(-1, dsize) # [ batch_size * seq_len, dsize ]\n ht = ht.reshape(-1, dsize) # [ batch_size * seq_len, dsize ]\n k = self._fourier_kernel(xi, ht, nf).reshape(batch_size, seq_len) # [ batch_size, seq_len ]\n lami = k.sum(1) + self._mu() # [ batch_size ]\n return lami\n else:\n return torch.ones(batch_size) * self._mu()\n\n def _log_likelihood(self, X, T):\n \"\"\"\n return log-likelihood given sequence X\n\n Args:\n - X: input points sequence [ batch_size, seq_len, dsize ]\n - T: time horizon\n\n Return:\n - lam: sequence of lambda [ batch_size, seq_len ]\n - loglik: log-likelihood [ batch_size ]\n \"\"\"\n batch_size, seq_len, dsize = X.shape\n lam = [ self._lambda(X[:, i, :].clone(), X[:, :i, :].clone()) \n for i in range(seq_len) ]\n lam = torch.stack(lam, dim=1) # [ batch_size, seq_len ]\n # log-likelihood\n mask = X[:, :, 0] > 0 # [ batch_size, seq_len ]\n loglik1 = torch.log(lam) * mask # [ batch_size, seq_len ]\n loglik1 = torch.cat((loglik1, torch.zeros(batch_size, 1)), dim=1) # [ batch_size, seq_len + 1 ]\n loglik2 = - self._mu() * T * (2 * np.pi) ** (dsize - 1) \\\n if dsize > 1 else self._integral4temporal(X, T) # [ batch_size, seq_len ]\n loglik = loglik1 + loglik2 # [ batch_size, seq_len + 1 ]\n return lam, loglik\n\n def _integral4temporal(self, X, T):\n \"\"\"\n the integral term calculation only for one-dimensional point\n\n Args:\n - X: input points sequence [ batch_size, seq_len, dsize ]\n \"\"\"\n batch_size, seq_len, dsize = X.shape\n assert dsize == 1, \"dsize = %d is not 1.\" % dsize\n # first mask for shifting zero-paddings to T\n mask1 = (X[:, :, 
0] <= 0).float() # [ batch_size, seq_len ]\n X[:, :, 0] = X[:, :, 0].clone() + mask1 * T\n # calculate the integral\n nf = self.Womg.shape[1]\n x0 = torch.zeros(batch_size, 1, dsize) # [ batch_size, 1, 1 ]\n xn = torch.ones(batch_size, 1, dsize) * T # [ batch_size, 1, 1 ]\n X = torch.cat((x0, X, xn), dim=1) # [ batch_size, seq_len + 2, 1 ]\n # second mask for masking integral sub-terms\n m0 = torch.ones(batch_size, 1) # [ batch_size, 1 ]\n mask2 = torch.cat((m0, (1. - mask1)), dim=1) # [ batch_size, seq_len + 1 ]\n intg0 = torch.zeros(batch_size, 1)\n intgi = []\n for i in range(1, seq_len + 1):\n xi = X[:, i, :].clone() # [ batch_size, 1 ]\n xi1 = X[:, i+1, :].clone() # [ batch_size, 1 ]\n ht = X[:, :i, :].clone() # [ batch_size, seq_len=i, 1 ]\n cos1 = torch.cos(- torch.matmul(ht, self.Womg)) # [ batch_size, seq_len=i, nf ]\n cos1 = cos1.sum(1) # [ batch_size, nf ]\n cos2 = torch.cos((xi1 + xi) / 2) # [ batch_size, 1 ]\n sin1 = torch.sin((xi1 - xi) / 2) # [ batch_size, 1 ]\n sinc = 2 * torch.exp(self.Womg) / self.Womg # [ 1, nf ]\n sinc = sinc.squeeze_(0) # [ nf ]\n intg = (cos1 * sinc).mean(1).unsqueeze_(1) * cos2 * sin1 # [ batch_size, 1 ]\n intgi.append(intg)\n intgs = [ intg0 ] + intgi\n intgs = torch.stack(intgs, dim=1).squeeze_() * mask2 # [ batch_size, seq_len + 1 ]\n base = self._mu() * (X[:, 1:, 0].clone() - X[:, :-1, 0].clone()) # [ batch_size, seq_len + 1 ]\n intgs = base + intgs / nf # [ batch_size, seq_len + 1 ]\n return - intgs # [ batch_size, seq_len + 1 ]\n\n def _mu(self):\n \"\"\"\n return base intensity\n \"\"\"\n return 10.\n\n\n\ndef train(model, dataloader, \n n_epoch=10, log_interval=10, lr=1e-4, log_callback=lambda x, y: None):\n \"\"\"training procedure\"\"\"\n # NOTE: gradient for loss is expected to be None, \n # since it is not leaf node. 
(it's root node)\n optimizer = optim.Adadelta(model.parameters(), lr=lr)\n for e in range(n_epoch):\n avgloss = []\n logloss = []\n dataloader.shuffle()\n for i in range(len(dataloader)):\n X = dataloader[i]\n model.train()\n optimizer.zero_grad() # init optimizer (set gradient to be zero)\n _, loglik = model(X) # inference\n loss = - loglik.sum(1).mean() # negative log-likelihood\n avgloss.append(loss.item())\n logloss.append(loss.item())\n loss.backward() # gradient descent\n optimizer.step() # update optimizer\n if i % log_interval == 0 and i != 0:\n print(\"[%s] Train batch: %d\\tLoglik: %.3f\" % (arrow.now(), i, - sum(logloss) / log_interval))\n # callback \n log_callback(model, dataloader)\n logloss = []\n \n # log loss\n print(\"[%s] Train epoch: %d\\tAvg Loglik: %.3f\" % (arrow.now(), e, - sum(avgloss) / len(dataloader)))","repo_name":"meowoodie/Fourier-Point-Process-via-Imitation-Learning","sub_path":"fourierpp.py","file_name":"fourierpp.py","file_ext":"py","file_size_in_byte":9402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"70920342441","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.management.utils import get_random_secret_key\n\n\nclass User(AbstractUser):\n username = None\n email = models.EmailField(unique=True, db_index=True, primary_key=True)\n secret_key = models.CharField(max_length=255, default=get_random_secret_key)\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n\n class Meta:\n swappable = 'AUTH_USER_MODEL'\n\n @property\n def name(self):\n\n if not self.last_name:\n return self.first_name.capitalize()\n\n return f'{self.first_name.capitalize()} {self.last_name.capitalize()}'\n\n\nclass Profile(models.Model):\n DEAFAULTPIC=\"https://firebasestorage.googleapis.com/v0/b/top-cubist-344010.appspot.com/o/files%2Ficon-students-3.jpg?alt=media&token=69e9185d-846d-429b-a078-e21c51cc21ae%22\"\n status_choices = (\n ('S', 'Student'),\n ('P', 'Professor')\n )\n first_name = models.CharField(max_length=200, null=True, blank=True)\n last_name = models.CharField(max_length=200, null=True, blank=True)\n email = models.OneToOneField(User, unique=True, on_delete=models.CASCADE)\n # avatar = models.ImageField(upload_to='profile', blank = True) this\n avatar = models.CharField(max_length=200, null=True, blank=True, default=DEAFAULTPIC)\n status = models.CharField(\n max_length=9, choices=status_choices, default=('S', 'Student')) # Student / Professor\n faculty = models.CharField(max_length=200, blank=True, null=False)\n\n def __str__(self):\n return self.email.email\n\n\nclass Todo(models.Model):\n title = models.CharField(max_length=120)\n description = models.TextField()\n completed = models.BooleanField(default=False)\n\n def _str_(self):\n return self.title\n","repo_name":"Chonlasit666/cn332-engineering-project","sub_path":"server/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"7422025171","text":"import json\n\nimport requests\n\n\nclass SRError(Exception):\n ...\n\n\ndef get_last_games_for_all_tournaments(tournaments, number_of_games):\n last_games = {}\n for t in tournaments:\n tournament_id = t['_tid']\n last_games[t['name']] = get_last_games_for_tournament(\n tournament_id, number_of_games)\n return last_games\n\n\ndef get_last_games_for_tournament(tournament_id, number_of_games):\n url = 
f'https://cp.fn.sportradar.com/common/en/Etc:UTC/gismo/fixtures_tournament/{tournament_id}/2021' # noqa\n rv = requests.get(url)\n if rv.status_code == 200:\n games = parse_games(json.loads(rv.content))[:number_of_games]\n return games\n else:\n raise SRError('Failed to get games')\n\n\ndef parse_games(metadata):\n parsed_games = []\n for m in list(metadata['doc'][0]['data']['matches'].values()):\n parsed_games.append(serialize_game(m))\n return sort_games_by_time(parsed_games)\n\n\ndef serialize_game(game):\n serialized = {}\n if game['canceled']:\n return None\n for k, v in game.items():\n if k in ('_tid', 'round'):\n serialized[k] = v\n continue\n if k == 'time':\n serialized['timestamp'] = v['uts']\n continue\n if k == 'result':\n serialized['full_time_score'] = {\n 'home': v['home'], 'away': v['away']}\n continue\n if k == 'periods':\n if v is not None: # some games don't have half-time data ¯\\_(ツ)_/¯\n serialized['half_time_score'] = v['p1']\n continue\n if k == 'teams':\n serialized['home_team_name'] = v['home']['name']\n serialized['away_team_name'] = v['away']['name']\n continue\n if k == 'comment':\n serialized['goals'] = serialize_comment(v)\n continue\n return serialized\n\n\ndef serialize_comment(comment):\n serialized = []\n if not comment:\n return None\n goals = comment.split(', ')\n for goal in goals:\n serialized_goal = {}\n goal_data = goal.split(' ')\n serialized_goal['goalscorer_name'] = goal_data[2]\n serialized_goal['goal_minute'] = (goal_data[1])[1:-1]\n serialized_goal['new_score'] = goal_data[0]\n serialized.append(serialized_goal)\n return serialized\n\n\ndef sort_games_by_time(games):\n \"\"\"\n Function returns list of games from newest to oldest.\n \"\"\"\n non_cancelled = [g for g in games if g]\n return sorted(non_cancelled, key=lambda g: g['timestamp'], reverse=True)\n","repo_name":"etozheya/sr_challenge","sub_path":"data_layer/get_games.py","file_name":"get_games.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7783203304","text":"import pika\nimport threading\nimport tkinter as tk\n\n\nclass CollaborativeTextEditor:\n def __init__(self, exchange_name, exchange_type, rabbitmq_server_url):\n self.exchange_name = exchange_name\n self.exchange_type = exchange_type\n self.rabbitmq_server_url = rabbitmq_server_url\n # establish connection with the rabbit mq server\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(rabbitmq_server_url))\n self.channel = self.connection.channel()\n\n # add the exchange\n self.channel.exchange_declare(\n exchange=exchange_name, exchange_type=exchange_type)\n\n # create a queue using a random name (generate by the rabbitmq itself)\n # the queue will be delted automatically when the connection is lost\n result = self.channel.queue_declare(queue='', exclusive=True)\n self.queue_name = result.method.queue\n\n self.channel.queue_bind(queue=self.queue_name, exchange=exchange_name)\n\n self.channel.basic_consume(\n queue=self.queue_name, on_message_callback=self.receive_message, auto_ack=True)\n\n self.stop_listening_flag = threading.Event()\n threading.Thread(target=self.channel.start_consuming).start()\n self.root = tk.Tk()\n self.root.title(\"Collaborative Text Editor\")\n self.text_widget = tk.Text(self.root)\n self.text_widget.pack()\n self.previous_content = \"\"\n self.text_widget.bind(\"\", self.on_key)\n self.cursor = \"1.0\"\n self.root.mainloop()\n\n def on_key(self, event):\n self.cursor = 
self.text_widget.index(tk.INSERT)\n text_widget_content = self.text_widget.get(\"1.0\", \"end-1c\")\n if text_widget_content != self.previous_content:\n self.send_message(text_widget_content)\n self.previous_content = text_widget_content\n\n def send_message(self, message):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(self.rabbitmq_server_url))\n channel = connection.channel()\n channel.basic_publish(\n exchange=self.exchange_name, routing_key=\"\", body=message)\n\n def receive_message(self, ch, method, properties, body):\n message = body.decode()\n self.set_text_content(message)\n self.text_widget.mark_set(tk.INSERT, self.cursor)\n\n def set_text_content(self, content):\n self.text_widget.delete(\"1.0\", tk.END)\n self.text_widget.insert(\"1.0\", content)\n\n\ndef main():\n collaborative_text_editor = CollaborativeTextEditor(\n \"text-editor\", \"fanout\", \"localhost\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ahmedmahfoudhi/collaborative-text-editor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70821303081","text":"#!/usr/bin/python3\n\"\"\"\nTakes in a URL and an email, sends a POST request to the passed URL with the\nemail as a parameter, and displays the body of the response (decoded in utf-8)\n\"\"\"\nimport sys\nimport urllib.parse\nimport urllib.request\n\n\nif __name__ == \"__main__\":\n url = sys.argv[1]\n dic = {\"email\": sys.argv[2]}\n qstring = urllib.parse.urlencode(dic).encode(\"ascii\")\n requrl = urllib.request.Request(url, data=qstring)\n\n with urllib.request.urlopen(requrl) as response:\n print(response.read().decode(\"utf-8\"))\n","repo_name":"Dtikoli/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8404372821","text":"# -*- coding: utf-8 -*-\n#numpy 1.14.0\n#matplotlib 2.1.2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport neuralnetwork\nimport pygame\nimport sys\n\nplt.ioff()\n\n\nif __name__ == \"__main__\":\n\n population = 80\n mutation_rate = 0.10\n total_generations = 80\n filename = \"grafico.png\"\n \n show_last = 5 #Cuantas partidas mostrar. 5 = Mostrar la mejor red de las últimas 5 generaciones\n x = 16 #tamaño del mapa\n y = 10\n ticks = 120 #cuantos ticks jugará la ia\n bonus_ticks = 80 #cuantos ticks gana al comer\n visual_delay = 0.1 #tiempo en segundos entre cada tic de juego\n score_mult = 5 #Cuantos puntos vale cada comida\n \n net_arch = [7,5,3] #el primer elemento es la cantidad de inputs, el ultimo de outputs,\n #y los intermedios son capas ocultas. 
[7,5,3] son 7 inputs, 3 outputs\n #y una sola capa oculta con 5 neuronas\n \n pop = []\n for i in range(population):\n pop.append(neuralnetwork.NeuralNetwork(net_arch))\n \n \n \n graphics_enabled = False\n delay = 0\n generations = 0\n best = []\n best_nn = []\n avg = []\n avg_score = []\n best_score = []\n \n start = time.time()\n while generations < total_generations:\n fitness_array = []\n score_array = []\n for i in pop:\n fitness_array.append(i.fitness(x, y, graphics_enabled, ticks, bonus_ticks, delay, score_mult,0, None))\n score_array.append(i.game_score)\n \n fitness_array.sort()\n score_array.sort()\n best.append(fitness_array[-1])\n avg.append(sum(fitness_array)/population)\n avg_score.append(sum(score_array)/population)\n best_score.append(score_array[-1])\n threshold = fitness_array[int(population*.90)]\n mating_pool = []\n \n for i in pop:\n if i.fit >= threshold:\n mating_pool.append(i)\n if i.fit == fitness_array[-1]:\n best_nn.append(i)\n \n pop = []\n for i in range(population):\n a = np.random.randint(len(mating_pool))\n b = np.random.randint(len(mating_pool))\n pop.append(mating_pool[a].reproduce(mating_pool[b], mutation_rate))\n \n generations += 1\n \n end = time.time()\n plt.plot(np.arange(len(best_score)), best_score, label=\"Mejor de cada generación\")\n plt.plot(np.arange(len(avg_score)), avg_score, label=\"Promedio de cada generación\")\n plt.xlabel(\"Generación\")\n plt.ylabel(\"Puntaje\")\n \n plt.legend()\n plt.ylim(ymin=min(avg_score))\n plt.xlim(xmin=0)\n \n plt.savefig(filename)\n plt.close()\n print(\"\\n\")\n print(\"Tiempo total: \" + str(end-start))\n \n pygame.init()\n screen = pygame.display.set_mode((20*x,20*y))\n for i in range(total_generations - show_last,len(best_nn)):\n best_nn[i].fitness(x, y, True, ticks, bonus_ticks, visual_delay, 1, i+1, screen)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.display.quit()\n pygame.quit()\n sys.exit()\n\n pygame.display.quit()\n pygame.quit()\n sys.exit()","repo_name":"AlbertoSara/Tarea3CC5114","sub_path":"src/gen_algorithm.py","file_name":"gen_algorithm.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37314693041","text":"'''\r\nIMPORTS\r\n'''\r\n\r\nimport time\r\nimport black_jack_helper as bjh\r\n\r\n'''\r\nWELCOME MESSAGE\r\n'''\r\nprint('Welcome to Black Jack!')\r\ntime.sleep(1)\r\nprint('5')\r\ntime.sleep(1)\r\nprint('4')\r\ntime.sleep(1)\r\nprint('3')\r\ntime.sleep(1)\r\nprint('2')\r\ntime.sleep(1)\r\nprint('1')\r\ntime.sleep(1)\r\nprint('Let\\'s go!')\r\n\r\n\r\n'''\r\nCHIPS BANK\r\n'''\r\n\r\npl_total_chips = 0\r\n\r\nwhile pl_total_chips <= 0:\r\n\r\n while True:\r\n\r\n try:\r\n total_chips_inp = int(input('Give me the total amount of chips you have: '))\r\n except:\r\n print('That needs to be an integer number')\r\n time.sleep(2)\r\n else:\r\n break\r\n\r\n if total_chips_inp > 0:\r\n\r\n pl_total_chips += total_chips_inp\r\n time.sleep(2)\r\n break\r\n\r\n else:\r\n\r\n print('This needs to be a number greater than 0')\r\n time.sleep(2)\r\n\r\npl_chips_bank = bjh.ChipsBank(total=pl_total_chips)\r\n\r\n# keep a record of the initial amount of chips, this will be handy to work out the net gain/loss at the end\r\npl_total_chips_initial = pl_total_chips\r\n\r\nprint(pl_chips_bank)\r\n\r\n\r\n'''\r\nGame while loop begins\r\n'''\r\nwhile True:\r\n\r\n # Begin the game\r\n\r\n # Create the deck\r\n my_deck = bjh.Deck()\r\n\r\n # Shuffle it\r\n 
my_deck.shuffle()\r\n\r\n # Ask for bet\r\n my_bet = bjh.ask_bet(pl_chips_bank)\r\n\r\n # Deal 2 cards to dealer, 2 cards to player\r\n pl_card_1 = my_deck.deal()\r\n pl_card_2 = my_deck.deal()\r\n de_card_1 = my_deck.deal()\r\n de_card_2 = my_deck.deal()\r\n\r\n print(\"This is your first card: \" + str(pl_card_1))\r\n time.sleep(2)\r\n\r\n print('This is your second card: ' + str(pl_card_2))\r\n time.sleep(2)\r\n\r\n print(\"This is the dealer's first card: \" + str(de_card_1))\r\n time.sleep(2)\r\n\r\n print('The dealer\\'s second card is hidden')\r\n time.sleep(2)\r\n\r\n # Add the cards to the player's hand\r\n pl_hand = bjh.Hand()\r\n pl_hand.add_card(pl_card_1)\r\n pl_hand.add_card(pl_card_2)\r\n print(pl_hand)\r\n\r\n # Add cards to the dealer's hand\r\n de_hand = bjh.Hand()\r\n de_hand.add_card(de_card_1)\r\n de_hand.add_card(de_card_2)\r\n\r\n # Begin player's turn\r\n while pl_hand.value < 21:\r\n\r\n # Ask if player wants to hit\r\n while True:\r\n\r\n while True:\r\n try:\r\n hit_resp = str(input('Do you want to hit? y/n ')).upper()\r\n except:\r\n print('Please reply with a string \\'y\\' or \\'no\\'')\r\n else:\r\n break\r\n\r\n if hit_resp == 'Y' or hit_resp == 'N':\r\n\r\n break\r\n\r\n else:\r\n print('Please reply with a string \\'y\\' or \\'no\\'')\r\n\r\n # if yes, add the card to the hand and desplay the hand\r\n if hit_resp == 'Y':\r\n pl_new_card = my_deck.deal()\r\n\r\n pl_hand.add_card(pl_new_card)\r\n\r\n print(pl_hand)\r\n time.sleep(2)\r\n\r\n # if not, break the while and continue\r\n if hit_resp == 'N':\r\n break\r\n\r\n # if the hand's value has gone above 21, then substract bet and continue to the next iteration\r\n if pl_hand.value > 21:\r\n\r\n print('BUST!!!\\nEnd of this match')\r\n time.sleep(2)\r\n pl_chips_bank.lose_bet(my_bet)\r\n\r\n if bjh.continue_playing(pl_chips_bank):\r\n continue\r\n else:\r\n print('End of the game. You are going home with: \\n' + str(pl_chips_bank.total))\r\n break\r\n\r\n print('It is now the dealer\\'s turn')\r\n time.sleep(2)\r\n\r\n print('As we said, the dealer\\'s first card is: \\n' + str(de_card_1))\r\n time.sleep(2)\r\n\r\n print('We can now show you the dealer\\'s second card too: \\n' + str(de_card_2))\r\n time.sleep(2)\r\n\r\n print('So the total value of the dealer\\'s hand is: \\n' + str(de_hand.value))\r\n time.sleep(2)\r\n\r\n while de_hand.value < 17:\r\n de_new_card = my_deck.deal()\r\n print('The dealer has just got a: \\n' + str(de_new_card))\r\n time.sleep(2)\r\n\r\n de_hand.add_card(de_new_card)\r\n print('The dealer\\'s hand is now worth: ' + str(de_hand.value))\r\n time.sleep(2)\r\n\r\n if de_hand.value > 21:\r\n\r\n print('The dealer has gone BUST! You win the game!')\r\n\r\n pl_chips_bank.win_bet(my_bet)\r\n time.sleep(2)\r\n\r\n if bjh.continue_playing(pl_chips_bank):\r\n continue\r\n else:\r\n print('End of the game. You are going home with: \\n' + str(pl_chips_bank.total))\r\n break\r\n\r\n elif de_hand.value > pl_hand.value:\r\n\r\n print('The dealer has won. Dealer\\'s hand is worth: \\n' + str(de_hand.value) + '\\nYour hand is worth: \\n' + str(\r\n pl_hand.value))\r\n pl_chips_bank.lose_bet(my_bet)\r\n time.sleep(2)\r\n\r\n if bjh.continue_playing(pl_chips_bank):\r\n continue\r\n else:\r\n print('End of the game. You are going home with: \\n' + str(pl_chips_bank.total))\r\n break\r\n\r\n elif de_hand.value < pl_hand.value:\r\n\r\n print('You won. 
Dealer\\'s hand is worth: \\n' + str(de_hand.value) + '\\nYour hand is worth: \\n' + str(\r\n pl_hand.value))\r\n pl_chips_bank.win_bet(my_bet)\r\n time.sleep(2)\r\n\r\n if bjh.continue_playing(pl_chips_bank):\r\n continue\r\n else:\r\n print('End of the game. You are going home with: \\n' + str(pl_chips_bank.total))\r\n break\r\n\r\n elif de_hand.value == pl_hand.value:\r\n\r\n print('You draw. Dealer\\'s hand is worth: \\n' + str(de_hand.value) + '\\nYour hand is worth: \\n' + str(\r\n pl_hand.value))\r\n time.sleep(2)\r\n\r\n if bjh.continue_playing(pl_chips_bank):\r\n continue\r\n else:\r\n print('End of the game. You are going home with: \\n' + str(pl_chips_bank.total))\r\n break\r\n","repo_name":"pierodellagiustina/black-jack-course-assignment","sub_path":"black_jack.py","file_name":"black_jack.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75234026919","text":"import sys\ninput = sys.stdin.readline\nimport heapq\n##############################################\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\n##############################################\nINF = int(1e9)\n##############################################\nw, h = map(int, input().split())\n\ngraph = [list(input().rstrip()) for _ in range(h)]\n\ncheck_point = []\nfor i in range(h):\n for j in range(w):\n if graph[i][j] == 'C':\n check_point.append([i, j])\n graph[i][j] = '.'\n\nheap = []\nx = check_point[0][0]\ny = check_point[0][1]\n\ngraph[x][y] = 0\n\n# 시작점에서 상하좌우는 모두 0개로 접근 가능\nfor dir in range(4):\n i = 1\n while True:\n nx = x + dx[dir]*i\n ny = y + dy[dir]*i\n\n # 범위 이탈이나 벽만나면 멈춤\n if nx < 0 or nx >= h or ny < 0 or ny >= w or graph[nx][ny] == \"*\":\n break\n\n # 거울의 개수와 방향, 위치를 저장\n graph[nx][ny] = 0\n heapq.heappush(heap, [0, dir, nx, ny])\n i += 1\n\n# 다익스트라 시작\nwhile heap:\n cost, diriction, x, y = heapq.heappop(heap)\n\n # diriction +1 or -1이 꺾이는 방향\n for k in [-1, 1]:\n dir = (diriction + k) % 4\n\n # 해당 방향으로 쭉 나아감\n i = 1\n while True:\n nx = x + dx[dir] * i\n ny = y + dy[dir] * i\n\n # 범위 이탈하거나 벽만나면 멈춤\n if nx < 0 or nx >= h or ny < 0 or ny >= w or graph[nx][ny] == \"*\":\n break\n\n # 처음 도착하거나 더 적은 거울 개수로 올 수 있으면 heap에 추가\n if graph[nx][ny] == \".\" or graph[nx][ny] > cost + 1:\n graph[nx][ny] = cost + 1\n heapq.heappush(heap, [cost + 1, dir, nx, ny])\n\n i += 1\n\n# 출력\nx, y = check_point[1]\nprint(graph[x][y])\n\n\n\n","repo_name":"Sunghyun1320/algorithm","sub_path":"python/BOJ/dijkstra/6087.py","file_name":"6087.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16347060335","text":"from pdfquery import PDFQuery\nfrom io import BytesIO \n\n# def extractTextPDF(file):\n# text = \"\"\n# reader = PdfReader(BytesIO(file))\n# total = len(reader.pages)\n# for i in range(total):\n# page = reader.pages[i]\n# text += page.extract_text()\n# return text\n\ndef extractTextPDF(file):\n pdf = PDFQuery(BytesIO(file))\n pdf.load()\n text_elements = pdf.pq('LTTextLineHorizontal')\n text = [t.text for t in text_elements]\n return \"\".join(text)","repo_name":"ahmedBilal5/qwizard-backend","sub_path":"PDFReader.py","file_name":"PDFReader.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9804894250","text":"import numpy as np\r\nimport cv2\r\n\r\n\r\ndef onChange(value):\r\n global image, title, pos\r\n\r\n add_value = 
value - int(image[0][0])\r\n pos += add_value\r\n print(\"추가 화소값:\", add_value)\r\n image = image + add_value\r\n cv2.imshow(title, image)\r\n\r\n\r\npos = 0\r\nimage = np.zeros((300, 500), np.uint8)\r\n\r\ntitle = \"Trackbar Event\"\r\ncv2.imshow(title, image)\r\n\r\ncv2.createTrackbar(\"Brightness\", title, pos, 255, onChange)\r\n\r\nwhile True:\r\n key = cv2.waitKeyEx(100)\r\n if key == 27: break\r\n\r\n if key == 2424832 and pos >= 1: # 왼쪽 키\r\n pos -= 1\r\n cv2.setTrackbarPos(\"Brightness\", title, pos)\r\n image -= 1\r\n cv2.imshow(title, image)\r\n elif key == 2555904 and pos <= 254: # 오른쪽 키\r\n pos += 1\r\n cv2.setTrackbarPos(\"Brightness\", title, pos)\r\n image += 1\r\n cv2.imshow(title, image)\r\n\r\ncv2.destroyAllWindows()\r\n\r\n\"\"\"\r\n한 쪽 방향으로 계속 증가시킨다고 했을 때, 처음만 1 증가하고 그 이후로는 계속 2씩 증가함\r\n왼쪽 오른쪽 둘 다 그럼\r\n그리고 내렸을 때 회색으로 고정되는거, 아무리 봐도 이유를 모르겠음\r\n\"\"\"","repo_name":"nahyeongjin1/PoseEstimation","sub_path":"CHAPTER04/exercise/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29728497687","text":"from dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Optional, List, Union, Any\nfrom uuid import uuid4\n\nfrom .entity import Entity\nfrom .event_metadata import EventMetadata\nfrom pydantic import BaseModel, root_validator\nfrom typing import Tuple\n\nfrom .marketing import UTM\nfrom .metadata import OS, Device, Application, Hit\nfrom .named_entity import NamedEntity\nfrom .profile import ProfileLoyalty, ProfileJob, ProfilePreference, ProfileMedia, \\\n ProfileIdentifier, ProfileContact, ProfilePII\nfrom .value_object.operation import RecordFlag\nfrom .value_object.storage_info import StorageInfo\nfrom ..service.string_manager import capitalize_event_type_id\n\n\nclass Tags(BaseModel):\n values: Tuple['str', ...] 
= ()\n count: int = 0\n\n class Config:\n validate_assignment = True\n\n @root_validator(skip_on_failure=True)\n def total_tags(cls, values):\n values[\"count\"] = len(values.get(\"values\"))\n return values\n\n def add(self, tag: Union[str, List[str]]):\n\n if isinstance(tag, list):\n tag = tuple(tag)\n self.values += tag\n else:\n self.values += tag,\n\n self.count = len(self.values)\n\n\nclass EventSession(Entity):\n start: datetime = datetime.utcnow()\n duration: float = 0\n tz: Optional[str] = 'utc'\n\n\nclass EventJourney(BaseModel):\n state: Optional[str] = None\n\n\nclass EventProductVariant(BaseModel):\n name: Optional[str] = None\n color: Optional[str] = None\n size: Optional[str] = None\n\n\nclass EventCheckout(BaseModel):\n id: Optional[str] = None\n status: Optional[str] = None\n currency: Optional[str] = None\n value: Optional[float] = 0\n\n\nclass EventIncome(BaseModel):\n value: Optional[float] = 0\n revenue: Optional[float] = 0\n\n\nclass EventCost(BaseModel):\n shipping: Optional[float] = 0\n tax: Optional[float] = 0\n discount: Optional[float] = 0\n other: Optional[float] = 0\n\n\nclass EventProduct(BaseModel):\n id: Optional[str] = None\n name: Optional[str] = None\n sku: Optional[str] = None\n category: Optional[str] = None\n brand: Optional[str] = None\n variant: Optional[str] = None\n price: Optional[float] = 0\n quantity: Optional[int] = 0\n position: Optional[int] = 0\n review: Optional[str] = None\n rate: Optional[float] = 0\n\n\nclass EventOrder(BaseModel):\n id: Optional[str] = None\n status: Optional[str] = None\n currency: Optional[str] = None\n income: Optional[EventIncome] = EventIncome()\n cost: Optional[EventCost] = EventCost()\n affiliation: Optional[str] = None\n\n\nclass EventEc(BaseModel):\n product: Optional[EventProduct] = EventProduct()\n checkout: Optional[EventCheckout] = EventCheckout()\n order: Optional[EventOrder] = EventOrder()\n\n\nclass EventMessage(BaseModel):\n type: Optional[str] = None\n text: Optional[str] = None\n recipient: Optional[str] = None\n\n\nclass EventCreditCard(BaseModel):\n number: Optional[str] = None\n expires: Optional[datetime] = None\n holder: Optional[str] = None\n\n\nclass EventPayment(BaseModel):\n method: Optional[str] = None\n credit_card: Optional[EventCreditCard] = EventCreditCard()\n\n\nclass EventPromotion(BaseModel):\n id: Optional[str] = None\n name: Optional[str] = None\n\n\nclass EventMarketing(BaseModel):\n coupon: Optional[str] = None\n promotion: Optional[EventPromotion] = EventPromotion()\n\n\nclass EventData(BaseModel):\n pii: Optional[ProfilePII] = ProfilePII.construct()\n contact: Optional[ProfileContact] = ProfileContact.construct()\n identifier: Optional[ProfileIdentifier] = ProfileIdentifier.construct()\n media: Optional[ProfileMedia] = ProfileMedia.construct()\n preferences: Optional[ProfilePreference] = ProfilePreference.construct()\n job: Optional[ProfileJob] = ProfileJob.construct()\n loyalty: Optional[ProfileLoyalty] = ProfileLoyalty.construct()\n ec: Optional[EventEc] = EventEc.construct()\n message: Optional[EventMessage] = EventMessage.construct()\n payment: Optional[EventPayment] = EventPayment.construct()\n marketing: Optional[EventMarketing] = EventMarketing.construct()\n\n\nclass Event(NamedEntity):\n metadata: EventMetadata\n type: str\n\n utm: Optional[UTM] = UTM()\n\n properties: Optional[dict] = {}\n traits: Optional[dict] = {}\n operation: RecordFlag = RecordFlag()\n\n source: Entity\n session: Optional[EventSession] = None\n profile: Optional[Entity] = None\n context: 
Optional[dict] = {}\n request: Optional[dict] = {}\n config: Optional[dict] = {}\n tags: Tags = Tags()\n aux: dict = {}\n\n device: Optional[dict] = {}\n os: Optional[dict] = {}\n app: Optional[dict] = {}\n hit: Optional[dict] = {}\n # journey: Optional[dict] = {}\n data: Optional[dict] = {}\n\n # device: Optional[Device] = Device.construct()\n # os: Optional[OS] = OS.construct()\n # app: Optional[Application] = Application.construct()\n # hit: Optional[Hit] = Hit.construct()\n journey: EventJourney = EventJourney.construct()\n # data: Optional[EventData] = EventData.construct()\n\n def __init__(self, **data: Any):\n if 'type' in data and isinstance(data['type'], str):\n data['type'] = data['type'].lower().replace(' ', '-')\n if 'name' not in data:\n data['name'] = capitalize_event_type_id(data['type'])\n super().__init__(**data)\n\n def replace(self, event):\n if isinstance(event, Event):\n self.id = event.id\n self.metadata = event.metadata\n self.type = event.type\n self.properties = event.properties\n self.traits = event.traits\n # do not replace those - read only\n # self.source = event.source\n # self.session = event.session\n # self.profile = event.profile\n self.context = event.context\n self.request = event.request\n self.config = event.config\n self.tags = event.tags\n self.aux = event.aux\n self.os = event.os\n self.device = event.device\n self.app = event.app\n\n def get_ip(self):\n if 'headers' in self.request and 'x-forwarded-for' in self.request['headers']:\n return self.request['headers']['x-forwarded-for']\n return None\n\n def is_persistent(self) -> bool:\n if 'save' in self.config and isinstance(self.config['save'], bool):\n return self.config['save']\n else:\n return True\n\n def has_profile(self) -> bool:\n return self.profile is not None\n\n def has_session(self) -> bool:\n return self.session is not None\n\n @staticmethod\n def new(data: dict) -> 'Event':\n data['id'] = str(uuid4())\n return Event(**data)\n\n @staticmethod\n def storage_info() -> StorageInfo:\n return StorageInfo(\n 'event',\n Event,\n multi=True\n )\n\n @staticmethod\n def dictionary(id: str = None,\n type: str = None,\n session_id: str = None,\n profile_id=None,\n properties: dict = None,\n context=None) -> dict:\n if context is None:\n context = {}\n if properties is None:\n properties = {}\n return {\n \"id\": id,\n \"type\": type,\n \"name\": capitalize_event_type_id(type),\n \"metadata\": {\n \"aux\": {},\n \"time\": {\n \"insert\": None,\n \"create\": None,\n \"update\": None,\n \"process_time\": 0\n },\n \"ip\": None,\n \"status\": None,\n \"channel\": None,\n \"processed_by\": {\n \"rules\": [],\n \"flows\": [],\n \"third_party\": []\n },\n \"profile_less\": False,\n \"debug\": False,\n \"valid\": True,\n \"error\": False,\n \"warning\": False,\n \"instance\": {\n \"id\": None\n }\n },\n \"utm\": {\n \"source\": None,\n \"medium\": None,\n \"campaign\": None,\n \"term\": None,\n \"content\": None\n },\n \"properties\": properties,\n \"traits\": {},\n \"operation\": {\n \"new\": False,\n \"update\": False\n },\n \"source\": {\n \"id\": None,\n \"type\": [],\n \"bridge\": {\n \"id\": None,\n \"name\": None\n },\n \"timestamp\": None,\n \"name\": None,\n \"description\": None,\n \"channel\": None,\n \"enabled\": True,\n \"transitional\": False,\n \"tags\": [],\n \"groups\": [],\n \"returns_profile\": False,\n \"permanent_profile_id\": False,\n \"requires_consent\": False,\n \"manual\": None,\n \"locked\": False,\n \"synchronize_profiles\": True,\n \"config\": None\n },\n \"session\": {\n 
\"id\": session_id,\n \"start\": None,\n \"duration\": 0,\n \"tz\": \"utc\"\n },\n \"profile\": {\n \"id\": profile_id\n },\n \"context\": context,\n \"request\": {},\n \"config\": {},\n \"tags\": {\n \"values\": (),\n \"count\": 0\n },\n \"aux\": {},\n \"data\": {},\n \"device\": {\n \"name\": None,\n \"brand\": None,\n \"model\": None,\n \"type\": None,\n \"touch\": False,\n \"ip\": None,\n \"resolution\": None,\n \"geo\": {\n \"country\": {\n \"name\": None,\n \"code\": None\n },\n \"city\": None,\n \"county\": None,\n \"postal\": None,\n \"latitude\": None,\n \"longitude\": None\n },\n \"color_depth\": None,\n \"orientation\": None\n },\n \"os\": {\n \"name\": None,\n \"version\": None\n },\n \"app\": {\n \"type\": None,\n \"name\": None,\n \"version\": None,\n \"language\": None,\n \"bot\": False,\n \"resolution\": None\n },\n \"hit\": {\n \"name\": None,\n \"url\": None,\n \"referer\": None,\n \"query\": None,\n \"category\": None\n },\n \"journey\": {\n \"state\": None\n }\n }\n","repo_name":"Tracardi/tracardi","sub_path":"tracardi/domain/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":10990,"program_lang":"python","lang":"en","doc_type":"code","stars":430,"dataset":"github-code","pt":"18"} +{"seq_id":"15506048145","text":"import datetime\nimport pytest\n\nfrom keywords.utils import log_info, check_xattr_support, version_is_binary\nfrom keywords.LiteServFactory import LiteServFactory\nfrom keywords.SyncGateway import sync_gateway_config_path_for_mode\nfrom keywords.ClusterKeywords import ClusterKeywords\nfrom keywords.MobileRestClient import MobileRestClient\nfrom keywords.constants import CLUSTER_CONFIGS_DIR\nfrom keywords.constants import RESULTS_DIR\nfrom keywords.tklogging import Logging\nfrom utilities.cluster_config_utils import persist_cluster_config_environment_prop\n\n\n# This will get called once before the first test that\n# runs with this as input parameters in this file\n# This setup will be called once for all tests in the\n# testsuites/listener/shared/client_sg_topology_specific/multiple_sync_gateways/ directory\n@pytest.fixture(scope=\"module\")\ndef setup_client_syncgateway_suite(request):\n \"\"\"Suite setup fixture for client sync_gateway tests\"\"\"\n\n log_info(\"Setting up client sync_gateway suite ...\")\n\n liteserv_platform = request.config.getoption(\"--liteserv-platform\")\n liteserv_version = request.config.getoption(\"--liteserv-version\")\n liteserv_host = request.config.getoption(\"--liteserv-host\")\n liteserv_port = request.config.getoption(\"--liteserv-port\")\n liteserv_storage_engine = request.config.getoption(\"--liteserv-storage-engine\")\n\n skip_provisioning = request.config.getoption(\"--skip-provisioning\")\n sync_gateway_version = request.config.getoption(\"--sync-gateway-version\")\n sync_gateway_mode = request.config.getoption(\"--sync-gateway-mode\")\n server_version = request.config.getoption(\"--server-version\")\n xattrs_enabled = request.config.getoption(\"--xattrs\")\n sg_ssl = request.config.getoption(\"--sg-ssl\")\n use_views = request.config.getoption(\"--use-views\")\n number_replicas = request.config.getoption(\"--number-replicas\")\n\n liteserv = LiteServFactory.create(platform=liteserv_platform,\n version_build=liteserv_version,\n host=liteserv_host,\n port=liteserv_port,\n storage_engine=liteserv_storage_engine)\n\n if xattrs_enabled and version_is_binary(sync_gateway_version):\n check_xattr_support(server_version, sync_gateway_version)\n\n log_info(\"Downloading LiteServ ...\")\n # Download LiteServ\n 
liteserv.download()\n\n # Install LiteServ\n liteserv.install()\n\n cluster_config = \"{}/multiple_sync_gateways_{}\".format(CLUSTER_CONFIGS_DIR, sync_gateway_mode)\n\n try:\n server_version\n except NameError:\n log_info(\"Server version is not provided\")\n persist_cluster_config_environment_prop(cluster_config, 'server_version', \"\")\n else:\n log_info(\"Running test with server version {}\".format(server_version))\n persist_cluster_config_environment_prop(cluster_config, 'server_version', server_version)\n\n try:\n sync_gateway_version\n except NameError:\n log_info(\"Sync gateway version is not provided\")\n persist_cluster_config_environment_prop(cluster_config, 'sync_gateway_version', \"\")\n else:\n log_info(\"Running test with sync_gateway version {}\".format(sync_gateway_version))\n persist_cluster_config_environment_prop(cluster_config, 'sync_gateway_version', sync_gateway_version)\n\n if sg_ssl:\n log_info(\"Enabling SSL on sync gateway\")\n persist_cluster_config_environment_prop(cluster_config, 'sync_gateway_ssl', True)\n else:\n persist_cluster_config_environment_prop(cluster_config, 'sync_gateway_ssl', False)\n\n if use_views:\n log_info(\"Running SG tests using views\")\n # Enable sg views in cluster configs\n persist_cluster_config_environment_prop(cluster_config, 'sg_use_views', True)\n else:\n log_info(\"Running tests with cbs <-> sg ssl disabled\")\n # Disable sg views in cluster configs\n persist_cluster_config_environment_prop(cluster_config, 'sg_use_views', False)\n\n # Write the number of replicas to cluster config\n persist_cluster_config_environment_prop(cluster_config, 'number_replicas', number_replicas)\n\n if xattrs_enabled:\n log_info(\"Running test with xattrs for sync meta storage\")\n persist_cluster_config_environment_prop(cluster_config, 'xattrs_enabled', True)\n else:\n log_info(\"Using document storage for sync meta data\")\n persist_cluster_config_environment_prop(cluster_config, 'xattrs_enabled', False)\n\n sg_config = sync_gateway_config_path_for_mode(\"listener_tests/listener_tests\", sync_gateway_mode)\n\n if not skip_provisioning:\n log_info(\"Installing Sync Gateway + Couchbase Server + Accels ('di' only)\")\n cluster_utils = ClusterKeywords(cluster_config)\n cluster_utils.provision_cluster(\n cluster_config=cluster_config,\n server_version=server_version,\n sync_gateway_version=sync_gateway_version,\n sync_gateway_config=sg_config\n )\n\n # Wait at the yeild until tests referencing this suite setup have run,\n # Then execute the teardown\n yield {\n \"liteserv\": liteserv,\n \"cluster_config\": cluster_config,\n \"sg_mode\": sync_gateway_mode,\n \"xattrs_enabled\": xattrs_enabled\n }\n\n log_info(\"Tearing down suite ...\")\n\n liteserv.remove()\n\n\n# Passed to each testcase, run for each test_* method in\n# testsuites/listener/shared/client_sg_topology_specific/multiple_sync_gateways/ directory\n@pytest.fixture(scope=\"function\")\ndef setup_client_syncgateway_test(request, setup_client_syncgateway_suite):\n \"\"\"Test setup fixture for client sync_gateway tests\"\"\"\n\n log_info(\"Setting up client sync_gateway test ...\")\n\n liteserv = setup_client_syncgateway_suite[\"liteserv\"]\n cluster_config = setup_client_syncgateway_suite[\"cluster_config\"]\n xattrs_enabled = setup_client_syncgateway_suite[\"xattrs_enabled\"]\n test_name = request.node.name\n\n client = MobileRestClient()\n\n # Start LiteServ and delete any databases\n ls_url = liteserv.start(\"{}/logs/{}-{}-{}.txt\".format(RESULTS_DIR, type(liteserv).__name__, test_name, 
datetime.datetime.now()))\n client.delete_databases(ls_url)\n\n cluster_helper = ClusterKeywords(cluster_config)\n cluster_hosts = cluster_helper.get_cluster_topology(cluster_config=cluster_config)\n\n sg_url = cluster_hosts[\"sync_gateways\"][0][\"public\"]\n sg_admin_url = cluster_hosts[\"sync_gateways\"][0][\"admin\"]\n\n # Yield values to test case via fixture argument\n yield {\n \"cluster_config\": cluster_config,\n \"sg_mode\": setup_client_syncgateway_suite[\"sg_mode\"],\n \"ls_url\": ls_url,\n \"sg_url\": sg_url,\n \"sg_admin_url\": sg_admin_url,\n \"xattrs_enabled\": xattrs_enabled\n }\n\n log_info(\"Tearing down test\")\n client.delete_databases(ls_url)\n liteserv.stop()\n\n # if the test failed pull logs\n if request.node.rep_call.failed:\n logging_helper = Logging()\n logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)\n","repo_name":"couchbaselabs/mobile-testkit","sub_path":"testsuites/listener/shared/client_sg_topology_specific/multiple_sync_gateways/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7098,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"18"} +{"seq_id":"74062347881","text":"from tkinter import *\n\nimport tkinter\nimport WebCam_Threading\nimport GestureDetector\nimport cv2\nfrom PIL import Image\nfrom PIL import ImageTk\n\n\n\nclass cameraStream(): \n def __init__(self):\n self.gestureDectector = GestureDetector.GestureDetector()\n \n self.window = tkinter.Tk()\n self.window.title('Computer Vision')\n self.window.geometry('900x400')\n\n self.panel = tkinter.Label()\n self.panel.grid(column=0, row=0)\n \n self.camera = cv2.VideoCapture(0)\n self.Camera()\n self.window.mainloop()\n self.window.after(500,self.Camera)\n \n def Camera(self):\n \n message, frame = self.gestureDectector.gestureDetection()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n frame = ImageTk.PhotoImage(frame)\n\n self.panel.configure(image=frame)\n self.panel.image = frame\n self.panel.after(1,self.Camera)\n\n frame2 = Frame(self.window,width=100, highlightbackground='blue', highlightthickness=3)\n frame2.grid(row=0,column=1, padx=100, pady=10, ipadx=60, ipady=145)\n\n frame3 = Frame(self.window,width=100, highlightbackground='blue', highlightthickness=2)\n frame3.grid(row=0,column=1, padx=10, pady=10, ipadx=40, ipady=100,)\n \n label2= Label(frame3, text='Informacion', fg='red', font=(18))\n label2.place(x=85, y=6, anchor=\"center\")\n \n frame4 = Frame(self.window,width=100, highlightbackground='blue', highlightthickness=2)\n frame4.grid(row=0,column=1, padx=10, pady=10, ipadx=40, ipady=50,)\n\n label4= Label(frame4, text=message,fg='red', font=(18))\n label4.place(x=85, y=6, anchor=\"center\")\n \n \n \nif __name__ == \"__main__\":\n cameraStream()\n\n\n\"\"\"\nimport cv2 as cv\nfrom PIL import Image, ImageTk\nimport mediapipe as mp\n\nfrom facerecognition import FaceRecognition\n\nmp_face_detection = mp.solutions.face_detection\nmp_drawing = mp.solutions.drawing_utils\n\n\nwindow = Tk()\ncap = cv.VideoCapture(0)\nwindow.title('Computer Vision')\nwindow.geometry('900x400')\n\n\nframe1 = Frame(window, width=100, height=100, highlightbackground='blue',\n highlightthickness=3)\nframe1.grid(row=0, column=0, padx=30, pady=40, ipadx=195, ipady=110)\n\nlabel1 = Label(frame1, text=\"Camera\", fg='red', font=(18))\nlabel1.place(x=250, y=80, anchor=\"center\")\n\n\ndef show_frames():\n success, image = cap.read()\n cv2image = cv.cvtColor(image, 
cv.COLOR_BGR2RGB)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=img)\n label1.imgtk = imgtk\n label1.configure(image=imgtk)\n label1.after(20, show_frames)\n\ndef detect_face():\n # return FaceRecognition()\n FaceRecognition(cap)\n # window.destroy()\n\n\nframe2 = Frame(window, width=100, highlightbackground='blue',\n highlightthickness=3)\nframe2.grid(row=0, column=1, padx=100, pady=10, ipadx=60, ipady=145)\n\nframe3 = Frame(window, width=100, highlightbackground='blue',\n highlightthickness=2)\nframe3.grid(row=0, column=1, padx=10, pady=10, ipadx=40, ipady=100,)\nlabel2 = Label(frame3, text=\"Result List\", fg='red', font=(18))\nlabel2.place(x=85, y=6, anchor=\"center\")\n\n\nframe4 = Frame(window, width=100, highlightbackground='blue',\n highlightthickness=2)\nframe4.grid(row=0, column=1, padx=10, pady=10, ipadx=40, ipady=50,)\n\nbtn = Button(frame1, text=\"Start face recognition\", height=2,\n command=detect_face )\nbtn.place(x=180, y=260)\n\nshow_frames()\nwindow.mainloop()\n\n\n\"\"\"","repo_name":"willysjose2026/vision-computacional","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16935481769","text":"import argparse\nfrom p_reporting.m_reporting import export_challenge\nfrom p_reporting.m_reporting import export_bonus_1\nfrom p_reporting.m_reporting import export_bonus_2\n\nCOUNTRIES = ['Belgium',\n 'Lithuania',\n 'Portugal',\n 'Bulgaria',\n 'Spain',\n 'Luxembourg',\n 'Romania',\n 'Czechia',\n 'France',\n 'Hungary',\n 'Slovenia',\n 'Denmark',\n 'Croatia',\n 'Malta',\n 'Slovakia',\n 'Germany',\n 'Italy',\n 'Netherlands',\n 'Finland',\n 'Estonia',\n 'Cyprus',\n 'Austria',\n 'Sweden',\n 'Ireland',\n 'Latvia',\n 'Poland',\n 'Great Britain',\n 'Greece']\n\n\ndef argument_parser():\n parser = argparse.ArgumentParser(description='Obtain a full or country focused scope')\n\n # arguments here!\n parser.add_argument(\"-c\", \"--country\", help=\"Create output for country X\", type=str)\n parser.add_argument(\"-v\", \"--votes\", help=\"Print table with In Favour/Against counts\", type=str)\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n print('Running pipeline...')\n\n # 1/3 - Export the Challenge 1 result to csv using parsed arguments\n\n if argument_parser().country in COUNTRIES:\n for c in COUNTRIES:\n table = export_challenge()\n table.loc[table['Country'] == argument_parser().country].to_csv(f'data/results/{argument_parser()}.csv',\n index=False)\n break\n elif argument_parser().country == 'All':\n table = export_challenge()\n table.to_csv('data/results/all.csv')\n\n else:\n raise ValueError(\"Please chose a valid country from the list, or alternatively, type 'All'\")\n\n print('Table exported! Check your output folder ')\n\n # 2/3 - Print and export bonus 1 table to csv\n\n b1 = input('Would you also like an excel table with bonus 1? (yes/no)')\n if b1 == 'yes':\n bonus_1_table = export_bonus_1()\n print(bonus_1_table)\n bonus_1_table.to_csv('data/results/votes.csv')\n print('table has been exported, Check your folder!')\n elif b1 == 'no':\n print('Whatever... Your loss!')\n\n # 3/3 - Print and export bonus 2 table to csv\n\n b1 = input('How about top skills table for bonus 2? (yes/no)')\n if b1 == 'yes':\n bonus_2_table = export_bonus_2()\n bonus_2_table.to_csv('data/results/top_skills.csv')\n print(bonus_2_table)\n print('table has been exported, Check your folder!')\n elif b1 == 'no':\n print('Oh... 
what a waste of my time then')\n\n print('Script complete, thanks for using this awesome tool')\n\n\nif __name__ == '__main__':\n arguments = argument_parser()\n main()\n","repo_name":"markaww/workforce_research","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71602070120","text":"#Given 2 integer values greater than 0,\n# return whichever value is closest to 21 with- out going over 21.\n# If they both go over 21 then return 0\n\ndef blackjack(card1, card2):\n if card1 == 0 or card2 == 0:\n return \"Not a valid card\"\n elif card1 < 21 and card1 > card2:\n return card1\n elif card1 < 21 and card2 > 21:\n return card1\n elif card1 < 21 and card2 > card1:\n return card2\n elif card1 > 21 and card2 < 21:\n return card2\n elif card1 == 21 or card2 == 21:\n return 21\n else:\n return 0\n\n\nprint(blackjack(0,22))","repo_name":"Alex-Sal91/QA-Python_Tasks","sub_path":"Intermediate/BlackJack.py","file_name":"BlackJack.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32717776251","text":"from flask import request\nfrom flask_restx import Resource\nfrom ..util.dto import CommentDto\nfrom ..util.decorator import *\nfrom ..service.comment_service import save_new_comment, get_all_comments_by_user, delete_a_comment, get_all_comments_by_post\n\napi = CommentDto.api\n_comment = CommentDto.comment\n\n@api.route('/')\nclass CommentList(Resource):\n @token_required\n @api.response(201, 'Comment successfully created.')\n @api.doc('create a new comment')\n @api.expect(_comment, validate=True)\n def post(self, user_pid):\n \"\"\"Creates a new Comment \"\"\"\n data = request.json\n return save_new_comment(user_pid=user_pid, data=data)\n\n@api.route('/creator/')\n@api.param(\"creator_id\", \"The User public identifier\")\nclass CommentListByUser(Resource):\n @token_required\n @api.doc('list_of_registered_comments')\n @api.marshal_list_with(_comment, envelope='data')\n def get(self, user_pid, creator_id):\n \"\"\"List all registered comments\"\"\"\n return get_all_comments_by_user(user_pid, creator_id)\n\n@api.route('/parent/')\n@api.param(\"parent_id\", \"The Pet public identifier\")\nclass CommentListByPost(Resource):\n @token_required\n @api.doc('list_of_registered_comments')\n @api.marshal_list_with(_comment, envelope='data')\n def get(self, user_pid, parent_id):\n \"\"\"List all registered comments\"\"\"\n return get_all_comments_by_post(user_pid, parent_id, request.args.get(\"pagination_no\", type=int))\n\n@api.route('/')\n@api.param('public_id', 'The Comment identifier')\n@api.response(404, 'Comment not found.')\nclass Comment(Resource):\n @token_required\n @api.response(201, 'Comment successfully deleted.')\n @api.doc('delete a comment')\n def delete(self, user_pid, public_id):\n \"\"\"delete a comment given its identifier\"\"\"\n return delete_a_comment(public_id, user_pid)","repo_name":"fmscrns/boop-server","sub_path":"app/main/controller/comment_controller.py","file_name":"comment_controller.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31848914401","text":"from __future__ import print_function\nimport cv2 as cv\nimport numpy as np\nimport action\n\ndef surf(img1, img2,ratio_thresh):\n if img1 is None or img2 is None:\n print('Could not open or 
find the images!')\n exit(0)\n #-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors\n\n minHessian = 400\n detector = cv.SIFT_create() #(hessianThreshold=minHessian)\n keypoints1, descriptors1 = detector.detectAndCompute(img1, None)\n keypoints2, descriptors2 = detector.detectAndCompute(img2, None)\n #-- Step 2: Matching descriptor vectors with a FLANN based matcher\n # Since SURF is a floating-point descriptor NORM_L2 is used\n matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)\n knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)\n #-- Filter matches using the Lowe's ratio test\n # ratio_thresh = 0.65\n good_matches = []\n distance = []\n for m,n in knn_matches:\n if m.distance < ratio_thresh * n.distance:\n distance.append(m.distance)\n good_matches.append(m)\n\n #-- Draw matches\n if distance:\n best_distance = min(distance)\n for i in good_matches:\n if (i.distance == best_distance ):\n best_match = i\n\n\n img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)\n cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n\n img1_idx = best_match.queryIdx\n img2_idx = best_match.trainIdx\n a = keypoints1[img1_idx].pt\n b = keypoints2[img2_idx].pt\n return b\n else:\n return []\n","repo_name":"Maxagl/YYS-script","sub_path":"Featurematcah.py","file_name":"Featurematcah.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"32458917628","text":"\ndef bfs(graph, node):\n visited = []\n queue = []\n\n print(f\"All the nodes of are:\")\n visited.append(node)\n queue.append(node)\n\n while queue:\n s = queue.pop(0)\n\n print(s, end=\" \")\n\n for neighbour in graph[s]:\n if neighbour not in visited:\n visited.append(neighbour)\n queue.append(neighbour)\n\n\ngraph_test = {\n 'A': ['B', 'C'],\n 'B': ['A', 'D'],\n 'C': ['A', 'D'],\n 'D': ['C', 'E'],\n 'E': ['D']\n}\nprint(\"\\nFor graph_test:\")\nbfs(graph_test, 'A')\n\ngraph1 = {\n 'A': ['B', 'C', 'D'],\n 'B': ['E'],\n 'C': ['F'],\n 'D': ['G'],\n 'E': ['H'],\n 'F': ['H'],\n 'G': ['H', 'I'],\n 'H': ['J'],\n 'I': [],\n 'J': ['I']\n}\nprint(\"\\n\\nFor graph1:\")\nbfs(graph1, 'A')\n\ngraph2 = {\n 'A': ['B', 'C'],\n 'B': ['D'],\n 'C': [],\n 'D': ['C', 'D']\n}\nprint(\"\\n\\nFor graph2:\")\nbfs(graph2, 'A')\n","repo_name":"aravindhrs-lab/ADA-1BM18CS145","sub_path":"bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37246792058","text":"#Média Aritmética\n\nn1 = float(input('Sua primeira nota: '))\nn2 = float(input('Sua segunda nota: '))\nn = (n1 + n2)/2\n\nif(n > 6):\n print('Parabéns, sua nota é: {:.1f} e você foi aprovado no curso!'.format(n))\nelse:\n print('Sinto muito, sua nota é: {:.1f} e você reprovou nessa matéria'.format(n))","repo_name":"mateuszaparoli/curso-em-video-_-mundo1_exercicios","sub_path":"ex07.py","file_name":"ex07.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71335777320","text":"# 299. 
Bulls and Cows\n# hash\n\n# https://blog.csdn.net/fuxuemingzhu/article/details/82872065\n# running time: faster than 67.38%\nclass Solution:\n def getHint(self, secret: str, guess: str) -> str: \n d = collections.defaultdict(int)\n a, b = 0, 0\n \n for s, g in zip(secret, guess):\n if s == g:\n a += 1\n else:\n d[s] += 1\n \n for i, g in enumerate(guess):\n if secret[i] != guess[i] and d[g]:\n b += 1\n d[g] -= 1\n \n return str(a) + 'A' + str(b) + 'B'\n","repo_name":"junyang10734/leetcode-python","sub_path":"299.py","file_name":"299.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22723522413","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom gym_sudoku.envs.sudoku import Sudoku\nimport numpy as np\n\n\nclass SudokuEnv(gym.Env):\n \"\"\"\n Gym for sudoku.\n The action space consists of the 9 actions to fill numbers 1 to 9 in the cell and 4 actions to move up, down, left, right in the puzzle.\n The observation space consists of 81 cells of the puzzle and 2 indices of the pointer.\n \"\"\"\n metadata = {'render.modes': ['human']}\n\n def __init__(self):\n self._n_filled_cells = 10\n self.reset(regenerate=True)\n # we have 9 numbers that can be filled in a cell and 4 movements up,down,right,left to shift cells\n self.action_space = spaces.Discrete(9+4)\n self.observation_space = spaces.Box(low=np.array([0]*(9*9+2)),# 9*9 cells and 2 coordinates\n high=np.array([9]*9*9+[8,8]))\n\n @property\n def state(self):\n \"\"\"\n State of the environment which has the puzzle and the indices of the pointer.\n :return: state\n \"\"\"\n return np.append(self.sudoku.puzzle.flatten(), self.fill_pointer)\n\n def step(self, action):\n \"\"\"\n Run one timestep of the environment's dynamics. When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n\n Accepts an action and returns a tuple (observation, reward, done, info).\n :param action: an action provided by the agent\n :return: observation, reward, done, info\n\n \"\"\"\n if action < 9:\n if self.sudoku.puzzle[self.fill_pointer[0], self.fill_pointer[1]] == 0 and self.sudoku.check_if_number_ok(self.fill_pointer[0], self.fill_pointer[1], action + 1):\n self.sudoku.puzzle[self.fill_pointer[0], self.fill_pointer[1]] = action + 1\n else:\n return self.state, 0, True, {}\n else:\n if action == 9: # down\n if self.fill_pointer[0] < 8:\n self.fill_pointer[0] += 1\n else:\n return self.state, 0, True, {}\n elif action == 10: # up\n if self.fill_pointer[0] > 0:\n self.fill_pointer[0] -= 1\n else:\n return self.state, 0, True, {}\n elif action == 11: # right\n if self.fill_pointer[1] < 8:\n self.fill_pointer[1] += 1\n else:\n return self.state, 0, True, {}\n elif action == 12: # left\n if self.fill_pointer[1] > 0:\n self.fill_pointer[1] -= 1\n else:\n return self.state, 0, True, {}\n\n if not np.any(self.sudoku.puzzle == 0):\n return self.state, 1, True, {}\n return self.state, 0, False, {}\n\n def reset(self, regenerate=False):\n \"\"\"\n Resets the environment to an initial state and returns an initial\n observation.\n :param regenerate: boolean. 
If true, new puzzle is generated\n :return: new state\n \"\"\"\n if regenerate:\n self.sudoku = Sudoku()\n self.sudoku.generate(self._n_filled_cells)\n self.fill_pointer = np.array([0, 0])\n return self.state\n\n def render(self, mode='human'):\n \"\"\"\n Renders the environment.\n \"\"\"\n print(self.sudoku.puzzle)\n\n\n\n","repo_name":"vrundha/sudoku","sub_path":"gym_sudoku/envs/sudoku_env.py","file_name":"sudoku_env.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5348293241","text":"import json\n\n\n\ndef get_posts_all():\n \"\"\" возвращает посты\n \"\"\"\n with open(\"data/posts.json\", \"r\", encoding=\"utf-8\") as file:\n posts = json.load(file)\n return posts\n\n\ndef get_posts_by_user(user_name):\n \"\"\"\n возвращает посты определенного пользователя. Функция должна вызывать ошибку\n `ValueError` если такого пользователя нет и пустой список, если у пользователя нет постов.\n \"\"\"\n posts = get_posts_all()\n posts_of_user = []\n for post in posts:\n if user_name.lower() in post[\"poster_name\"].lower():\n posts_of_user.append(post)\n return posts_of_user\n\n raise ValueError (\"такого пользователя нет\")\n\n\ndef get_comments_by_post_id(post_id):\n \"\"\"\n возвращает комментарии определенного поста. Функция должна вызывать ошибку\n `ValueError` если такого поста нет и пустой список, если у поста нет комментов.\n \"\"\"\n with open(\"comments.json\", \"r\", encoding=\"utf-8\") as file:\n comments = json.load(file)\n comments_by_post_id = []\n for comment in comments:\n if post_id == comment[\"post_id\"]:\n comments_by_post_id.append(comment)\n return comments_by_post_id\n\n raise ValueError(\"такого поста нет\")\n\n\ndef search_for_posts(query):\n \"\"\"\n возвращает список постов по ключевому слову\n \"\"\"\n posts = get_posts_all()\n posts_with_word = []\n for post in posts:\n if query in posts[\"content\"]:\n posts_with_word.append(post)\n return posts_with_word\n\ndef get_post_by_pk(pk):\n \"\"\"\n возвращает один пост по его идентификатору\n \"\"\"\n posts = get_posts_all()\n\n for post in posts:\n if pk == post[\"pk\"]:\n return post\n\n\n#Напишите к каждой функции юнит тесты, расположите тесты в отдельной папке `/tests`.\nprint(get_posts_by_user(\"pop\"))","repo_name":"dolinenko2006/DOLINENKO_VADIM_KURSOVAJA_3","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70110708841","text":"N = int(input())\nS = [list(map(int, input().split())) for _ in range(N)]\nvisited = [0 for _ in range(N)]\nans = int(1e9)\n\ndef dfs(index, count):\n global ans\n if count == N//2:\n start, link = 0, 0\n\n for i in range(N):\n for j in range(N):\n\n if visited[i] and visited[j]:\n start += S[i][j]\n\n elif not visited[i] and not visited[j]:\n link += S[i][j]\n\n ans = min(ans, abs(start-link))\n\n for i in range(index, N):\n if visited[i]: continue\n\n visited[i] = 1\n dfs(i+1, count + 1)\n visited[i] = 0\n\ndfs(0, 0)\nprint(ans)\n\n","repo_name":"Eunjnnn/AlgorithmStudy","sub_path":"BOJ/14.백트래킹/**14889.py","file_name":"**14889.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19081510060","text":"# My own implementation \n# Time complexity = O(n)\n# Space complexity = O(1) \ndef romanToInt_me(s: str) -> int:\n s += '\\0'\n romanDict = {\"I\": 
1, \"V\": 5, \"X\":10, \"L\":50, \"C\":100 ,\"D\":500 ,\"M\":1000, \"\\0\": 0}\n flag = 0\n res = 0\n\n for i in range(len(s)-1):\n if flag == 1:\n flag = 0\n continue\n\n # Null character was added to make this valid until (len(s)-1)th iteration\n if romanDict[s[i]] < romanDict[s[i+1]]:\n res += romanDict[s[i+1]] - romanDict[s[i]]\n flag = 1 # So it won't add to res at next iteration\n else:\n res += romanDict[s[i]]\n \n return res\n\ndef romanToInt_leetcode(s: str) -> int:\n translations = {\n \"I\": 1,\n \"V\": 5,\n \"X\": 10,\n \"L\": 50,\n \"C\": 100,\n \"D\": 500,\n \"M\": 1000\n }\n number = 0\n s = s.replace(\"IV\", \"IIII\").replace(\"IX\", \"VIIII\")\n s = s.replace(\"XL\", \"XXXX\").replace(\"XC\", \"LXXXX\")\n s = s.replace(\"CD\", \"CCCC\").replace(\"CM\", \"DCCCC\")\n for char in s:\n number += translations[char]\n return number\n\ndef romanToInt_leetcodeFastest(s: str) -> int:\n Num = 0\n Roman = {\n 'I' : 1,\n 'V' : 5,\n 'X' : 10,\n 'L' : 50,\n 'C' : 100,\n 'D' : 500,\n 'M' : 1000,\n }\n Prev = 0\n for letter in s:\n Next = Roman[letter]\n if Prev >= Next:\n Num += Prev\n else:\n Num -= Prev\n Prev = Next\n print(Num)\n Num += Next\n\n return Num\n# print(romanToInt_me(\"MCMXCIV\"))\n# print(romanToInt_me(\"III\"))\n# print(romanToInt_me(\"LVIII\"))\n# print(romanToInt_leetcode(\"MCMXCIV\"))\n# print(romanToInt_leetcode(\"III\"))\n# print(romanToInt_leetcode(\"LVIII\"))\nprint(romanToInt_leetcodeFastest(\"MCMXCIV\"))\nprint(romanToInt_leetcodeFastest(\"III\"))\nprint(romanToInt_leetcodeFastest(\"LVIII\"))\n\n\"\"\"\ns = MCMXCIV\ns.append('\\0')\nres = 0\np1->p2 ... (traverse aside each other)\np1: i\np2: i+1\nflag = 0\nfor i in 0..len(s)-1:\n if flag == 1:\n flag = 0\n continue\n if s[i] < s[i+1]:\n res += s[i+1] - s[i]\n flag = 1\n else:\n res += s[i]\n\n\nIII\n123\n\n1. i = 0 \nres = 1\n2. i = 1 \nres = 2\n3. i = 2\n\nMCMXCIV\n1234567\n\n1. first iteration || i = 0\nres = 1000\nflag = 0\n2. second iteration || i = 1\nres = 1900\nflag = 1\n3. third iteration || i = 2\nflag = 0\ncontinue\n4. fourth iteration || i = 3\nres = 1990\nflag = 1\n5. fifth iteration || i = 4\nflag = 0\ncontinue\n\n\"\"\"\n\"\"\"\nDifference between my implementation vs their implementation:\n\n- Hashmap calls uses extra memory, use temp variables\n\"\"\"\n","repo_name":"alztr2908/Studying-DSA","sub_path":"Leetcode Questions/Arrays/13. 
Roman to Integer/romanToInt.py","file_name":"romanToInt.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3773497307","text":"import time\nimport keyboard\nimport _thread\n# import configuration before PyOpenfaceVideo or sys.path will not be loaded\nfrom openface_configuration import *\nimport lavatar.lavatar as lavatar\nfrom PyOpenfaceVideo import *\n\nIP = '127.0.0.1'\nPORT = 25000\nosc_sender = lavatar.init_osc_sender( IP, PORT )\n\nlast_time = time.time()\nelapsed_time = 0\n\nframe_indices = lavatar.generate_frame_indices()\nprint( frame_indices )\n\ndef ov_runner():\n\tglobal ov\n\tov.start()\n\ndef frame_raw( f ):\n\tprint( \"\\tgaze_0: \", f['gaze_0'][0], ',', f['gaze_0'][1], ',', f['gaze_0'][2] )\n\tprint( \"\\tgaze_1: \", f['gaze_1'][0], ',', f['gaze_1'][1], ',', f['gaze_1'][2] )\n\tprint( \"\\thead_rot: \", f['head_rot'][0], ',', f['head_rot'][1], ',', f['head_rot'][2] )\n\tprint( \"\\thead_pos: \", f['head_pos'][0], ',', f['head_pos'][1], ',', f['head_pos'][2] )\n\tprint( \"\\tlandmarks: \" )\n\tfor i in range( len(f['landmarks']) ):\n\t\tprint( \"\\t\\t\", i, ':', f['landmarks'][i][0], ',', f['landmarks'][i][1], ',', f['landmarks'][i][2] )\n\ndef frame_lavatar( f ):\n\tkeys = f.keys()\n\tprint( \"frame: \", f['index'] )\n\tprint( \"\\ttimestamp: \", f['timestamp'] )\n\tif 'landmarks' in keys:\n\t\tprint( \"\\tlandmarks: \" )\n\t\tfor i in range( len( f['landmarks'] ) ):\n\t\t\tprint( \"\\t\\t\", i, ':', f['landmarks'][i][0], ',', f['landmarks'][i][1], ',', f['landmarks'][i][2] )\n\tif 'gazes' in keys:\n\t\tfor i in range( len( f['gazes'] ) ):\n\t\t\tprint( \"\\tgaze_%s: \"%i, f['gazes'][i][0], ',', f['gazes'][i][1], ',', f['gazes'][i][2] )\n\tif 'pose_euler' in keys:\n\t\tprint( \"\\tpose_euler: \", f['pose_euler'][0], ',', f['pose_euler'][1], ',', f['pose_euler'][2] )\n\tif 'center' in keys:\n\t\tprint( \"\\tcenter: \", f['center'][0], ',', f['center'][1], ',', f['center'][2] )\n\tif 'min' in keys:\n\t\tprint( \"\\tmin: \", f['min'][0], ',', f['min'][1], ',', f['min'][2] )\n\tif 'max' in keys:\n\t\tprint( \"\\tmax: \", f['max'][0], ',', f['max'][1], ',', f['max'][2] )\n\tif 'size' in keys:\n\t\tprint( \"\\tsize: \", f['size'][0], ',', f['size'][1], ',', f['size'][2] )\n\t\t\ndef frame_parsing( frame ):\n\t\n\tglobal last_time\n\tglobal elapsed_time\n\t\n\tnow = time.time()\n\tdelta = now - last_time\n\tlast_time = now\n\telapsed_time += delta\n\t# enable to print raw info of OpenfaceVideo\n\t#frame_raw( frame )\n\tf = lavatar.parse_osc_frame( frame, frame_indices, elapsed_time, MAT_TRANSLATION, MAT_ROTATION )\n\t# enable to print lavatar ready frame\n\t#frame_lavatar( f )\n\tlavatar.send_full_frame( osc_sender, f, '/of' )\n\nov = OpenfaceVideo()\nov.device = 0\n# set model path: mandatory to start tracking\nprint( \">> \", OFV_LANDMARK )\nprint( \">> \", OFV_HAAR )\nprint( \">> \", OFV_MTCNN )\nprint( \">> \", OFV_AU )\nov.landmark_model = OFV_LANDMARK\nov.HAAR = OFV_HAAR\nov.MTCNN = OFV_MTCNN\n\nov.callback_frame( frame_parsing )\n\n# waiting for the OpenfaceVideo to be stopped by pressing 'q'\nprint( \"Press 'q' in the window to stop OpenfaceVideo\" )\nov.start()","repo_name":"numediart/ReVA-toolkit","sub_path":"python/openface_realtime.py","file_name":"openface_realtime.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"6605333011","text":"from os import 
removedirs\nfrom posixpath import dirname\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom cloudinary.models import CloudinaryField\n\n# Create your models here.\n\n# /* |=| Tabla que corresponde a cada una de las categorias. |=| */\n# /* |=| Ej. {Cafés de Origen, Miel de Abeja, De Cacao, etc} |=| */\n\n\nclass Categorie(models.Model):\n catId = models.AutoField(primary_key=True, editable=False)\n catName = models.CharField(max_length=32)\n catDesc = models.CharField(max_length=256, null=True, blank=True)\n\n\n# /* |=| Tabla que corresponde al tipo de presentacion de |=| */\n# /* |=| cada uno de los productos, ej. Coca Cola de 600ml |=| */\n# /* |=| donde la presentación corresponderia a los \"600ml\". |=| */\nclass Presentation(models.Model):\n presId = models.AutoField(primary_key=True, editable=False)\n presName = models.CharField(max_length=32)\n presContNet = models.DecimalField(\n max_digits=8, decimal_places=2, blank=True, null=True)\n presUnit = models.CharField(max_length=4) # Ej. Kg, mg, etc.\n\n# /* |=| Tabla que corresponde a cada uno de los paises. |=| */\n# /* |=| Ej. {México, Estados Unidos, España, Cuba, etc.} |=| */\n\n\nclass Pais(models.Model):\n countryId = models.AutoField(primary_key=True, editable=False)\n countryName = models.CharField(max_length=32)\n\n# /* |=| Tabla que corresponde a cada una de las ciudades. |=| */\n# /* |=| Ej. {Tijuana, Mexicali, Tecate, CDMX, etc.} |=| */\n\n\nclass Ciudad(models.Model):\n cityId = models.AutoField(primary_key=True, editable=False)\n cityName = models.CharField(max_length=16)\n countryId = models.ForeignKey(\n Pais, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n\n# /* |=| Tabla que corresponde a cada una de las ciudades. |=| */\n# /* |=| Ej. {Tijuana, Mexicali, Tecate, CDMX, etc.} |=| */\n\n\nclass Domicile(models.Model):\n domId = models.AutoField(primary_key=True, editable=False)\n domDomicilio = models.CharField(max_length=128)\n domPostal = models.CharField(max_length=8)\n cityId = models.ForeignKey(\n Ciudad, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n\n# /* |=| Tabla que corresponde a cada una de las sucursales. |=| */\n# /* |=| Ej. {Pendiente en el pdf de referencia.} |=| */\n\n\nclass Branch(models.Model):\n branchId = models.AutoField(primary_key=True, editable=False)\n branchName = models.CharField(max_length=128)\n domId = models.ForeignKey(\n Domicile, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n# /* |=| Tabla que corresponde a las marcas/productores. |=| */\n\n\nclass Brand(models.Model):\n brandId = models.AutoField(primary_key=True, editable=False)\n brandName = models.CharField(max_length=128)\n brandDesc = models.CharField(max_length=1024)\n brandPic = CloudinaryField('image')\n # brandRegDate = models.DateTimeField(auto_now_add=True)\n\n# /* |=| Tabla que corresponde a cada uno de los productos. |=| */\n# /* |=| Ej. 
{1, Miel de abeja, 19.99, 'Miel de abeja', , '01/05/1994', 1, 1, 1, 1}\t|=| */\n\n\nclass Products(models.Model):\n prodId = models.AutoField(primary_key=True, editable=False)\n prodName = models.CharField(max_length=128)\n prodPrice = models.DecimalField(\n max_digits=8, decimal_places=2, blank=True, null=True)\n prodDesc = models.CharField(max_length=1024)\n \n prodPic = CloudinaryField('image')\n # presId = models.ForeignKey(Presentation, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n sucId = models.ForeignKey(\n Branch, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?v\n marcId = models.ForeignKey(\n Brand, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n catId = models.ForeignKey(\n Categorie, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n rating = models.DecimalField(\n max_digits=7, decimal_places=2, verbose_name=\"Rating\", blank=True, null=True)\n numReviews = models.IntegerField(\n null=True, blank=True, default=0, verbose_name=\"Number of Reviews\")\n# |==============================================================================================| #\n# |==| Omití las clases que tiene por defecto 'models', es decir, users, groups y permissions |==| #\n# |==============================================================================================| #\n\n# /* |=| Tabla que corresponde a la relacion con la\t |=| */\n# /* |=| direccion\t\t\t\t\t\t\t\t\t\t |=| */\n\n\nclass Address(models.Model):\n addname = models.CharField(max_length=128)\n usrId = models.ForeignKey(\n User, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n domId = models.ForeignKey(\n Domicile, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n\n# /* |=| Tabla referente a el carrito de compras del usuario |=| */\n\n\nclass Orders(models.Model):\n orderId = models.AutoField(primary_key=True, editable=False)\n orderName = models.CharField(max_length=128)\n orderDate = models.DateTimeField(auto_now_add=True)\n usrId = models.ForeignKey(\n User, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n city = models.CharField(max_length=128)\n street = models.CharField(max_length=128)\n zipcode = models.CharField(max_length=128)\n country = models.CharField(max_length=128)\n\n# /* |=| Cada uno de los productos asociados a el carrito de |=| */\n# /* |=| compras.\t\t\t\t\t\t\t\t\t\t\t |=| */\n\n\nclass OrderItems(models.Model):\n mprodId = models.AutoField(primary_key=True, editable=False)\n mprodCantidad = models.IntegerField()\n mcanId = models.ForeignKey(\n Orders, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n prodId = models.ForeignKey(\n Products, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n\n# /* |=| Tabla para el sistema de rating de los productos. 
|=| */\n\n\nclass Reviews(models.Model):\n revId = models.AutoField(primary_key=True, editable=False)\n revName = models.CharField(max_length=128)\n revRating = models.IntegerField()\n revComment = models.CharField(max_length=1024)\n revDate = models.DateTimeField(auto_now_add=True)\n prodId = models.ForeignKey(\n Products, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n usrId = models.ForeignKey(\n User, on_delete=models.SET_NULL, null=True) # ¿ Nulo ?\n","repo_name":"Axolotl-Team-mx/OBIO-shop","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42584085793","text":"from core.admin.admin_perm import *\nfrom core.ibs_exceptions import *\nfrom core.errors import errorText\nfrom core.admin import perm_loader\nfrom core.group import group_main\n\ndef init():\n perm_loader.getLoader().registerPerm(\"CHANGE GROUP\",ChangeGroup)\n\nclass ChangeGroup (AllRestrictedSingleValuePermission,GroupCatPermission,Permission):\n def init(self):\n self.setDescription(\"\"\"\n Can edit and update group and group attributes\n This Permission can have 2 values:\n 1- RESTRICTED : Admin can change only groups that he is owner\n 2- ALL : Admin can change groups that he has access to , regardless of who is the owner\n\n Related Permissions: GROUP_ACCESS, ACCESS_ALL_GROUPS, ADD_NEW_GROUP\n \"\"\")\n\n self.addDependency(\"ADD NEW GROUP\")\n self.addAffectedPage(\"Groups -> Group List\")\n\n def check(self,admin_obj,admin_perm_obj,group_name):\n if admin_perm_obj.getValue()==\"All\" and not admin_obj.canUseGroup(group_name):\n raise GeneralException(errorText(\"GROUPS\",\"GROUP_CHANGE_DENIED\")%group_name)\n \n elif group_main.getLoader().getGroupByName(group_name).getOwnerID()!=admin_obj.getAdminID():\n raise GeneralException(errorText(\"GROUPS\",\"GROUP_CHANGE_DENIED\")%group_name)","repo_name":"pouyadarabi/IBSng","sub_path":"core/admin/perms/CHANGE_GROUP.py","file_name":"CHANGE_GROUP.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"18"} +{"seq_id":"13195502133","text":"global compares\ncompares = 0\nl = raw_input().split(\",\")\nl = map(int, l)\n\ndef silly(a):\n if len(a) > 10:\n return -1\n global compares\n if len(a) < 2:\n return a\n else:\n y = 0\n while y < len(a):\n b = list(a)\n i = b[0]\n b[0] = b[y]\n b[y] = i\n maybe_sorted = [b[0]] + (silly(b[1:len(b)]))\n x = 0\n t = True\n while x < len(maybe_sorted) - 1:\n compares += 1\n if maybe_sorted[x] > maybe_sorted[x + 1]:\n t = False\n break\n x += 1\n if t:\n return maybe_sorted\n y += 1\n\ndef bubble(a):\n b = list(a)\n if len(b) > 100001:\n return -1\n global compares\n while True:\n swapped_this_turn = False\n i = 0\n while i < len(b) - 1:\n compares += 1\n if b[i] > b[i + 1]:\n x = b[i]\n b[i] = b[i + 1]\n b[i + 1] = x \n swapped_this_turn = True\n i += 1\n if not swapped_this_turn:\n return b\n\ndef merge(a):\n b = list(a)\n if len(b) < 2:\n return b\n global compares\n left = b[0:len(b)//2]\n right = b[len(b)//2:len(b)]\n left = merge(left)\n right = merge(right)\n i = 0\n j = 0\n k = 0\n while i < len(left) and j < len(right):\n compares += 1\n if left[i] < right[j]:\n b[k] = left[i]\n k += 1\n i += 1\n else:\n b[k] = right[j]\n k += 1\n j += 1\n if i < len(left):\n b[k:len(a)] = left[i:len(left)]\n else:\n b[k:len(a)] = right[j:len(right)]\n return b\n\ndef lToString(l):\n s = \"\"\n for x in l:\n s += 
(str(x) + \",\")\n return s[0:len(s) - 1]\n\nprint(lToString(silly(l)))\nprint(compares)\ncompares = 0\nbubble(l)\nprint(compares)\ncompares = 0\nmerge(l)\nprint(compares)\n","repo_name":"Zilby/Algorithms","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30731257151","text":"from src.TkinterTutorial.Window import Window\nfrom Company import Company\n\ncompanies = []\n\ndef AddCompany(app):\n name = app.CreateEntryBox()\n com = Company(name)\n companies.append(com)\n\napp = Window(\"GUI\", \"1280x720\")\napp.AddButton(\"Button\", AddCompany(app), 50, 50)\napp.mainloop()\n\nfor company in companies:\n print (company.name)\n\n","repo_name":"dunkybaldy/Python","sub_path":"src/CompanySim/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42410174602","text":"class Solution(object):\n def countPrefixes(self, words, s):\n \"\"\"\n :type words: List[str]\n :type s: str\n :rtype: int\n \"\"\"\n count = 0\n for i in range(len(s) + 1):\n if s[:i] in words:\n count += words.count(s[:i])\n return count","repo_name":"quinonesnn/LeetCode","sub_path":"2255-count-prefixes-of-a-given-string/2255-count-prefixes-of-a-given-string.py","file_name":"2255-count-prefixes-of-a-given-string.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}