diff --git "a/1053.jsonl" "b/1053.jsonl" new file mode 100644--- /dev/null +++ "b/1053.jsonl" @@ -0,0 +1,761 @@ +{"seq_id":"568696176","text":"import sys\nsys.path.append(\"../\")\nfrom create import *\n\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n if not l1:\n return l2\n if not l2:\n return l1\n head = t = ListNode(0)\n while l1 and l2:\n if l1.val < l2.val:\n t.next = l1\n l1 = l1.next\n else:\n t.next = l2\n l2 = l2.next\n t = t.next\n if l1:\n t.next = l1\n if l2:\n t.next = l2\n return head.next\nif __name__ == \"__main__\":\n nums1, nums2 = [1,3,5,7,9], [2,4,6,8]\n #nums = [1,4,3]\n\n l = LinkList()\n h1 = l.create_list(nums1)\n h2 = l.create_list(nums2)\n l.print_list(h1)\n l.print_list(h2)\n app = Solution()\n newh = app.mergeTwoLists(h1, h2)\n l.print_list(newh)\n","sub_path":"exercise/lc/tag/list/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"125025429","text":"# Base converter.\ndef denary_to_base(base, number):\n assert(base < 37 and base > 1), \"Chosen base is out of range [2-36].\"\n number = int(number)\n if number < 0:\n negative = True\n else:\n negative = False\n mod = abs(number)\n values = [mod%base]\n while mod >= base:\n mod = mod//base\n values.append(mod%base)\n values = values[::-1]\n for index, integer in enumerate(values):\n if integer >= 10:\n values[index] = chr(integer + 55)\n result = ''.join(str(x) for x in values)\n if negative:\n result = \"-\" + result\n return result\n\n# Base de-converter.\ndef base_to_denary(base, number):\n assert(base < 37 and base > 1), \"Chosen base is out of range [2-36].\"\n number = str(number).upper()\n if number.startswith(\"-\"):\n negative = True\n number = number[1:]\n else:\n negative = False\n values = []\n for digit in number:\n if digit.isalpha():\n digit = ord(digit) - 55\n assert(base > int(digit)), \"Number not of base \" + str(base) + \".\"\n 
values.append(int(digit))\n values = values[::-1]\n result = 0\n for index, integer in enumerate(values):\n result += (base**index) * integer\n if negative:\n result = \"-\" + result\n return int(result)\n\n\ndef base_convert(init_base, res_base, number):\n assert(init_base < 37 and init_base > 1), \"Initial base is out of range [2-36].\"\n assert(res_base < 37 and res_base > 1), \"Resultant base is out of range [2-36].\"\n number = str(number).upper()\n if number.startswith(\"-\"):\n negative = True\n number = number[1:]\n else:\n negative = False\n values = []\n for digit in number:\n if digit.isalpha():\n digit = ord(digit) - 55\n assert(init_base > int(digit)), \"Number not of base \" + str(init_base) + \".\"\n values.append(int(digit))\n values = values[::-1]\n denary = 0\n for index, integer in enumerate(values):\n denary += (init_base**index) * integer\n mod = denary\n values = [mod%res_base]\n while mod >= res_base:\n mod = mod//res_base\n values.append(mod%res_base)\n values = values[::-1]\n for index, integer in enumerate(values):\n if integer >= 10:\n values[index] = chr(integer + 55)\n result = ''.join(str(x) for x in values)\n if negative:\n result = \"-\" + result\n return result\n","sub_path":"base_converter.py","file_name":"base_converter.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"331847681","text":"import os \nimport nltk\nimport numpy as np\nimport nltk.tree as Tree\nfrom nltk import treetransforms, induce_pcfg, Nonterminal\n\nclass preparation():\n\n\tdef __init__(self):\n\t\ttrain_file = open(\"filename\",\"w\")\n\t\twith open(\"sequoia-corpus+fct.mrg_strict.txt\",\"r\") as file:\n\t\t\tdata = file.readlines() #split into lines\n\t\n\t\t\tfor line in data :\n\t\t\t\t#we remove first and last 2 character, we cannot do that in words because when we split we have words and no more characters\n\t\t\t\twords = line[1:-2].split() #words is the 
list of words for sentence line, we supprime the first and the last 2 character\n\t\t\t\tfor w in words : # we take each words\n\t\t\t\t\tif w[0] == \"(\": #If the first character is a \"(\" we know that it is not terminal so we remove \"-\" when we find one\n\t\t\t\t\t\tif \"-\" in w:\n\t\t\t\t\t\t\ttrain_file.write(w[:w.index(\"-\")]) #W.index(\"-\") gives the place where \"-\" is.\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\ttrain_file.write(w)\n\t\t\t\t\telse : #it's terminal, so we write the word even if we have \"-\" because it could be a a date for example\n\t\t\t\t\t\ttrain_file.write(w)\n\t\t\t\t\ttrain_file.write(\" \") # we need to add a spaxce between words\n\t\t\t\ttrain_file.write(\"\\n\") #we finish the sentence, we write in another line\n\n\t\ttrain_file.close()\n\n\n\n\t\t#WE SPLIT INTO TRAIN VALIDATION and TEST set\n\t\tfile1 = open(\"train_file\",\"w\")\n\t\tfile2 = open(\"validation_file\",\"w\")\n\t\tfile3 = open(\"test_file\",\"w\")\n\t\twith open(\"filename\",\"r\") as f:\n\t\t\tdata = f.readlines()\n\t\t\tlongueur = len(data)\n\t\t\tfor line in data[:int(longueur*0.8)]:\n\t\t\t\twords = line.split()\n\t\t\t\tfor w in words : \n\t\t\t\t\tfile1.write(w)\n\t\t\t\t\tfile1.write(\" \")\n\t\t\t\tfile1.write(\"\\n\")\n\n\t\t\tfor line in data[int(longueur*0.8) : int(longueur*0.9)]:\n\t\t\t\twords = line.split()\n\t\t\t\tfor w in words : \n\t\t\t\t\tfile2.write(w)\n\t\t\t\t\tfile2.write(\" \")\n\t\t\t\tfile2.write(\"\\n\")\n\n\t\t\tfor line in data[int(longueur*0.9):]:\n\t\t\t\twords = line.split()\n\t\t\t\tfor w in words : \n\t\t\t\t\tfile3.write(w)\n\t\t\t\t\tfile3.write(\" \")\n\t\t\t\tfile3.write(\"\\n\")\n\n\t\tfile1.close()\n\t\tfile2.close()\n\t\tfile3.close()\n\n\n\n\t#We create the file with just word and not the grammar\n\t\ttest_sentence = open(\"test_sentence\",\"w\")\n\t\twith open(\"test_file\",\"r\") as txt:\n\t\t\tfor phrase in txt :\n\t\t\t\ttest_tree = nltk.tree.Tree.fromstring(phrase)\n\t\t\t\tfor word in 
test_tree.leaves():\n\t\t\t\t\ttest_sentence.write(word)\n\t\t\t\t\ttest_sentence.write(\" \")\n\t\t\t\ttest_sentence.write(\"\\n\")\n\n\t\n\t\ttest_sentence.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"MVA_TD2_LEVY_John/system/extraction_preparation.py","file_name":"extraction_preparation.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"151756386","text":"#!/usr/bin/env python3\n\"\"\"Dmenu script for getting information about contacts from vcard files\"\"\"\n\nimport os\nimport subprocess\nimport person\nfrom vcards import *\nfrom utils import *\n\nPATH = f\"{os.getenv('HOME')}/.contacts/contacts/\"\n\n# user chooses what he wants to know\nquery = subprocess.run([\"dmenu\", \"-i\", \"-p\", \"Query:\"], input=b\"phone\\nemail\\nbirthday\",\n stdout=subprocess.PIPE, check=True).stdout.decode(\"UTF-8\")[:-1]\nif query == \"\":\n exit(1)\n\n\n# get the list of files\nfiles = [f\"{PATH}{item}\" for item in os.listdir(PATH)]\n\n# list of all people using the Person class\npeople = []\n\nfor file in files:\n with open(file, \"r\") as curr:\n content = curr.read()\n name = parse_name(get_name(content))\n phone = get_phone(content)\n email = get_email(content)\n birthday = get_birthday(content)\n people += [person.Person(name[\"first name\"],\n name[\"last name\"], email, phone, birthday)]\n\nnames = [person.get_unaccented_name() for person in people]\nselected = subprocess.run(\n [\"dmenu\", \"-i\", \"-p\", \"Choose person: \"], input=\"\\n\".join(names).encode(\"UTF-8\"), stdout=subprocess.PIPE, check=True)\n\nselected = selected.stdout.decode(\"UTF-8\")[:-1]\n\n\nfor person in people:\n if person.get_unaccented_name() == selected:\n if query == \"email\":\n if len(person.email) > 0:\n if len(person.email) == 1:\n res = str(list(person.email.values())[0])\n print(res)\n copy_to_clipboard(res)\n else:\n cmd = subprocess.run([\"dmenu\", \"-i\", \"-p\", \"Choose 
email: \"], input=\"\\n\".join(\n list(person.email.keys())).encode(\"UTF-8\"), stdout=subprocess.PIPE, check=True)\n res = person.email[cmd.stdout.decode(\"UTF-8\")[:-1]]\n print(res)\n copy_to_clipboard(res)\n\n else:\n print(\"No information about this contact\")\n notify(\"No information about this contact\")\n\n elif query == \"phone\":\n print(person.phone)\n if len(person.phone) > 0:\n if len(person.phone) == 1:\n res = str(list(person.phone.values())[0])\n print(res)\n copy_to_clipboard(res)\n else:\n cmd = subprocess.run([\"dmenu\", \"-i\", \"-p\", \"Choose phone: \"], input=\"\\n\".join(\n list(person.phone.keys())).encode(\"UTF-8\"), stdout=subprocess.PIPE, check=True)\n res = person.phone[cmd.stdout.decode(\"UTF-8\")[:-1]]\n print(res)\n copy_to_clipboard(res)\n\n else:\n print(\"No information about this contact\")\n notify(\"No information about this contact\")\n elif query == \"birthday\":\n if len(person.birthday) > 0:\n print(person.birthday)\n text = f\"{person.get_name()} has birthday on {person.birthday}.\"\n notify(text)\n else:\n print(\"No information about this contact\")\n notify(\"No information about this contact\")\n break\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"83600687","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n # http://localhost:8000/blog/1\n path('', views.blog_list, name='blog_list'),\n path('', views.blog_detail, name='blog_detail'),\n # http://localhost:8000/blog/type/1\n path('type/', views.blogs_with_type, name='blogs_with_type'),\n path('author/', views.blogs_with_author, name='blogs_with_author'),\n]","sub_path":"myblog/mysite_env/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139295314","text":"from common.lib.servers.Pulser2.pulse_sequences.pulse_sequence import pulse_sequence\nfrom barium.lib.scripts.pulse_sequences.sub_sequences.DopplerCooling133 import doppler_cooling_133 as doppler_cooling_133\n\"\"\"\n6/17/17\nKeeping the same format as optical pumping, but here the TTL is not auto inverted so\nwe use ttl high to turn on, and off time is just empty space at the end if needed.\n\"\"\"\n\nclass state_detection_133(pulse_sequence):\n\n required_parameters = [\n ('StateDetection133', 'state_detection_duration'),\n ('StateDetection133', 'TTL_493'),\n ('StateDetection133', 'TTL_650')\n ]\n\n #required_parameters.extend(doppler_cooling_133.all_required_parameters())\n\n def sequence(self):\n # start time is defined to be 0s.\n p = self.parameters.StateDetection133\n\n\n self.ttl_493 = p.TTL_493\n self.ttl_650 = p.TTL_650\n\n self.addTTL('TimeResolvedCount', self.start, p.state_detection_duration)\n self.addTTL(self.ttl_493, self.start, p.state_detection_duration)\n self.addTTL(self.ttl_650, self.start, p.state_detection_duration)\n self.end = self.start + p.state_detection_duration\n\n","sub_path":"lib/scripts/pulse_sequences/sub_sequences/StateDetection133.py","file_name":"StateDetection133.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574617130","text":"from sqlalchemy.orm import Session\nfrom 
server import models\nfrom server.elastic import Elastic\nelastic = Elastic()\n\n\ndef delete_post_by_id(id_delete: int):\n deleted_post = elastic.search_by_id(id_delete)\n\n if deleted_post is not None:\n elastic_deleted = elastic.delete_by_id(deleted_post[0]['_id'])\n\n return elastic_deleted\n return False\n\n\ndef get_posts(db: Session, text: str):\n id_list = [item[\"id\"] for item in elastic.search_by_text(text)]\n\n result = db.query(models.Post) \\\n .filter(models.Post.id.in_(id_list)) \\\n .order_by(models.Post.created_date.desc()) \\\n .all()\n return result\n","sub_path":"server/db_crud.py","file_name":"db_crud.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"500721502","text":"from front_machine.config.parser import get_config_from_json\nimport pandas as pd\nimport argparse\nimport time\nimport zmq\n\ndef result_collector(address, outputPath, numTerminate, is_test=False):\n \"\"\"\n takes controur values of an image and save them in a text file.\n Args:\n address : string of the ip address followed by the port to make the connection with contours_node.\n outputPath: string path to the output text.\n numTerminate: number of terminates to be sent\n \"\"\"\n #make the connections\n context = zmq.Context()\n results_receiver = context.socket(zmq.PULL)\n results_receiver.bind(address)\n\n #create an output dictionary\n out_dict = {\"Frame Number\": [], \"Contours\": []}\n counter = 0\n TerminationCount = 0\n\n #receive the contours and save them in a txt file\n while True:\n if TerminationCount == numTerminate:\n break\n\n work = results_receiver.recv_pyobj()\n data = work['contours']\n\n if len(data) == 0:\n TerminationCount += 1\n continue\n\n #add the results to output dictionary\n out_dict[\"Frame Number\"].append(\"Frame #{}\".format(counter))\n out_dict[\"Contours\"].append(data)\n counter += 1\n\n #create a dataframe and write outputs\n out_df = 
pd.DataFrame(out_dict, columns=[\"Frame Number\", \"Contours\"])\n out_df.to_csv(outputPath)\n\n # return if the caller is a test\n if is_test:\n return\n\n # wait for the other processes to finish \n # time.sleep(10) \n\ndef main():\n \"\"\"Main driver of output node\"\"\"\n argparser = argparse.ArgumentParser(description=__doc__)\n argparser.add_argument('-t', '--text_path', type=str, help='path to the output text')\n argparser.add_argument('-n', '--total_num', type=int, help='total number of consumer nodes')\n \n args = argparser.parse_args()\n\n config = get_config_from_json(\"front_machine/config/server.json\") # get other nodes addresses from json config\n\n result_collector(config.output_socket, args.text_path, args.total_num) # call the output collector process\n\nif __name__=='__main__':\n main()","sub_path":"front_machine/output_node.py","file_name":"output_node.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"295922766","text":"'''\n@author:sjie\n'''\nimport pymysql\nimport re\n#连接数据库\ndef get_newprice(markercode):\n url='''http://market43.gdiex.com/market/status.do?market=GDIEX&contract={markercode}'''.format(markercode=markercode)\n response = urllib.request.urlopen(url)\n data = response.read()\n data = data.decode('utf-8')\n #print(data)\n data_split = re.split(',',data)[6].split(':')\n data_dict = {}\n data_dict['status'] = data_split[1]\n for status,value in data_dict.items():\n return (value)\n\ndef connect_mysql():\n conn = pymysql.connect(host='10.0.1.61',user='shengjie',passwd='520xiaowen',db='fxeasy')\n try:\n with conn.cursor() as cursor:\n sql = \"SELECT count(1) FROM et_storages WHERE buying_date >= '2016-12-07 08:00:00'\"\n cursor.execute(sql)\n result=cursor.fetchone()\n print(str(result)[1:-2])\n except Exception as e:\n print(e)\n finally:\n 
conn.close()\n\nconnect_mysql()","sub_path":"python/php/python/Mysql.py","file_name":"Mysql.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"291773482","text":"import time\nimport RPi.GPIO as GPIO\n\nclass UltrasonicSensor():\n def __init__(self, triggerPin,echoPin):\n GPIO.setup(triggerPin, GPIO.OUT)\n GPIO.setup(echoPin, GPIO.IN)\n self.triggerPin = triggerPin\n self.echoPin = echoPin\n \n def getDistanceInCentimeters(self):\n GPIO.output(self.triggerPin, False)\n time.sleep(2)\n\n GPIO.output(self.triggerPin, True)\n time.sleep(0.00001)\n GPIO.output(self.triggerPin, False)\n\n while GPIO.input(self.echoPin)==0:\n pulse_start = time.time()\n while GPIO.input(self.echoPin)==1:\n pulse_end = time.time() \n\n pulse_duration = pulse_end - pulse_start \n distance = pulse_duration * 17150\n distance = round(distance, 2)\n return distance\n","sub_path":"Classes/UltrasonicSensor.py","file_name":"UltrasonicSensor.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"60479019","text":"from numpy import isnan, transpose\nfrom scipy.io import loadmat\n\ntry:\n from h5py import File\nexcept ImportError:\n File = None\n\n\ndef import_electrodes(mat_file, n_chan):\n\n try:\n mat_all = loadmat(mat_file)\n for varname, mat in mat_all.items():\n if varname.startswith('__'):\n continue\n elec = _find_electrodes(mat, n_chan)\n if elec is not None:\n return elec\n\n except NotImplementedError:\n if File is None:\n raise ImportError('You need to install h5py to open this file')\n\n with File(mat_file, 'r') as f:\n for varname in f:\n mat = transpose(f[varname][()])\n elec = _find_electrodes(mat, n_chan)\n if elec is not None:\n return elec\n\n return None\n\n\ndef _find_electrodes(mat, n_chan):\n print(f'Number of electrodes in mat file: {mat.shape[0]}')\n if mat.shape[0] == n_chan:\n return 
mat\n\n has_nan = isnan(mat).all(axis=1)\n mat = mat[~has_nan, :3]\n\n print(f'Number of electrodes in mat file without nan: {mat.shape[0]}')\n if mat.shape[0] == n_chan:\n return mat\n\n return None\n","sub_path":"xelo2/io/electrodes.py","file_name":"electrodes.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328071653","text":"from decimal import Decimal\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom btcrpc.utils import constantutil\nfrom btcrpc.utils.config_file_reader import ConfigFileReader\nfrom btcrpc.vo.wallet_balance import (\n GetWalletBalancePostParameterSerializer,\n WalletsBalanceResponseSerializer)\nfrom btcrpc.utils.rpc_calls.rpc_instance_generator import RpcGenerator\nfrom btcrpc.utils.chain_enum import ChainEnum\nimport logging\n\nlog = logging.getLogger(__name__)\nyml_config = ConfigFileReader()\n\n\nclass CheckWalletsBalance(APIView):\n\n def post(self, request):\n post_serializers = GetWalletBalancePostParameterSerializer(\n data=request.data)\n post_serializers.is_valid(raise_exception=True)\n\n chain = ChainEnum.UNKNOWN\n wallet_balance_response_list = []\n currency = post_serializers.data[\"currency\"]\n wallet_list = yml_config.get_wallet_list(currency)\n log.info(wallet_list)\n\n for wallet_json in wallet_list:\n wallet = wallet_json[\"wallet_name\"]\n wallet_type = wallet_json[\"wallet_type\"]\n\n log.info(wallet)\n rpc_call = RpcGenerator.get_rpc_instance(wallet=wallet,\n currency=currency)\n chain = constantutil.check_service_chain(rpc_call)\n log.info(chain)\n balance = rpc_call.get_wallet_balance()\n log.info(format(balance, \"0.8f\"))\n wallet_balance_response = {\n \"wallet\": wallet,\n \"wallet_type\": wallet_type,\n \"balance\": Decimal(balance),\n \"chain\": chain.value,\n \"error\": 0,\n \"error_message\": \"\"}\n\n log.info(wallet_balance_response)\n 
wallet_balance_response_list.append(wallet_balance_response)\n\n s = WalletsBalanceResponseSerializer(\n data={\"wallets\": wallet_balance_response_list})\n s.is_valid(raise_exception=True)\n return Response(s.data)\n","sub_path":"btcxblockchainapi/btcrpc/view/check_wallets_balance.py","file_name":"check_wallets_balance.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"583700096","text":"############################################\n###\n#\n# This file contains codes for processing\n# data of density of states, usually got\n# from Wang-Landau simulation\n#\n# @Author: Jerry Shi\n# @Date: Jun 6th, 2013\n#\n###\n############################################\n\n\nimport numpy as np\n\n#####################\n# @input:\n# 1. ndarray: [0] energy [1-] ln(dos) for runs\n# 2. dt\n# 3. num_points (temperature points)\n# 4. sys_size (size of system, to calculate specific heat)\n# @return:\n# 1. ndarray: [0] temperature [1] Cv [2] eb_Cv [3] aveE [4] eb_aveE\ndef thermo(ln_dos, dT, num_points, sys_size):\n kb=1\n num_runs = ln_dos.shape[1] -1\n thermo_result=np.zeros((num_points,5))\n size = ln_dos.shape[0] # number of points of density of states\n\n for i in range(0,num_points):\n T = i*dT+dT\n sumE = np.zeros((1,num_runs))\n sumE2 = np.zeros((1,num_runs))\n factor = np.reshape(-ln_dos[0,0]/(kb*T),(1,1))\n normal = np.reshape(ln_dos[0,1:num_runs+1], (1,num_runs)) + factor\n for j in range(1,size):\n factor = np.reshape(-ln_dos[j,0]/(kb*T),(1,1))\n normal += np.reshape( np.log1p( np.exp( np.reshape(ln_dos[j,1:num_runs+1],(1,num_runs)) + factor - normal) ) ,(1,num_runs))\n\n for j in range(0,size):\n factor = np.reshape(-ln_dos[j,0]/(kb*T),(1,1))\n poss = np.reshape( np.exp( np.reshape(ln_dos[j,1:num_runs+1],(1,num_runs)) + factor - normal),(1,num_runs))\n sumE += ln_dos[j,0]*poss\n sumE2 += np.square(ln_dos[j,0])*poss\n\n sumE = np.reshape(sumE, (1,num_runs))\n sumE2 = 
np.reshape(sumE2, (1,num_runs))\n cv = np.reshape((sumE2-sumE*sumE)/T/T/sys_size,(1,num_runs))\n\n thermo_result[i] = [ T ,\n np.mean(cv,axis=1) ,\n np.std(cv,axis=1) ,\n np.mean(sumE,axis=1) ,\n np.std(sumE,axis=1) ]\n return thermo_result\n\n","sub_path":"backup/script/Library/DOS.py","file_name":"DOS.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"150714480","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# This is easiest to run with helper script ./tools/pytest.sh\n\n\n__author__ = 'jtmoon79'\n__doc__ = \\\n \"\"\"Test the goto_http_redirect_server project using pytest.\"\"\"\n\nfrom collections import defaultdict\nfrom datetime import datetime\nimport getpass\nimport http\nfrom http import client\nimport threading\nimport time\nimport typing\nfrom urllib.parse import ParseResult\n\nimport pytest\n\nimport goto_http_redirect_server\nfrom goto_http_redirect_server.goto_http_redirect_server import (\n Re_User,\n Re_Date,\n Re_Entry,\n Re_EntryType,\n Re_EntryKey,\n Re_Entry_Dict,\n FromTo_List,\n REDIRECT_PATHS_NOT_ALLOWED,\n REDIRECT_CODE_DEFAULT,\n html_escape,\n html_a,\n htmls,\n print_debug,\n fromisoformat,\n to_ParseResult,\n redirect_handler_factory,\n RedirectHandler,\n RedirectServer,\n RedirectsLoader,\n)\nstr_None = typing.Optional[str]\n\n# override for comparisons of datetime.now() generated values\nNOW = datetime.now().replace(microsecond=0)\ngoto_http_redirect_server.goto_http_redirect_server.DATETIME_START = NOW\ngoto_http_redirect_server.goto_http_redirect_server.datetime_now = lambda: NOW\n# need something different than NOW\nLATER = datetime.now()\nLATER = LATER.replace(second=(LATER.second + 1 if LATER.second < 59 else 0))\n\nUSER = getpass.getuser()\n\n# shorten some names for clarity\ntopr = to_ParseResult\nET = Re_EntryType\n\n\n# all committed test resources should be under this directory\n#resources = 
Path.joinpath(Path(__file__).parent, 'test_resources')\n\n\ndef pr(**kwargs):\n \"\"\"create a ParseResult, sets unset parameters to empty string\"\"\"\n args = defaultdict(str, kwargs)\n return ParseResult(\n scheme=args['scheme'],\n netloc=args['netloc'],\n path=args['path'],\n params=args['params'],\n query=args['query'],\n fragment=args['fragment'],\n )\n\n\nclass Test_ClassesSimple(object):\n \"\"\"basic building-block classes\"\"\"\n\n @pytest.mark.parametrize(\n 'entry_args, entry_kwargs,'\n 'entry_expected, raises',\n (\n # basic error case\n pytest.param((), {},\n None, ValueError),\n # basic happy path\n pytest.param(('a', 'b'), {},\n Re_Entry('a', 'b'), None),\n # different Re_EntryType\n pytest.param(('a', 'b'), {},\n Re_Entry('a', 'b', USER, NOW, topr('a'), topr('b'), ET._), None),\n pytest.param(('a;', 'b'), {},\n Re_Entry('a;', 'b', USER, NOW, topr('a;'), topr('b'), ET._P), None),\n pytest.param(('a;?', 'b'), {},\n Re_Entry('a;?', 'b', USER, NOW, topr('a;?'), topr('b'), ET._PQ), None),\n pytest.param(('a?', 'b'), {},\n Re_Entry('a?', 'b', USER, NOW, topr('a?'), topr('b'), ET._Q), None),\n # different args\n pytest.param(('a', 'b', 'u3'), {},\n Re_Entry('a', 'b', 'u3', NOW, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a')), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b')), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n # different kwargs\n pytest.param(('a', 'b'), {'user': 'u3'},\n Re_Entry('a', 'b', 'u3', NOW, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER},\n Re_Entry('a', 
'b', 'u3', LATER, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER},\n Re_Entry('a', 'b', 'u3', LATER, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER), {'from_pr': topr('NOT a')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER, 'from_pr': topr('NOT a')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a')), {'to_pr': topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER), {'from_pr': topr('NOT a'), 'to_pr': topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER, 'from_pr': topr('NOT a'), 'to_pr': topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a'), 'to_pr' :topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b')), {'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a')), {'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b', 'u3', LATER), {'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER, 'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', 
LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n # all kwargs\n pytest.param((), {'from_': 'a', 'to': 'b', 'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n )\n )\n def test_Re_Entry(self,\n entry_args,\n entry_kwargs,\n entry_expected,\n raises):\n if raises:\n with pytest.raises(raises):\n Re_Entry(*entry_args, **entry_kwargs)\n else:\n entry = Re_Entry(*entry_args, **entry_kwargs)\n assert entry == entry_expected\n\n\nclass Test_Functions(object):\n\n @pytest.mark.parametrize(\n 's_, expected',\n (\n pytest.param('', htmls(''),),\n pytest.param('A', htmls('A'),),\n pytest.param('&', htmls('&'),),\n pytest.param('<>', htmls('<>'),),\n pytest.param('foo\\nbar', htmls('foo
\\nbar'),),\n )\n )\n def test_html_escape(self, s_: str, expected: htmls):\n actual = html_escape(s_)\n assert expected == actual\n assert type(actual) == type(expected)\n\n @pytest.mark.parametrize(\n 'href, text, expected',\n (\n pytest.param('', None, ''),\n pytest.param('', '', ''),\n pytest.param('ABC', None, 'ABC'),\n pytest.param('ABC', '', ''),\n pytest.param('ABC', '123', '123'),\n pytest.param('<>', '<>', '\"><>'),\n )\n )\n def test_html_a(self,\n href: str,\n text: str_None,\n expected: str):\n actual = html_a(href, text)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'dts, expected',\n (\n # these two cases will differ from Python 3.5 and subsequent Python versions\n #pytest.param('2001-01-02 03 04 05', datetime(year=2001, month=1, day=2, hour=3, minute=4, second=5)),\n #pytest.param('2002/01/02 03:04:05', datetime(year=2002, month=1, day=2, hour=3, minute=4, second=5)),\n pytest.param('2003-01-02 03:04:05', datetime(year=2003, month=1, day=2, hour=3, minute=4, second=5)),\n pytest.param('2004-01-02T03:04:05', datetime(year=2004, month=1, day=2, hour=3, minute=4, second=5)),\n pytest.param('BAD STRING', NOW),\n )\n )\n def test_fromisoformat(self,\n dts: str,\n expected: datetime):\n actual = fromisoformat(dts)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'pr1, pr2, expected',\n (\n pytest.param(pr(path='a'), pr(path='a'), True),\n pytest.param(pr(path='a'), pr(path='a', query='b'), True),\n pytest.param(pr(path='a'), pr(path='b'), False),\n pytest.param(pr(query='a'), pr(path='b', query='a'), False),\n )\n )\n def test_query_match(self,\n pr1: ParseResult,\n pr2: ParseResult,\n expected: bool):\n assert RedirectHandler.query_match(pr1, pr2) is expected\n\n @pytest.mark.parametrize(\n 'ppq, ppqpr,'\n 'redirects,'\n 'entry',\n (\n pytest.param(\n '/a0', pr(path='/a0'),\n {'/a0': Re_Entry('/a0', '/b')},\n Re_Entry('/a0', '/b')\n ),\n pytest.param(\n '/a1', pr(path='/a1'),\n {'/b': Re_Entry('/a1', '/b')},\n None,\n ),\n 
pytest.param(\n '/a2', pr(path='/a2'),\n {'/a2': Re_Entry('/a2', '/b'), '/a2;': Re_Entry('/a2;', '/b')},\n Re_Entry('/a2', '/b'),\n ),\n pytest.param(\n '/a3', pr(path='/a3'),\n {'/a3;': Re_Entry('/a3;', '/b'), '/a3;?': Re_Entry('/a3;?', '/b'), '/a3?': Re_Entry('/a3?', '/b'), '/a3': Re_Entry('/a3', '/b')},\n Re_Entry('/a3', '/b'),\n ),\n pytest.param(\n '/a4', pr(path='/a4'),\n {'/a4;': Re_Entry('/a4;', '/b'), '/a4?': Re_Entry('/a4?', '/b'), '/a4': Re_Entry('/a4', '/b'), '/a4;?': Re_Entry('/a4;?', '/b')},\n Re_Entry('/a4', '/b'),\n ),\n pytest.param(\n '/a5;c', pr(path='/a5', params='c'),\n {'/a5': Re_Entry('/a5', '/b'), '/a5;': Re_Entry('/a5;', '/b')},\n Re_Entry('/a5;', '/b'),\n ),\n pytest.param(\n '/a?00', pr(path='/a', query='00'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b')},\n None,\n ),\n pytest.param(\n '/a?01', pr(path='/a', query='01'),\n {'/a': Re_Entry('/a', '/b'), '/a;': Re_Entry('/a;', '/b')},\n Re_Entry('/a', '/b'),\n ),\n pytest.param(\n '/a;02', pr(path='/a', params='02'),\n {'/a': Re_Entry('/a', '/b'), '/a?': Re_Entry('/a?', '/b')},\n Re_Entry('/a', '/b'),\n ),\n pytest.param(\n '/a;03', pr(path='/a', params='03'),\n {'/a;?': Re_Entry('/a;?', '/b'), '/a?': Re_Entry('/a?', '/b')},\n None,\n ),\n pytest.param(\n '/a?04', pr(path='/a', query='04'),\n {'/a;': Re_Entry('/a;', '/b'), '/a?': Re_Entry('/a?', '/b')},\n Re_Entry('/a?', '/b'),\n ),\n pytest.param(\n '/a?05', pr(path='/a', query='05'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b')},\n None,\n ),\n pytest.param(\n '/a?06', pr(path='/a', query='06'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b'), '/a?': Re_Entry('/a?', '/b')},\n Re_Entry('/a?', '/b'),\n ),\n pytest.param(\n '/a?07', pr(path='/a', query='07'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b'), '/a?': Re_Entry('/a?', '/b'), '/a': Re_Entry('/a', '/b')},\n Re_Entry('/a?', '/b'),\n ),\n # XXX: Disable Path Required Request Modifier\n # with paths\n # 
pytest.param(\n # '/d/path?00', pr(path='/d/path', query='00'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b')},\n # None,\n # ),\n # pytest.param(\n # '/d/path?01', pr(path='/d/path', query='01'),\n # {'/d': Re_Entry('/d', '/b'), '/d/?': Re_Entry('/d/?', '/b')},\n # Re_Entry('/d/?', '/b'),\n # ),\n # pytest.param(\n # '/d;02', pr(path='/d', params='02'),\n # {'/d': Re_Entry('/d', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # Re_Entry('/d', '/b'),\n # ),\n # pytest.param(\n # '/d;03', pr(path='/d', params='03'),\n # {'/d;?': Re_Entry('/d;?', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # None,\n # ),\n # pytest.param(\n # '/d?04', pr(path='/d', query='04'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # Re_Entry('/d?', '/b'),\n # ),\n # pytest.param(\n # '/d?05', pr(path='/d', query='05'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b')},\n # None,\n # ),\n # pytest.param(\n # '/d?06', pr(path='/d', query='06'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # Re_Entry('/d?', '/b'),\n # ),\n # pytest.param(\n # '/d?07', pr(path='/d', query='07'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b'), '/d?': Re_Entry('/d?', '/b'), '/d': Re_Entry('/d', '/b')},\n # Re_Entry('/d?', '/b'),\n # ),\n )\n )\n def test_query_match_finder(self,\n ppq: str, ppqpr: ParseResult,\n redirects: Re_Entry_Dict,\n entry: Re_Entry):\n assert RedirectHandler.query_match_finder(\n ppq, ppqpr,\n redirects) == entry\n\n @pytest.mark.parametrize(\n 'pr1,'\n 'pr2,'\n 'expected',\n (\n # URI component parts\n # https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlparse\n #\n # empty test cases\n pytest.param(\n pr(),\n pr(),\n '',\n id='(empty)'\n ),\n pytest.param(\n pr(scheme='http'),\n pr(scheme='http'),\n r'http://',\n id='scheme http'\n ),\n pytest.param(\n pr(scheme='https'),\n pr(scheme='http'),\n r'https://',\n id='scheme pr2'\n ),\n pytest.param(\n 
pr(scheme='https', netloc='a', path='b', params='c', query='d', fragment='e'),\n pr(),\n r'https://a/b;c?d#e',\n id='pr1 only'\n ),\n pytest.param(\n pr(),\n pr(scheme='https', netloc='a', path='b', params='c', query='d', fragment='e'),\n r';c?d#e',\n id='pr2 only'\n ),\n pytest.param(\n pr(),\n pr(scheme='https', netloc='a', path='b', params='c', query='d', fragment='e'),\n r';c?d#e',\n id='pr2 only'\n ),\n # precedence test cases\n pytest.param(\n pr(scheme='ftp', netloc='a1'),\n pr(scheme='ftp', netloc='a2'),\n r'ftp://a1',\n id='pr1.netloc'\n ),\n pytest.param(\n pr(scheme='ftp', netloc='a1', path='b1'),\n pr(scheme='ftp', netloc='a2', path='b2'),\n r'ftp://a1/b1',\n id='pr1.netloc pr1.path'\n ),\n pytest.param(\n pr(scheme='ftp', netloc='a1', query='d1'),\n pr(scheme='ftp', netloc='a2', query='d2'),\n r'ftp://a1?d1&d2',\n id='pr1.netloc pr1&2.query'\n ),\n pytest.param(\n pr(scheme='ftp', netloc='a1', fragment='f1'),\n pr(scheme='ftp', fragment='f2'),\n r'ftp://a1#f2',\n id='pr2.fragment'\n ),\n # Template Syntax basic test cases\n pytest.param(\n pr(netloc='a1', path='p1_${path}'),\n pr(path='p2'),\n r'//a1/p1_p2',\n id='Template Syntax: pr1.path \"p1_${path}\"'\n ),\n pytest.param(\n pr(netloc='a1', path='p1_${params}'),\n pr(params='r2'),\n r'//a1/p1_r2',\n id='Template Syntax: pr1.path \"p1_${params}\"'\n ),\n pytest.param(\n pr(netloc='a1', path='p1_${query}'),\n pr(query='q2'),\n r'//a1/p1_q2',\n id='Template Syntax: pr1.path \"p1_${query}\"'\n ),\n pytest.param(\n pr(netloc='a1', path='p1_${fragment}'),\n pr(fragment='f2'),\n r'//a1/p1_f2',\n id='Template Syntax: pr1.path \"p1_${fragment}\"'\n ),\n pytest.param(\n pr(netloc='a1', params='r1_${path}'),\n pr(path='p2'),\n r'//a1/;r1_p2',\n id='Template Syntax: pr1.params \"r1_${path}\"'\n ),\n pytest.param(\n pr(netloc='a1', query='q1_${path}'),\n pr(path='p2'),\n r'//a1?q1_p2',\n id='Template Syntax: pr1.query \"q1_${path}\"'\n ),\n pytest.param(\n pr(netloc='a1', fragment='f1_${path}'),\n 
pr(path='p2'),\n r'//a1#f1_p2',\n id='Template Syntax: pr1.fragment \"f1_${path}\"'\n ),\n # Template Syntax complex test cases\n # consuming ${path}\n # XXX: these are the odd behaviors of current implementation\n pytest.param(\n pr(netloc='a1', query='q1_${path}', fragment='f1_${path}'),\n pr(path='p2'),\n r'//a1?q1_p2#f1_path',\n id='Template Syntax1: consume ${path}'\n ),\n pytest.param(\n pr(netloc='a1_${path}', query='q1_${path}', fragment='f1'),\n pr(path='p2'),\n r'//a1_p2?q1_path#f1',\n id='Template Syntax2: consume ${path}'\n ),\n pytest.param(\n pr(netloc='a1', params='prm1', query='q1_${path}', fragment='f1'),\n pr(path='p2', params='prm2'),\n r'//a1/;prm1;prm2?q1_p2#f1',\n id='Template Syntax3: consume ${path}'\n ),\n pytest.param(\n pr(netloc='a1', query='q1_${query}', fragment='f1_${query}'),\n pr(path='p2'),\n r'//a1?q1_#f1_query',\n id='Template Syntax4: consume ${query}'\n ),\n pytest.param(\n pr(netloc='a1_${query}', query='q1_${query}', fragment='f1'),\n pr(path='p2'),\n r'//a1_?q1_query#f1',\n id='Template Syntax5: consume ${query}'\n ),\n pytest.param(\n pr(netloc='a1', params='prm1', query='q1_${query}', fragment='f1'),\n pr(path='p2', params='prm2', query='q2'),\n r'//a1/;prm1;prm2?q1_q2#f1',\n id='Template Syntax6: consume ${query}'\n ),\n )\n )\n def test_combine_parseresult(self,\n pr1: ParseResult,\n pr2: ParseResult,\n expected: str):\n actual = RedirectHandler.combine_parseresult(pr1, pr2)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'mesg, end',\n (\n pytest.param('', None),\n pytest.param('', ''),\n pytest.param('A', None),\n pytest.param('B', ''),\n pytest.param('C', '\\n'),\n )\n )\n def test_print_debug(self,\n mesg: str,\n end: str):\n print_debug(mesg, end=end)\n\n @pytest.mark.parametrize(\n 'href, text, expected',\n (\n pytest.param('', None, ''),\n pytest.param('', '', ''),\n pytest.param('ABC', None, 'ABC'),\n pytest.param('ABC', '', ''),\n pytest.param('ABC', '123', '123'),\n pytest.param('<>', '<>', 
'\"><>'),\n )\n )\n def test_html_a(self,\n href,\n text,\n expected):\n actual = html_a(href, text)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'from_to, expected',\n (\n pytest.param(\n [('a', 'b',)], {'a': Re_Entry('a', 'b')},\n # TODO: add more!\n ),\n )\n )\n def test_load_redirects_fromto(self,\n from_to: FromTo_List,\n expected: Re_Entry_Dict):\n actual = RedirectsLoader.load_redirects_fromto(from_to)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'input_, expected',\n (\n # simply happy path\n pytest.param(\n {'a': Re_Entry('a', 'b')},\n {'a': Re_Entry('a', 'b')},\n ),\n # reserved path\n pytest.param(\n {REDIRECT_PATHS_NOT_ALLOWED[0]: Re_Entry(REDIRECT_PATHS_NOT_ALLOWED[0], 'b')},\n {},\n ),\n # encoding not allowed\n pytest.param(\n {'a': Re_Entry('a', r'混沌')},\n {},\n ),\n # encoding allowed in `to` field\n pytest.param(\n {r'混沌': Re_Entry(r'混沌', 'b')},\n {r'混沌': Re_Entry(r'混沌', 'b')},\n ),\n )\n )\n def test_clean_redirects(self,\n input_: Re_Entry_Dict,\n expected: Re_Entry_Dict):\n actual = RedirectsLoader.clean_redirects(input_)\n assert actual == expected\n\n\nIP = '127.0.0.3'\nPORT = 33797 # an unlikely port to be used\nENTRY_LIST = {'/a': ('b', USER, NOW)}\n\n\ndef port() -> int:\n \"\"\"\n Use a new port for each new RedirectServer instance.\n\n Some CI Services images tend to keep the port open after it's use. 
This\n means a new RedirectServer will raise\n OSError: [Errno 98] Address already in use\n This also implies it's difficult to search for an unused port because\n that would require testing if the port can be opened.\n This is good enough.\n \"\"\"\n global PORT\n PORT += 1\n return PORT\n\n\ndef new_redirect_handler(redirects: Re_Entry_Dict) \\\n -> RedirectHandler:\n return redirect_handler_factory(\n redirects,\n REDIRECT_CODE_DEFAULT,\n '/status',\n '/reload',\n htmls('')\n )\n\n\ndef shutdown_server_thread(redirect_server: RedirectServer, sleep: float = 4):\n\n # thread target\n def shutdown_do(redirect_server_, sleep_):\n time.sleep(sleep_)\n redirect_server_.shutdown()\n\n st = threading.Thread(\n name='pytest-shutdown_thread',\n target=shutdown_do,\n args=(redirect_server, sleep))\n st.start()\n return st\n\n\n# XXX: crude way to pass object from a thread back to main thread\nRequest_Thread_Return = None\n\nreq_count = 0\n\n\ndef request_thread(ip: str, port: int, url: str, method: str, wait: float):\n \"\"\"caller should `.join` on thread\"\"\"\n\n # thread target\n def request_do(ip_: str, port_: int, url_: str, method_: str, wait_: float):\n time.sleep(wait_)\n cl = client.HTTPConnection(ip_, port=port_, timeout=1)\n cl.request(method_, url_)\n global Request_Thread_Return\n Request_Thread_Return = cl.getresponse()\n\n global req_count\n req_count += 1\n rt = threading.Thread(\n name='pytest-request_thread-%d' % req_count,\n target=request_do,\n args=(ip, port, url, method, wait))\n rt.start()\n return rt\n\n\nclass Test_ClassesComplex(object):\n\n def test_RedirectServer_server_activate(self):\n with RedirectServer((IP, port()), new_redirect_handler(ENTRY_LIST)) as redirect_server:\n redirect_server.server_activate()\n\n @pytest.mark.timeout(5)\n def test_RedirectServer_serve_forever(self):\n with RedirectServer((IP, port()), new_redirect_handler(ENTRY_LIST)) as redirect_server:\n _ = shutdown_server_thread(redirect_server, 1)\n 
redirect_server.serve_forever(poll_interval=0.3) # blocks\n\n\nclass Test_LiveServer(object):\n \"\"\"run the entire server which will bind to a real IP + Port\"\"\"\n\n F302 = int(http.HTTPStatus.FOUND) # 302\n NF404 = int(http.HTTPStatus.NOT_FOUND) # 404\n R308 = int(REDIRECT_CODE_DEFAULT) # 308\n ERR501 = int(http.HTTPStatus.NOT_IMPLEMENTED) # 501\n\n URL = 'http://' + IP\n\n rd = {'/a': Re_Entry('/a', 'A',)}\n\n @pytest.mark.parametrize(\n 'ip, url, method, redirects, loe, hi, header',\n (\n #\n # broad checks\n #\n pytest.param(IP, URL, 'GET', {}, 200, 499, None, id='broad check GET empty'),\n pytest.param(IP, URL, 'HEAD', {}, 200, 499, None, id='broad check HEAD empty'),\n pytest.param(IP, URL + '/X', 'GET', rd, 200, 499, None, id='broad check /X GET'),\n pytest.param(IP, URL + '/X', 'HEAD', rd, 200, 499, None, id='broad check /X HEAD'),\n #\n # precise checks - typical use-cases\n #\n pytest.param(IP, URL + '/X', 'GET', rd, NF404, None, ('Location', None), id='GET Not Found'),\n pytest.param(IP, URL + '/X', 'HEAD', rd, NF404, None, ('Location', None), id='HEAD Not Found'),\n # the two happy-path Redirect Found cases\n pytest.param(IP, URL + '/a', 'GET', rd, R308, None, ('Location', 'A'), id='GET Found'),\n pytest.param(IP, URL + '/a', 'HEAD', rd, R308, None, ('Location', 'A'), id='HEAD Found'),\n # make sure empty and None redirects is handled\n pytest.param(IP, URL + '/a', 'GET', {}, NF404, None, ('Location', None), id='/a GET empty'),\n pytest.param(IP, URL + '/a', 'HEAD', {}, NF404, None, ('Location', None), id='/a HEAD empty'),\n #\n # make sure other HTTP methods do nothing\n #\n pytest.param(IP, URL, 'POST', {}, ERR501, None, None, id='POST empty'),\n pytest.param(IP, URL, 'PUT', {}, ERR501, None, None, id='PUT empty'),\n pytest.param(IP, URL, 'DELETE', {}, ERR501, None, None, id='DELETE empty'),\n pytest.param(IP, URL, 'OPTIONS', {}, ERR501, None, None, id='OPTIONS empty'),\n pytest.param(IP, URL, 'TRACE', {}, ERR501, None, None, id='TRACE empty'),\n 
pytest.param(IP, URL, 'PATCH', {}, ERR501, None, None, id='PATCH empty'),\n pytest.param(IP, URL + '/a', 'POST', rd, ERR501, None, None, id='POST /a'),\n pytest.param(IP, URL + '/a', 'PUT', rd, ERR501, None, None, id='PUT /a'),\n pytest.param(IP, URL + '/a', 'DELETE', rd, ERR501, None, None, id='DELETE /a'),\n pytest.param(IP, URL + '/a', 'OPTIONS', rd, ERR501, None, None, id='OPTIONS /a'),\n pytest.param(IP, URL + '/a', 'TRACE', rd, ERR501, None, None, id='TRACE /a'),\n pytest.param(IP, URL + '/a', 'PATCH', rd, ERR501, None, None, id='PATCH /a'),\n pytest.param(IP, URL + '/', 'POST', rd, ERR501, None, None, id='POST /'),\n pytest.param(IP, URL + '/.', 'POST', rd, ERR501, None, None, id='POST /.'),\n )\n )\n @pytest.mark.timeout(4)\n def test_requests(self,\n ip: str,\n url: str,\n method: str,\n redirects: typing.Optional[Re_Entry_Dict],\n loe: int, # low bound or equal (assertion)\n hi: typing.Optional[int], # high bound or None (assertion)\n header: typing.Optional[typing.Tuple[str, str]] # assertion\n ):\n port_ = port()\n with RedirectServer((ip, port_), new_redirect_handler(redirects)) as redirect_server:\n # XXX: crude synchronizations. Good enough for this test harness!\n wait = 0.5\n srv_uptime = wait + 0.5\n thr_wait = wait\n shutdown_server_thread(redirect_server, srv_uptime)\n rt = request_thread(ip, port_, url, method, wait)\n redirect_server.serve_forever(poll_interval=0.2) # blocks for srv_uptime until server is shutdown\n rt.join(thr_wait) # blocks for thr_wait until thread ends\n\n # assertions\n assert not rt.is_alive(), 'thread did not end within %s seconds' % thr_wait\n global Request_Thread_Return\n assert Request_Thread_Return is not None, 'the thread did not set the global Request_Thread_Return; unlucky time synch? 
did the thread crash?'\n rr = Request_Thread_Return\n Request_Thread_Return = None\n if hi is None and loe:\n assert loe == rr.code\n elif hi and loe:\n assert loe <= rr.code <= hi, \"ip=(%s) url=(%s) method=(%s)\" % (ip, url, method)\n if header:\n assert rr.getheader(header[0]) == header[1], \"getheaders: %s\" % rr.getheaders()\n","sub_path":"goto_http_redirect_server/test/test_goto_http_redirect_server.py","file_name":"test_goto_http_redirect_server.py","file_ext":"py","file_size_in_byte":31268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104595223","text":"# 1\nnumberList = [5, 1, 8, 92, 7, 30]\neven_number = [num for num in numberList if num % 2 == 0] \nprint(\"Even numbers in the list: \", even_number) \n\n# 2\nuserList = input(\"Enter a list of numbers, serperated by ',':\")\nuserList = userList.split(',')\ntotal = 0\nfor i in userList:\n total += int(i)\n\nprint(\"Sum of all entered numbers:\", total)","sub_path":"1st/part 4.py","file_name":"part 4.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"46830610","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 3 10:57:50 2019\n\n@author: ngritti\n\"\"\"\nfrom PyQt5.QtCore import Qt, QUrl\nfrom PyQt5.QtWidgets import (QApplication, QComboBox, QVBoxLayout, QDialog,\n QGridLayout, QGroupBox, QLabel, QLineEdit, QPushButton,\n QFileDialog, QMessageBox, QTabWidget, QWidget,\n QTableWidget, QTableWidgetItem, QSpinBox, QDoubleSpinBox,QCheckBox,\n QSplitter, QTreeView, QListView, QFileSystemModel, QAbstractItemView)\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport numpy as np\nimport sys, warnings, os, time\nfrom skimage.io import imread, imsave\nimport scipy.ndimage as ndi\nfrom collections.abc import Iterable\n\nfrom morgana.GUIs import manualmask\nfrom morgana.GUIs import 
inspection\nfrom morgana.GUIs import visualize0d\nfrom morgana.GUIs import visualize1d\nfrom morgana.GUIs import visualize2d\nfrom morgana.MLModel import io as ioML\nfrom morgana.MLModel import train\nfrom morgana.MLModel import predict\nfrom morgana.MLModel import overview as overviewML\nfrom morgana.DatasetTools import io as ioDT\nfrom morgana.DatasetTools.morphology import overview as overviewDT\nfrom morgana.DatasetTools import arrangemorphodata\nfrom morgana.DatasetTools import arrangefluodata\nfrom morgana.ImageTools.objectsparsing import objectsparser\nwarnings.filterwarnings(\"ignore\")\n\n\nclass morganaApp(QWidget):\n def __init__(self, parent=None):\n super(morganaApp, self).__init__(parent)\n\n self.modelFolder = '-'\n self.imageFolder = '-'\n self.imageImportFolder = '-'\n self.maskFolder = '-'\n self.classifier = None\n self.scaler = None\n self.params = { 'sigmas': [1,2,5,15],\n 'down_shape': 0.25,\n 'edge_size': 2,\n 'fraction': 0.5,\n 'bias': 0.5,\n 'feature_mode': 'ilastik' }\n\n tabs = QTabWidget()\n self.maskTab = self.createMaskTab()\n tabs.addTab(self.maskTab,'Generate or Import Masks')\n\n self.quantificationTab = self.createQuantificationTab()\n tabs.addTab(self.quantificationTab,'Quantification')\n\n ### defined handler for subwindows\n self.inspector = None\n self.quantifier = []\n\n ####################################################################################################\n '''\n TESTS WITHOUT CLICKING\n '''\n\n ####################################################################################################\n\n self.layout = QVBoxLayout(self)\n self.layout.addWidget(tabs)\n self.setLayout(self.layout)\n\n self.setWindowTitle('Organoids Segmentation App')\n QApplication.setStyle('Fusion')\n\n\n\n '''\n MASK TAB\n '''\n def createMaskTab(self): \n mainTab = QWidget()\n self.createModelGroup()\n self.createImportGroup()\n \n self.isMask = QCheckBox(\"Import external masks\")\n self.isMask.toggle()\n 
self.isMask.stateChanged.connect(self.changeMaskGroup)\n self.isMask.setChecked(False)\n \n mainTabLayout = QVBoxLayout() \n mainTabLayout.addWidget(self.isMask)\n mainTabLayout.addWidget(self.modelGroup)\n mainTabLayout.addWidget(self.importGroup)\n mainTab.setLayout(mainTabLayout)\n return mainTab\n \n def changeMaskGroup(self):\n if self.isMask.isChecked():\n self.modelGroup.hide()\n self.importGroup.show()\n else:\n self.importGroup.hide()\n self.modelGroup.show()\n\n \n '''\n Generating model and generation of masks\n '''\n def createModelGroup(self):\n self.modelGroup = QGroupBox(\"\")\n\n \n ########## create buttons for model definition group ##############\n self.modelDefGroup = QGroupBox(\"Machine Learning model definition\")\n\n selectModel = QPushButton(\"Specify model folder\")\n selectModel.setFocusPolicy(Qt.NoFocus)\n selectModel.clicked.connect( self.selectModelFolder )\n self.modelFolderSpace = QLineEdit(); self.modelFolderSpace.setText(self.modelFolder)\n self.modelFolderSpace.setReadOnly(True)\n self.modelFolderSpace.setStyleSheet('color:gray;')\n self.deepModel = QCheckBox(\"Use Multi Layer Perceptrons\")\n self.deepModel.setChecked(False)\n\n self.showMoreButton = QPushButton(\"Show/Hide params\")\n self.showMoreButton.setFocusPolicy(Qt.NoFocus)\n self.showMoreButton.clicked.connect(self.show_hide)\n\n self.sigmasLabel = QLabel('Sigmas:')\n self.sigmasSpace = QLineEdit(); self.sigmasSpace.setText(\"-\")\n self.sigmasSpace.setEnabled(False)\n self.down_shapeLabel = QLabel('Downscaling:')\n self.down_shapeSpace = QDoubleSpinBox(); self.down_shapeSpace.setSpecialValueText(\"-\")\n self.down_shapeSpace.setMinimum(-1); self.down_shapeSpace.setMaximum(1); self.down_shapeSpace.setSingleStep(.01);\n self.down_shapeSpace.setEnabled(False)\n self.edge_sizeLabel = QLabel('Edge size:')\n self.edge_sizeSpace = QSpinBox(); self.edge_sizeSpace.setSpecialValueText(\"-\")\n self.edge_sizeSpace.setMinimum(0);\n self.edge_sizeSpace.setEnabled(False)\n 
self.fractionLabel = QLabel('Pixel% extraction:')\n self.fractionSpace = QDoubleSpinBox(); self.fractionSpace.setSpecialValueText(\"-\")\n self.fractionSpace.setMinimum(0); self.fractionSpace.setMaximum(1); self.fractionSpace.setSingleStep(.1);\n self.fractionSpace.setEnabled(False)\n self.biasLabel = QLabel('Extraction bias:')\n self.biasSpace = QDoubleSpinBox(); self.biasSpace.setSpecialValueText(\"-\")\n self.biasSpace.setMinimum(0); self.biasSpace.setMaximum(1); self.biasSpace.setSingleStep(.1);\n self.biasSpace.setEnabled(False)\n self.featuresLabel = QLabel('Features:')\n self.feature_modeSpace = QComboBox();\n self.feature_modeSpace.addItems(['-','daisy','ilastik']);\n self.feature_modeSpace.setCurrentIndex(0)\n self.feature_modeSpace.setEnabled(False)\n\n self.trainButton = QPushButton(\"Train model\")\n self.trainButton.setEnabled(False)\n self.trainButton.setFocusPolicy(Qt.NoFocus)\n self.trainButton.clicked.connect(self.trainModel)\n\n ########## create buttons for model application group ##############\n self.predictionGroup = QGroupBox(\"Machine Learning model application\")\n\n selectFolder = QPushButton(\"Specify image folder\")\n selectFolder.setFocusPolicy(Qt.NoFocus)\n selectFolder.clicked.connect( self.selectImageFolder )\n self.imageFolderSpace = QLineEdit(); self.imageFolderSpace.setText(self.imageFolder)\n self.imageFolderSpace.setReadOnly(True)\n self.imageFolderSpace.setStyleSheet('color:gray;')\n\n self.predictButton = QPushButton(\"Generate masks\")\n self.predictButton.setFocusPolicy(Qt.NoFocus)\n self.predictButton.clicked.connect(self.predict)\n self.predictButton.setEnabled(False)\n\n self.recapButton = QPushButton(\"Save overview image of masks\")\n self.recapButton.setFocusPolicy(Qt.NoFocus)\n self.recapButton.clicked.connect(self.makeRecap)\n self.recapButton.setEnabled(False)\n\n self.inspectButton = QPushButton(\"Inspect masks\")\n self.inspectButton.setFocusPolicy(Qt.NoFocus)\n 
self.inspectButton.clicked.connect(self.openInspectionWindow)\n self.inspectButton.setEnabled(False)\n\n ######### create layout for model definition group ########\n layout = QGridLayout()\n\n # layout.addWidget(self.welcomeText, 0,0,1,2)\n layout.addWidget(selectModel, 1,0,1,2)\n layout.addWidget(QLabel('Model folder:'), 2,0,1,1)\n layout.addWidget(self.modelFolderSpace, 2,1,1,1)\n layout.addWidget(self.deepModel, 3,0,1,1)\n\n layout.addWidget(self.showMoreButton, 4,0,1,1)\n layout.addWidget(self.trainButton, 4,1,1,1)\n layout.addWidget(self.sigmasLabel, 5,0,1,1)\n layout.addWidget(self.sigmasSpace, 5,1,1,1)\n layout.addWidget(self.down_shapeLabel, 6,0,1,1)\n layout.addWidget(self.down_shapeSpace, 6,1,1,1)\n layout.addWidget(self.edge_sizeLabel, 7,0,1,1)\n layout.addWidget(self.edge_sizeSpace, 7,1,1,1)\n layout.addWidget(self.fractionLabel, 8,0,1,1)\n layout.addWidget(self.fractionSpace, 8,1,1,1)\n layout.addWidget(self.biasLabel, 9,0,1,1)\n layout.addWidget(self.biasSpace, 9,1,1,1)\n layout.addWidget(self.featuresLabel, 10,0,1,1)\n layout.addWidget(self.feature_modeSpace, 10,1,1,1)\n\n self.modelDefGroup.setLayout(layout)\n\n ######### create layout for model application group ########\n layout = QGridLayout()\n\n layout.addWidget(selectFolder, 13,0,1,2)\n layout.addWidget(QLabel('Image folder:'), 14,0,1,1)\n layout.addWidget(self.imageFolderSpace, 14,1,1,1)\n layout.addWidget(self.predictButton, 15,0,1,2)\n layout.addWidget(self.recapButton, 16,0,1,2)\n layout.addWidget(self.inspectButton, 17,0,1,2)\n\n self.predictionGroup.setLayout(layout)\n\n ##################################################################\n layout = QVBoxLayout()\n\n layout.addWidget(self.modelDefGroup)\n layout.addWidget(self.predictionGroup)\n \n self.sigmasLabel.hide()\n self.sigmasSpace.hide()\n self.down_shapeLabel.hide()\n self.down_shapeSpace.hide()\n self.edge_sizeLabel.hide()\n self.edge_sizeSpace.hide()\n self.fractionLabel.hide()\n self.fractionSpace.hide()\n 
self.biasLabel.hide()\n self.biasSpace.hide()\n self.featuresLabel.hide()\n self.feature_modeSpace.hide()\n self.showMoreModel = False\n\n self.modelGroup.setLayout(layout)\n\n def show_hide(self):\n if self.showMoreModel:\n self.sigmasLabel.hide()\n self.sigmasSpace.hide()\n self.down_shapeLabel.hide()\n self.down_shapeSpace.hide()\n self.edge_sizeLabel.hide()\n self.edge_sizeSpace.hide()\n self.fractionLabel.hide()\n self.fractionSpace.hide()\n self.biasLabel.hide()\n self.biasSpace.hide()\n self.featuresLabel.hide()\n self.feature_modeSpace.hide()\n self.showMoreModel = False\n else:\n self.sigmasLabel.show()\n self.sigmasSpace.show()\n self.down_shapeLabel.show()\n self.down_shapeSpace.show()\n self.edge_sizeLabel.show()\n self.edge_sizeSpace.show()\n self.fractionLabel.show()\n self.fractionSpace.show()\n self.biasLabel.show()\n self.biasSpace.show()\n self.featuresLabel.show()\n self.feature_modeSpace.show()\n self.showMoreModel = True\n\n def selectModelFolder(self):\n self.modelFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Model\")\n\n # check if a trainingset is present\n # a trainingset needs to exist for every model, even if the model is already trained.\n trainingset_folder = os.path.join(self.modelFolder,'trainingset')\n if os.path.exists(trainingset_folder):\n flist_in = ioDT.get_image_list(trainingset_folder, string_filter='_GT', mode_filter='exclude')\n flist_in.sort()\n flist_gt = ioDT.get_image_list(trainingset_folder, string_filter='_GT', mode_filter='include')\n flist_gt.sort()\n\n if len(flist_in) == 0:\n QMessageBox.warning(self,'Warning, no trainingset!','Selected \"'+self.modelFolder+'\" but no trainingset *data* detected. 
Transfer some images in the \"trainingset\" folder.')\n self.modelFolder = '-'\n return\n if len(flist_in)!=len(flist_gt):\n QMessageBox.warning(self,'Warning, trainingset incomplete!','Selected \"'+self.modelFolder+'\" but not all masks have been created.\\nPlease provide manually annotated masks.')\n for f in flist_in:\n fn,ext = os.path.splitext(f)\n mask_name = fn+'_GT'+ext\n if not os.path.exists(mask_name):\n m = manualmask.makeManualMask(f, subfolder='',fn=fn+'_GT'+ext)\n # m.setModal(True)\n m.show()\n m.exec()\n # self.modelFolder = '-'\n # return\n else:\n QMessageBox.warning(self,'Warning, no trainingset!','Selected \"'+self.modelFolder+'\" but no \"trainingset\" folder detected.')\n self.modelFolder = '-'\n return\n # check if the model is already trained.\n # if not, only allow training button\n model_file = os.path.join(self.modelFolder,'scaler.pkl')\n if not os.path.exists(model_file):\n QMessageBox.warning(self,'Warning, train model!','Train the model before loading!\\nSetting default parameters...')\n else:\n self.loadModel()\n if self.classifier is None:\n return\n self.predictButton.setEnabled(True)\n self.recapButton.setEnabled(True)\n self.inspectButton.setEnabled(True)\n \n self.modelFolderSpace.setText(self.modelFolder)\n self.set_params()\n self.sigmasSpace.setEnabled(True)\n self.down_shapeSpace.setEnabled(True)\n self.edge_sizeSpace.setEnabled(True)\n self.fractionSpace.setEnabled(True)\n self.biasSpace.setEnabled(True)\n self.feature_modeSpace.setEnabled(True)\n self.trainButton.setEnabled(True)\n\n def set_params(self):\n self.sigmasSpace.setText(str(self.params['sigmas']))\n self.down_shapeSpace.setValue(self.params['down_shape'])\n self.edge_sizeSpace.setValue(self.params['edge_size'])\n self.fractionSpace.setValue(self.params['fraction'])\n self.biasSpace.setValue(self.params['bias'])\n self.feature_modeSpace.setCurrentIndex(['-','daisy','ilastik'].index(self.params['feature_mode']))\n 
self.feature_modeSpace.model().item(0).setEnabled(False)\n\n def read_and_check_params(self):\n s_str = self.sigmasSpace.text().replace(' ','').replace('[','').replace(']','')\n if s_str[-1]==',': s_str = s_str[:-1]\n self.params['sigmas'] = []\n for x in s_str.split(','):\n try:\n self.params['sigmas'].append(float(x))\n except:\n self.params['sigmas'].append(x)\n self.params['down_shape'] = self.down_shapeSpace.value()\n self.params['edge_size'] = self.edge_sizeSpace.value()\n self.params['fraction'] = self.fractionSpace.value()\n self.params['bias'] = self.biasSpace.value()\n self.params['feature_mode'] = self.feature_modeSpace.currentText()\n if not all(isinstance(x, float) for x in self.params['sigmas']):\n QMessageBox.warning(self,'Warning, values of sigmas not valid!','It seems there is at least one sigma that is not a number:\\n'+str(self.params['sigmas']))\n \n def trainModel(self, archBox):\n self.read_and_check_params()\n\n #############################################\n # load images to be used as training set\n #############################################\n training_folder = os.path.join(self.modelFolder,'trainingset')\n flist_in = ioDT.get_image_list(training_folder, string_filter='_GT', mode_filter='exclude')\n img_train = []\n for f in flist_in:\n img = imread(f)\n if len(img.shape) == 2:\n img = np.expand_dims(img,0)\n if img.shape[-1] == np.min(img.shape):\n img = np.moveaxis(img, -1, 0)\n img_train.append( img[0] )\n # img_train = np.array(img_train)\n\n flist_gt = ioDT.get_image_list(training_folder, string_filter='_GT', mode_filter='include')\n gt_train = [ imread(f) for f in flist_gt ]\n gt_train = [ g.astype(int) for g in gt_train ]\n\n print('##### Training set:')\n for i,f in enumerate(zip(flist_in,flist_gt)):\n print(i+1,'\\t', os.path.split(f[0])[-1],'\\t', os.path.split(f[1])[-1])\n\n #############################################\n # compute features and generate training set and weights\n 
#############################################\n\n print('##### Generating training set...')\n X, Y, w, self.scaler = train.generate_training_set( img_train, \n [g.astype(np.uint8) for g in gt_train], \n sigmas=self.params['sigmas'],\n down_shape=self.params['down_shape'],\n edge_size=self.params['edge_size'],\n fraction=self.params['fraction'],\n feature_mode=self.params['feature_mode'],\n bias=self.params['bias'] )\n\n #############################################\n # Train the model\n #############################################\n\n print('##### Training model...')\n start = time.time()\n self.classifier = train.train_classifier( X, Y, w, deep=self.deepModel.isChecked(), hidden=(350, 50) )\n print('Models trained in %.3f seconds.'%(time.time()-start))\n # print('classes_: ', self.classifier.classes_)\n # print('coef_: ', self.classifier.coef_)\n\n #############################################\n # Save the model\n #############################################\n\n ioML.save_model( self.modelFolder,\n self.classifier,\n self.scaler,\n sigmas=self.params['sigmas'],\n down_shape=self.params['down_shape'],\n edge_size=self.params['edge_size'],\n fraction=self.params['fraction'],\n feature_mode=self.params['feature_mode'],\n bias=self.params['bias'], deep=self.deepModel.isChecked() )\n print('##### Model saved!')\n self.predictButton.setEnabled(True)\n\n def loadModel(self):\n #############################################\n # load parameters and classifier\n #############################################\n print('##### Loading classifier model and parameters...')\n self.classifier, self.scaler, self.params = ioML.load_model( self.modelFolder, deep=self.deepModel.isChecked() )\n if self.classifier is None:\n QMessageBox.warning(self,'Warning!','Could not find any model')\n else:\n print('Success! 
Model loaded!')\n\n def selectImageFolder(self):\n self.imageFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Model\",\n \"C:\\\\Users\\\\nicol\\\\Desktop\\\\dmso\")\n if self.imageFolder == '':\n self.imageFolder = '-'\n return\n\n self.imageFolderSpace.setText(self.imageFolder)\n self.recapButton.setEnabled(True)\n self.inspectButton.setEnabled(True)\n self.maskFolderSpace.setText(self.imageFolder)\n # self.inspectButtonTL.setEnabled(True)\n\n def predict(self):\n #############################################\n # apply classifiers and save images\n #############################################\n\n result_folder = os.path.join(self.imageFolder,'result_segmentation')\n if not os.path.exists(result_folder):\n os.mkdir(result_folder)\n\n flist_in = ioDT.get_image_list(self.imageFolder)\n flist_in.sort()\n\n for f_in in flist_in:\n\n print('#'*20+'\\nLoading',f_in,'...')\n img = imread(f_in)\n if len(img.shape) == 2:\n img = np.expand_dims(img,0)\n if img.shape[-1] == np.min(img.shape):\n img = np.moveaxis(img, -1, 0)\n img = img[0]\n\n print('Predicting image...')\n pred, prob = predict.predict_image( img,\n self.classifier,\n self.scaler,\n sigmas=self.params['sigmas'],\n new_shape_scale=self.params['down_shape'],\n feature_mode=self.params['feature_mode'],\n deep=self.deepModel.isChecked() )\n\n # remove objects at the border\n negative = ndi.binary_fill_holes(pred==0)\n mask_pred = (pred==1)*negative\n edge_prob = ((2**16-1)*prob[2]).astype(np.uint16)\n mask_pred = mask_pred.astype(np.uint8)\n\n # save mask\n parent, filename = os.path.split(f_in)\n filename, file_extension = os.path.splitext(filename)\n new_name = os.path.join(parent,'result_segmentation',filename+'_classifier'+file_extension)\n imsave(new_name, pred, check_contrast=False)\n\n # perform watershed\n mask_final = predict.make_watershed( mask_pred,\n edge_prob,\n new_shape_scale=self.params['down_shape'] )\n\n # save final mask\n parent, filename = os.path.split(f_in)\n 
filename, file_extension = os.path.splitext(filename)\n new_name = os.path.join(parent,'result_segmentation',filename+'_watershed'+file_extension)\n imsave(new_name, mask_final, check_contrast=False)\n\n print('All images done!')\n\n def makeRecap(self):\n name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')\n if name != '':\n overviewML.generate_overview(self.imageFolder, saveFig=True, fileName=name, downshape=5)\n\n def openInspectionWindow(self):\n self.inspector = inspection.inspectionWindow_20max(self.imageFolder, parent=None, start=0, stop=20)\n self.inspector.show()\n\n def selectMaskFolder(self):\n self.maskFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Masks\",\n \"C:\\\\Users\\\\nicol\\\\Desktop\\\\dmso\")\n if self.maskFolder == '':\n self.maskFolder = self.imageFolder\n return\n\n self.maskFolderSpace.setText(self.maskFolder)\n\n\n '''\n Import masks if user has already created them\n '''\n\n def createImportGroup(self):\n self.importGroup = QGroupBox(\"\")\n \n ########## create buttons for import masks and images group ##############\n self.importGroup1 = QGroupBox(\"If masks are already present, import files.\")\n\n\n # self.instruct2 = QLabel('If masks are already generated, \\nselect image and mask folder here.') \n \n selectFolder = QPushButton(\"Specify image folder\")\n selectFolder.setFocusPolicy(Qt.NoFocus)\n selectFolder.clicked.connect( self.selectImportImageFolder )\n self.imageImportFolderSpace = QLineEdit()\n self.imageImportFolderSpace.setText(self.imageImportFolder)\n self.imageImportFolderSpace.setReadOnly(True)\n self.imageImportFolderSpace.setStyleSheet('color:gray;')\n\n selectMaskFolder = QPushButton(\"Specify mask folder\")\n selectMaskFolder.setFocusPolicy(Qt.NoFocus)\n selectMaskFolder.clicked.connect( self.selectMaskFolder )\n self.maskFolderSpace = QLineEdit(); self.maskFolderSpace.setText(self.maskFolder)\n self.maskFolderSpace.setReadOnly(True)\n 
self.maskFolderSpace.setStyleSheet('color:gray;')\n \n self.maskLabel = QLabel('File identifier of masks:')\n self.maskSpace = QLineEdit(); self.maskSpace.setText(\"\")\n\n self.isBorder = QCheckBox(\"Include objects at border of images\")\n self.isBorder.setChecked(False)\n \n\n self.importGroup2 = QGroupBox(\"\")\n\n self.importButton = QPushButton(\"Import Masks and Images\")\n self.trainButton.setFocusPolicy(Qt.NoFocus)\n self.importButton.clicked.connect(self.importImageMask)\n\n layout = QGridLayout()\n # layout.addWidget(self.instruct2, 0,0,1,2)\n layout.addWidget(selectFolder, 1,0,1,2)\n layout.addWidget(QLabel('Image folder:'), 2,0,1,1)\n layout.addWidget(self.imageImportFolderSpace,2,1,1,1)\n\n layout.addWidget(selectMaskFolder, 3,0,1,2)\n layout.addWidget(QLabel('Masks folder:'), 4,0,1,1)\n layout.addWidget(self.maskFolderSpace, 4,1,1,1)\n \n layout.addWidget(self.maskLabel, 5,0,1,1)\n layout.addWidget(self.maskSpace, 5,1,1,1)\n \n layout.addWidget(self.isBorder, 6,0,1,2)\n self.importGroup1.setLayout(layout)\n\n layout = QGridLayout()\n layout.addWidget(self.importButton, 0,0,1,2)\n self.importGroup2.setLayout(layout)\n\n layout = QVBoxLayout()\n layout.addWidget(self.importGroup1)\n layout.addWidget(self.importGroup2)\n self.importGroup.setLayout(layout)\n\n\n def selectImportImageFolder(self):\n self.imageImportFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Model\",\n \"C:\\\\Users\\\\nicol\\\\Desktop\\\\dmso\")\n if self.imageImportFolder == '':\n self.imageImportFolder = '-'\n return\n\n self.imageImportFolderSpace.setText(self.imageImportFolder)\n self.maskFolderSpace.setText(self.imageImportFolder)\n\n def importImageMask(self):\n objectsparser.parsing_images(self.imageImportFolder, \\\n self.maskFolder, self.maskSpace.text(), self.isBorder.isChecked())\n\n\n '''\n QUANTIFICATION TAB\n '''\n def createQuantificationTab(self):\n self.groups = []\n\n mainTab = QWidget()\n self.createGroup1()\n self.createGroup2()\n splitter 
= QSplitter(Qt.Vertical)\n splitter.addWidget(self.group1)\n splitter.addWidget(self.group2)\n \n mainTabLayout = QVBoxLayout() \n mainTabLayout.addWidget(splitter)\n mainTab.setLayout(mainTabLayout) \n return mainTab\n\n def group_checked(self, state, group):\n chs = []\n for ch in group.findChildren(QLabel):\n chs.append(ch)\n for ch in group.findChildren(QSpinBox):\n chs.append(ch)\n for ch in group.findChildren(QComboBox):\n chs.append(ch)\n for ch in group.findChildren(QPushButton):\n chs.append(ch)\n for ch in group.findChildren(QCheckBox):\n chs.append(ch) \n\n if not state:\n for ch in chs:\n ch.setVisible(False)\n else:\n for ch in chs:\n ch.setVisible(True)\n\n def createGroup1(self):\n self.group1 = QGroupBox(\"Groups\")\n self.group1.setCheckable(True)\n self.group1.toggled.connect(lambda state, x=self.group1: self.group_checked(state, x))\n\n self.tabs = QTabWidget()\n self.tabs.setTabsClosable(True)\n\n self.tabs.tabCloseRequested.connect(self.removeGroup)\n\n self.AddTabButton = QPushButton(\"Add New Group\")\n self.AddTabButton.clicked.connect(self.addGroup)\n self.addGroup()\n\n layout = QVBoxLayout()\n layout.addWidget(self.AddTabButton)\n layout.addWidget(self.tabs)\n self.group1.setLayout(layout)\n\n def addGroup(self):\n class FileDialog(QFileDialog):\n def __init__(self, *args):\n QFileDialog.__init__(self, *args)\n self.setOption(self.DontUseNativeDialog, True)\n self.setFileMode(self.DirectoryOnly)\n\n for view in self.findChildren((QListView, QTreeView)):\n if isinstance(view.model(), QFileSystemModel):\n view.setSelectionMode(QAbstractItemView.ExtendedSelection)\n\n class MyTable(QTableWidget):\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Delete:\n row = self.currentRow()\n self.removeRow(row)\n else:\n super().keyPressEvent(event)\n\n def addDataset():\n dialog = FileDialog()\n if dialog.exec_() == QDialog.Accepted:\n datasets = dialog.selectedFiles()\n else:\n return\n # print(dialog.selectedFiles())\n\n # dataset = 
QFileDialog.getExistingDirectory(self, \"Select dataset\")\n for dataset in datasets:\n if dataset!='':\n table = self.tabs.widget(self.tabs.currentIndex()).children()[1]\n rowPosition = table.rowCount()\n table.insertRow(rowPosition)\n table.setItem(rowPosition,0,QTableWidgetItem(dataset))\n\n newTab = QWidget()\n\n table = MyTable()\n table.insertColumn(0)\n selectFolder = QPushButton(\"Select new dataset\")\n selectFolder.clicked.connect(addDataset)\n\n tablayout = QGridLayout()\n tablayout.addWidget(table,0,0,1,2)\n tablayout.addWidget(selectFolder,1,0,1,2)\n newTab.setLayout(tablayout)\n\n # n = self.tabs.tabText(self.tabs.count()-1)\n # \n self.tabs.addTab(newTab, 'Group '+str(self.tabs.count()+1))\n\n # print(self.tabs.widget(self.tabs.count()-1).children())\n\n # return tab\n \n def removeGroup(self,index):\n self.tabs.removeTab(index)\n\n def selectAllButtonClicked(self):\n if self.selectAllButton.isChecked():\n self.morphoType.setEnabled(False)\n # self.maskType.setEnabled(False)\n else:\n self.morphoType.setEnabled(True)\n # self.maskType.setEnabled(True)\n\n def createGroup2(self):\n self.group2 = QGroupBox(\"\")\n\n self.isTimelapse = QCheckBox(\"Timelapse data\")\n self.isTimelapse.setChecked(False)\n\n def buildGroupVis():\n group = QGroupBox(\"Visualization functions\")\n group.setCheckable(True)\n group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n group.setChecked(False)\n\n compositeButton = QPushButton(\"Create overview composite\")\n compositeButton.clicked.connect(self.createCompositeOverviewAll)\n\n meshgridButton = QPushButton(\"Create meshgrid overview\")\n meshgridButton.clicked.connect(self.createMeshgridOverviewAll)\n\n layout = QVBoxLayout()\n layout.addWidget(compositeButton)\n layout.addWidget(meshgridButton)\n group.setLayout(layout)\n self.group_checked(False, group)\n\n return group\n\n def buildGroupMorpho():\n group = QGroupBox(\"Morphology quantification\")\n group.setCheckable(True)\n 
group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n group.setChecked(False)\n\n self.maskType = QComboBox()\n self.maskType.addItem(\"Unprocessed\")\n self.maskType.addItem(\"Straightened\")\n\n self.morphoKeys = [\n 'area',\n 'eccentricity',\n 'major_axis_length',\n 'minor_axis_length',\n 'equivalent_diameter',\n 'perimeter',\n 'euler_number',\n 'extent',\n 'form_factor',\n # 'inertia_tensor',\n # 'inertia_tensor_eigvals',\n # 'moments',\n # 'moments_central',\n # 'moments_hu',\n # 'moments_normalized',\n 'orientation',\n 'locoefa_coeff'\n ]\n self.datamorphotype = [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n # 0,\n # 0,\n # 0,\n # 0,\n # 0,\n # 0,\n 0,\n 1,\n ]\n self.morphoType = QComboBox()\n for key in self.morphoKeys:\n self.morphoType.addItem(key)\n\n self.selectAllButton = QCheckBox(\"Use all parameters\")\n self.selectAllButton.clicked.connect(self.selectAllButtonClicked)\n\n morphologyButton = QPushButton(\"Visualize Morphological Parameter(s)\")\n morphologyButton.clicked.connect(self.createMorphologyPlot)\n\n layout = QGridLayout()\n layout.addWidget(QLabel(\"Type of mask:\"), 1,0,1,1)\n layout.addWidget(self.maskType, 1,1,1,1)\n layout.addWidget(QLabel(\"Morphological parameter\"), 2,0,1,1)\n layout.addWidget(self.morphoType, 2,1,1,1)\n layout.addWidget(self.selectAllButton, 3,0,1,2)\n layout.addWidget(morphologyButton, 4,0,1,2)\n group.setLayout(layout)\n self.group_checked(False, group)\n\n return group\n\n def buildGroupFluo():\n group = QGroupBox(\"Fluorescence quantification\")\n group.setCheckable(True)\n group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n group.setChecked(False)\n\n self.fluorescenceChannel = QSpinBox()\n self.fluorescenceChannel.setRange(0,100)\n self.fluorescenceChannel.setAlignment(Qt.AlignRight)\n\n self.spatialType = QComboBox()\n self.spatialType.addItem('Average')\n self.spatialType.addItem('Antero-Posterior profile')\n self.spatialType.addItem('Left-Right profile')\n 
self.spatialType.addItem('Radial profile')\n self.spatialType.addItem('Angular profile')\n\n computeButton = QPushButton(\"Compute graph\")\n computeButton.clicked.connect(self.createFluoGraph)\n\n layout = QGridLayout()\n layout.addWidget(QLabel(\"Fluorescence channel:\"), 0,0,1,1)\n layout.addWidget(self.fluorescenceChannel, 0,1,1,1)\n layout.addWidget(QLabel(\"Spatial profile type:\"), 2,0,1,1)\n layout.addWidget(self.spatialType, 2,1,1,1)\n layout.addWidget(computeButton, 3,0,1,2)\n group.setLayout(layout)\n self.group_checked(False, group)\n\n return group\n\n # def buildGroupSpots():\n # group = QGroupBox(\"Spots quantification\")\n # group.setCheckable(True)\n # group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n # group.setChecked(False)\n\n # self.spotsFluorescenceChannel = QSpinBox()\n # self.spotsFluorescenceChannel.setRange(0,100)\n # self.spotsFluorescenceChannel.setAlignment(Qt.AlignRight)\n\n # self.spotsSpatialType = QComboBox()\n # self.spotsSpatialType.addItem('Average')\n # self.spotsSpatialType.addItem('Antero-Posterior profile')\n # self.spotsSpatialType.addItem('Left-Right profile')\n # self.spotsSpatialType.addItem('Radial profile')\n # self.spotsSpatialType.addItem('Angular profile')\n\n # self.spotsCountRadio = QPushButton(\"Spot count\")\n # self.spotsCountRadio.clicked.connect(self.makeSpotCountPlot)\n\n # # # self.spotsPositionRadio = QCheckBox(\"Position\")\n # # self.spotsAreaRadio = QCheckBox(\"Area\")\n # # self.spotaPerimeterRadio = QCheckBox(\"Perimeter\")\n # # self.spotsMajorAxisRadio = QCheckBox('Major axis')\n # # self.spotsMinorAxisRadio = QCheckBox('Minor Axis')\n # # self.spotsEccetricityRadio = QCheckBox('Eccentricity')\n # # self.spotsEftRadio = QCheckBox('Elliptical Fourier Transform')\n # # self.spotsOrientationRadio = QCheckBox('Orientation')\n # # self.spotsFluoRadio = QCheckBox('Fluorescence intensity')\n\n # # spotsButton = QPushButton(\"Compute graph\")\n # # 
spotsButton.clicked.connect(self.createSpotsGraphAll)\n\n # layout = QGridLayout()\n # layout.addWidget(QLabel('Fluorescence channel:'), 0,0,1,1)\n # layout.addWidget(self.spotsFluorescenceChannel, 0,1,1,1)\n # layout.addWidget(QLabel('Spatial profile type:'), 1,0,1,1)\n # layout.addWidget(self.spotsSpatialType, 1,1,1,1)\n # layout.addWidget(self.spotsCountRadio, 2,0,1,2)\n # # layout.addWidget(self.spotsAreaRadio, 3,0,1,1)\n # # layout.addWidget(self.spotaPerimeterRadio, 3,1,1,1)\n # # layout.addWidget(self.spotsMajorAxisRadio, 4,0,1,1)\n # # layout.addWidget(self.spotsMinorAxisRadio, 4,1,1,1)\n # # layout.addWidget(self.spotsEftRadio, 5,0,1,1)\n # # layout.addWidget(self.spotsOrientationRadio,5,1,1,1)\n # # layout.addWidget(self.spotsFluoRadio, 6,0,1,1)\n # # layout.addWidget(spotsButton, 7,0,1,2)\n # group.setLayout(layout)\n # self.group_checked(False, group)\n\n # return group\n\n groupVis = buildGroupVis()\n groupMorpho = buildGroupMorpho()\n groupFluo = buildGroupFluo()\n # groupSpots = buildGroupSpots()\n \n layout = QGridLayout()\n layout.addWidget(self.isTimelapse, 0,0,1,1)\n layout.addWidget(groupVis, 2,0,1,2)\n layout.addWidget(groupMorpho, 3,0,1,2)\n layout.addWidget(groupFluo, 4,0,1,2)\n # layout.addWidget(groupSpots, 5,0,1,2)\n self.group2.setLayout(layout)\n\n def createCompositeOverviewAll(self):\n\n # for every group\n folders = []\n for i in range(self.tabs.count()):\n # extract table in the group\n children = self.tabs.widget(i).children()\n table = children[1]\n # extract folders (dataset) in the table\n for j in range(table.rowCount()):\n folder = table.item(j,0).text()\n folders.append(folder)\n overviewDT.createCompositeOverview(folder)\n # print(folders)\n\n file = '_composite_recap.tif/.png'\n text = 'Composite files saved at:'\n for f in folders:\n parent,cond = os.path.split(f)\n text = text + '\\n\\t'+os.path.join(os.path.split(parent)[-1],'result_segmentation', cond + file)\n QMessageBox.information(self,\"Completed 
successfully\",text)\n\n def createMeshgridOverviewAll(self):\n\n # for every group\n for i in range(self.tabs.count()):\n # extract table in the group\n children = self.tabs.widget(i).children()\n table = children[1]\n # extract folders (dataset) in the table\n folders = []\n for j in range(table.rowCount()):\n folder = table.item(j,0).text()\n folders.append(folder)\n overviewDT.createMeshgridOverview(folder)\n # print(folders)\n\n file = '_meshgrid_recap.png'\n text = 'Meshgrid files saved at:'\n for f in folders:\n parent,cond = os.path.split(f)\n text = text + '\\n\\t'+os.path.join(os.path.split(parent)[-1],'result_segmentation', cond + file)\n QMessageBox.information(self,\"Completed successfully\",text)\n\n def createMorphologyPlot(self):\n\n computeMorpho = [False for key in self.morphoKeys]\n computeMorpho[self.morphoType.currentIndex()] = True\n if self.selectAllButton.isChecked():\n computeMorpho = [True for key in self.morphoKeys]\n\n # extract all folders to compute\n folders = [[] for i in range(self.tabs.count())]\n for i in range(self.tabs.count()):\n children = self.tabs.widget(i).children()\n table = children[1]\n for j in range(table.rowCount()):\n folders[i].append( table.item(j,0).text() )\n\n # extract data from all the folders\n data_all, keys = arrangemorphodata.collect_morpho_data( \n folders, \n self.morphoKeys, \n computeMorpho, \n self.maskType.currentText(), \n self.isTimelapse.isChecked()\n )\n\n # for every quantification parameter, make the appropriate plot\n for key in keys:\n data_key = [data[key] for data in data_all]\n # print(data_key)\n\n # find out number of dimensions of the data_key object by going deeper in the object\n # and checking if the first item of layer n is iterable\n iterable = True\n ndim = 0\n first_object = data_key[0][0]\n while iterable:\n iterable = isinstance(first_object, Iterable)\n if iterable:\n ndim += 1\n first_object = first_object[0]\n \n # call the right visualization tool according to the number 
of dimensions\n ### clean up quantifier handler:\n self.quantifier = [self.quantifier[i] for i in range(len(self.quantifier)) if self.quantifier[i] is not None]\n\n if ndim == 0:\n self.quantifier.append( visualize0d.visualization_0d( data_key, key ) )\n self.quantifier[-1].show()\n elif ndim == 1:\n self.quantifier.append( visualize1d.visualization_1d( data_key, key ) )\n self.quantifier[-1].show()\n elif ndim == 2:\n self.quantifier.append( visualize2d.visualization_2d( data_key, key ) )\n self.quantifier[-1].show()\n \n def createFluoGraph(self):\n # print('createFluoGraph')\n # return\n\n # extract all folders to compute\n folders = [[] for i in range(self.tabs.count())]\n for i in range(self.tabs.count()):\n children = self.tabs.widget(i).children()\n table = children[1]\n for j in range(table.rowCount()):\n folders[i].append( table.item(j,0).text() )\n \n channel = self.fluorescenceChannel.value()\n distributionType = ['Average','APprofile','LRprofile','RADprofile','ANGprofile'][self.spatialType.currentIndex()]\n\n # extract data from all the folders\n data_all = arrangefluodata.collect_fluo_data( \n folders, \n channel, \n distributionType, \n self.isTimelapse.isChecked()\n )\n\n # if the result is None, something went wrong!\n if not data_all:\n QMessageBox.warning(self,'Warning, invalid channel!','The channel selected doesn\\'t appear in the raw data!')\n return\n\n # print(data_all)\n # make the appropriate plot\n data_key = [data['ch%d_%s'%(channel,distributionType)] for data in data_all]\n data_bckg = [data['ch%d_Background'%(channel)] for data in data_all]\n\n # find out number of dimensions of the data_key object by going deeper in the object\n # and checking if the first item of layer n is iterable\n iterable = True\n ndim = 0\n first_object = data_key[0][0]\n while iterable:\n iterable = isinstance(first_object, Iterable)\n if iterable:\n ndim += 1\n first_object = first_object[0]\n\n # call the right visualization tool according to the number of 
dimensions\n ### clean up quantifier handler:\n self.quantifier = [self.quantifier[i] for i in range(len(self.quantifier)) if self.quantifier[i] is not None]\n\n if ndim == 0:\n self.quantifier.append( visualize0d.visualization_0d( data_key, distributionType, background=data_bckg ) )\n self.quantifier[-1].show()\n elif ndim == 1:\n self.quantifier.append( visualize1d.visualization_1d( data_key, distributionType, background=data_bckg ) )\n self.quantifier[-1].show()\n elif ndim == 2:\n self.quantifier.append( visualize2d.visualization_2d( data_key, distributionType, background=data_bckg ) )\n self.quantifier[-1].show()\n\n def makeSpotCountPlot(self):\n # print('createFluoGraph')\n # return\n\n # extract all folders to compute\n folders = [[] for i in range(self.tabs.count())]\n for i in range(self.tabs.count()):\n children = self.tabs.widget(i).children()\n table = children[1]\n for j in range(table.rowCount()):\n folders[i].append( table.item(j,0).text() )\n\n # if self.spotsSpatialType.currentText()=='Average':\n # data_all = utils_quantify.collect_spots_data_from_folders(folders,spatialDistNeeded='count')\n # if not data_all:\n # return\n # utils_quantify.computeAndPlotMorphoAll(data_all,['count'],[True],\n # int(self.spotsFluorescenceChannel.value()),\n # self.isTimelapse.isChecked(),\n # style=self.plotType.currentText())\n\n # else:\n # ### plot the AP profile of the fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Antero-Posterior profile':\n # key1, key2 = 'APposition', 'APprofile'\n # ### plot the LR profile of the fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Left-Right profile':\n # key1, key2 = 'LRposition', 'LRprofile'\n # ### plot the radial profile of the fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Radial profile':\n # key1, key2 = 'RADposition', 'RADprofile'\n # ### plot the radial profile of the fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Angular 
profile':\n # key1, key2 = 'ANGposition', 'ANGprofile'\n\n # data_all = utils_quantify.collect_spots_data_from_folders(folders,spatialDistNeeded=key1)\n # if not data_all:\n # return\n # data_all = multi_objects_functions.convert_to_distribution(data_all,'count')\n # utils_quantify.computeProfileAll( data_all,\n # channel = int(self.spotsFluorescenceChannel.value()),\n # isTimelapse = self.isTimelapse.isChecked(),\n # profileType = key2,\n # ylabel='Cell count' )\n\n def createSpotsGraphAll(self):\n print('createSpotsGraphAll')\n return\n \n # params = ['area','perimeter',\n # 'major_axis_length','minor_axis_length','eccentricity',\n # 'elliptical_fourier_transform','orientation','mean_intensity']\n\n # toplot = [False for i in params]\n # if self.spotsAreaRadio.isChecked(): toplot[0]=True\n # if self.spotaPerimeterRadio.isChecked(): toplot[1]=True\n # if self.spotsMajorAxisRadio.isChecked(): toplot[2]=True\n # if self.spotsMinorAxisRadio.isChecked(): toplot[3]=True\n # if self.spotsEccetricityRadio.isChecked(): toplot[4]=True\n # if self.spotsEftRadio.isChecked(): toplot[5]=True\n # if self.spotsOrientationRadio.isChecked(): toplot[6]=True\n # if self.spotsFluoRadio.isChecked(): toplot[7]=True\n\n # # extract all folders to compute\n # folders = [[] for i in range(self.tabs.count())]\n # for i in range(self.tabs.count()):\n\n # children = self.tabs.widget(i).children()\n # table = children[1]\n # for j in range(table.rowCount()):\n # folders[i].append( table.item(j,0).text() )\n\n # if self.spotsSpatialType.currentText()=='Average':\n # print('To be implemented!')\n # # data_all = utils_quantify.collect_spots_data_from_folders(folders,spatialDistNeeded='Average')\n # # success = utils_quantify.computeAndPlotMorphoAll(data_all,params,t,int(self.fluorescenceChannel.value()),self.isTimelapse.isChecked())\n\n # if self.spotsSpatialType.currentText()=='Antero-Posterior profile':\n # print(\"To be implemented!\")\n # # self.createAPprofileAll_spots(folders)\n # # 
data_all = utils_quantify.collect_fluo_data_from_folders(folders,spatialDistNeeded='APprofile')\n # # success = utils_quantify.computeProfileAll(data_all,int(self.fluorescenceChannel.value()),self.isTimelapse.isChecked())\n\n # if self.spotsSpatialType.currentText()=='Radial profile':\n # print(\"To be implemented!\")\n # # data_all = utils_quantify.collect_fluo_data_from_folders(folders,spatialDistNeeded='RadialProfile')\n # # success = utils_quantify.computeProfileAll(data_all,int(self.fluorescenceChannel.value()),self.isTimelapse.isChecked())\n\n'''\nrun the main gui from the current file\n'''\nif __name__ == '__main__':\n def run():\n app = QApplication(sys.argv)\n gallery = morganaApp()\n gallery.show()\n sys.exit(app.exec_())\n\n run()\n","sub_path":"morgana/GUIs/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":50636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"317150509","text":"from __future__ import print_function\n\nfrom __future__ import absolute_import\nimport ase.io\nimport ipywidgets as ipw\nfrom fileupload import FileUploadWidget\nimport tempfile\nimport nglview\nfrom six.moves import zip\n\ndef get_example_structure(key):\n from ase.io import read\n return read('miscellaneous/structures/' + key)\n\n\nclass StructureUploadWidget(ipw.VBox):\n\n DATA_FORMATS = ('StructureData', 'CifData')\n\n def __init__(self, text=\"Upload Structure\", **kwargs):\n \"\"\" Upload a structure and store it in AiiDA database.\n\n :param text: Text to display before upload button\n :type text: str\n \"\"\"\n\n self.file_upload = FileUploadWidget(text)\n structures = {\n \"Select structure\": False,\n }\n self.structure_select = ipw.Dropdown(\n options=[],\n description='Or choose from examples:',\n style={'description_width': '160px'},\n disabled=False)\n self.viewer = nglview.NGLWidget()\n self.btn_store = ipw.Button(\n description='Store in AiiDA', disabled=True)\n 
self.structure_description = ipw.Text(\n placeholder=\"Description (optional)\")\n\n self.structure_ase = None\n select = ipw.HBox([self.file_upload, self.structure_select])\n store = ipw.HBox([self.btn_store, self.structure_description])\n children = [select, self.viewer, store]\n\n super(StructureUploadWidget, self).__init__(\n children=children, **kwargs)\n\n self.file_upload.observe(self._on_file_upload, names='data')\n self.structure_select.observe(self._on_structure_select, names=['value'])\n self.btn_store.on_click(self._on_click_store)\n\n from aiida import load_dbenv, is_dbenv_loaded\n from aiida.backends import settings\n if not is_dbenv_loaded():\n load_dbenv(profile=settings.AIIDADB_PROFILE)\n\n # pylint: disable=unused-argument\n def _on_file_upload(self, change):\n self.tmp_folder = tempfile.mkdtemp()\n tmp = self.tmp_folder + '/' + self.file_upload.filename\n with open(tmp, 'w') as f:\n f.write(self.file_upload.data)\n structure_ase = self.get_ase(self.tmp_folder + '/' + self.file_upload.filename)\n self.select_structure(s=structure_ase, name=self.file_upload.filename)\n\n def _on_structure_select(self, change):\n global atoms\n indx = change['owner'].index\n atoms = change['new']\n if atoms is False:\n self.select_structure(s=None, name=None)\n return None\n formula = atoms.get_chemical_formula()\n self.select_structure(s=atoms, name=formula)\n\n\n def select_structure(self, s, name):\n self.btn_store.disabled = False\n if s is None:\n self.structure_ase = None\n self.btn_store.disabled = True\n self.structure_description.value = \"\"\n self.refresh_view()\n return\n\n self.structure_description.value = self.get_description(\n s, name)\n self.structure_ase = s\n self.refresh_view()\n\n def get_ase(self, fname):\n try:\n traj = ase.io.read(fname, index=\":\")\n except AttributeError:\n print(\"Looks like {} file does not contain structure coordinates\".\n format(fname))\n return None\n if len(traj) > 1:\n print(\n \"Warning: Uploaded file {} 
contained more than one structure. I take the first one.\"\n .format(fname))\n return traj[0]\n\n def get_description(self, structure_ase, name):\n formula = structure_ase.get_chemical_formula()\n return \"{} ({})\".format(formula, name)\n\n def refresh_view(self):\n viewer = self.viewer\n # Note: viewer.clear() only removes the 1st component\n # pylint: disable=protected-access\n for comp_id in viewer._ngl_component_ids:\n viewer.remove_component(comp_id)\n\n if self.structure_ase is None:\n return\n\n viewer.add_component(nglview.ASEStructure(\n self.structure_ase)) # adds ball+stick\n viewer.add_unitcell()\n\n # pylint: disable=unused-argument\n def _on_click_store(self, change):\n self.store_structure(\n self.file_upload.filename,\n description=self.structure_description.value)\n\n def store_structure(self, name, description=None):\n structure_ase = self.structure_ase\n if structure_ase is None:\n return\n\n from aiida.orm.data.structure import StructureData\n self.structure_node = StructureData(ase=structure_ase)\n if description is None:\n self.structure_node.description = self.get_description(\n structure_ase, name)\n else:\n self.structure_node.description = description\n self.structure_node.label = \".\".join(name.split('.')[:-1])\n self.structure_node.store()\n print(\"Stored in AiiDA: \" + repr(self.structure_node))\n\n#EOF\n","sub_path":"common/structure/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"615618276","text":"from flask import render_template, session, redirect, url_for, current_app, \\\n flash\nfrom flask import request\nfrom flask_login import login_required\nfrom . import main\nfrom .forms import PostForm\nfrom ..models import Post\nfrom .. 
import db\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n page = request.args.get('page', 1, type=int)\n pagination = Post.query.order_by(Post.timestamp.desc()).paginate(\n page, per_page=current_app.config['POSTS_PER_PAGE'],\n error_out=True)\n posts = pagination.items\n return render_template('index.html', posts=posts,\n pagination=pagination)\n\n\n@main.route('/about', methods=['GET', 'POST'])\ndef about():\n return render_template('about.html')\n\n\n@main.route('/add-post', methods=['GET', 'POST'])\n@login_required\ndef add_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(title=form.title.data, body=form.body.data)\n db.session.add(post)\n return redirect(url_for('main.index'))\n return render_template('add_post.html', form=form)\n\n\n@main.route('/post/')\ndef view_post(id):\n post = Post.query.get_or_404(id)\n return render_template('post.html', posts=[post])\n\n\n@main.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit_post(id):\n post = Post.query.get_or_404(id)\n form = PostForm()\n if form.validate_on_submit():\n post.title = form.title.data\n post.body = form.body.data\n db.session.add(post)\n flash('This post has been updated.')\n return redirect(url_for('main.view_post', id=post.id))\n form.title.data = post.title\n form.body.data = post.body\n return render_template('edit_post.html', form=form)\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116823625","text":"import csv\r\nimport io\r\nfrom prestapyt import PrestaShopWebServiceDict\r\n\r\n\r\n# pip install --ignore-installed git+https://github.com/prestapyt/prestapyt.git@master\r\n\r\n\r\n# Add category to prestashop\r\ndef create_category(prestashop, blank_category, names, links, deep):\r\n for i in range(0, len(names)):\r\n blank_category.update({'category': {\r\n 'id_parent': deep,\r\n 
'active': '1',\r\n 'name': {\r\n 'language': [{'attrs': {'id': '2'}, 'value': names[i]}]},\r\n 'link_rewrite': {\r\n 'language': [{'attrs': {'id': '2'}, 'value': links[i]}]}\r\n }})\r\n prestashop.add('categories', blank_category)\r\n\r\n\r\n# Add category tree to prestashop\r\ndef create_category_tree(prestashop):\r\n main_category_name = [\"Filmy\"]\r\n categories_name = [\"DVD\", \"Blu-Ray\"]\r\n subcategories_name = [\"Animowane/Familijne\", \"Dokumentalne\", \"Dramat\", \"Fantasy/Sci-Fi\", \"Horror/Thriller\",\r\n \"Komedia/Komedia Romantyczna\", \"Muzyczne/Musicale\", \"Sensacyjne/Przygodowe\"]\r\n\r\n main_category_link = [\"filmy\"]\r\n categories_link = [\"filmy-dvd\", \"filmy-blu-ray\"]\r\n subcategories_link = [\"animowanefamilijne\", \"dokumentalne\", \"dramat\", \"fantasysci-fi\", \"horrorthriller\",\r\n \"komediakomedia-romantyczna\", \"muzycznemusicale\", \"sensacyjneprzygodowe\"]\r\n\r\n blank_category = prestashop.get('categories', options={'schema': 'blank'})\r\n\r\n print(blank_category)\r\n\r\n # Film\r\n create_category(prestashop, blank_category, main_category_name, main_category_link, 2)\r\n # Categories\r\n create_category(prestashop, blank_category, categories_name, categories_link, 1000)\r\n # Subcategories\r\n create_category(prestashop, blank_category, subcategories_name, subcategories_link, 1001)\r\n create_category(prestashop, blank_category, subcategories_name, subcategories_link, 1002)\r\n\r\n\r\n# Add image to product\r\ndef add_image(prestashop, image_id, product_id):\r\n file_name = 'images/' + str(image_id) + '.jpeg'\r\n fd = io.open(file_name, \"rb\")\r\n content = fd.read()\r\n fd.close()\r\n prestashop.add('/images/products/' + str(product_id), files=[('image', file_name, content)])\r\n\r\n\r\n# Add attributes and quantities to product\r\ndef add_combinations(prestashop, id_product):\r\n\r\n # Attributes\r\n # PL\r\n blank_combination = prestashop.get('combinations', options={'schema': 'blank'})\r\n 
blank_combination.update({'combination': {\r\n 'id_product': str(id_product),\r\n 'minimal_quantity': '1',\r\n 'associations': {'product_option_values': {'product_option_value': {'id': '1'}}}}}\r\n )\r\n prestashop.add('combinations', blank_combination)\r\n # EN\r\n blank_combination.update({'combination': {\r\n 'id_product': str(id_product),\r\n 'minimal_quantity': '1',\r\n 'associations': {'product_option_values': {'product_option_value': {'id': '2'}}}}}\r\n )\r\n prestashop.add('combinations', blank_combination)\r\n\r\n # Quantities\r\n blank_stock_available = prestashop.get('stock_availables', id_product * 3 - 1)\r\n blank_stock_available['stock_available']['quantity'] = 50\r\n prestashop.edit('stock_availables', blank_stock_available)\r\n\r\n blank_stock_available = prestashop.get('stock_availables', id_product * 3)\r\n blank_stock_available['stock_available']['quantity'] = 50\r\n prestashop.edit('stock_availables', blank_stock_available)\r\n\r\n\r\n# Add products to prestashop\r\ndef add_products(prestashop):\r\n\r\n dict = {}\r\n product_features_dict = {}\r\n features, names = get_features()\r\n\r\n with open('products.csv', encoding=\"utf8\") as csvfile:\r\n products = list(csv.reader(csvfile, delimiter=\";\"))\r\n blank_product = prestashop.get('products', options={'schema': 'blank'})\r\n\r\n for i in range(0, 568):\r\n print(i)\r\n\r\n # Categories\r\n categories = []\r\n dict[\"id\"] = products[i][3].split(\"|\")[0]\r\n categories.append(dict.copy())\r\n for j in range(1, len(products[i][3].split(\"|\"))):\r\n dict[\"id\"] = products[i][3].split(\"|\")[j]\r\n categories.append(dict.copy())\r\n id_category_default = [x['id'] for x in categories]\r\n\r\n # Features\r\n product_features = []\r\n for product_feature in products[i][6].split('|'):\r\n for k, feature_name in enumerate(names):\r\n if feature_name == product_feature.split('@')[0]:\r\n product_features_dict[\"id\"] = k + 1\r\n break\r\n for k, feature in enumerate(features):\r\n if feature == 
product_feature:\r\n product_features_dict[\"id_feature_value\"] = k + 1\r\n break\r\n product_features.append(product_features_dict.copy())\r\n\r\n # Product\r\n blank_product.update({'product': {\r\n 'id_manufacturer': '0',\r\n 'id_default_combination': '1',\r\n 'id_category_default': max(id_category_default),\r\n 'id_tax_rules_group': '1',\r\n 'reference': '1438245'+str(i),\r\n 'supplier_reference': '982473182',\r\n 'state': '1',\r\n 'on_sale': '0',\r\n 'price': str(round(float(products[i][4]), 3)),\r\n 'wholesale_price': products[i][5],\r\n 'customizable': '1',\r\n 'active': '1',\r\n 'show_condition': '1',\r\n 'condition': 'new',\r\n 'show_price': '1',\r\n 'visibility': 'both',\r\n 'available_for_order': '1',\r\n 'link_rewrite': {'language': {'attrs': {'id': '2'}, 'value': products[i][2]}},\r\n 'name': {'language': [{'attrs': {'id': '1'}, 'value': products[i][1]},\r\n {'attrs': {'id': '2'}, 'value': products[i][1]}]},\r\n 'description': {'language': {'attrs': {'id': '2'}, 'value':products[i][7]}},\r\n 'description_short': {'language': {'attrs': {'id': '2'}, 'value': 'Film'}},\r\n 'available_now': {'language': {'attrs': {'id': '2'}, 'value': 'Produkt dostępny'}},\r\n 'available_later': {'language': {'attrs': {'id': '2'}, 'value': 'Zamówienie dozwolone'}},\r\n 'associations': {\r\n 'categories': {'attrs': {'nodeType': 'category', 'api': 'categories'}, 'category': categories},\r\n #'combinations': {'attrs': {'nodeType': 'combination', 'api': 'combinations'},'combination': [{'id': '1'}, {'id': '2'}]},\r\n #'product_option_values': {'attrs': {'nodeType': 'product_option_value', 'api': 'product_option_values'},'product_option_value': [{'id': '1'}, {'id': '2'}]},\r\n 'product_features': {'attrs': {'nodeType': 'product_feature', 'api': 'product_features'},'product_feature': product_features},\r\n #'stock_availables': {'attrs': {'nodeType': 'stock_available', 'api': 'stock_availables'},'stock_available': [{'id': '869', 'id_product_attribute': '0'},{'id': '1125', 
'id_product_attribute': '1'},{'id': '1126', 'id_product_attribute': '2'}]},\r\n }}}\r\n\r\n )\r\n prestashop.add('products', blank_product)\r\n add_image(prestashop, products[i][0], i)\r\n add_combinations(prestashop, i)\r\n\r\n\r\n# Add features to prestashop\r\ndef add_features(names, prestashop):\r\n blank = prestashop.get('product_features', options={'schema': 'blank'})\r\n for name in names:\r\n blank.update({'product_feature': {\r\n 'name': {'language': {'attrs': {'id': '2'}, 'value': name}}}\r\n })\r\n prestashop.add('product_features', blank)\r\n return names\r\n\r\n\r\n# Add feature values to prestashop\r\ndef add_values(prestashop, features, names):\r\n id_feature = 1\r\n blank = prestashop.get('product_feature_values', options={'schema': 'blank'})\r\n for i, feature in enumerate(features):\r\n print(i)\r\n for j, name in enumerate(names):\r\n if feature.split(\"@\")[0] == name:\r\n id_feature = j + 1\r\n break\r\n blank.update({'product_feature_value': {\r\n 'id_feature': str(id_feature),\r\n 'value': {'language': {'attrs': {'id': '2'}, 'value': feature.split(\"@\")[1]}}}\r\n })\r\n prestashop.add('product_feature_values', blank)\r\n\r\n\r\n# Get features and feature values from products.csv\r\ndef get_features():\r\n features = []\r\n names = []\r\n with open('products.csv', encoding=\"utf8\") as csvfile:\r\n products = list(csv.reader(csvfile, delimiter=\";\"))\r\n for i in range(1, len(products)):\r\n features += products[i][6].split('|')\r\n\r\n features = list(dict.fromkeys(features))\r\n for i, atr in enumerate(features):\r\n names.append(atr.split('@')[0])\r\n names = list(dict.fromkeys(names))\r\n return features, names\r\n\r\n\r\n# Add feature tree to prestashop\r\ndef create_feature_tree(prestashop):\r\n features, names = get_features()\r\n add_features(names, prestashop)\r\n add_values(prestashop, features, names)\r\n\r\n\r\ndef main():\r\n prestashop = PrestaShopWebServiceDict('http://efilmy.best/api',\r\n 'AZ2A2PZC183CQIEHI8KR3SC48E8CTA7T', 
)\r\n while 1:\r\n print(\"1 Create Category Tree\")\r\n print(\"2 Add features\")\r\n print(\"3 Add products\")\r\n print(\"4 Exit\")\r\n x = input()\r\n if x == '1':\r\n create_category_tree(prestashop)\r\n elif x == '2':\r\n create_feature_tree(prestashop)\r\n elif x == '3':\r\n add_products(prestashop)\r\n elif x == '4':\r\n break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"initialize_products.py","file_name":"initialize_products.py","file_ext":"py","file_size_in_byte":9803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"163491821","text":"import pandas as pd\nimport math\nimport operator\nclass CF:\n '''\n 协同过滤\n '''\n def __init__(self,data):\n '''\n :param data: 清洗后的数据\n '''\n #用户评分字典格式:{userID:[(goodsID,[rate,]),(goodsID,[rate,])...]}\n self.userRates=dict()\n self.data=data\n def getUserRates(self):\n '''\n 获得用户评分字典\n :return:\n '''\n dd=self.data.drop_duplicates('userID')#去重,为了获得订单表用户id\n\n for id in dd['userID']:\n\n temp2 = {}\n #print(self.data[self.data.userID == id])\n tempdf=self.data[self.data.userID == id]\n for gid in tempdf['goodsID']:\n templist=list((tempdf[tempdf.goodsID == gid])['rate'])#临时储存分数列表\n temp2.setdefault(gid,float(sum(templist)/len(templist)))\n #temp2=(gid,float(sum(templist)/len(templist)))#取分数平均值作为最终评分\n self.userRates.setdefault(id,temp2)\n\n #print(self.userRates)\n def cosine(self,userID,others):\n #,others):\n '''\n 根据余弦相似算法计算相似度\n :param userID:目标用户ID\n :param others:其他用户ID\n :return:\n '''\n\n thisUser=self.userRates[userID]\n otherUser=self.userRates[others]\n lista=[]\n listb=[]\n for goodsID in thisUser:\n for key in otherUser:\n if goodsID ==key:\n lista.append(float(thisUser[key]))\n listb.append(float(otherUser[key]))\n\n if len(lista)==0:#如果为空,表示该两个用户不相似\n return 0\n up=0\n aa=0\n bb=0\n\n for a in range(0,len(lista)):#计算余弦算法的数据\n up=up+lista[a]*listb[a]\n aa=aa+lista[a]*lista[a]\n bb=bb+listb[a]*listb[a]\n #print(lista[0])\n 
#print(lista[1])\n\n if aa==0 or bb==0:\n return 0\n x=math.sqrt(aa)\n y=math.sqrt(bb)\n\n\n degree=float(up/(x*y))#余弦计算相似度,值越接近1越相似\n\n #print(x*y)\n return degree\n\n def getSimilarListByuserid(self,userID,num=5):\n '''\n 根据用户ID来获取与该ID的相似列表\n :param userID: 用户ID\n :param num:推荐用户个数,默认为5\n :return: 最相似用户列表\n '''\n similarList=dict()\n for key in self.userRates.keys():\n if key!=userID:\n similarList.setdefault(key,self.cosine(userID,key))\n similarList=sorted(similarList.items(), key=lambda x:x[1], reverse=True)\n #根据字典值降序排序并生成元组列表\n lists=similarList[0:num]\n #获取前num个相似度数据元组列表[('ID',相似度)]\n #print(lists)\n usersList=[]\n for item in lists:\n if item[1]!=0:#去除完全不相似的用户\n usersList.append(item[0])\n return usersList\n\n def recommend(self,userID):\n '''\n 获取推荐\n :param userID: 被推荐用户\n :return: 推荐商品列表\n '''\n recommendList=[]\n for id in self.getSimilarListByuserid(userID,):\n for goodsrate in self.userRates[id].items():\n if goodsrate[1]==5:\n recommendList.append(goodsrate[0])\n recommendList=list(set(recommendList))#获取最终推荐商品id列表\n #print(recommendList)\n return recommendList[0]\n\t#推荐1条\n #print(self.userRates[userID])\n #uRL=self.userRates[userID]#用户评分列表\n\n\n def groupByBehaviour(self):\n pass\n","sub_path":"recommendSystem/Calculate/CollaborativeFiltering.py","file_name":"CollaborativeFiltering.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"325459110","text":"import numpy as np\nimport sys\n\ndef atom_type(x):\n if x == \"C\" : return \" C \"\n if x == \"A\" : return \" C \"\n if x == \"N\" : return \" N \"\n if x == \"O\" : return \" O \"\n if x == \"P\" : return \" P \"\n if x == \"S\" : return \" S \"\n if x == \"H\" : return \" H \"\n if x == \"F\" : return \" F \"\n if x == \"I\" : return \" I \"\n if x == \"NA\" : return \" N \"\n if x == \"OA\" : return \" O \"\n if x == \"SA\" : return \" S \"\n if x == \"HD\" : return \" H \"\n if x == \"Mg\" : return 
\" Mg\"\n if x == \"Mn\" : return \" Mn\"\n if x == \"Zn\" : return \" Zn\"\n if x == \"Ca\" : return \" Ca\"\n if x == \"Fe\" : return \" Fe\"\n if x == \"Cl\" : return \" Cl\"\n if x == \"Br\" : return \" Br\"\n\ndef rmsd_lb_asimetric(A,B):\n sum = 0\n for i in range(len(A)):\n r2 = 10000000.0\n for j in range(len(A)):\n if A[i][3] == B[j][3]: \n\n this_r2 = ( float(A[i][0]) - float(B[j][0]) )**2 + \\\n ( float(A[i][1]) - float(B[j][1]) )**2 + \\\n ( float(A[i][2]) - float(B[j][2]) )**2\n\n if this_r2 < r2 : r2 = this_r2;\n\n sum += r2;\n rmsd = np.sqrt( sum / float( len(A) ) )\n return rmsd\n\ndef rmsd_lb(arrayA,arrayB):\n rmsd1 = rmsd_lb_asimetric(arrayA,arrayB)\n rmsd2 = rmsd_lb_asimetric(arrayB,arrayA)\n return max( [rmsd1, rmsd2] )\n\nclass model:\n def __init__(self):\n self.vinascore = 'NaN'\n self.ad4score = 'NaN'\n self.globalscore = 'NaN'\n self.atoms = []\n self.accepted = True\n self.lines = []\n def write(self,filename):\n filename.write('MODEL\\n')\n filename.write('REMARK VINA RESULT: %6.1f '%(self.vinascore)+' \\n')\n filename.write('REMARK AD4 RESULT: %7.2f'%(self.ad4score)+' \\n')\n filename.write('REMARK FIN. RESULT: %7.2f'%(self.globalscore)+' \\n')\n for line in self.lines: \n filename.write(line)\n filename.write('ENDMDL\\n')\n#-----------------------------------------------------------------------------------------\n\nRawModelsFile = sys.argv[1]\nRMSDtreshold = float(sys.argv[2])\nOutputFile = sys.argv[3]\n\n\nModels=[]\n\nfor line in open(RawModelsFile):\n if line[0:5] == 'MODEL':\n ThisModel = model()\n\n elif line[0:6] == 'ENDMDL':\n Models.append(ThisModel)\n\n elif line[0:18] == 'REMARK VINA RESULT':\n ThisModel.vinascore = float( line.split()[3])\n\n elif line[0:18] == 'REMARK AD4 RESULT':\n ThisModel.ad4score = float( line.split()[3])\n\n elif line[0:18] == 'REMARK FIN. 
RESULT':\n ThisModel.globalscore = float( line.split()[3])\n\n elif line[0:4] == 'ATOM':\n ThisModel.atoms.append( [line[30:38], line[38:46], line[46:54], line[77:78]]) \n ThisModel.lines.append(line)\n\n else:\n ThisModel.lines.append(line)\n\n\nModels = sorted(Models, key=lambda model: -model.globalscore)\nNModels = len(Models)\n\nfor i in range(NModels):\n ModelA = Models[i]\n for j in range(NModels):\n ModelB = Models[j]\n if i == j:\n continue\n if not ModelB.accepted:\n continue\n rmsd = rmsd_lb(ModelA.atoms,ModelB.atoms)\n if rmsd <= RMSDtreshold:\n ModelA.accepted = False\n\nModels = sorted(Models, key=lambda model: model.globalscore)\n\n\nfilename = open(OutputFile,'w')\nfor i in range(NModels):\n if Models[i].accepted:\n Models[i].write(filename)\nfilename.close()\n\n","sub_path":"src/galileo.aux.diversity.py","file_name":"galileo.aux.diversity.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439742486","text":"turmas = {}\r\n\r\n\r\ndef adicionarTurma():\r\n\r\n nome = str(input(\"Nome da turma: \"))\r\n\r\n alunos = {}\r\n\r\n turmas[nome] = alunos\r\n \r\ndef adicionarAlunoNotas():\r\n\r\n nomeTurma= str(input(\"Nome da turma:\"))\r\n\r\n matricula=str(input(\"Matricula: \"))\r\n\r\n notas=[]\r\n\r\n mais = 'Sim'\r\n\r\n while (mais =='Sim'):\r\n\r\n nota = float(input(\"Digite a nota: \"))\r\n\r\n notas.append(nota)\r\n\r\n mais = str(input(\"Deseja inserir mais notas? 
\"))\r\n\r\n turmas[nomeTurma][matricula] = notas\r\n\r\n\r\n\r\ndef calcularMedia(notas):\r\n\r\n soma = 0\r\n\r\n for x in notas:\r\n\r\n soma += x\r\n\r\n return soma/len(notas)\r\n\r\n\r\ndef calcularMediaTurma():\r\n\r\n soma = 0\r\n\r\n quantidadeDeTurmas = 0\r\n\r\n nomeTurma = str(input(\"Nome da turma: \"))\r\n\r\n for x in turmas[nomeTurma]:\r\n\r\n soma += calcularMedia(turmas[nomeTurma][x])\r\n\r\n quantidadeDeTurmas += 1\r\n\r\n return soma/quantidadeDeTurmas\r\n\r\n\r\ndef consulta():\r\n\r\n op = int(input(\"1. Turmas;\\n2. Alunos;\\n3. Aluno;\\n\"))\r\n\r\n if(op == 1):\r\n\r\n print(turmas)\r\n\r\n elif(op == 2):\r\n\r\n nomeTurma= str(input(\"Nome da turma: \"))\r\n\r\n print(turmas[nomeTurma])\r\n\r\n elif(op == 3):\r\n\r\n nomeTurma= str(input(\"Nome da turma: \"))\r\n\r\n matr = str(input(\"Matricula do aluno: \"))\r\n\r\n print(turmas[nomeTurma][matr])\r\n\r\n\r\n\r\n\r\ndef Menu():\r\n\r\n continuar = 'Sim'\r\n\r\n while (continuar == 'Sim'):\r\n\r\n opcao = int(input(\"O que deseja fazer?\\n 1: Adicionar turma;\\n 2: Adicionar aluno e notas;\\n 3: Calcular média de um aluno;\\n 4: Calcular média de um aluno;\\n 5: Consutar os dados inseridos;\\n 6: Sair.\\n\"))\r\n\r\n if (opcao == 1):\r\n\r\n adicionarTurma()\r\n\r\n elif (opcao == 2):\r\n\r\n adicionarAlunoNotas()\r\n\r\n elif (opcao == 3):\r\n\r\n turma = str(input(\"Turma: \"))\r\n\r\n matricula = str(input(\"Matricula: \"))\r\n\r\n print(calcularMedia(turmas[turma][matricula]))\r\n\r\n elif (opcao == 4):\r\n\r\n print(calcularMediaTurma())\r\n\r\n elif (opcao == 5):\r\n consulta()\r\n \r\n elif (opcao == 6):\r\n continuar = 'Não'\r\n\r\n else:\r\n\r\n print(\"Opção inválida! 
Tente Novamente.\\n\")\r\n\r\n\r\nMenu()","sub_path":"Questão-4.py","file_name":"Questão-4.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"99445412","text":"import numpy as np\nfrom Bio import SeqIO\nfrom Bio.Align import substitution_matrices\nfrom itertools import product\n\nMAT = substitution_matrices.load(\"BLOSUM62\")\nMIN_INT = np.iinfo(np.int32).min\nGAPOPEN = 11\nGAPEXTEND = 1\n# test datasets are too big, may not compute in time\n# on some of them\ndef laff(s1: str, s2:str) -> tuple:\n m = len(s1)\n n = len(s2)\n max_score, max_i, max_j = MIN_INT, 0, 0\n upper = np.zeros(shape=(m+1, n+1), dtype=int)\n middle = np.zeros(shape=(m+1, n+1), dtype=int)\n lower = np.zeros(shape=(m+1, n+1), dtype=int)\n pntrs = np.zeros(shape=(m+1, n+1), dtype=int)\n # fill tables\n for i, j in product(range(1, m+1), range(1, n+1)):\n upper[i, j] = max(\n upper[i, j-1] - GAPEXTEND,\n middle[i, j-1] - GAPOPEN\n )\n lower[i, j] = max(\n lower[i-1, j] - GAPEXTEND,\n middle[i-1, j] - GAPOPEN\n )\n scores = [\n lower[i, j],\n middle[i-1, j-1] + MAT[s1[i-1], s2[j-1]],\n upper[i, j],\n 0 # index will be 3 -> backtrack stop\n ]\n middle[i, j] = max(scores)\n pntrs[i, j] = scores.index(middle[i, j])\n if (middle[i, j] > max_score):\n max_score = middle[i, j]\n max_i, max_j = i, j\n # backtrack\n i, j = max_i, max_j\n s1_al, s2_al = s1[:i], s2[:j]\n while (i > 0) and (j > 0):\n if (pntrs[i, j] == 0):\n i -= 1\n elif (pntrs[i, j] == 1):\n i -= 1\n j -= 1\n elif (pntrs[i, j] == 2):\n j -= 1\n else: # pntrs[i, j] == 3\n break\n s1_al, s2_al = s1_al[i:], s2_al[j:]\n return max_score, s1_al, s2_al\n\ndef main():\n seq1, seq2 = (item.seq for item in SeqIO.parse(\"rosalind_laff.txt\", \"fasta\"))\n \n with open(\"out.txt\", \"w\") as o:\n print(*laff(seq1, seq2), sep='\\n', file=o) \n\nif __name__ == \"__main__\":\n main()","sub_path":"Bioinformatics 
Stronghold/79_laff.py","file_name":"79_laff.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"651877325","text":"from django.views.generic import FormView\nfrom google.appengine.api import users\nfrom guestbook.api import JsonResponse\n\n\nclass Auth(JsonResponse.JSONResponseMixin, FormView):\n\n\tdef get(self, request, *args, **kwargs):\n\t\tif users.get_current_user():\n\t\t\turl = users.create_logout_url('/')\n\t\t\turl_linktext = 'Logout'\n\t\t\tuser_email = users.get_current_user().email()\n\t\telse:\n\t\t\turl = users.create_login_url('/')\n\t\t\turl_linktext = 'Login'\n\t\t\tuser_email = ''\n\n\t\tcontext = {\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t\t'user_email': user_email\n\t\t}\n\t\treturn self.render_to_response(context)\n","sub_path":"guestbook/api/api_view.py","file_name":"api_view.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"233903914","text":"############################################################################\n #\n # Write a Python program to find common items from two lists.\n #\n############################################################################\n\n#taking input from user\nlist1 = input(\"Enter 1st list \")\nlist1 = list1.split(\",\")\nlist2 = input(\"Enter 2nd list \")\nlist2 = list2.split(\",\")\n\nprint(\"Using sets\")\n#convert lists to set\nset_list1 = set(list1)\nset_list2 = set(list2)\n\n#by intersection find common elements\nresult = set_list1.intersection(set_list2)\n\nresult = list(result)\n\nprint(result)\n\nprint(\"using for loop\")\n\n#store answer in result\nresult = []\nfor each in list1:\n if each in list2: # check if common elemets or not\n 
result.append(each)\n\nprint(result)\n","sub_path":"git_ass_py_4/question9.py","file_name":"question9.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"218618685","text":"import math\n\nd = {}\nfor i in range(1, 1001):\n for j in range(i + 1, 1001):\n if i + j >= 1000:\n continue\n sqr_sum = i ** 2 + j ** 2\n k = int(math.sqrt(sqr_sum))\n if k ** 2 == sqr_sum:\n if i + j + k > 1000:\n continue\n if i + j + k in d:\n d[i + j + k] += 1\n else:\n d[i + j + k] = 1\nmax_s = 0\nidx = 0\nfor i in d:\n if d[i] > max_s:\n max_s = d[i]\n idx = i\nprint(idx, max_s)\n","sub_path":"39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"413103257","text":"### Author: Acciente\r\n### Version: 1.0\r\n### Last modified date: 2017.06.29\r\n### Usage : StrToRPN(inputStr)\r\n### Function name is defined in funcList.\r\n### Symbols that are not in funcList will be treated as unknown numbers.\r\n\r\nfuncList = (\"sin\", \"cos\", \"tan\", \"asin\", \"acos\", \"atan\", \"sqrt\")\r\nnumStr = \"0123456789.\"\r\ncharStr = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\npriorDic = {\"+\" : 0, \"-\" : 0, \"*\" : 1, \"/\" : 1, \"^\" : 2}\r\n\r\n\r\nclass Error(Exception):\r\n pass\r\n\r\n\r\nclass ParenthesesError(Error):\r\n def __init__(self):\r\n self.message = \"Parentheses don't make pair.\"\r\n def __str__(self):\r\n return self.message\r\n\r\n\r\nclass CharacterError(Error):\r\n def __init__(self):\r\n self.message = \"Unexpected character.\"\r\n def __str__(self):\r\n return self.message\r\n\r\n\r\ndef FindPairedBracket(inputStr, startIndex):\r\n leftNum = 0\r\n i = startIndex\r\n while i < len(inputStr):\r\n if inputStr[i] == ')':\r\n leftNum -= 1\r\n elif inputStr[i] == '(':\r\n leftNum += 1\r\n if leftNum == 0:\r\n break\r\n i += 1\r\n if i == 
len(inputStr):\r\n raise ParenthesesError\r\n return i\r\n\r\n\r\ndef FindCharEnd(inputStr, startIndex):\r\n i = startIndex\r\n while i < len(inputStr) - 1:\r\n if inputStr[i + 1] not in charStr:\r\n break\r\n i += 1\r\n if i == len(inputStr) - 1:\r\n if inputStr[i] not in charStr:\r\n i -= 1\r\n return i\r\n\r\n\r\ndef FindNumEnd(inputStr, startIndex):\r\n global numStr\r\n i = startIndex\r\n while i < len(inputStr) - 1:\r\n if inputStr[i + 1] not in numStr:\r\n break\r\n i += 1\r\n if i == len(inputStr) - 1:\r\n if inputStr[i] not in numStr:\r\n i -= 1\r\n return i\r\n\r\n\r\ndef StrToRPNList(inputStr):\r\n oprStack = []\r\n finStack = []\r\n i = 0\r\n while i < len(inputStr):\r\n if inputStr[i] == \" \":\r\n pass\r\n elif inputStr[i] == \"(\":\r\n pairedBracket = FindPairedBracket(inputStr, i)\r\n finStack += StrToRPNList(inputStr[i + 1:pairedBracket])\r\n i = pairedBracket\r\n elif inputStr[i] in numStr:\r\n numEnd = FindNumEnd(inputStr, i)\r\n finStack.append(inputStr[i:numEnd + 1])\r\n i = numEnd\r\n elif inputStr[i] in charStr:\r\n charEnd = FindCharEnd(inputStr, i)\r\n if inputStr[i:charEnd + 1] in funcList:\r\n oprStack.append((3, inputStr[i:charEnd + 1]))\r\n else:\r\n finStack.append(inputStr[i:charEnd + 1])\r\n i = charEnd\r\n elif inputStr[i] in priorDic.keys():\r\n while True:\r\n if len(oprStack) == 0:\r\n break\r\n if oprStack[len(oprStack) - 1][0] >= priorDic[inputStr[i]]:\r\n finStack.append(oprStack.pop()[1])\r\n else:\r\n break\r\n oprStack.append((priorDic[inputStr[i]], inputStr[i]))\r\n else:\r\n raise CharacterError\r\n i += 1\r\n while len(oprStack) > 0:\r\n finStack.append(oprStack.pop()[1])\r\n return finStack\r\n\r\n\r\ndef RPNListToStr(lst):\r\n s = \"\"\r\n for i in lst:\r\n s += str(i)\r\n s += \" \"\r\n return s\r\n\r\n\r\ndef StrToRPN(inputStr):\r\n if not isinstance(inputStr, str):\r\n raise TypeError(\"Input must be a string!\")\r\n return RPNListToStr(StrToRPNList(inputStr))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import 
sys\r\n print(StrToRPN(sys.argv[1]))","sub_path":"StrToRPN.py","file_name":"StrToRPN.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"316028676","text":"import os\nimport json\nfrom pathlib import Path\n\n\nclass JobManager:\n\n @staticmethod\n def parse_json(data, keys):\n for key in keys:\n if key in data:\n data[key] = json.loads(data[key])\n return data\n\n @staticmethod\n def get_job(job_id):\n file_path = Path(os.path.join(os.path.dirname(__file__), '..', 'jobs', '{}.json'.format(job_id)))\n if not file_path.exists():\n if not Path(os.path.join(os.path.dirname(__file__),\n '..',\n 'task_records',\n '{}.tmp'.format(job_id)\n )).exists():\n return {\n 'data': None,\n 'errorCode': 1001,\n 'message': '非法jobId',\n }\n else:\n return None\n with open(file_path, 'r') as f:\n try:\n data = json.load(f)\n if data['data'] is not None:\n data['data'] = JobManager.parse_json(data['data'], ['poiStartName', 'poiEndName'])\n except BaseException as error:\n return {\n 'data': None,\n 'errorCode': 1001,\n 'message': '产生异常数据',\n }\n return {\n 'data': data.get('data', None),\n 'errorCode': data.get('errorCode', None),\n 'message': data.get('message', None),\n }\n","sub_path":"util/job_manager.py","file_name":"job_manager.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"105360236","text":"import os\nimport sys\nimport shutil\nfrom shutil import copyfile\n\nimport argparse\nfrom argparse import Namespace\nimport yaml\nimport json\n\nimport numpy as np\nimport SimpleITK as sitk\n\nfrom data.dicom_loader import DicomLoader\n\n\ndef pairwise_registration(dirs, par_list, master, refs, exe):\n reg_dir = dirs['registration_dir']\n warped_dir = dirs['warped_dir']\n dfs_dir = dirs['dfs_dir']\n\n par_list.append('-t ' + master)\n par_list.append('-o ' + reg_dir)\n\n # Pairwise 
registration\n if os.path.isdir(warped_dir):\n shutil.rmtree(warped_dir)\n os.makedirs(warped_dir, exist_ok=True)\n\n if os.path.isdir(dfs_dir):\n shutil.rmtree(dfs_dir)\n os.makedirs(dfs_dir, exist_ok=True)\n\n for itr, ref in enumerate(refs):\n current_par_list = par_list.copy()\n current_par_list.append('-r ' + ref)\n\n reg_pars = ' '.join(current_par_list)\n reg_cmd = ' '.join([exe, reg_pars])\n os.system(reg_cmd)\n\n # save registration result\n result_dir = os.path.join(reg_dir, \"vtk\")\n warped = sorted([os.path.join(result_dir, i) for i in os.listdir(result_dir) if i.startswith(\"warpedImage_\")])\n warped_copy = os.path.join(warped_dir, (\"warpedImg%05d.vtk\" % itr))\n copyfile(warped[-1], warped_copy)\n\n df = sorted([os.path.join(result_dir, i) for i in os.listdir(result_dir) if i.startswith(\"displacement_\")])\n df_copy = os.path.join(dfs_dir, (\"dfReg%05d.vtk\" % itr))\n copyfile(df[-1], df_copy)\n\n\ndef main(config):\n # ----------------------------------------------------------\n # Load configuration parameters\n # ----------------------------------------------------------\n with open(config, 'r') as config_stream:\n cfg = yaml.safe_load(config_stream)\n\n opt = Namespace(**cfg['options'])\n exe = Namespace(**cfg['exe'])\n cfg_general = Namespace(**cfg['general'])\n cfg_reg2d = cfg['reg2d']\n cfg_reg3d = cfg['reg3d']\n cfg_gpr_model = Namespace(**cfg['gpr_model'])\n cfg_gpr_learn = Namespace(**cfg['gpr_learn'])\n\n # ----------------------------------------------------------\n # Preprocessing\n # ----------------------------------------------------------\n # Parse data files\n opt_data = Namespace(**cfg['general'])\n opt_data.is_navi = False\n opt_data.input_dir = os.path.join(opt_data.root_dir, opt_data.data_dir)\n opt_data.output_dir = os.path.join(opt_data.root_dir, opt_data.data_dir + \"_mod\")\n\n if opt.preprocessing:\n print('PREPROCESSING DATA FILES...')\n # Assert\n if not os.path.exists(opt_data.input_dir):\n sys.exit('Path to data files 
does not exist.')\n\n if os.path.isdir(opt_data.output_dir):\n if os.path.isdir(os.path.join(opt_data.output_dir, 'sorted')):\n shutil.rmtree(os.path.join(opt_data.output_dir, 'sorted'))\n [os.remove(os.path.join(opt_data.output_dir, f)) for f in os.listdir(opt_data.output_dir)]\n else:\n os.makedirs(opt_data.output_dir, exist_ok=True)\n\n data_loader = DicomLoader(opt_data)\n data_loader.preprocess()\n print('[done]')\n\n # Parse navi if required\n if cfg_general.surrogate_type == 0 or cfg_general.surrogate_type == 2:\n opt_navi = Namespace(**cfg['general'])\n opt_navi.is_navi = True\n opt_navi.input_dir = os.path.join(opt_navi.root_dir, opt_navi.navi_dir)\n opt_navi.output_dir = os.path.join(opt_navi.root_dir, opt_navi.navi_dir + \"_mod\")\n\n if opt.preprocessing:\n print('PREPROCESSING NAVIS...')\n if not os.path.exists(opt_navi.input_dir):\n sys.exit('Path to navigators does not exist.')\n\n if os.path.isdir(opt_navi.output_dir):\n [os.remove(os.path.join(opt_navi.output_dir, f)) for f in os.listdir(opt_navi.output_dir)]\n else:\n os.makedirs(opt_navi.output_dir, exist_ok=True)\n\n navi_loader = DicomLoader(opt_navi)\n navi_loader.preprocess()\n print('[done]')\n\n registration2d_dir = os.path.join(cfg_general.root_dir, 'reg_2d')\n reg2d_dirs = {\n 'registration_dir': registration2d_dir,\n 'warped_dir': os.path.join(registration2d_dir, 'warpedImage'),\n 'dfs_dir': os.path.join(registration2d_dir, 'dfs')\n }\n\n if opt.registration_2d:\n print('2D REGISTRATION OF NAVIS')\n refs = sorted([os.path.join(opt_navi.output_dir, i) for i in os.listdir(opt_navi.output_dir) if i.startswith('navi')])\n target = os.path.join(opt_navi.output_dir, cfg_general.master_navi)\n print('Registration 2d: Number of reference images: ' + str(len(refs)))\n\n pairwise_registration(reg2d_dirs, cfg_reg2d, target, refs, exe.registration_2d)\n print('[done]')\n\n # ----------------------------------------------------------\n # Stacking\n # 
----------------------------------------------------------\n # TODO: change order of assignment (if input_dir defined, use this path always)\n stacking_par_list = []\n if cfg_general.surrogate_type == 0:\n stack_dir = os.path.join(cfg_general.root_dir, 'stacks_navi')\n surrogate_dir = os.path.join(registration2d_dir, 'dfs')\n stacking_method = 'vonSiebenthal'\n series_format = 'dfReg%05d.vtk'\n elif cfg_general.surrogate_type == 1:\n stack_dir = os.path.join(cfg_general.root_dir, 'stacks_us')\n surrogate_dir = os.path.join(cfg_general.root_dir, cfg_general.us_dir)\n stacking_method = 'ultrasound'\n series_format = '%05d.png'\n elif cfg_general.surrogate_type == 2:\n stack_dir = os.path.join(cfg_general.root_dir, 'stacks_navi')\n surrogate_dir = os.path.join(registration2d_dir, 'dfs')\n stacking_method = 'pusterla'\n series_format = 'dfReg%05d.vtk'\n else:\n try:\n surrogate_dir = os.path.join(cfg_general.root_dir, cfg_general.input_dir)\n except:\n sys.exit('Surrogate not correctly defined')\n\n # Assert\n if not os.path.isdir(surrogate_dir):\n sys.exit('Path to surrogate data does not exist')\n\n if opt.stacking:\n print('STACKING...')\n stacking_par_list.append('-o ' + stack_dir)\n stacking_par_list.append('-data ' + opt_data.output_dir)\n stacking_par_list.append('-surrogate ' + surrogate_dir)\n stacking_par_list.append('-startIndex 0')\n stacking_par_list.append('-endIndex ' + str(cfg_general.n_sweeps * cfg_general.n_slices - 1))\n stacking_par_list.append('-seriesFormat ' + series_format)\n stacking_par_list.append('-numberOfSweeps ' + str(cfg_general.n_sweeps))\n stacking_par_list.append('-numberOfSlicePos ' + str(cfg_general.n_slices))\n stacking_par_list.append('-stackingMethod ' + stacking_method)\n stacking_par_list.append('-save')\n\n stacking_pars = ' '.join(stacking_par_list)\n stacking_cmd = ' '.join([exe.stacking, stacking_pars])\n\n if os.path.isdir(stack_dir):\n [os.remove(os.path.join(stack_dir, f)) for f in os.listdir(stack_dir)]\n else:\n 
os.makedirs(stack_dir, exist_ok=True)\n\n os.system(stacking_cmd)\n print('[done]')\n\n # ----------------------------------------------------------\n # 3D Registration\n # ----------------------------------------------------------\n # TODO: change order of assignment (if output_dir defined, use this path always)\n if cfg_general.surrogate_type == 0 or cfg_general.surrogate_type == 2:\n registration3d_dir = os.path.join(cfg_general.root_dir, 'reg_3d_navi')\n elif cfg_general.surrogate_type == 1:\n registration3d_dir = os.path.join(cfg_general.root_dir, 'reg_3d_us')\n else:\n try:\n registration3d_dir = os.path.join(cfg_general.root_dir, cfg_general.output_dir)\n except:\n sys.exit('Data directory not correctly defined')\n\n reg3d_dirs = {\n 'registration_dir': registration3d_dir,\n 'warped_dir': os.path.join(registration3d_dir, 'warpedImage'),\n 'dfs_dir': os.path.join(registration3d_dir, 'dfs')\n }\n\n if opt.registration_3d:\n print('3D REGISTRATION...')\n refs = sorted([os.path.join(stack_dir, i) for i in os.listdir(stack_dir) if i.startswith('vol')])\n target = os.path.join(stack_dir, cfg_general.master_volume)\n print('Registration 3d: Number of reference images: ' + str(len(refs)))\n\n pairwise_registration(reg3d_dirs, cfg_reg3d, target, refs, exe.registration_3d)\n print('[done]')\n\n # ----------------------------------------------------------\n # Split data into training and test set\n # ----------------------------------------------------------\n if opt.splitting_data or ((opt.registration_2d or opt.registration_3d) and opt.regression):\n print('SPLITTING...')\n n_imgs = cfg_general.n_sweeps*cfg_general.n_slices\n n_training_imgs = cfg_general.n_training_sweeps*cfg_general.n_slices\n n_test_imgs = n_imgs - n_training_imgs\n\n # Create directories\n sub_dir = {'surrogate': surrogate_dir,\n 'dfs': reg3d_dirs['dfs_dir'],\n 'warped': reg3d_dirs['warped_dir']}\n\n for name, current_dir in sub_dir.items():\n if current_dir == surrogate_dir:\n format = 
cfg_general.input_format\n else:\n format = cfg_general.output_format\n\n files = sorted([os.path.join(current_dir, i) for i in os.listdir(current_dir) if i.endswith(format)])\n train_dir = os.path.join(current_dir, 'train')\n test_dir = os.path.join(current_dir, 'test')\n\n # Create or empty folder\n # train\n if os.path.isdir(train_dir):\n [os.remove(os.path.join(train_dir, f)) for f in os.listdir(train_dir)]\n else:\n os.makedirs(train_dir, exist_ok=True)\n\n # test\n if os.path.isdir(test_dir):\n [os.remove(os.path.join(test_dir, f)) for f in os.listdir(test_dir)]\n else:\n os.makedirs(test_dir, exist_ok=True)\n\n # copy all training files to train_dir\n for itr, file in enumerate(files[:n_training_imgs]):\n dest = os.path.join(train_dir, ('%05d.' % itr) + format)\n copyfile(file, dest)\n\n # copy all test files to train_dir\n for itr, file in enumerate(files[n_training_imgs:]):\n dest = os.path.join(test_dir, ('%05d.' % itr) + format)\n copyfile(file, dest)\n\n print('Splitting: Number of training images in' + train_dir +': ' + str(len(os.listdir(train_dir))))\n print('Splitting: Number of training images in' + test_dir +': ' + str(len(os.listdir(test_dir))))\n print('[done]')\n\n # ----------------------------------------------------------\n # GP Regression\n # ----------------------------------------------------------\n # Config files\n cfg_model = os.path.join(cfg_general.root_dir, 'config_model.json')\n with open(cfg_model, 'w') as fp:\n json.dump(cfg['gpr_model'], fp)\n\n cfg_learn = os.path.join(cfg_general.root_dir, 'config_learn.json')\n with open(cfg_learn, 'w') as fp:\n json.dump(cfg['gpr_learn'], fp)\n\n cfg_predict = os.path.join(cfg_general.root_dir, 'config_predict.json')\n with open(cfg_predict, 'w') as fp:\n json.dump(cfg['gpr_predict'], fp)\n\n # Folder structure\n subdir = cfg_gpr_model.subdir # validation, test\n gpr_dir = os.path.join(registration3d_dir, 'gpr')\n gpr_prefix = os.path.join(gpr_dir, 'gpr')\n gpr_result_dir = 
os.path.join(registration3d_dir, '{:s}_pred'.format(subdir))\n gpr_ar_dir = os.path.join(cfg_general.root_dir, cfg_general.ar_dir)\n\n # Perform regression\n if opt.regression:\n print('GP REGRESSION...')\n if os.path.isdir(gpr_dir):\n if not cfg_gpr_learn.use_precomputed:\n [os.remove(os.path.join(gpr_dir, f)) for f in os.listdir(gpr_dir)]\n else:\n # os.system('sudo mkdir {:s}'.format(gpr_dir))\n os.makedirs(gpr_dir, exist_ok=True)\n\n if os.path.isdir(gpr_result_dir):\n [os.remove(os.path.join(gpr_result_dir, f)) for f in os.listdir(gpr_result_dir)]\n else:\n os.makedirs(gpr_result_dir, exist_ok=True)\n # os.system('sudo mkdir {:s}'.format(gpr_result_dir))\n\n # Learn\n gpr_learn_par_list = []\n gpr_learn_par_list.append(cfg_model)\n gpr_learn_par_list.append(cfg_learn)\n gpr_learn_par_list.append(gpr_prefix)\n gpr_learn_par_list.append(os.path.join(surrogate_dir, 'train'))\n gpr_learn_par_list.append(os.path.join(registration3d_dir, 'train'))\n gpr_learn_par_list.append(gpr_ar_dir)\n\n gpr_learn_pars = ' '.join(gpr_learn_par_list)\n gpr_learn_cmd = ' '.join([exe.regression_learn, gpr_learn_pars])\n os.system(gpr_learn_cmd)\n\n # Predict\n gpr_predict_par_list = []\n gpr_predict_par_list.append(cfg_model)\n gpr_predict_par_list.append(cfg_predict)\n gpr_predict_par_list.append(gpr_prefix)\n gpr_predict_par_list.append(os.path.join(surrogate_dir, subdir))\n gpr_predict_par_list.append(os.path.join(registration3d_dir, subdir))\n gpr_predict_par_list.append(gpr_result_dir)\n gpr_predict_par_list.append(os.path.join(cfg_general.root_dir, cfg_general.master_volume))\n\n gp_predict_pars = ' '.join(gpr_predict_par_list)\n gp_predict_cmd = ' '.join([exe.regression_predict, gp_predict_pars])\n os.system(gp_predict_cmd)\n print('[done]')\n\n # ----------------------------------------------------------\n # Evaluation\n # ----------------------------------------------------------\n diff_dir = os.path.join(registration3d_dir, '{:s}_diff'.format(subdir))\n if 
opt.evaluation:\n print('EVALUATION...')\n if os.path.isdir(diff_dir):\n [os.remove(os.path.join(diff_dir, f)) for f in os.listdir(diff_dir)]\n else:\n os.makedirs(diff_dir, exist_ok=True)\n\n # Compute difference between ground-truth and gpr prediction\n if cfg_general.eval_warped:\n dfs_test_dir = os.path.join(reg3d_dirs['dfs_dir'], subdir)\n warped_test_dir = os.path.join(reg3d_dirs['warped_dir'], subdir)\n\n warped_true = sorted([os.path.join(warped_test_dir, i) for i in os.listdir(warped_test_dir)\n if i.endswith(cfg_general.output_format)])\n warped_pred = sorted([os.path.join(gpr_result_dir, i) for i in os.listdir(gpr_result_dir) if i.startswith('warpedImg')])\n\n stacks_true = sorted([os.path.join(stack_dir, i) for i in os.listdir(stack_dir)\n if i.startswith('vol')])\n else:\n dfs_test_dir = os.path.join(registration3d_dir, subdir)\n\n dfs_true = sorted([os.path.join(dfs_test_dir, i) for i in os.listdir(dfs_test_dir)\n if i.endswith(cfg_general.output_format)])\n dfs_pred = sorted([os.path.join(gpr_result_dir, i) for i in os.listdir(gpr_result_dir)\n if i.startswith('dfPred')])\n\n for itr in range(0, len(dfs_true)):\n # read images\n if cfg_general.eval_warped:\n sitk_imgs = {\n 'stack_true': sitk.ReadImage(stacks_true[itr]),\n 'warped_true': sitk.ReadImage(warped_true[itr]),\n 'warped_pred': sitk.ReadImage(warped_pred[itr]),\n 'df_true': sitk.ReadImage(dfs_true[itr]),\n 'df_pred': sitk.ReadImage(dfs_pred[itr])\n }\n else:\n sitk_imgs = {\n 'df_true': sitk.ReadImage(dfs_true[itr]),\n 'df_pred': sitk.ReadImage(dfs_pred[itr])\n }\n\n # Convert sitk to np\n np_imgs = {}\n for name, img in sitk_imgs.items():\n np_imgs[name] = sitk.GetArrayFromImage(img)\n\n # Qualitative comparison\n if cfg_general.eval_warped:\n np_diff = {\n 'stack': np.absolute(np_imgs['stack_true'] - np_imgs['warped_pred']),\n 'warped': np.absolute(np_imgs['warped_true']*4095 - np_imgs['warped_pred']),\n 'df': np_imgs['df_true'] - np_imgs['df_pred']\n }\n else:\n np_diff = {\n 'df': 
np_imgs['df_true'] - np_imgs['df_pred']\n }\n\n sitk_diff = {}\n for name, img in np_diff.items():\n diff = sitk.GetImageFromArray(img)\n diff.SetDirection(sitk_imgs[name + '_true'].GetDirection())\n diff.SetSpacing(sitk_imgs[name + '_true'].GetSpacing())\n diff.SetOrigin(sitk_imgs[name + '_true'].GetOrigin())\n\n sitk_diff[name] = diff\n sitk.WriteImage(diff, os.path.join(diff_dir, ('diff_' + name + '%05d.vtk' % itr)))\n\n print('[done]')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help='path to config.yaml file', type=str, default='./params/config.yaml')\n args = parser.parse_args()\n\n main(args.config)\n","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"198690420","text":"import os\nimport sys\n\ndef updating(filename):\n\tfiletoread = open(filename, \"r\");\n\tlines = filetoread.readlines();\n\tfiletoread.close();\n\n\tfiletowrite = open(filename[:-4] + \"_updated.txt\", \"w\");\n\tfor line in lines:\n\t\t# group is a list [ A/C startTime endTime sentence ]\n\t\tgroup = line.split(\"\\t\");\n\t\tspeaker = group[0];\n\n\t\tnew_line_rec = \"[C]\" + group[3];\n\n\t\tif speaker == \"agent\":\n\t\t\tnew_line_rec = \"[A]\" + group[3];\n\n\t\tfiletowrite.write(new_line_rec);\n\t\tfiletowrite.flush();\n\n\tfiletowrite.close();\n\ndef beginUpdating(filename):\n\tlistfile = open(filename, \"r\");\n\tfiles = listfile.readlines();\n\tlistfile.close();\n\n\tfor file_ in files:\n\t\tupdating(file_[:-1]);\n\nif __name__ == \"__main__\":\n\t# Begin Updating !!\n\ttry:\n\t\tos.system(\"rm -rf list.txt\");\n\t\tos.system(\"for file in ./*trx.txt; do echo $file >> list.txt; done\");\n\t\tbeginUpdating(\"list.txt\");\n\texcept:\n\t\tprint(\"Error to update trx files\");\n\t\tsys.exit();\n\n\t# Mv / Rm the files\n\ttry:\n\t\tos.system(\"mkdir -p 
uploadtodb\");\n\t\tos.system(\"mv *trx_updated.rec *trx_updated.txt *_tx.txt *_rx.txt uploadtodb\");\n\t\tos.system(\"rm -rf list.txt\");\n\t\tos.system(\"rm -rf *.mlf\");\n\t\tos.system(\"rm -rf *.txt\");\n\texcept:\n\t\tprint(\"Error to mv/rm the files after updating\");\n","sub_path":"changeTRX.py","file_name":"changeTRX.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"331567437","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2015-Present Webkul Software Pvt. Ltd. ()\n# See LICENSE file for full copyright and licensing details.\n# License URL : \n##############################################################################\n\nimport logging\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import Warning\n\n_logger = logging.getLogger(__name__)\n\n\nclass StockWizardMessage(models.TransientModel):\n _name = \"stock.inventory.wizard\"\n _description = \"Stock Inventory Wizard\"\n\n text = fields.Text(string='Message')\n\n def generated_message(self, message, name='Message/Summary'):\n partial_id = self.create({'text': message}).id\n return {\n 'name': name,\n 'view_mode': 'form',\n 'view_id': False,\n 'res_model': 'stock.inventory.wizard',\n 'res_id': partial_id,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'new',\n 'domain': '[]',\n 'context': self._context\n }\n\n def view_inventory(self):\n return {\n 'name': 'Inventory',\n 'view_mode': 'form',\n 'view_id': False,\n 'res_model': 'stock.inventory',\n 'res_id': self._context.get('inventory_id'),\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n # 'target': 'new',\n 'domain': '[]',\n 
}\n","sub_path":"webkul_addons/advance_inventory_import/wizards/wizard_message.py","file_name":"wizard_message.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528528622","text":"import maya.api.OpenMaya as om2\n\n\ndef createDAGNode(dagNodeType, nodeName=\"newDAGNode\"):\n \"\"\"\n Create a new new DAG node of type with name\n :param createNodeType: str - node type\n :param nodeName: str\n :return: MObject - new node MObject\n \"\"\"\n dagMod = om2.MDagModifier()\n newDAGNode = dagMod.createNode(dagNodeType)\n dagMod.renameNode(newDAGNode, nodeName + \"_\" + dagNodeType)\n dagMod.doIt()\n return newDAGNode\n\n\ndef createDGNode(dgNodeType, nodeName=\"newDGNode\"):\n \"\"\"\n Create a new new DG node of type with name\n :param createNodeType: str - node type\n :param nodeName: str\n :return: MObject - new node MObject\n \"\"\"\n dgMod = om2.MDGModifier()\n newDGNode = dgMod.createNode(dgNodeType)\n dgMod.renameNode(newDGNode, nodeName + \"_\" + dgNodeType)\n dgMod.doIt()\n return newDGNode\n","sub_path":"creation_utils/creation_utils.py","file_name":"creation_utils.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"107776499","text":"FILE = \"./data/rosalind_ddeg.txt\"\n\ndef main():\n\tlines = open(FILE).read().splitlines()\n\tmeta_line = lines[0]\n\tdata_lines = lines[1:]\n\n\tnum_nodes, num_edges = map(int, meta_line.split())\n\tgraph = parse_graph(data_lines, num_nodes)\n\t\n\tresult = ddeg(graph)\n\n\tout = open(\"out.txt\", \"w\")\n\tout.write(\" \".join(map(str, result)))\n\tout.close()\n\ndef parse_graph(lines, n):\n\tgraph = {i: [] for i in range(1,n+1)}\n\tfor line in lines:\n\t\ta, b = map(lambda x: int(x), line.split())\n\t\tgraph[a].append(b)\n\t\tgraph[b].append(a)\n\treturn graph\n\ndef degree_list(graph):\n\tdegrees = []\n\n\tfor i in 
sorted(graph.keys()):\n\t\tdegrees.append(len(graph[i]))\n\n\treturn degrees\n\ndef ddeg(graph):\n\tdegrees = degree_list(graph)\n\tneighbor_sum_degrees = []\n\n\tfor i in sorted(graph.keys()):\n\t\tneighbor_sum = 0\n\t\tfor neighbor in graph[i]:\n\t\t\tneighbor_sum += degrees[neighbor-1]\n\t\tneighbor_sum_degrees.append(neighbor_sum)\n\n\treturn neighbor_sum_degrees\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"heights/ddeg.py","file_name":"ddeg.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"160710867","text":"import joblib\nimport pandas as pd\nimport sys\nfrom sklearn import metrics\n\npd.set_option('display.max_columns', None)\n\nname = sys.argv[1]\nDATA = pd.read_csv('./csvFinal2.csv')\nsample = DATA[DATA['Name']== name]\n\nsample_X = sample.drop(['Label', 'Frame' ,'Name'], axis=1)\n\nsample_y = sample['Label']\n\nclassifier = joblib.load('./trainedModels/KNeighborsClassifier(n_jobs=-1, n_neighbors=1).joblib')\npreds=classifier.predict(sample_X)\n\n\ndf = pd.DataFrame(preds, columns=[\"Frame predicition\"])\nprint(df.value_counts())\nprint(df)\n\nacc = metrics.accuracy_score(sample_y, preds)\nprint(\"Accuracy: \", acc)\nprint(\"real\", sample_y.iloc[0])","sub_path":"SquatClassifier.py","file_name":"SquatClassifier.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"67505673","text":"#!/usr/bin/env python3\n# coding:utf-8\n\nimport sys\n\nimport requests\n\nfrom .style import use_style\n\n\ndef translate():\n if len(sys.argv) >= 2:\n word = sys.argv[1]\n else:\n print(use_style('内容不能为空.', 'red'))\n return\n\n url = 'http://fy.iciba.com/ajax.php?a=fy'\n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Connection': 'keep-alive',\n 'Content-Type': 
'application/x-www-form-urlencoded; charset=UTF-8',\n 'Host': 'fy.iciba.com',\n 'Origin': 'http://fy.iciba.com',\n 'Referer': 'http://fy.iciba.com/',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest',\n }\n forms = {\n 'f': 'auto',\n 't': 'auto',\n 'w': word,\n }\n r = requests.post(url, headers=headers, data=forms)\n jd = r.json()\n content = jd['content']\n if 'word_mean' in content:\n print(use_style('\\n'.join(content['word_mean']), 'yellow'))\n else:\n print(use_style(content['out'], 'yellow'))\n","sub_path":"fy/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"162781757","text":"# Copyright (c) 2016-2022 Association of Universities for Research in Astronomy, Inc. (AURA)\n# For license information see LICENSE or https://opensource.org/licenses/BSD-3-Clause\n\nimport asyncio\nimport signal\nimport functools\nfrom datetime import datetime\nfrom multiprocessing import Process\nfrom random import randint\n\nfrom scheduler.core.meta import Singleton\nfrom scheduler.core.service.service import Service\nfrom scheduler.core.service.modes import SchedulerModes\nfrom scheduler.config import config\n\nfrom .runner import StandardRunner\nfrom .task import SchedulerTask, TaskType\n\nDEFAULT_TIMEOUT = 10 # seconds\nDEFAULT_SIZE = 5 # number of tasks to run in parallel\n\n\nclass ProcessManager(metaclass=Singleton):\n \"\"\"\n Main handler for each runner, which is responsible for scheduling the task.\n \"\"\"\n\n def __init__(self, size: int = DEFAULT_SIZE, timeout: int = DEFAULT_TIMEOUT):\n self.realtime_runner = StandardRunner(1)\n self.standard_runner = StandardRunner(size)\n self.timeout = timeout\n\n def schedule_with_runner(self, task: SchedulerTask, mode: TaskType):\n \"\"\"\n Schedule a task with the corresponding 
runner for the given mode.\n \"\"\"\n if mode == TaskType.REALTIME:\n return self.realtime_runner.schedule(Process(target=task.target), task.timeout)\n elif mode == TaskType.STANDARD:\n return self.standard_runner.schedule(Process(target=task.target), task.timeout)\n else:\n raise ValueError(f'Invalid mode {mode}')\n\n def add_task(self, start: datetime, target: callable, mode: TaskType) -> None:\n task = SchedulerTask(start,\n target,\n self.timeout)\n self.schedule_with_runner(task, mode)\n\n async def run(self, scheduler: Service, period: int, mode: TaskType):\n done = asyncio.Event()\n\n def shutdown():\n done.set()\n self.shutdown()\n asyncio.get_event_loop().stop()\n\n asyncio.get_event_loop().add_signal_handler(signal.SIGINT, shutdown)\n\n while not done.is_set():\n self.add_task(datetime.now(), scheduler, mode)\n if period == 0:\n # random case #\n await asyncio.sleep(randint(1, 10))\n else:\n await asyncio.sleep(period)\n\n def shutdown(self):\n \"\"\"\n Callback for shutting down the process manager.\n \"\"\"\n self.realtime_runner.terminate_all()\n self.standard_runner.terminate_all()\n\n\ndef setup_with(mode: SchedulerModes):\n # Setup scheduler mode\n try:\n mode = SchedulerModes[config.mode.upper()]\n except KeyError:\n raise ValueError('Mode is Invalid!')\n\n def decorator_setup(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n pm = func(*args, **kwargs)\n if mode is SchedulerModes.OPERATION:\n pm.size = 1\n else:\n pm.size = config.process_manager.size\n return pm\n return wrapper\n return decorator_setup\n\n\n@setup_with(config.mode)\ndef setup_manager():\n \"\"\"Setup the manager based on the mode using setup_with decorator.\n\n Default values:\n TIMEOUT = 10 seconds\n SIZE = 5 task at the same time (Not valid for Operation).\n\n Returns:\n ProcessManager: Default Process Manager if timeout is not set.\n \"\"\"\n if config.process_manager.timeout:\n return ProcessManager(timeout=config.process_manager.timeout)\n return 
ProcessManager()\n","sub_path":"scheduler/process_manager/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539207314","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport sys\nimport random\nimport subprocess\nfrom redis import Redis\nimport time\nsys.path.append(os.path.realpath(\"..\"))\n\nimport helpers.utils as hlp\nfrom models.feed_forward import FFContinuous\n\n\nclass TRPOContinuousTrainer(FFContinuous):\n def __init__(self, sess, args):\n FFContinuous.__init__(self, sess, args)\n self.sess = sess\n self.config = args['config']\n self.env = args['environment']\n self.timesteps_per_launch = args['max_pathlength']\n self.n_workers = args['n_workers']\n self.distributed = args['distributed']\n self.timesteps_per_batch = args['timesteps_batch']\n self.n_tests = args['n_tests']\n self.max_kl = args['max_kl']\n self.normalize = args['normalize']\n self.scale = args['scale']\n self.gamma = args['gamma']\n self.value_updates = args['value_updates']\n self.save_every = args.get('save_every', 1)\n self.sums = self.sumsqrs = self.sumtime = 0\n self.timestep = 0\n self.std = args['std']\n self.create_internal()\n self.init_weights()\n self.train_scores = []\n self.test_scores = []\n np.set_printoptions(precision=6)\n\n # Worker parameters:\n self.id_worker = args['id_worker']\n self.test_mode = args['test_mode']\n\n def create_internal(self):\n self.targets = {\n \"advantage\": tf.placeholder(dtype=tf.float32, shape=[None]),\n \"return\": tf.placeholder(dtype=tf.float32, shape=[None]),\n \"action\": tf.placeholder(dtype=tf.float32, shape=[None, len(self.n_actions)]),\n \"old_mean\": tf.placeholder(dtype=tf.float32, shape=[None, len(self.n_actions)]),\n \"old_std\": tf.placeholder(dtype=tf.float32, shape=[None, len(self.n_actions)]),\n \"flat_tangent\": tf.placeholder(dtype=tf.float32, shape=[None])\n }\n actions = 
self.targets[\"action\"]\n action_means = self.action_means\n action_stds = self.action_stds\n old_action_means = self.targets[\"old_mean\"]\n old_action_stds = self.targets[\"old_std\"]\n\n log_p = tf.reduce_sum(- 0.5 * tf.square((actions - action_means) / action_stds) \\\n - 0.5 * tf.log(2 * np.pi) - tf.log(action_stds), axis=1)\n\n log_old_p = tf.reduce_sum(- 0.5 * tf.square((actions - old_action_means) / old_action_stds) \\\n - 0.5 * tf.log(2 * np.pi) - tf.log(old_action_stds), axis=1)\n\n ratio_n = tf.exp(log_p - log_old_p)\n self.loss = -tf.reduce_mean(ratio_n * self.targets[\"advantage\"])\n\n def KL_gauss(mean1, std1, mean2, std2):\n return - 0.5 * len(self.n_actions) + tf.reduce_mean(tf.reduce_sum(\n tf.log(std2) - tf.log(std1) + (tf.square(std1) + tf.square(mean1 - mean2)) / (2 * tf.square(std2)),\n axis=1))\n\n self.KL = KL_gauss(old_action_means, old_action_stds, action_means, action_stds)\n\n self.policy_grad = hlp.flatgrad(self.loss, self.weights)\n fixed_means = tf.stop_gradient(action_means)\n fixed_stds = tf.stop_gradient(action_stds)\n KL_firstfixed = KL_gauss(fixed_means, fixed_stds, action_means, action_stds)\n kl_ff_grads = tf.gradients(KL_firstfixed, self.weights)\n w_shapes = list(map(hlp.var_shape, self.weights))\n start = 0\n tangents = []\n for shape in w_shapes:\n size = np.prod(shape)\n param = tf.reshape(self.targets[\"flat_tangent\"][start:(start + size)], shape)\n tangents.append(param)\n start += size\n gvp = [tf.reduce_sum(g * t) for (g, t) in zip(kl_ff_grads, tangents)]\n self.fisher_vector_product = hlp.flatgrad(gvp, self.weights)\n\n self.get_flat = hlp.GetFlat(self.weights, self.sess)\n self.set_from_flat = hlp.SetFromFlat(self.weights, self.sess)\n\n value_loss = tf.reduce_mean((self.targets[\"return\"] - self.value) ** 2)\n\n self.value_train_op = tf.train.AdamOptimizer(0.05).minimize(value_loss, var_list=self.value_weights)\n\n def save(self, name):\n directory = 'saves/' + name + '/'\n if not os.path.exists(directory):\n 
os.makedirs(directory)\n directory += 'iteration_{}'.format(self.timestep) + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n for i, tensor in enumerate(tf.global_variables()):\n value = self.sess.run(tensor)\n np.save(directory + 'weight_{}'.format(i), value)\n\n if self.scale != 'off':\n np.save(directory + 'sums', self.sums)\n np.save(directory + 'sumsquares', self.sumsqrs)\n np.save(directory + 'sumtime', self.sumtime)\n\n np.save(directory + 'timestep', np.array([self.timestep]))\n np.save(directory + 'train_scores', np.array(self.train_scores))\n np.save(directory + 'test_scores', np.array(self.test_scores))\n print(\"Agent successfully saved in folder {}\".format(directory))\n\n def load(self, name, iteration=None):\n try:\n directory = 'saves/' + name + '/'\n if not os.path.exists(directory):\n print('That directory does not exist!')\n raise Exception\n if iteration is None:\n iteration = np.max([int(x[10:]) for x in [dir for dir in os.walk(directory)][0][1]])\n directory += 'iteration_{}'.format(iteration) + '/'\n\n for i, tensor in enumerate(tf.global_variables()):\n arr = np.load(directory + 'weight_{}.npy'.format(i))\n self.sess.run(tensor.assign(arr))\n\n if self.scale != 'off':\n self.sums = np.load(directory + 'sums.npy')\n self.sumsqrs = np.load(directory + 'sumsquares.npy')\n self.sumtime = np.load(directory + 'sumtime.npy')\n\n self.timestep = np.load(directory + 'timestep.npy')[0]\n self.train_scores = np.load(directory + 'train_scores.npy').tolist()\n self.test_scores = np.load(directory + 'test_scores.npy').tolist()\n print(\"Agent successfully loaded from folder {}\".format(directory))\n except:\n print(\"Something is wrong, loading failed\")\n\n def init_weights(self):\n self.sess.run(tf.global_variables_initializer())\n init_weights = [self.sess.run(w) for w in self.weights]\n if self.std == \"Param\":\n for i in range(len(init_weights))[-len(self.n_actions):]:\n init_weights[i] /= 10.\n if self.std == \"Train\":\n for i 
in range(len(init_weights))[-2*len(self.n_actions)::2]:\n init_weights[i] /= 10.\n\n self.set_weights(init_weights)\n\n def make_rollout(self):\n variables_server = Redis(port=12000)\n if self.scale != 'off':\n try:\n means = hlp.load_object(variables_server.get(\"means\"))\n stds = hlp.load_object(variables_server.get(\"stds\"))\n self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))\n except:\n pass\n try:\n weights = [hlp.load_object(variables_server.get(\"weight_{}\".format(i))) for i in\n range(len(self.weights))]\n self.set_weights(weights)\n except:\n pass\n env = self.env\n if self.test_mode:\n n_tasks = self.n_tests\n timesteps_per_worker = 100000000\n else:\n n_tasks = 10000\n timesteps_per_worker = self.timesteps_per_batch // self.n_workers\n\n timestep = 0\n i_task = 0\n\n paths = []\n while timestep < timesteps_per_worker and i_task < n_tasks:\n path = {}\n observations, action_tuples, rewards, dist_tuples, timestamps = [], [], [], [], []\n sums = np.zeros((1, env.get_observation_space()))\n sumsqrs = np.zeros(sums.shape)\n\n env.reset()\n while not env.done and env.timestamp < self.timesteps_per_launch:\n sums += env.features\n sumsqrs += np.square(env.features)\n observations.append(env.features[0])\n timestamps.append(env.timestamp)\n\n if not self.test_mode:\n actions, dist_tuple = self.act(env.features, return_dists=True)\n dist_tuples.append(dist_tuple)\n else:\n actions = self.act(env.features, exploration=False)\n env.step(actions)\n timestep += 1\n\n action_tuples.append(actions)\n rewards.append(env.reward)\n\n path[\"observations\"] = np.array(observations)\n path[\"action_tuples\"] = np.array(action_tuples)\n path[\"rewards\"] = np.array(rewards)\n if not self.test_mode:\n path[\"dist_tuples\"] = np.array(dist_tuples)\n path[\"timestamps\"] = np.array(timestamps)\n path[\"sumobs\"] = sums\n path[\"sumsqrobs\"] = sumsqrs\n path[\"terminated\"] = env.done\n path[\"total\"] = env.get_total_reward()\n 
paths.append(path)\n i_task += 1\n\n if self.distributed:\n variables_server.set(\"paths_{}\".format(self.id_worker), hlp.dump_object(paths))\n else:\n self.paths = paths\n\n def train(self):\n cmd_server = 'redis-server --port 12000'\n p = subprocess.Popen(cmd_server, shell=True, preexec_fn=os.setsid)\n self.variables_server = Redis(port=12000)\n means = \"-\"\n stds = \"-\"\n if self.scale != 'off':\n if self.timestep == 0:\n print(\"Time to measure features!\")\n if self.distributed:\n worker_args = \\\n {\n 'config': self.config,\n 'test_mode': False,\n }\n hlp.launch_workers(worker_args, self.n_workers)\n paths = []\n for i in range(self.n_workers):\n paths += hlp.load_object(self.variables_server.get(\"paths_{}\".format(i)))\n else:\n self.test_mode = False\n self.make_rollout()\n paths = self.paths\n\n for path in paths:\n self.sums += path[\"sumobs\"]\n self.sumsqrs += path[\"sumsqrobs\"]\n self.sumtime += path[\"observations\"].shape[0]\n\n stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))\n means = self.sums / self.sumtime\n print(\"Init means: {}\".format(means))\n print(\"Init stds: {}\".format(stds))\n self.variables_server.set(\"means\", hlp.dump_object(means))\n self.variables_server.set(\"stds\", hlp.dump_object(stds))\n self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))\n while True:\n print(\"Iteration {}\".format(self.timestep))\n start_time = time.time()\n\n if self.distributed:\n weights = self.get_weights()\n for i, weight in enumerate(weights):\n self.variables_server.set(\"weight_\" + str(i), hlp.dump_object(weight))\n worker_args = \\\n {\n 'config': self.config,\n 'test_mode': False,\n }\n hlp.launch_workers(worker_args, self.n_workers)\n paths = []\n for i in range(self.n_workers):\n paths += hlp.load_object(self.variables_server.get(\"paths_{}\".format(i)))\n else:\n self.test_mode = False\n self.make_rollout()\n paths = self.paths\n\n observations = 
np.concatenate([path[\"observations\"] for path in paths])\n actions = np.concatenate([path[\"action_tuples\"] for path in paths])\n action_means = []\n action_stds = []\n returns = []\n advantages = []\n for path in paths:\n self.sums += path[\"sumobs\"]\n self.sumsqrs += path[\"sumsqrobs\"]\n self.sumtime += path[\"rewards\"].shape[0]\n action_means += [d[0] for d in path[\"dist_tuples\"]]\n action_stds += [d[1] for d in path[\"dist_tuples\"]]\n returns += hlp.discount(path[\"rewards\"], self.gamma, path[\"timestamps\"]).tolist()\n values = self.sess.run(self.value, feed_dict={self.state_input: path[\"observations\"]})\n values = np.append(values, 0 if path[\"terminated\"] else values[-1])\n deltas = (path[\"rewards\"] + self.gamma * values[1:] - values[:-1])\n advantages += hlp.discount(deltas, self.gamma, path[\"timestamps\"]).tolist()\n returns = np.array(returns)\n advantages = np.array(advantages)\n action_means = np.array(action_means)\n action_stds = np.array(action_stds)\n\n if self.normalize == 'ranks':\n ranks = np.zeros_like(advantages)\n ranks[np.argsort(advantages)] = np.arange(ranks.shape[0], dtype=np.float32) / (ranks.shape[0] - 1)\n ranks -= 0.5\n advantages = ranks[:]\n elif self.normalize == 'center':\n advantages -= np.mean(advantages)\n advantages /= (np.std(advantages, ddof=1) + 0.001)\n\n feed_dict = {self.state_input: observations,\n self.targets[\"return\"]: returns,\n self.targets[\"advantage\"]: advantages,\n self.targets[\"old_mean\"]: action_means,\n self.targets[\"old_std\"]: action_stds,\n self.targets[\"action\"]: actions}\n\n for i in range(self.value_updates):\n self.sess.run(self.value_train_op, feed_dict)\n\n train_rewards = np.array([path[\"rewards\"].sum() for path in paths])\n train_lengths = np.array([len(path[\"rewards\"]) for path in paths])\n\n thprev = self.get_flat()\n\n def fisher_vector_product(p):\n feed_dict[self.targets[\"flat_tangent\"]] = p\n return self.sess.run(self.fisher_vector_product, feed_dict) + 0.1 * 
p\n\n g = self.sess.run(self.policy_grad, feed_dict)\n stepdir = hlp.conjugate_gradient(fisher_vector_product, -g)\n\n shs = .5 * stepdir.dot(fisher_vector_product(stepdir))\n lm = np.sqrt(shs / self.max_kl)\n fullstep = stepdir / (lm + 1e-18)\n\n def loss_kl(th):\n self.set_from_flat(th)\n return self.sess.run([self.loss, self.KL], feed_dict=feed_dict)\n\n theta = hlp.linesearch(loss_kl, thprev, fullstep, self.max_kl)\n self.set_from_flat(theta)\n\n lossafter, kloldnew = self.sess.run([self.loss, self.KL], feed_dict=feed_dict)\n\n print(\"Time for testing!\")\n\n if self.distributed:\n weights = self.get_weights()\n for i, weight in enumerate(weights):\n self.variables_server.set(\"weight_\" + str(i), hlp.dump_object(weight))\n worker_args = \\\n {\n 'config': self.config,\n 'test_mode': True,\n }\n hlp.launch_workers(worker_args, self.n_workers)\n paths = []\n for i in range(self.n_workers):\n paths += hlp.load_object(self.variables_server.get(\"paths_{}\".format(i)))\n else:\n self.test_mode = True\n self.make_rollout()\n paths = self.paths\n\n total_rewards = np.array([path[\"total\"] for path in paths])\n eplens = np.array([len(path[\"rewards\"]) for path in paths])\n\n if self.scale == 'full':\n stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))\n means = self.sums / self.sumtime\n self.variables_server.set(\"means\", hlp.dump_object(means))\n self.variables_server.set(\"stds\", hlp.dump_object(stds))\n self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))\n\n print(\"\"\"\n-------------------------------------------------------------\nMean test score: {test_scores}\nMean train score: {train_scores}\nMean test episode length: {test_eplengths}\nMean train episode length: {train_eplengths}\nMax test score: {max_test}\nMax train score: {max_train}\nKL between old and new {kl}\nLoss after update {loss}\nMean of features: {means}\nStd of features: {stds}\nTime for iteration: 
{tt}\n-------------------------------------------------------------\n \"\"\".format(\n means=means,\n stds=stds,\n test_scores=np.mean(total_rewards),\n test_eplengths=np.mean(eplens),\n train_scores=np.mean(train_rewards),\n train_eplengths=np.mean(train_lengths),\n max_test=np.max(total_rewards),\n max_train=np.max(train_rewards),\n kl=kloldnew,\n loss=lossafter,\n tt=time.time() - start_time\n ))\n self.timestep += 1\n self.train_scores.append(np.mean(train_rewards))\n self.test_scores.append(np.mean(total_rewards))\n if self.timestep % self.save_every == 0:\n self.save(self.config[:-5])","sub_path":"algos/trpo_continuous.py","file_name":"trpo_continuous.py","file_ext":"py","file_size_in_byte":17794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597328864","text":"import asyncio\r\nimport threading\r\nimport signal\r\nimport traceback\r\n\r\nfrom ..client import Client\r\nfrom ..logging import get_logger\r\nfrom ..typing import Iterable, Level, List, Optional\r\n\r\nfrom ..utils import tasks\r\nfrom ..utils._async import shutdown_loop\r\nfrom ..utils.decorators import run_once\r\nfrom ..utils.filters import Filters\r\n\r\n__all__ = (\r\n 'AbstractScanner', 'TimelyLevelScanner', 'RateLevelScanner',\r\n 'thread', 'get_loop', 'set_loop', 'run', 'differ',\r\n 'daily_listener', 'weekly_listener', 'rate_listener', 'unrate_listener',\r\n 'all_listeners'\r\n)\r\n\r\nloop = asyncio.new_event_loop()\r\n\r\nscanner_client = Client(loop=loop)\r\n\r\nlog = get_logger(__name__)\r\n\r\nall_listeners = []\r\n\r\n\r\ndef get_loop() -> asyncio.AbstractEventLoop:\r\n return loop\r\n\r\n\r\ndef set_loop(new_loop: asyncio.AbstractEventLoop) -> None:\r\n global loop\r\n loop = new_loop\r\n\r\n\r\ndef run(loop: asyncio.AbstractEventLoop) -> None:\r\n try:\r\n loop.add_signal_handler(signal.SIGINT, loop.stop)\r\n loop.add_signal_handler(signal.SIGTERM, loop.stop)\r\n\r\n except (NotImplementedError, RuntimeError):\r\n 
pass\r\n\r\n asyncio.set_event_loop(loop)\r\n\r\n try:\r\n loop.run_forever()\r\n\r\n except KeyboardInterrupt:\r\n log.info('Received the signal to terminate the event loop.')\r\n\r\n finally:\r\n log.info('Cleaning up tasks.')\r\n shutdown_loop(loop)\r\n\r\n\r\ndef update_thread_loop(thread: threading.Thread, loop: asyncio.AbstractEventLoop) -> None:\r\n thread.args = (loop,)\r\n\r\n\r\nthread = threading.Thread(target=run, args=(loop,), name='ScannerThread', daemon=True)\r\n\r\n\r\nclass AbstractScanner:\r\n def __init__(\r\n self, delay: float = 10.0, *,\r\n loop: Optional[asyncio.AbstractEventLoop] = None\r\n ) -> None:\r\n if loop is None:\r\n loop = get_loop()\r\n self.loop = loop\r\n self.runner = tasks.loop(seconds=delay, loop=loop)(self.main)\r\n self.cache = None\r\n self.clients = []\r\n all_listeners.append(self)\r\n\r\n def add_client(self, client: Client) -> None:\r\n \"\"\"Add a client to fire events for.\"\"\"\r\n if client not in self.clients:\r\n self.clients.append(client)\r\n\r\n def attach_to_loop(self, loop: asyncio.AbstractEventLoop) -> None:\r\n \"\"\"Attach the runner to another event loop.\"\"\"\r\n self.runner.loop = loop\r\n self.loop = loop\r\n\r\n def enable(self) -> None:\r\n try:\r\n self.runner.start()\r\n except RuntimeError:\r\n pass\r\n\r\n @run_once\r\n def close(self, *args, force: bool = True) -> None:\r\n \"\"\"Accurately shutdown a scanner.\r\n If force is true, cancel the runner, and wait until it finishes otherwise.\r\n \"\"\"\r\n if force:\r\n self.runner.cancel()\r\n else:\r\n self.runner.stop()\r\n\r\n async def on_error(self, exc: Exception) -> None:\r\n \"\"\"Basic event handler to print the errors if any occur.\"\"\"\r\n traceback.print_exc()\r\n\r\n async def scan(self) -> None:\r\n \"\"\"This function should contain main code of the scanner.\"\"\"\r\n pass\r\n\r\n async def main(self) -> None:\r\n \"\"\"Main function, that is basically doing all the job.\"\"\"\r\n try:\r\n await self.scan()\r\n\r\n except 
Exception as exc:\r\n await self.on_error(exc)\r\n\r\n\r\nclass TimelyLevelScanner(AbstractScanner):\r\n def __init__(\r\n self, t_type: str, delay: int = 10.0, *,\r\n loop: Optional[asyncio.AbstractEventLoop] = None\r\n ) -> None:\r\n super().__init__(delay, loop=loop)\r\n self.method = getattr(scanner_client, 'get_' + t_type)\r\n self.call_method = 'new_' + t_type\r\n\r\n async def scan(self) -> None:\r\n \"\"\"Scan for either daily or weekly levels.\"\"\"\r\n timely = await self.method()\r\n\r\n if self.cache is None:\r\n self.cache = timely\r\n return\r\n\r\n if timely.id != self.cache.id:\r\n for client in self.clients:\r\n dispatcher = client.dispatch(self.call_method, timely)\r\n self.loop.create_task(dispatcher) # schedule the execution\r\n\r\n self.cache = timely\r\n\r\n\r\nclass RateLevelScanner(AbstractScanner):\r\n def __init__(\r\n self, listen_to_rate: bool = True, delay: float = 10.0,\r\n *, loop: Optional[asyncio.AbstractEventLoop] = None\r\n ) -> None:\r\n super().__init__(delay, loop=loop)\r\n self.call_method = 'level_rated' if listen_to_rate else 'level_unrated'\r\n self.filters = Filters(strategy='awarded')\r\n self.find_new = listen_to_rate\r\n self.cache = []\r\n\r\n async def method(self, pages: int = 2) -> List[Level]:\r\n return await scanner_client.search_levels(filters=self.filters, pages=range(pages))\r\n\r\n async def scan(self) -> None:\r\n new = await self.method()\r\n\r\n if not self.cache:\r\n self.cache = new\r\n return\r\n\r\n difference = differ(self.cache, new, self.find_new)\r\n\r\n self.cache = new\r\n\r\n for level in await further_differ(difference, self.find_new):\r\n for client in self.clients:\r\n dispatcher = client.dispatch(self.call_method, level)\r\n self.loop.create_task(dispatcher)\r\n\r\n\r\nasync def further_differ(\r\n array: Iterable[Level], find_new: bool = True\r\n) -> List[Level]:\r\n array = list(array)\r\n updated = await asyncio.gather(*(level.refresh() for level in array))\r\n final = list()\r\n\r\n for 
level, new in zip(array, updated):\r\n if find_new:\r\n if new.is_rated() or new.has_coins_verified():\r\n final.append(new)\r\n else:\r\n if new is None:\r\n final.append(level)\r\n elif not new.is_rated() and not new.has_coins_verified():\r\n final.append(new)\r\n\r\n return final\r\n\r\n\r\ndef differ(before: list, after: list, find_new: bool = True) -> filter:\r\n a, b = (before, after) if find_new else (after, before)\r\n return filter(lambda elem: (elem not in a), b)\r\n\r\n\r\ndaily_listener = TimelyLevelScanner('daily')\r\nweekly_listener = TimelyLevelScanner('weekly')\r\n\r\nrate_listener = RateLevelScanner(listen_to_rate=True)\r\nunrate_listener = RateLevelScanner(listen_to_rate=False)\r\n","sub_path":"gd/events/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"587220526","text":"\"\"\"\nAdd peaks or remove peaks from the k-core histogram of a base graph and save the results\n\"\"\"\nfrom __future__ import division, print_function\nimport networkx as nx\nimport numpy as np\nfrom decomposition import kcore\nfrom experiments import kcore as kexp\nfrom experiments import histogram\nfrom noise import missing\nimport csv\n\nclass KCoreGraph(object):\n \"\"\"docstring for KCoreGraph.\"\"\"\n def __init__(self, base, sname, adjacency=False):\n super(KCoreGraph, self).__init__()\n self.sname = sname\n if adjacency:\n self.graph = nx.read_adjlist(base)\n else:\n self.graph = nx.read_edgelist(base)\n self.cnumber = kcore.KCore(self.graph).coreNumber()\n self.top = [1, 0.2, 0.1]\n self.ori_graph = self.graph.copy()\n\n def resetGraph(self):\n self.graph = self.ori_graph.copy()\n\n def removeNodes(self, nodes, count, save=False):\n remove = np.random.choice(nodes, size=count, replace=False)\n nodes = [n for n in nodes if n not in remove]\n self.graph.remove_nodes_from(remove)\n if save:\n nx.write_edgelist(self.graph, self.sname, 
delimiter='\\t', data=False)\n return self.graph, nodes\n\n def addPeak(self, k, nodes, edges, count, lst, ext_nodes):\n nodes += ['n_'+str(i) for i in xrange(lst,lst+count)]\n ni_count = np.random.randint(k,high=k+10,size=len(nodes))\n no_count = np.random.randint(0,high=10,size=len(nodes))\n for i, n in enumerate(nodes):\n candidates = list(nodes).remove(n)\n ni = np.random.choice(candidates, size=ni_count[i], replace=False)\n no = np.random.choice(ext_nodes, size=no_count[i], replace=False)\n edges += [(n, u) for u in ni]\n edges += [(n, u) for u in no]\n\n graph = self.graph.copy()\n graph.add_edge_from(edges)\n return graph, nodes, edges, lst + count\n\n def saveData(self, data, t='removed'):\n with open(self.sname, 'w') as f:\n writer = csv.writer(f,delimiter=',')\n header = [t+'_core', 'frac'] + ['error_'+str(i) for i in self.top] + \\\n ['std_'+str(i*10) for i in self.top] +\\\n ['core', 'count', 'degree', 'clustering', 'components']\n writer.writerow(header)\n for d in data:\n writer.writerow(d)\n\n def runExpRemoveNodes(self, k):\n \"\"\"\n Remove fractions of top k nodes\n \"\"\"\n step = 1\n fracs = [i for i in xrange(0, 20, step)]\n data = []\n for _c in xrange(0,5):\n self.resetGraph()\n nodes = [n for n in self.cnumber if self.cnumber[n] == k]\n count = int(len(nodes)*step/100)\n for f in fracs:\n print(_c, f, len(nodes), count)\n self.graph, nodes = self.removeNodes(nodes, count)\n\n kcore_exp = kexp.KCoreExperiment(self.graph, None, ftype='object', top=self.top)\n _, _, _, error = kcore_exp.expRandomMissingEdges(5, 10, 50)\n\n exp = histogram.KCoreHistogram(self.graph, None, ftype='object')\n cdata = exp.runExperiment()\n data += [[k,f] + [e[0] for e in error] + [e[1] for e in error] + d for d in cdata]\n\n self.saveData(data)\n\n def runExpAddPeak(self, k):\n \"\"\"\n Add a peak at given core number\n \"\"\"\n step = 1\n fracs = [i for i in xrange(0, 10, step)]\n data = []\n ext_nodes = [n for n in self.cnumber if self.cnumber[n] == k]\n count = 
int(len(ext_nodes)*step/100)\n for _c in xrange(0,5):\n lst = 0\n edges = []\n in_nodes = []\n for f in fracs:\n print(_c, f, len(nodes), count)\n graph, in_nodes, edges, lst = self.addPeak(k, in_nodes, edges, count, lst, ext_nodes)\n\n kcore_exp = kexp.KCoreExperiment(graph, None, ftype='object', top=self.top)\n _, _, _, error = kcore_exp.expRandomMissingEdges(5, 10, 50)\n\n exp = histogram.KCoreHistogram(graph, None, ftype='object')\n cdata = exp.runExperiment()\n data += [[k,f] + [e[0] for e in error] + [e[1] for e in error] + d for d in cdata]\n\n self.saveData(data, t='added')\n","sub_path":"src/data/kcore_graph.py","file_name":"kcore_graph.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"497007887","text":"\nimport sys, os\nimport re\n\nsys.path.append('/home/immersinn/Gits/DB-Interface-Stuff/Python27')\nimport soupypages_helper\nimport connectMon\n\n\n######################################################################\n#{ Page base object\n######################################################################\n\ndef BestPageType(url):\n wiki = u'http://en.wikipedia.org'\n if url.startswith(wiki):\n return WikiPageObject\n else:\n return BasePageObject\n\n\n\n######################################################################\n#{ Page base object\n######################################################################\n\n\nclass BasePageObject():\n\n def __init__(self,\n info,\n source='url',):\n if source=='url':\n self.url = info['url']\n self.pageCon = info['mdb_page']\n self.pageInfoCon = info['mdb_page_info']\n self._prep()\n self._initialize()\n elif source=='mdb':\n # use to update incoming links\n # use to do any NLP / oth analyses\n self.pageCon = info['mdb_page']\n self.pageInfoCon = info['mdb_page_info']\n\n\n\n def _initialize(self):\n self._get_meta()\n self._get_content()\n self._extract_content_links()\n self._to_mongo()\n\n def 
_prep(self):\n self._get_page()\n self._get_soup()\n\n '''This sets .page from .url''' \n def _get_page(self):\n self = soupypages_helper.get_page(self)\n\n '''This sets both the .page and .soup attributes from .url'''\n def _get_soup(self):\n self = soupypages_helper.get_soup(self)\n \n def _set_soup(self, soup):\n self.soup = soup\n\n\n '''Get stuff that looks like content from the page'''\n def _get_content(self):\n self.content = soup.body.findAll('p')\n\n\n '''Extract all links from stuff that looks like content'''\n def _extract_content_links(self):\n '''Get everything that could be content...'''\n if 'content' not in set(dir(self)):\n self._get_content()\n '''Get links in paragraphs'''\n strucs = []\n for p in self.content:\n strucs.extend(p.findAll('a'))\n strucs = [s for s in strucs if s]\n hrefs = [ent['href'] for ent in strucs\\\n if 'href' in ent._getAttrMap()]\n c_hrefs = []\n for href in hrefs:\n if href.startswith(\"http://\"):\n c_hrefs.append(href)\n elif href.startswith('/'):\n c_hrefs.append(frontMatter+href)\n elif href.startswith('#'):\n pass\n\n## hrefsNew = WikiLinks(soup)\n## conn.MongoInsert({'link':start,\n## 'source':'Wikipedia',\n## 'out_links':list(hrefsNew)}) #rem., can't be set\n## hrefsNew = hrefsNew.difference(hrefs)\n## hrefs = hrefs.union(hrefsNew)\n## '''Start the loop....if not at max deptgh'''\n## if curDep < maxDep:\n## for href in hrefsNew:\n## #only explore Wiki pages for now\n## if href.startswith(frontMatter): \n## hrefs = WikiFandR(href,conn,hrefs,curDep=curDep+1)\n\n \n self.content_links = set(c_hrefs)\n \n","sub_path":"Python-Page-Parse/sand_PageObjects.py","file_name":"sand_PageObjects.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"169571731","text":"#----------------------------------------------------------------------------------------------------------------------\n# Introdução a Programação de Computadores - 
IPC\n# Universidade do Estado do Amazonas - UEA\n# Prof. Jucimar Jr\n# Erik Atilio Silva Rey 1715310059\n# Evandro Padilha Barroso Filho 1715310009\n# Felipe Eduardo Silva de Almeida 1715310031\n# Nayara da Silva Cerdeira da Costa 1715310038\n# Joelson Pereira Lima \t\t\t 1715310060\n# Ian Gabriel Costa Machado 1215120276\n#\n#20) Construa um algoritmo em PORTUGOL para calcular a média de valores PARES e\n#ÍMPARES, de 50 números que serão digitados pelo usuário. Ao final o algoritmo deve\n#mostrar estas duas médias. O algoritmo deve mostrar também o maior número PAR\n#digitado e o menor número ÍMPAR digitado. Esses dados devem ser armazenados em\n#um vetor. Além disso, devem ser impressos os valores PARES maiores que a média\n#PAR, bem como os valores ÍMPARES menor que a média ÍMPAR.\n#-----------------------------------------------------------------------------------------------------------------------\n\npar = []\nimpa = []\nfor i in range(50):\n numero = int(input(\"Digite um número: \"))\n if numero % 2 == 0:\n par.append(numero)\n if numero % 2 != 0:\n impa.append(numero)\nc = 0\nsoma_par = 0\nwhile c < len(par):\n soma_par += int(par[c])\n c += 1\ni = 0\nsoma_impa = 0\nwhile i < len(impa):\n soma_impa += int(impa[i])\n i += 1\nsize1 = int(len(par))\nmedia_par = soma_par/size1\nsize2 = int(len(impa))\nmedia_impa = soma_impa/size2\nprint(\"Media pares: \",media_par)\nprint(\"Media impa: \",media_impa)\n\nmaior = 0\nfor y in par:\n if y > maior:\n maior = y\nmenor = 3\nfor x in impa:\n if x < menor:\n menor = x\nprint(\"Maior par: \", maior)\nprint(\"Menor impa: \",menor)\nsoma = 0\n\nfor var in par:\n if var > media_par:\n soma += 1\nsoma2 = 0\nfor ver in impa:\n if ver > media_impa:\n soma2 += 1\nprint(\"impares maior que a media \",soma2)\nprint(\"pares maiores q a media 
\",soma)\n","sub_path":"lista06/lista06_lista02_questao20.py","file_name":"lista06_lista02_questao20.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"555399745","text":"\nclass StratsMaestro:\n\n\tdef __init__(self, pars):\n\t\tself.pars = pars\n\n\n\tdef checkForTrade(self, intra):\n\t\ttrade = {\n\t\t\t\t\t'has_trade':False\n\t\t} # se não tiver trade nesse dia o dictionary fica vazio\n\t\t\n\t\tfirst = intra._core[0]\n\n\t\tfor bar in intra._core: \n\t\t\t# ENTRY POINT\n\t\t\tif trade['has_trade']==False: # se nenhum trade tiver sido encontrado, procura por trades\n\t\t\t\tif bar != intra._core[-1]:\n\t\t\t\t\tvariation = (bar['high'] - first['open'])/first['open']\n\t\t\t\t\tif variation >= self.pars.short_after:\n\t\t\t\t\t\ttrade['has_trade'] = True\n\t\t\t\t\t\ttrade['entry'] = bar\n\t\t\t\t\t\ttrade['price'] = (1+self.pars.short_after)*first['open']\n\t\t\t\t\t\ttrade['stop'] = (1+self.pars.exit_stop)*trade['price']\n\t\t\t\t\t\ttrade['target'] = (1-self.pars.exit_target)*trade['price'] # lembrar que pra short o target é menor\n\t\t\t# EXIT POINTS\n\t\t\telse: # se já tivermos encontrado algum trade, vamos procurar exits\n\t\t\t\tif bar['high'] >= trade['stop']:\n\t\t\t\t\ttrade['exit'] = bar\n\t\t\t\t\ttrade['profit'] = -self.pars.exit_stop\n\t\t\t\t\tbreak # só pararemos a execução do loop apos encontrar uma entry e um stop\n\t\t\t\tif bar['low'] <= trade['target']:\n\t\t\t\t\ttrade['exit'] = bar\n\t\t\t\t\ttrade['profit'] = self.pars.exit_target\n\t\t\t\t\tbreak\n\t\t\t\tif bar == intra._core[-1]: # se for a última barra, fecha o trade no close da ultima barra\n\t\t\t\t\ttrade['exit'] = bar\n\t\t\t\t\ttrade['profit'] = -(bar['close'] - trade['price'])/trade['price']\n\n\t\treturn trade # se o dictionary não estiver vazio, vai retornar os dados em 
trade","sub_path":"pynnystock/StratsMaestro.py","file_name":"StratsMaestro.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"79229272","text":"dice1, dice2 = [int(x) for x in input().split()]\n\ncounter = [0] * (dice1 + dice2)\n\nfor x in range(dice1):\n for y in range(dice2):\n counter[x + y] += 1\n\nmax = counter[0]\nfor x in counter:\n if x > max:\n max = x\n\nfor index in range(len(counter)):\n if counter[index] == max:\n print(index + 2)","sub_path":"DiceCup.py","file_name":"DiceCup.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"228632363","text":"import os\r\nimport sys\r\nimport mne\r\nimport numpy as np\r\n\r\n\r\ndef get_labels(subjects_dir, labels_dir, parc='aparc_sub'):\r\n labels = mne.read_labels_from_annot('fsaverage', parc=parc, subjects_dir=subjects_dir)\r\n label_files = sorted([f.path for f in os.scandir(labels_dir)])\r\n if len(label_files) < len(labels):\r\n for i, label in enumerate(labels):\r\n labels[i] = label.morph(subject_to='fsaverage', grade=4, subjects_dir=subjects_dir)\r\n labels[i].save(os.path.join(labels_dir, labels[i].name))\r\n else:\r\n labels = []\r\n for file in label_files:\r\n labels.append(mne.read_label(file, 'fsaverage'))\r\n return labels\r\n\r\n\r\ndef parcellate_stc(stc, labels, agg='mean'):\r\n parc_data = np.zeros((len(labels), stc.shape[-1]))\r\n\r\n for i, label in enumerate(labels):\r\n if label.name.startswith('unknown'):\r\n continue\r\n stc_in_label = stc.in_label(label)\r\n if agg == 'mean':\r\n parc_data[i] = np.mean(stc_in_label.data, axis=0)\r\n elif agg == 'max':\r\n parc_data[i] = np.max(stc_in_label.data, axis=0)\r\n else:\r\n raise RuntimeError('\"agg\" argument must be one of (\"mean\", \"max\")')\r\n\r\n return parc_data\r\n\r\n\r\ndef main(subj, task, data_dir, subjects_dir, ext='fsaverage', agg='mean', 
cohorts=None, window=False):\r\n psd_dir = os.path.join(data_dir, subj, 'psd')\r\n zmap_dir = os.path.join(data_dir, subj, 'zmap')\r\n output_dir = os.path.join(data_dir, subj, 'parc')\r\n os.makedirs(output_dir, exist_ok=True)\r\n\r\n if ext == 'zmap':\r\n input_dir = zmap_dir\r\n else:\r\n input_dir = psd_dir\r\n\r\n labels_dir = '/scratch/nbe/tbi-meg/veera/labels_aparc_sub'\r\n os.makedirs(labels_dir, exist_ok=True)\r\n labels = get_labels(subjects_dir, labels_dir)\r\n\r\n if window:\r\n for i in range(40, 390, 50):\r\n if cohorts is not None:\r\n stc_fname = os.path.join(input_dir, f'{subj}-{task}-{i}-{cohorts}-psd-{ext}')\r\n else:\r\n stc_fname = os.path.join(input_dir, f'{subj}-{task}-{i}-psd-{ext}')\r\n stc = mne.read_source_estimate(stc_fname, 'fsaverage')\r\n\r\n parc_stc_data = parcellate_stc(stc, labels, agg)\r\n\r\n outfile = os.path.join(output_dir, os.path.basename(stc_fname) + f'-{agg}-aparc-data.csv')\r\n print('Saving data to', outfile)\r\n np.savetxt(outfile, parc_stc_data, fmt='%.7f', delimiter=\",\")\r\n else:\r\n if cohorts is not None:\r\n stc_fname = os.path.join(input_dir, f'{subj}-{task}-{cohorts}-psd-{ext}')\r\n else:\r\n stc_fname = os.path.join(input_dir, f'{subj}-{task}-psd-{ext}')\r\n stc = mne.read_source_estimate(stc_fname, 'fsaverage')\r\n\r\n parc_stc_data = parcellate_stc(stc, labels, agg)\r\n\r\n outfile = os.path.join(output_dir, os.path.basename(stc_fname) + f'-{agg}-aparc-data.csv')\r\n print('Saving data to', outfile)\r\n np.savetxt(outfile, parc_stc_data, fmt='%.7f', delimiter=\",\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n subject = sys.argv[1]\r\n task = sys.argv[2]\r\n data_dir = sys.argv[3]\r\n subjects_dir = sys.argv[4]\r\n if len(sys.argv) > 5:\r\n cohorts = sys.argv[5]\r\n else:\r\n cohorts = None\r\n\r\n main(subject, task, data_dir, subjects_dir, ext='zmap', agg='mean', cohorts=cohorts, 
window=True)\r\n","sub_path":"pipeline-tbi/parc/parcellation.py","file_name":"parcellation.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389410009","text":"from Persona import *\r\nclass Prestamo(Persona):\r\n def __init__(self):\r\n self.Cantidadpres=0\r\n self.cantidadCuo=0\r\n self.interes=0\r\n self.estadoPrestamo=\"\"\r\n#getter\r\n def getCantidadPrestamo(self):\r\n return self.Cantidadpres\r\n def getCantidadCuotas(self):\r\n return self.cantidadCuo\r\n def getInterese(self):\r\n return self.interes\r\n def getEstados(self):\r\n return self.estadoPrestamo\r\n\r\n#setter\r\n def setCantidadPrestamo(self,Cantidadpres):\r\n self.Cantidadpres=Cantidadpres\r\n def setCantidadCuotas(self,cantidadCuo):\r\n self.cantidadCuo=cantidadCuo\r\n def setIntereses(self,Interes):\r\n self.interes=Interes\r\n def setEstados(self,estadoPrestamo):\r\n self.estadoPrestamo=estadoPrestamo\r\n#------------------------------------------------------\r\n#------------------------------------------------------\r\n def ponerCantidadPres(self,Cantidadpres):\r\n Prestamo=('150000','300000','600000')\r\n if Cantidadpres==1:\r\n return Prestamo[0]\r\n elif Cantidadpres==2:\r\n return Prestamo[1]\r\n elif Cantidadpres==3:\r\n return Prestamo[2]\r\n\r\n def ponerCantidadCuotas(self,cantidadCuo):\r\n Cuotas=('3','6','9')\r\n if Cantidadcuo==1:\r\n return Cuotas[0]\r\n elif Cantidadcuo==2:\r\n return Cuotas[1]\r\n elif Cantidadcuo==3:\r\n return Cuotas[2]\r\n\r\n def saberestadoprestamo(self,estadoPrestamo):\r\n if self.estadoPrestamo==False:\r\n return \"Ahora tiene prestamo\"\r\n else:\r\n return self.estadoPrestamo==True\r\n\r\n def activarprestamo(self,Cantidadpres,cantidadCuo):\r\n if self.Cantidadpres==\"150000\" and self.cantidadCuo==\"3\":\r\n self.estadoPrestamo=True\r\n self.pagomensual=self.Cantidadpres/self.cantidadCuo\r\n self.interes=Prestamo*0.1\r\n 
self.pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==150000 and cantidadCuo==6:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.2\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==150000 and cantidadCuo==9:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.3\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==300000 and cantidadCuo==3:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.1\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==300000 and cantidadCuo==6:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.2\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==300000 and cantidadCuo==9:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.3\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==600000 and cantidadCuo==3:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.1\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==600000 and cantidadCuo==6:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.2\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n elif Cantidadpres==600000 and cantidadCuo==9:\r\n self.estadoPrestamo=True\r\n pagomensual=Cantidadpres/cantidadCuo\r\n interes=Prestamo*0.3\r\n pagofinal=pagomensual+Interes\r\n return pagofinal\r\n \r\n \r\n\r\n\r\n \r\n","sub_path":"Prestamo.py","file_name":"Prestamo.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433801684","text":"from sklearn.metrics import accuracy_score, confusion_matrix, recall_score, precision_score, 
f1_score\nfrom unicodedata import normalize\nimport numpy as np\nimport re\nimport sys\nimport pickle\nimport math\n# ejemplo de uso\n# python3 PreprocessingWord.py Data/ Experimento1/ 100\n\n# import ptvsd\n# # Allow other computers to attach to ptvsd at this IP address and port.\n# ptvsd.enable_attach(address=('127.0.0.1', 5677), redirect_output=True)\n\n# # Pause the program until a remote debugger is attached\n# ptvsd.wait_for_attach()\n\n\ndef noCaracteresEspeciales(texto):\n\t# -> NFD y eliminar diacríticos\n\ttexto = re.sub(\n r\"([^n\\u0300-\\u036f]|n(?!\\u0303(?![\\u0300-\\u036f])))[\\u0300-\\u036f]+\", r\"\\1\",\n normalize(\"NFD\", texto), 0, re.I\n )\n\n\t# -> NFC\n\treturn re.sub('\\W', ' ', texto).lower()\n\n\ndef crearVocabulario(archCorpus, umbral):\n\t\"\"\"Con esta función creamos el vocabulario a partir del archCorpus\n\tINPUT: archCorpus- nombre del archivo con el corpus,\n\t\tumbral- numero de apariciones minima para considerarlas relevantes\n\tOUTPUT: el vocabulario\"\"\"\n\tcorpus = open(archCorpus, \"r\").readlines()\n\tvocabulario = {\"UNK\":0}\n\tcosas = [(\"\\\\n\", \"\\n\"), (\"http://link\", \"η\"), (\"\\\\\\\"\", \"\\\"\")]\n\tfor twit in corpus:\n\t\ttwitSplit=noCaracteresEspeciales(twit)\n\t\tfor c, v in cosas:\n\t\t\ttwitSplit=twitSplit.replace(c,v)\n\t\ttwitSplit=\"ŧ\"+twitSplit+\"¶\"\n\t\tfor palabra in twitSplit.split():\n\t\t\tif palabra not in vocabulario:\n\t\t\t\tvocabulario[palabra]=1\n\t\t\telse:\n\t\t\t\tvocabulario[palabra]+=1\n\tignorados=0\n\tfor palabra in list(vocabulario):\n\t\tif vocabulario[palabra]\", addAcc)\n add.grid(row=7, sticky=NSEW)\n\n sel = Button(f1, text='Select Account')\n sel.bind(\"\", selectAcc)\n sel.grid(row=8, sticky=NSEW)\n\n impAcc = Button(f1, text='Import Accounts')\n impAcc.bind(\"\", importAcc)\n impAcc.grid(row=9, sticky=NSEW)\n\n deleteAcc = Button(f1, text='Delete Account')\n deleteAcc.bind(\"\", delAcc)\n deleteAcc.grid(row=10, sticky=NSEW)\n\n f2 = ttk.Frame(n)\n\n self.var = 
BooleanVar()\n c = Checkbutton(f2, text=\"Show Browser\", variable=self.var)\n c.grid(column=0, row=1)\n print(self.var.get())\n\n check = BooleanVar()\n c = Checkbutton(f2, text=\"Auto-Checkout\", variable=check)\n c.grid(column=0, row=2)\n print(self.var.get())\n\n def timeLimit(*args):\n value = timeValue.get()\n if len(value) > 5: timeValue.set(value[:5])\n\n timeValue = StringVar()\n timeValue.trace('w', timeLimit)\n\n time = Label(f2,text='Release Time (hh:mm)')\n time.grid(column=0,row=3)\n timeEntry = Entry(f2, textvariable=timeValue)\n timeEntry.grid(column=2,row=3)\n\n f4 = ttk.Frame(n)\n\n tv2 = Treeview(f4, columns=('link', 'size'), selectmode=\"extended\")\n tv2.heading('#0', text='Website')\n tv2.column('#0', stretch=YES)\n tv2.heading('#1', text='Link')\n tv2.column('#1', stretch=YES)\n tv2.heading('#2', text='Size')\n tv2.column('#2', stretch=YES)\n tv2.grid(row=0, column=0, columnspan=3, sticky=NSEW)\n\n tkvar2 = StringVar(f4)\n tkvar2.set('Nike')\n popupMenu2 = OptionMenu(f4, tkvar2, 'Nike', *choices)\n Label(f4, text=\"Website\").grid(row=1, sticky=NSEW)\n popupMenu2.grid(row=2, sticky=NSEW)\n\n link = Label(f4, text=\"Early Link\")\n link.grid(row=3, sticky=NSEW)\n linkIn = Entry(f4)\n linkIn.grid(row=4, sticky=NSEW)\n\n size = StringVar(f4)\n sizes = {'6.5', '7', '7.5', '8', '8.5', '9', '9.5', '10', '10.5', '11', '11.5','12','12.5','13','13.5','14','14.5','15'}\n size.set('6.5') # set the default option\n sizeMenu = OptionMenu(f4, size, '6.5', *sorted(sizes))\n Label(f4, text=\"Sizes\").grid(row=5, sticky=NSEW)\n sizeMenu.grid(row=6, sticky=NSEW)\n\n addShoe = Button(f4, text='Add Shoe')\n addShoe.bind(\"\", addSh)\n addShoe.grid(row=7, sticky=NSEW)\n\n sel2 = Button(f4, text='Select Shoe')\n sel2.bind(\"\", selectShoe)\n sel2.grid(row=8, sticky=NSEW)\n\n delShoe = Button(f4, text='Delete Shoe')\n delShoe.bind(\"\", deleteShoe)\n delShoe.grid(row=9, sticky=NSEW)\n\n f5 = ttk.Frame(n) # proxy list\n\n tv3 = Treeview(f5, selectmode=\"extended\") 
# proxy\n tv3.heading('#0', text='Proxy')\n tv3.column('#0', stretch=YES)\n tv3.grid(row=0, column=0, sticky='w')\n\n imp = Button(f5, text='Import Proxies')\n imp.bind(\"\", importProxy)\n imp.grid(row=2, sticky=NSEW)\n\n selProx = Button(f5, text='Select Proxy')\n selProx.bind(\"\", selectProxy)\n selProx.grid(row=3, sticky=NSEW)\n\n f6 = ttk.Frame(n)\n\n tv4 = Treeview(f6, columns=('user', 'pass', 'link', 'size', 'proxy'), selectmode=\"extended\")\n tv4.heading('#0', text='Website')\n tv4.column('#0', width=100)\n tv4.heading('#1', text='Username')\n tv4.column('#1', width=100)\n tv4.heading('#2', text='Password')\n tv4.column('#2', width=100)\n tv4.heading('#3', text='Link')\n tv4.column('#3', width=100)\n tv4.heading('#4', text='Size')\n tv4.column('#4', width=100)\n tv4.heading('#5', text='Proxy')\n tv4.column('#5', width=100)\n tv4.grid(row=0, column=0, columnspan=5, sticky=NSEW)\n\n addTsk = Button(f6, text='Add Task')\n addTsk.bind(\"\", addTask)\n addTsk.grid(row=1, sticky=NSEW)\n\n startTsk = Button(f6, text='Start Tasks')\n startTsk.bind(\"\", startTasks)\n startTsk.grid(row=2, sticky=NSEW)\n\n deleteTsk = Button(f6, text='Delete Task')\n deleteTsk.bind(\"\", deleteTask)\n deleteTsk.grid(row=3, sticky=NSEW)\n\n tv5 = Treeview(f6, columns=('email', 'task'), selectmode=\"extended\")\n tv5.heading('#0', text='Website')\n tv5.column('#0', width=200)\n tv5.heading('#1', text='Email')\n tv5.column('#1', width=200)\n tv5.heading('#2', text='Task')\n tv5.column('#2', width=200)\n tv5.grid(row=4, column=0, columnspan=3, sticky=NSEW)\n\n n.add(f1, text='Site Accounts')\n n.add(f4, text='Shoe Preferences')\n n.add(f5, text='Proxy Config')\n n.add(f2, text='Misc')\n n.add(f6, text='Task Monitor')\n\n n.grid()\n\n\ndef start():\n r = Toplevel()\n r.wm_title(\"CopBot\")\n my_gui = MainApplication(r)\n r.mainloop()\n\nclass LoginFrame(Frame):\n\n def __init__(self, master):\n super().__init__(master)\n self.label_1 = Label(self, text=\"Username\")\n self.label_2 = 
Label(self, text=\"Password\")\n\n self.entry_1 = Entry(self)\n self.entry_2 = Entry(self, show=\"*\")\n self.label_1.grid(row=0, sticky=E)\n self.label_2.grid(row=1, sticky=E)\n self.entry_1.grid(row=0, column=1)\n self.entry_2.grid(row=1, column=1)\n\n self.logbtn = Button(self, text=\"Login\")\n self.logbtn.bind(self.logged(self,False))\n self.logbtn.grid(columnspan=2)\n\n self.pack()\n\n def logged(self, useServer):\n username = self.entry_1.get()\n password = self.entry_2.get()\n if useServer:\n var = 'no'\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(('108.254.120.248', 1320))\n sock.send((username + \":\" + password).encode())\n while 1:\n var = sock.recv(1024).decode()\n break\n sock.close()\n if var == 'ok':\n sock.close()\n tm.showinfo(\"Login info\", \"Welcome \"+username)\n self.destroy()\n start()\n else:\n sock.close()\n tm.showerror(\"Login error\", \"Incorrect username, password or IP Address\")\n else:\n tm.showinfo(\"Login info\", \"Welcome \"+username)\n self.destroy()\n start()\n\nroot = Tk()\nlf = LoginFrame(root)\nroot.mainloop()\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":24262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177543470","text":"import pandas as pd\nimport os\nimport numpy as np\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef ischemic_stroke_cases(df):\n df = df[(df[\"icd_id\"] == 1) | (df[\"icd_id\"] == 2)]\n print(df.shape)\n return df\n\ndef remove_unrelated_features(df, ur_f):\n df = df.drop(ur_f, axis=1)\n print(df.shape)\n return df\n\ndef remove_timestamp_features(df, d, h, m):\n df = df.drop(d + h + m, axis=1)\n print(df.shape)\n return df\n\ndef categorical_features(df, nom_f, ord_f, bl_f, b_i, ni_in, ni_out):\n # nominal_features\n df[\"gender_tx\"][df[\"gender_tx\"] == \"M\"] = 1\n df[\"gender_tx\"][df[\"gender_tx\"] == \"F\"] = 0\n for i in df[nom_f]:\n df[i] = pd.to_numeric(df[i], 
errors=\"coerce\")\n df[\"opc_id\"][(df[\"opc_id\"] != 1) & (df[\"opc_id\"] != 2) & (df[\"opc_id\"] != 3)] = np.nan\n df[\"toast_id\"][(df[\"toast_id\"] != 1) & (df[\"toast_id\"] != 2) & (df[\"toast_id\"] != 3) & (\n df[\"toast_id\"] != 4) & (df[\"toast_id\"] != 5)] = np.nan\n df[\"offdt_id\"][(df[\"offdt_id\"] != 1) & (df[\"offdt_id\"] != 2) & (df[\"offdt_id\"] != 3) & (\n df[\"offdt_id\"] != 4) & (df[\"offdt_id\"] != 5)] = np.nan\n df[\"gender_tx\"][(df[\"gender_tx\"] != 1) & (df[\"gender_tx\"] != 0)] = np.nan\n\n for i in df.loc[:, \"hd_id\":\"ca_id\"]:\n df[i] = pd.to_numeric(df[i], errors=\"coerce\")\n df[i][(df[i] != 0) & (df[i] != 1) & (df[i] != 2)] = np.nan\n\n for i in df.loc[:, \"fahiid_parents_1\":\"fahiid_parents_4\"]:\n df[i] = pd.to_numeric(df[i], errors=\"coerce\")\n df[i][(df[i] != 0) & (df[i] != 1) & (df[i] != 2)] = np.nan\n\n for i in df.loc[:, \"fahiid_brsi_1\":\"fahiid_brsi_4\"]:\n df[i] = pd.to_numeric(df[i], errors=\"coerce\")\n df[i][(df[i] != 0) & (df[i] != 1) & (df[i] != 2) & (df[i] != 9)] = np.nan\n\n # ordinal_features\n for i in df[ord_f]:\n df[i] = pd.to_numeric(df[i], errors=\"coerce\")\n\n df[\"mrs_tx_1\"][(df[\"mrs_tx_1\"] != 0) & (df[\"mrs_tx_1\"] != 1) & (df[\"mrs_tx_1\"] != 2) & (\n df[\"mrs_tx_1\"] != 3) & (df[\"mrs_tx_1\"] != 4) & (df[\"mrs_tx_1\"] != 5) & (\n df[\"mrs_tx_1\"] != 6) & (df[\"mrs_tx_1\"] != 9)] = np.nan\n df[\"mrs_tx_3\"][(df[\"mrs_tx_3\"] != 0) & (df[\"mrs_tx_3\"] != 1) & (df[\"mrs_tx_3\"] != 2) & (\n df[\"mrs_tx_3\"] != 3) & (df[\"mrs_tx_3\"] != 4) & (df[\"mrs_tx_3\"] != 5) & (\n df[\"mrs_tx_3\"] != 6) & (df[\"mrs_tx_3\"] != 9)] = np.nan\n df[\"gcse_nm\"][(df[\"gcse_nm\"] != 1) & (df[\"gcse_nm\"] != 2) & (df[\"gcse_nm\"] != 3) & (\n df[\"gcse_nm\"] != 4)] = np.nan\n df[\"gcsv_nm\"][(df[\"gcsv_nm\"] != 1) & (df[\"gcsv_nm\"] != 2) & (df[\"gcsv_nm\"] != 3) & (\n df[\"gcsv_nm\"] != 4) & (df[\"gcsv_nm\"] != 5)] = np.nan\n df[\"gcsm_nm\"][(df[\"gcsm_nm\"] != 1) & (df[\"gcsm_nm\"] != 2) & (df[\"gcsm_nm\"] != 3) & (\n 
df[\"gcsm_nm\"] != 4) & (df[\"gcsm_nm\"] != 5) & (df[\"gcsm_nm\"] != 6)] = np.nan\n df[\"discharged_mrs\"][\n (df[\"discharged_mrs\"] != 0) & (df[\"discharged_mrs\"] != 1) & (df[\"discharged_mrs\"] != 2) & (\n df[\"discharged_mrs\"] != 3) & (df[\"discharged_mrs\"] != 4) & (\n df[\"discharged_mrs\"] != 5) & (df[\"discharged_mrs\"] != 6)] = np.nan\n\n # boolean\n for i in df[bl_f]:\n df[i].replace(\"1\", 1, inplace=True)\n df[i].replace(\"0\", 0, inplace=True)\n df[i].replace(\"Y\", 1, inplace=True)\n df[i].replace(\"N\", 0, inplace=True)\n df[i][(df[i] != 1) & (df[i] != 0)] = np.nan\n\n # barthel\n for i in df[b_i]:\n df[i] = pd.to_numeric(df[i], errors=\"coerce\")\n\n df[\"feeding\"][(df[\"feeding\"] != 0) & (df[\"feeding\"] != 5) & (df[\"feeding\"] != 10)] = np.nan\n df[\"transfers\"][(df[\"transfers\"] != 0) & (df[\"transfers\"] != 5) & (df[\"transfers\"] != 10) & (\n df[\"transfers\"] != 15)] = np.nan\n df[\"bathing\"][(df[\"bathing\"] != 0) & (df[\"bathing\"] != 5)] = np.nan\n df[\"toilet_use\"][(df[\"toilet_use\"] != 0) & (df[\"toilet_use\"] != 5) & (df[\"toilet_use\"] != 10)] = np.nan\n df[\"grooming\"][(df[\"grooming\"] != 0) & (df[\"grooming\"] != 5)] = np.nan\n df[\"mobility\"][\n (df[\"mobility\"] != 0) & (df[\"mobility\"] != 5) & (df[\"mobility\"] != 10) & (df[\"mobility\"] != 15)] = np.nan\n df[\"stairs\"][(df[\"stairs\"] != 0) & (df[\"stairs\"] != 5) & (df[\"stairs\"] != 10)] = np.nan\n df[\"dressing\"][(df[\"dressing\"] != 0) & (df[\"dressing\"] != 5) & (df[\"dressing\"] != 10)] = np.nan\n df[\"bowel_control\"][\n (df[\"bowel_control\"] != 0) & (df[\"bowel_control\"] != 5) & (df[\"bowel_control\"] != 10)] = np.nan\n df[\"bladder_control\"][\n (df[\"bladder_control\"] != 0) & (df[\"bladder_control\"] != 5) & (df[\"bladder_control\"] != 10)] = np.nan\n\n # nihss_in\n for i in df[ni_in]:\n df[i] = pd.to_numeric(df[i], errors=\"coerce\")\n\n df[\"nihs_1a_in\"][(df[\"nihs_1a_in\"] < 0) | (df[\"nihs_1a_in\"] > 3)] = np.nan\n 
df[\"nihs_1b_in\"][(df[\"nihs_1b_in\"] < 0) | (df[\"nihs_1b_in\"] > 2)] = np.nan\n df[\"nihs_1c_in\"][(df[\"nihs_1c_in\"] < 0) | (df[\"nihs_1c_in\"] > 2)] = np.nan\n df[\"nihs_2_in\"][(df[\"nihs_2_in\"] < 0) | (df[\"nihs_2_in\"] > 2)] = np.nan\n df[\"nihs_3_in\"][(df[\"nihs_3_in\"] < 0) | (df[\"nihs_3_in\"] > 3)] = np.nan\n df[\"nihs_4_in\"][(df[\"nihs_4_in\"] < 0) | (df[\"nihs_4_in\"] > 3)] = np.nan\n df[\"nihs_5al_in\"][(df[\"nihs_5al_in\"] < 0) | (df[\"nihs_5al_in\"] > 4)] = np.nan\n df[\"nihs_5br_in\"][(df[\"nihs_5br_in\"] < 0) | (df[\"nihs_5br_in\"] > 4)] = np.nan\n df[\"nihs_6al_in\"][(df[\"nihs_6al_in\"] < 0) | (df[\"nihs_6al_in\"] > 4)] = np.nan\n df[\"nihs_6br_in\"][(df[\"nihs_6br_in\"] < 0) | (df[\"nihs_6br_in\"] > 4)] = np.nan\n df[\"nihs_7_in\"][(df[\"nihs_7_in\"] < 0) | (df[\"nihs_7_in\"] > 2)] = np.nan\n df[\"nihs_8_in\"][(df[\"nihs_8_in\"] < 0) | (df[\"nihs_8_in\"] > 2)] = np.nan\n df[\"nihs_9_in\"][(df[\"nihs_9_in\"] < 0) | (df[\"nihs_9_in\"] > 3)] = np.nan\n df[\"nihs_10_in\"][(df[\"nihs_10_in\"] < 0) | (df[\"nihs_10_in\"] > 2)] = np.nan\n df[\"nihs_11_in\"][(df[\"nihs_11_in\"] < 0) | (df[\"nihs_11_in\"] > 2)] = np.nan\n\n # nihss_out\n for i in df[ni_out]:\n df[i] = pd.to_numeric(df[i], errors=\"coerce\")\n\n df[\"nihs_1a_out\"][(df[\"nihs_1a_out\"] < 0) | (df[\"nihs_1a_out\"] > 3)] = np.nan\n df[\"nihs_1b_out\"][(df[\"nihs_1b_out\"] < 0) | (df[\"nihs_1b_out\"] > 2)] = np.nan\n df[\"nihs_1c_out\"][(df[\"nihs_1c_out\"] < 0) | (df[\"nihs_1c_out\"] > 2)] = np.nan\n df[\"nihs_2_out\"][(df[\"nihs_2_out\"] < 0) | (df[\"nihs_2_out\"] > 2)] = np.nan\n df[\"nihs_3_out\"][(df[\"nihs_3_out\"] < 0) | (df[\"nihs_3_out\"] > 3)] = np.nan\n df[\"nihs_4_out\"][(df[\"nihs_4_out\"] < 0) | (df[\"nihs_4_out\"] > 3)] = np.nan\n df[\"nihs_5al_out\"][(df[\"nihs_5al_out\"] < 0) | (df[\"nihs_5al_out\"] > 4)] = np.nan\n df[\"nihs_5br_out\"][(df[\"nihs_5br_out\"] < 0) | (df[\"nihs_5br_out\"] > 4)] = np.nan\n df[\"nihs_6al_out\"][(df[\"nihs_6al_out\"] < 0) | 
(df[\"nihs_6al_out\"] > 4)] = np.nan\n df[\"nihs_6br_out\"][(df[\"nihs_6br_out\"] < 0) | (df[\"nihs_6br_out\"] > 4)] = np.nan\n df[\"nihs_7_out\"][(df[\"nihs_7_out\"] < 0) | (df[\"nihs_7_out\"] > 2)] = np.nan\n df[\"nihs_8_out\"][(df[\"nihs_8_out\"] < 0) | (df[\"nihs_8_out\"] > 2)] = np.nan\n df[\"nihs_9_out\"][(df[\"nihs_9_out\"] < 0) | (df[\"nihs_9_out\"] > 3)] = np.nan\n df[\"nihs_10_out\"][(df[\"nihs_10_out\"] < 0) | (df[\"nihs_10_out\"] > 2)] = np.nan\n df[\"nihs_11_out\"][(df[\"nihs_11_out\"] < 0) | (df[\"nihs_11_out\"] > 2)] = np.nan\n\n print(df.shape)\n return df\n\ndef continuous_features(df, cont):\n # continuous\n for i in df[cont]:\n df[i][df[i] == 999.9] = np.nan\n q1 = df[i].quantile(0.25)\n q3 = df[i].quantile(0.75)\n iqr = q3 - q1\n inner_fence = 1.5 * iqr\n\n inner_fence_low = q1 - inner_fence\n inner_fence_upp = q3 + inner_fence\n df[i][(df[i] < inner_fence_low) | (df[i] > inner_fence_upp)] = np.nan\n df[i][df[i] < 0] = np.nan\n\n print(df.shape)\n return df\n\ndef remove_high_missing_features(df):\n missing_ratio = df.isnull().sum() / len(df) * 100\n missing_ratio_index = missing_ratio[missing_ratio > 40].index\n df = df.drop(missing_ratio_index, axis=1)\n df = df.sort_values(by=[\"icase_id\", \"idcase_id\"])\n print(df.shape)\n return df\n\ndef delete_error_cases(df):\n df_1 = df[[\"icase_id\", \"idcase_id\", \"feeding\", \"transfers\", \"bathing\", \"toilet_use\", \"grooming\", \"mobility\",\n \"stairs\", \"dressing\", \"bowel_control\", \"bladder_control\", \"nihs_1a_out\", \"nihs_1b_out\", \"nihs_1c_out\",\n \"nihs_2_out\", \"nihs_3_out\", \"nihs_4_out\", \"nihs_5al_out\", \"nihs_5br_out\", \"nihs_6al_out\", \"nihs_6br_out\",\n \"nihs_7_out\", \"nihs_8_out\", \"nihs_9_out\", \"nihs_10_out\", \"nihs_11_out\", \"discharged_mrs\"]]\n print(df_1.shape)\n\n df_1[\n \"bi_total\"] = df_1.feeding + df_1.transfers + df_1.bathing + df_1.toilet_use + df_1.grooming + df_1.mobility + df_1.stairs + df_1.dressing + df_1.bowel_control + 
df_1.bladder_control\n df_1[\n \"nihss_total\"] = df_1.nihs_1a_out + df_1.nihs_1b_out + df_1.nihs_1c_out + df_1.nihs_2_out + df_1.nihs_3_out + df_1.nihs_4_out + df_1.nihs_5al_out + df_1.nihs_5br_out + df_1.nihs_6al_out + df_1.nihs_6br_out + df_1.nihs_7_out + df_1.nihs_8_out + df_1.nihs_9_out + df_1.nihs_10_out + df_1.nihs_11_out\n\n df_1[\"index\"] = range(0, len(df_1), 1)\n df_1.set_index(\"index\", inplace=True)\n all_0_index = df_1[(df_1[\"discharged_mrs\"] == 0) & (df_1[\"bi_total\"] == 0) & (df_1[\"nihss_total\"] == 0)].index\n df_1 = df_1.drop(df_1.index[all_0_index])\n print(df_1.shape)\n\n df_1[\"index\"] = range(0, len(df_1), 1)\n df_1.set_index(\"index\", inplace=True)\n db_0_index = df_1[(df_1[\"discharged_mrs\"] == 0) & (df_1[\"bi_total\"] == 0)].index\n df_1 = df_1.drop(df_1.index[db_0_index])\n print(df_1.shape)\n\n df_1[\"index\"] = range(0, len(df_1), 1)\n df_1.set_index(\"index\", inplace=True)\n bn_0_index = df_1[(df_1[\"bi_total\"] == 0) & (df_1[\"nihss_total\"] == 0)].index\n df_1 = df_1.drop(df_1.index[bn_0_index])\n print(df_1.shape)\n\n df_1 = df_1.dropna()\n print(df_1.shape)\n df_1[\"index\"] = range(1, len(df_1) + 1, 1)\n df_1.set_index(\"index\", inplace=True)\n\n return df_1\n\ndef outlier_detection(df, var1, var2):\n outlier_index = []\n for i in set(df[var1]):\n selected_df = df[df[var1] == i]\n selected_df_mean = selected_df[var2].mean()\n selected_df_sd = selected_df[var2].std()\n selected_df_del_index = selected_df[(selected_df[var2] < selected_df_mean - 2 * selected_df_sd) | (\n selected_df[var2] > selected_df_mean + 2 * selected_df_sd)].index.values.tolist()\n outlier_index = outlier_index + selected_df_del_index\n print(len(outlier_index))\n return outlier_index\n\n\nif __name__ == '__main__':\n # Grouping Features\n # \"icase_id\", \"idcase_id\"\n unrelated_features = [\"edu_id\", \"pro_id\", \"icd_id\", \"off_id\", \"fstatus_id_1\", \"location_id_1\", \"torg_id_1\",\n \"flu_id_1\", \"fluorg_id_1\", \"fluorg_tx_1\", 
\"fluresult_tx_1\", \"death_dt_1\", \"death_id_1\",\n \"deathsk_id_1\", \"deatho_tx_1\", \"veihdorg_id_1\", \"versorg_id_1\", \"torg_tx_1\", \"versorg_tx_1\",\n \"veihdorg_tx_1\", \"fstatus_id_3\", \"location_id_3\", \"torg_id_3\", \"flu_id_3\", \"fluorg_id_3\",\n \"fluorg_tx_3\", \"fluresult_tx_3\", \"deatho_tx_3\", \"versorg_id_3\", \"veihdorg_id_3\", \"torg_tx_3\",\n \"versorg_tx_3\", \"veihdorg_tx_3\", \"fstatus_id_6\", \"rfur_dt_6\", \"location_id_6\", \"torg_id_6\",\n \"flu_id_6\", \"fluorg_id_6\", \"fluorg_tx_6\", \"fluresult_tx_6\", \"death_dt_6\", \"death_id_6\",\n \"deathsk_id_6\", \"deatho_tx_6\", \"ve_id_6\", \"vers_fl_6\", \"verscich_id_6\", \"vers_dt_6\",\n \"versorg_id_6\", \"veihd_fl_6\", \"veihd_id_6\", \"veihd_dt_6\", \"veihdorg_id_6\", \"mrs_tx_6\",\n \"torg_tx_6\", \"versorg_tx_6\", \"veihdorg_tx_6\", \"fstatus_id_12\", \"rfur_dt_12\", \"location_id_12\",\n \"torg_id_12\", \"flu_id_12\", \"fluorg_id_12\", \"fluorg_tx_12\", \"fluresult_tx_12\", \"death_dt_12\",\n \"death_id_12\", \"deathsk_id_12\", \"deatho_tx_12\", \"ve_id_12\", \"vers_fl_12\", \"verscich_id_12\",\n \"vers_dt_12\", \"versorg_id_12\", \"veihd_fl_12\", \"veihd_id_12\", \"veihd_dt_12\", \"veihdorg_id_12\",\n \"mrs_tx_12\", \"torg_tx_12\", \"versorg_tx_12\", \"veihdorg_tx_12\", \"index\", \"iprotocol_id\",\n \"icase_id.1\", \"idcase_id.1\", \"cstatus_id\", \"org_id\", \"dctype24_id\", \"patient_id\", \"input_nm\",\n \"age_nm\", \"proot_tx\", \"itown_id\", \"addr_tx\", \"telh_tx\", \"telp_tx\", \"telf_tx\", \"ftitle_tx\",\n \"casememo_tx\", \"ivtpath_fl\", \"ivtpaah_fl\", \"nivtpa99_tx\", \"icd_tx\", \"icdtia_id\", \"icdo_tx\",\n \"toastscat_tx\", \"toastso_tx\", \"cich_id\", \"csah_id\", \"csaho_tx\", \"thdo_fl\", \"thdoo_tx\",\n \"trmot_tx\", \"om_id\", \"omwa_tx\", \"omand_id\", \"omli_id\", \"omliot_tx\", \"omliot2_tx\",\n \"amliot_tx\", \"amliot2_tx\", \"como_tx\", \"deto_tx\", \"offd_tx\", \"offdtorg_id\", \"offdtorg_tx\",\n \"nihsinti_tx\", \"nihsotti_tx\", \"brs_dt\", 
\"ctti_tx\", \"cto_tx\", \"mriti_tx\", \"mrio_tx\",\n \"ecgo_tx\", \"create_dt\", \"createstaff_id\", \"sysupd_dt\", \"sysupdstaff_id\", \"modify_nm\",\n \"iguid_ft\", \"icase_id.2\", \"idcase_id.2\", \"icase_id.3\", \"idcase_id.3\", \"index.1\",\n \"iprotocol_id.1\", \"icase_id.4\", \"idcase_id.4\", \"hdmt_id\", \"pcvamt_id\", \"pomt_id\",\n \"ua_id\", \"uamt_id\", \"urmt_id\", \"ptiamt_id\", \"hcy_nm\", \"hcmt_id\", \"hty_nm\", \"htmt_id\",\n \"dmy_nm\", \"dmmt_id\", \"padmt_id\", \"ca_tx\", \"ot_tx\", \"thishc_id\", \"iguid_ft.1\",\n \"icase_id.5\", \"idcase_id.5\", \"icase_id.6\", \"idcase_id.6\", \"index.2\", \"iprotocol_id.2\",\n \"icase_id.7\", \"cname_tx\", \"cid_id\", \"birth_dt\", \"ve_id_1\", \"ve_id_3\", \"offd_id\",\n \"nivtpa99_fl\", \"toastso_fl\", \"thdoo_fl\", \"trmot_fl\", \"omliot_fl\", \"omliot2_fl\",\n \"amliot_fl\", \"amliot2_fl\", \"como_fl\", \"deto_fl\", \"ot_id\", \"smc_nm\", \"smy_nm\", \"ecgo_fl\",\n \"omora_fl\", \"omins_fl\", \"omst_fl\", \"omns_fl\", \"cd_id\", \"tccs_id\", \"mcd_id\", \"vers_fl_1\",\n \"veihd_fl_1\", \"vers_fl_3\", \"veihd_fl_3\", \"ih_fl\", \"onset_fl\", \"ot_fl\", \"flook_fl\",\n \"fctoh_fl\", \"nivtpa_id\", \"nivtpa1_fl\", \"nivtpa2_fl\", \"nivtpa3_fl\", \"nivtpa4_fl\",\n \"nivtpa5_fl\", \"nivtpa6_fl\", \"nivtpa7_fl\", \"nivtpa8_fl\", \"nivtpa9_fl\", \"nivtpa10_fl\",\n \"nivtpa11_fl\", \"omad_fl\", \"dethoh_fl\", \"ecg_id\", \"mra_fl\", \"cta_fl\", \"dsa_fl\", \"cdr_id\",\n \"cdl_id\", \"tccsr_id\", \"tccsl_id\", \"tccsba_id\", \"mcdr_id\", \"mcdl_id\", \"mcdba_id\",\n \"mcdri_id\", \"mcdli_id\", \"vers_dt_1\", \"veihd_dt_1\", \"death_dt_3\", \"vers_dt_3\", \"veihd_dt_3\",\n \"det_id\", \"verscich_id_1\", \"veihd_id_1\", \"verscich_id_3\", \"rfur_dt_1\", \"rfur_dt_3\",\n \"veihd_id_3\", \"deathsk_id_3\"]\n\n date = [\"oh_dt\", \"onset_dt\", \"ot_dt\", \"flook_dt\", \"fct_dt\", \"nihsin_dt\", \"nihsot_dt\", \"ct_dt\", \"mri_dt\"]\n\n hour = [\"onseth_nm\", \"ottih_nm\", \"flookh_nm\", \"fcth_nm\", \"nihsinh_nm\", 
\"nihsoth_nm\", \"cth_nm\", \"mrih_nm\"]\n\n minute = [\"onsetm_nm\", \"ottim_nm\", \"flookm_nm\", \"fctm_nm\", \"nihsinm_nm\", \"nihsotm_nm\", \"ctm_nm\", \"mrim_nm\"]\n\n nominal_features = [\"opc_id\", \"toast_id\", \"offdt_id\", \"gender_tx\", \"hd_id\", \"pcva_id\",\n \"pcvaci_id\", \"pcvach_id\", \"po_id\", \"ur_id\", \"sm_id\", \"ptia_id\", \"hc_id\", \"hcht_id\",\n \"hchc_id\", \"ht_id\", \"dm_id\", \"pad_id\", \"al_id\", \"ca_id\", \"fahiid_parents_1\",\n \"fahiid_parents_2\", \"fahiid_parents_3\", \"fahiid_parents_4\", \"fahiid_brsi_1\",\n \"fahiid_brsi_2\", \"fahiid_brsi_3\", \"fahiid_brsi_4\"]\n\n ordinal_features = [\"mrs_tx_1\", \"mrs_tx_3\", \"gcse_nm\", \"gcsv_nm\", \"gcsm_nm\", \"discharged_mrs\"]\n\n boolean = [\"toastle_fl\", \"toastli_fl\", \"toastsce_fl\", \"toastsmo_fl\", \"toastsra_fl\", \"toastsdi_fl\",\n \"toastsmi_fl\", \"toastsantip_fl\", \"toastsau_fl\", \"toastshy_fl\", \"toastspr_fl\", \"toastsantit_fl\",\n \"toastsho_fl\", \"toastshys_fl\", \"toastsca_fl\", \"thda_fl\", \"thdh_fl\", \"thdi_fl\", \"thdam_fl\", \"thdv_fl\",\n \"thde_fl\", \"thdm_fl\", \"thdr_fl\", \"thdp_fl\", \"trman_fl\", \"trmas_fl\", \"trmti_fl\", \"trmhe_fl\",\n \"trmwa_fl\", \"trmia_fl\", \"trmfo_fl\", \"trmta_fl\", \"trmsd_fl\", \"trmre_fl\", \"trmen_fl\", \"trmag_fl\",\n \"trmcl_fl\", \"trmpl_fl\", \"trmlm_fl\", \"trmiv_fl\", \"trmve_fl\", \"trmng_fl\", \"trmdy_fl\", \"trmicu_fl\",\n \"trmsm_fl\", \"trmed_fl\", \"trmop_fl\", \"om_fl\", \"omas_fl\", \"omag_fl\", \"omti_fl\", \"omcl_fl\", \"omwa_fl\",\n \"ompl_fl\", \"omanh_fl\", \"omand_fl\", \"omli_fl\", \"am_fl\", \"amas_fl\", \"amag_fl\", \"amti_fl\", \"amcl_fl\",\n \"amwa_fl\", \"ampl_fl\", \"amanh_fl\", \"amand_fl\", \"amli_fl\", \"compn_fl\", \"comut_fl\", \"comug_fl\",\n \"compr_fl\", \"compu_fl\", \"comac_fl\", \"comse_fl\", \"comde_fl\", \"detst_fl\", \"dethe_fl\", \"detho_fl\",\n \"detha_fl\", \"detva_fl\", \"detre_fl\", \"detme_fl\", \"ct_fl\", \"mri_fl\", \"ecgl_fl\", \"ecga_fl\", \"ecgq_fl\",\n 
\"cortical_aca_ctr\", \"cortical_mca_ctr\", \"subcortical_aca_ctr\", \"subcortical_mca_ctr\", \"pca_cortex_ctr\",\n \"thalamus_ctr\", \"brainstem_ctr\", \"cerebellum_ctr\", \"watershed_ctr\", \"hemorrhagic_infarct_ctr\",\n \"old_stroke_ctci\", \"cortical_aca_ctl\", \"cortical_mca_ctl\", \"subcortical_aca_ctl\", \"subcortical_mca_ctl\",\n \"pca_cortex_ctl\", \"thalamus_ctl\", \"brainstem_ctl\", \"cerebellum_ctl\", \"watershed_ctl\",\n \"hemorrhagic_infarct_ctl\", \"old_stroke_ctch\", \"cortical_aca_mrir\", \"cortical_mca_mrir\",\n \"subcortical_aca_mrir\", \"subcortical_mca_mrir\", \"pca_cortex_mrir\", \"thalamus_mrir\", \"brainstem_mrir\",\n \"cerebellum_mrir\", \"watershed_mrir\", \"hemorrhagic_infarct_mrir\", \"old_stroke_mrici\", \"cortical_aca_mril\",\n \"cortical_mca_mril\", \"subcortical_aca_mril\", \"subcortical_mca_mril\", \"pca_cortex_mril\",\n \"thalamus_mril\", \"brainstem_mril\", \"cerebellum_mril\", \"watershed_mril\", \"hemorrhagic_infarct_mril\",\n \"old_stroke_mrich\"]\n\n continuous = [\"height_nm\", \"weight_nm\", \"sbp_nm\", \"dbp_nm\", \"bt_nm\", \"hr_nm\", \"rr_nm\", \"hb_nm\",\n \"hct_nm\", \"platelet_nm\", \"wbc_nm\", \"ptt1_nm\", \"ptt2_nm\", \"ptinr_nm\", \"er_nm\", \"bun_nm\",\n \"cre_nm\", \"ua_nm\", \"tcho_nm\", \"tg_nm\", \"hdl_nm\", \"alb_nm\", \"crp_nm\", \"hbac_nm\", \"ac_nm\", \"got_nm\",\n \"ldl_nm\", \"gpt_nm\", \"age\", \"hospitalised_time\"]\n\n continuous_n = [\"height_nm\", \"weight_nm\", \"sbp_nm\", \"dbp_nm\", \"bt_nm\", \"hr_nm\", \"rr_nm\", \"hb_nm\",\n \"hct_nm\", \"platelet_nm\", \"wbc_nm\", \"ptt1_nm\", \"ptt2_nm\", \"ptinr_nm\", \"er_nm\", \"bun_nm\",\n \"cre_nm\", \"ua_nm\", \"tcho_nm\", \"tg_nm\", \"hdl_nm\",\n \"ldl_nm\", \"gpt_nm\", \"age\", \"hospitalised_time\"]\n\n barthel = [\"feeding\", \"transfers\", \"bathing\", \"toilet_use\", \"grooming\", \"mobility\", \"stairs\", \"dressing\",\n \"bowel_control\", \"bladder_control\"]\n\n nihss_in = [\"nihs_1a_in\", \"nihs_1b_in\", \"nihs_1c_in\", \"nihs_2_in\", 
\"nihs_3_in\", \"nihs_4_in\", \"nihs_5al_in\",\n \"nihs_5br_in\", \"nihs_6al_in\", \"nihs_6br_in\", \"nihs_7_in\", \"nihs_8_in\", \"nihs_9_in\", \"nihs_10_in\",\n \"nihs_11_in\"]\n\n nihss_out = [\"nihs_1a_out\", \"nihs_1b_out\", \"nihs_1c_out\", \"nihs_2_out\", \"nihs_3_out\",\n \"nihs_4_out\", \"nihs_5al_out\", \"nihs_5br_out\", \"nihs_6al_out\", \"nihs_6br_out\", \"nihs_7_out\",\n \"nihs_8_out\", \"nihs_9_out\", \"nihs_10_out\", \"nihs_11_out\"]\n\n # import data\n csv_path = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", \"TSR_ALL3\", \"TSR_ALL3.csv\")\n TSR_ALL3_df = pd.read_csv(csv_path, low_memory=False)\n\n # pre_procesing\n TSR_ALL3_df1 = ischemic_stroke_cases(TSR_ALL3_df)\n TSR_ALL3_df2 = remove_unrelated_features(TSR_ALL3_df1, unrelated_features)\n TSR_ALL3_df3 = remove_timestamp_features(TSR_ALL3_df2, date, hour, minute)\n TSR_ALL3_df4 = categorical_features(TSR_ALL3_df3, nominal_features, ordinal_features, boolean, barthel, nihss_in, nihss_out)\n TSR_ALL3_df5 = continuous_features(TSR_ALL3_df4, continuous)\n TSR_ALL3_df6 = remove_high_missing_features(TSR_ALL3_df5)\n\n # save pre_processed dataset\n csv_save = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", \"TSR_ALL3\", \"TSR_ALL3_preprocessed.csv\")\n #TSR_ALL3_df6.to_csv(csv_save, index=False)\n\n # delete error cases\n TSR_ALL3_score_df = delete_error_cases(TSR_ALL3_df6)\n\n # save error cases deleted dataset\n csv_save = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", \"TSR_ALL3\", \"TSR_ALL3_score.csv\")\n #TSR_ALL3_score_df.to_csv(csv_save, index=False)\n\n # mRS & BI * NIHSS outlier detection\n TSR_ALL3_score_df = TSR_ALL3_score_df.reset_index(drop=True)\n db_outlier = outlier_detection(TSR_ALL3_score_df, \"discharged_mrs\", \"bi_total\")\n dn_outlier = outlier_detection(TSR_ALL3_score_df, \"discharged_mrs\", \"nihss_total\")\n #bn_outlier = outlier_detection(TSR_ALL3_score_df, \"bi_total\", \"nihss_total\")\n\n # delete_union = len(set(db_outlier) & 
set(dn_outlier) & set(bn_outlier))\n # delete_intersection = len(set(db_outlier) | set(dn_outlier) | set(bn_outlier))\n # delete_union_num = len(set(db_outlier) & set(dn_outlier))\n # delete_intersection_num = len(set(db_outlier) | set(dn_outlier))\n delete_union_index = list(set(db_outlier) & set(dn_outlier))\n delete_intersection_index = list(set(db_outlier) | set(dn_outlier))\n\n TSR_ALL3_score_cleaned_df = TSR_ALL3_score_df.drop(TSR_ALL3_score_df.index[delete_intersection_index])\n\n # save outlier cases deleted dataset\n csv_save = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", \"TSR_ALL3\", \"TSR_ALL3_score_cleaned.csv\")\n #TSR_ALL3_score_cleaned_df.to_csv(csv_save, index=False)\n\n # merge TSR_ALL3_score_cleaned_df to the original dataset\n TSR_ALL3_AMPUTATED_DF = pd.merge(TSR_ALL3_df6, TSR_ALL3_score_cleaned_df.iloc[:, 0:2], on=[\"icase_id\", \"idcase_id\"])\n print(TSR_ALL3_AMPUTATED_DF.shape)\n\n TSR_ALL3_AMPUTATED_DF[continuous_n] = TSR_ALL3_AMPUTATED_DF[continuous_n].fillna(9999)\n TSR_ALL3_AMPUTATED_DF[\"ih_dt\"] = pd.to_datetime(TSR_ALL3_AMPUTATED_DF[\"ih_dt\"], errors='coerce')\n TSR_ALL3_AMPUTATED_DF[\"ih_dt\"][\n (TSR_ALL3_AMPUTATED_DF[\"ih_dt\"].dt.year < 2006) | (TSR_ALL3_AMPUTATED_DF[\"ih_dt\"].dt.year > 2020)] = np.nan\n TSR_ALL3_AMPUTATED_DF = TSR_ALL3_AMPUTATED_DF.dropna()\n print(TSR_ALL3_AMPUTATED_DF.shape)\n\n csv_save = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", \"TSR_ALL3\", \"TSR_ALL3_AMPUTATED.csv\")\n #TSR_ALL3_AMPUTATED_DF.to_csv(csv_save, index=False)\n\n TSR_ALL3_TRAIN = TSR_ALL3_AMPUTATED_DF[\n TSR_ALL3_AMPUTATED_DF[\"ih_dt\"].dt.year.isin([2006, 2007, 2008, 2009, 2010, 2011])]\n TSR_ALL3_VALIDATION = TSR_ALL3_AMPUTATED_DF[\n TSR_ALL3_AMPUTATED_DF[\"ih_dt\"].dt.year.isin([2012, 2013])]\n TSR_ALL3_TEST = TSR_ALL3_AMPUTATED_DF[\n TSR_ALL3_AMPUTATED_DF[\"ih_dt\"].dt.year.isin([2014, 2015, 2016, 2017, 2018, 2019, 2020])]\n\n csv_save = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", 
\"TSR_ALL3\", \"TSR_ALL3_TRAIN.csv\")\n #TSR_ALL3_TRAIN.to_csv(csv_save, index=False)\n csv_save = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", \"TSR_ALL3\", \"TSR_ALL3_VALIDATION.csv\")\n #TSR_ALL3_VALIDATION.to_csv(csv_save, index=False)\n csv_save = os.path.join(\"..\", \"data\", \"LINKED_DATA\", \"TSR_ALL\", \"TSR_ALL3\", \"TSR_ALL3_TEST.csv\")\n #TSR_ALL3_TEST.to_csv(csv_save, index=False)\n","sub_path":"cleaning/TSR_ALL3_PREPROCESS.py","file_name":"TSR_ALL3_PREPROCESS.py","file_ext":"py","file_size_in_byte":23417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"89655003","text":"from pyspark.sql import SparkSession\nfrom pyspark.ml.feature import StandardScaler\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.sql.functions import col\n\nimport pandas as pd\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.clustering import KMeans\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyspark.sql import SQLContext\n\n# ReadFile\nFEATURES_COL = ['Pace', 'Reb Rate', 'Pts', 'Opp Pts']\npath = 'data/3years.csv'\nspark = SparkSession.builder.appName('NBA-Analysis').getOrCreate()\ndata = spark.read.csv(path, header=True, inferSchema=True)\ndata.printSchema()\n\nvecAssembler = VectorAssembler(inputCols=FEATURES_COL, outputCol=\"features\")\ndf_kmeans = vecAssembler.transform(data).select('Team', 'features')\ndf_kmeans.show()\n\ncost = np.zeros(20)\nfor k in range(2, 20):\n kmeans = KMeans().setK(k).setSeed(1).setFeaturesCol(\"features\")\n model = kmeans.fit(df_kmeans.sample(False, 0.1, seed=42))\n cost[k] = model.computeCost(df_kmeans)\n\nplt.interactive(True)\nfig, ax = plt.subplots(1, 1, figsize=(8, 6))\nax.plot(range(2, 20), cost[2:20])\nax.set_xlabel('k')\nax.set_ylabel('cost')\nplt.ioff()\nfig.show()\nplt.savefig('K_Selection.png')\n\nk = 5\nkmeans = KMeans().setK(k).setSeed(1).setFeaturesCol(\"features\")\nmodel = 
kmeans.fit(df_kmeans)\ncenters = model.clusterCenters()\n\nprint(\"Cluster Centers: \")\nfor center in centers:\n print(center)\n\ntransformed = model.transform(df_kmeans).select('Team', 'prediction')\nrows = transformed.collect()\nprint(rows[:3])\n\ndf_pred = data.join(transformed, 'Team')\npddf_pred = df_pred.toPandas().set_index('Team')\n\nthreedee = plt.figure(figsize=(12, 10)).gca(projection='3d')\nthreedee.scatter(pddf_pred['Pts'], pddf_pred['Opp Pts'], pddf_pred['Reb Rate'], s=20,\n c=pddf_pred.prediction)\nthreedee.set_xlabel('Pts')\nthreedee.set_ylabel('Opp Pts')\nthreedee.set_zlabel('Reb Rate')\nplt.interactive(True)\nplt.ioff()\nplt.show()\nplt.savefig('KMeans.png')\n","sub_path":"Clustering/ClusterExample.py","file_name":"ClusterExample.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"295514788","text":"########################################################################\n# This class handles configuration files in the so-called\n# \"INI\" style format.\n#\n# File can contain [Section]'s and initializing \"templates\",\n# which are defined by a leading \"$\"\n########################################################################\n\nimport os.path\nimport ConfigParser\nimport StringIO\n\n'''\n# Class def start\n'''\nclass Config:\n\n ########################################################################\n # Constructor\n ########################################################################\n def __init__(self):\n # Does nothing\n return\n \n ########################################################################\n # Load the file & parse into key/value pairs per section\n ########################################################################\n def loadFile(self, configFilename):\n \"\"\"\n Modify the input file as needed, pass it to the config parser & return the output.\n See https://docs.python.org/2/library/configparser.html\n Any 
expansion parameters (e.g. $BASENAME) are put into the DEFAULT section (because that is how the config\n parser works)\n :param configFilename: The file to modify, using $ for the substitution flag\n :return: a python config object. To get a value in a section use:\n config.get('Section_name', 'Param_name')\n Every parameter needs to be in a section, except for the expansion params, which need to start with $ as the first\n character on the line.\n \"\"\"\n\n # Sanity check\n if( not os.path.exists(configFilename) ):\n print ('ERROR: File \"%s\" not found' % configFilename)\n return False\n\n # Open the file & read it all in\n with open(configFilename) as handle:\n input_lines = handle.readlines()\n \n param_sub_map = {}\n default_section = []\n default_section.append('[DEFAULT]\\n')\n \n section_list = []\n for line in input_lines:\n if line and (line.startswith('#') or line.startswith(';') or line.startswith('\\n')):\n continue\n if line.startswith('$'):\n default_section.append(line.replace('$', ''))\n split_line = line.split('=')\n new_token = '%(' + split_line[0].strip()[1:] + ')s'\n param_sub_map[split_line[0].strip()] = new_token\n elif '$' in line:\n new_line = line\n for token, replacement in param_sub_map.items():\n new_line = line.replace(token, replacement)\n section_list.append(new_line)\n \n else:\n section_list.append(line)\n \n combined_sections = default_section + section_list\n stringObj = StringIO.StringIO(''.join(combined_sections))\n\n # Save the config object\n self.config = ConfigParser.SafeConfigParser()\n self.config.readfp(stringObj)\n\n return True\n\n \n # =============================================================\n # Get the value associated with the specified\n # section and keyword as a string\n #\n # If the section/keyword does not exist in the dictionary,\n # then \"None\" is returned.\n # =============================================================\n def getString( self,\n section,\n key ):\n try:\n value = self.config.get( 
section, key )\n except:\n print('ERROR: Section/Key \"%s/%s\" combination not found' % (section,key))\n return None\n return value\n\n \n # =====================================================================\n # Get the value associated with the specified\n # section and keyword as a float.\n #\n # If the keyword does not exist in the dictionary, or the value cannot\n # be represented as an int, then \"None\" is returned.\n # =====================================================================\n def getInt( self,\n section,\n key ):\n\n # Get the value as a string\n strVal = self.getString(section,key)\n # Bail if it's not there\n if( strVal is None ): \n return None\n\n # Try to convert to int\n try:\n intVal = int(strVal)\n return intVal\n # No-can-do...\n except:\n print ('ERROR: Value \"%s\" for key \"%s\" cannot be converted to int' \\\n % (strVal,key))\n return None\n\n \n # =====================================================================\n # Get the value associated with the specified\n # section and keyword as a float.\n #\n # If the keyword does not exist in the dictionary, or the value cannot\n # be represented as a float, then \"None\" is returned.\n # =====================================================================\n def getFloat( self,\n section,\n key ):\n\n # Get the value as a string\n strVal = self.getString(section,key)\n # Bail if it's not there\n if( strVal is None ): \n return None\n\n # Try to convert to float\n try:\n floatVal = float(strVal)\n return floatVal\n # No-can-do...\n except:\n print ('ERROR: Value \"%s\" for key \"%s\" cannot be converted to float' \\\n % (strVal,key))\n return None\n\n \n","sub_path":"utilities/config2.py","file_name":"config2.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568125047","text":"from aiohttp.test_utils import AioHTTPTestCase\nfrom aiologger.loggers.json import JsonLogger\nfrom 
asynctest import patch, Mock, MagicMock\n\nfrom app.api import Api\n\n\nclass AppBaseTest(AioHTTPTestCase):\n async def get_application(self):\n api = Api()\n api.app[\"mongo_db\"] = MagicMock()\n return api.app\n\n def setUp(self):\n self.logger = Mock(spec=JsonLogger)\n self.logger_patch = patch(\n \"app.api.JsonLogger.with_default_handlers\", return_value=self.logger\n )\n self.logger_patch.start()\n super().setUp()\n\n def tearDown(self):\n self.logger_patch.stop()\n self.loop.run_until_complete(self.clear_mongo())\n super().tearDown()\n\n async def clear_mongo(self):\n collections = await self.app[\"mongo_db\"].list_collection_names()\n for collection in collections:\n await self.app[\"mongo_db\"][collection].drop()\n","sub_path":"tests/base_tests.py","file_name":"base_tests.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"217240047","text":"import git, glob, pytest, sqlparse, os\nfrom automig.lib import githelp, ref_diff, diffing\nfrom .test_diffing import ARGS\n\nSHAS = {\n 'create-t1': '2801578',\n 'add-t1-col': '2ff9297cb26c9491c159af728ad6734ad06f8542',\n 'add-t2-t1a': 'f8b1048fd12b6ef41568801867b67d3ca74904f3',\n 'unsup-alter-col': 'c479bb0',\n}\nGLOB ='test/schema/*.sql'\n\ndef test_get_paths():\n repo = git.Repo()\n tree = repo.commit(SHAS['create-t1']).tree\n assert githelp.get_paths(tree, os.path.join(repo.working_dir, GLOB)) == ['test/schema/sql.sql']\n\n@pytest.mark.skip\ndef test_create():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t1-col'], GLOB)\n raise NotImplementedError\n\ndef test_addcol():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t1-col'], GLOB)\n assert diff == {\n SHAS['add-t1-col']: {'t1': ['alter table t1 add column b int;']},\n }\n\ndef test_add_multi_commit():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t2-t1a'], GLOB)\n 
assert diff == {\n SHAS['add-t1-col']: {\n 't1': ['alter table t1 add column b int;'],\n },\n SHAS['add-t2-t1a']: {\n 't1': ['create index t1a on t1 (a);'],\n 't2': ['create table t2 (a int primary key);'],\n },\n }\n\ndef test_add_multi_commit_opaque():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t2-t1a'], GLOB, opaque=True)\n assert diff == {SHAS['add-t2-t1a']: {\n 't1': ['alter table t1 add column b int;', 'create index t1a on t1 (a);'],\n 't2': ['create table t2 (a int primary key);'],\n }}\n\nMOD_COLUMN = [\n 'create table t1 (a int primary key, b int);',\n 'create table t1 (a int primary key, b int unique);',\n]\n\ndef test_error_bubbling():\n sha_table_stmts = {'sha': diffing.diff(ARGS, *map(sqlparse.parse, MOD_COLUMN))}\n errors = ref_diff.extract_errors(sha_table_stmts)\n manual = {'sha': {'t1': ['hello']}}\n remaining = ref_diff.try_repair_errors(errors, manual, sha_table_stmts)\n assert not remaining\n assert sha_table_stmts['sha']['t1'] == ['hello']\n","sub_path":"test/test_git.py","file_name":"test_git.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"476947368","text":"\"\"\"\nTokenCheck API\n\"\"\"\n\nfrom werkzeug.exceptions import BadRequest\nfrom flask import Response\nfrom flask_restx import Resource\nfrom flask_login import current_user\n\nfrom app import API\nfrom app.services import (\n FormService,\n TokenService,\n GroupService,\n FormResultService\n)\n\n\nTOKEN_CHECK_NS = API.namespace('tokens/', description='TokenCheck APIs')\n\n\n@TOKEN_CHECK_NS.route(\"/check_token\")\nclass TokenCheckAPI(Resource):\n \"\"\"\n TokenCheck API\n\n url: '/tokens/{token}/check_token'\n methods: get\n \"\"\"\n\n @API.doc(\n responses={\n 204: 'No Content',\n 400: 'Invalid data'\n },\n params={\n 'token': 'token to check'\n }\n )\n #pylint: disable=no-self-use\n def get(self, token):\n \"\"\"\n Check whether token is valid\n\n 
:param token: token to check\n \"\"\"\n token_instance = TokenService.get_by_token(token)\n if token_instance is None:\n raise BadRequest('Wrong token')\n\n token_data = TokenService.decode_token_for_check(token)\n if token_data is None:\n raise BadRequest('Wrong token') # Not enough token segments\n\n is_correct, _ = TokenService.validate_data(token_data)\n if not is_correct:\n raise BadRequest('Wrong token') # Token isn't valid\n\n form_id = token_data.get('form_id')\n form = FormService.get_by_id(form_id)\n if form is None:\n raise BadRequest('Wrong token') # Form doesn't exist\n\n group_id = token_data.get('group_id')\n if group_id is not None:\n group = GroupService.get_by_id(group_id)\n if group is None:\n raise BadRequest('Wrong token') # Group doesn't exist\n\n return Response(status=204)\n\n\n@TOKEN_CHECK_NS.route(\"/check_user\")\nclass TokenUserCheckAPI(Resource):\n \"\"\"\n TokenUserCheck API\n\n url: '/tokens/{token}/check_user'\n methods: get\n \"\"\"\n\n @API.doc(\n responses={\n 204: 'No Content',\n 400: 'Invalid data'\n },\n params={\n 'token': 'token to form'\n }\n )\n #pylint: disable=no-self-use\n def get(self, token):\n \"\"\"\n Check whether user can answer to form by using this token\n :param token: token to check\n \"\"\"\n token_instance = TokenService.get_by_token(token)\n if token_instance is None:\n raise BadRequest(\"Token doesn't exist\")\n\n if current_user.is_authenticated:\n user_can_answer = FormResultService.check_whether_user_passed_form(\n user_id=current_user.id,\n token_id=token_instance.id,\n )\n if not user_can_answer:\n raise BadRequest(\"You have already passed this form using this token\")\n\n return Response(status=204)\n","sub_path":"src/app/routers/token_check.py","file_name":"token_check.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"333489916","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 11 09:07:26 
2015\n@author: albert\n\"\"\"\n# pylint: disable=R0903\nfrom __future__ import absolute_import\nimport re\nfrom jetplex.tree import Tree\n\n\nclass Part(Tree):\n \"\"\"\n Describe the structure of\n part numbers, part revision and test types.\n \"\"\"\n def __init__(self, *children, **kwargs):\n \"\"\"Instantiates a new object\"\"\"\n Tree.__init__(self, *children, **kwargs)\n if hasattr(self, 'sn'):\n self.prog = re.compile(self.sn)\n\n def find_matches(self, ser_num, tt_list):\n \"\"\"\n Find all the part leafs,\n with mathching serial number\n and with matching test type.\n Return a list of path lists\n \"\"\"\n pns = []\n if len(self) > 0:\n for child in self:\n pns.extend(child.find_matches(ser_num, tt_list))\n else:\n if (\n self.tt in tt_list and\n self.prog is not None and\n self.prog.match(ser_num)\n ):\n pns.append(self.path_list())\n return pns\n\n def __getattr__(self, attr_name):\n \"\"\"if I don't have this attribute, try to find it at my parent\"\"\"\n if self.parent:\n return getattr(self.parent, attr_name)\n else:\n raise Exception('cannot find attribute: %s' % attr_name)\n","sub_path":"jetplex/part.py","file_name":"part.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239849924","text":"#!/usr/bin/env python\n\n\"\"\"\nsetup.py file for logtop\n\"\"\"\n\nfrom distutils.core import setup, Extension\n\n\nlogtop_module = Extension('_logtop',\n sources=['logtop_wrap.c',\n 'src/logtop.c',\n 'src/avl.c',\n 'src/history.c',\n 'src/libavl/avl.c']\n )\n\nsetup (name = 'logtop',\n version = '0.2',\n author = \"Julien Palard\",\n description = \"\"\"Live line frequency analyzer\"\"\",\n ext_modules = [logtop_module],\n py_modules = [\"logtop\"],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"580845966","text":"# 
coding=utf-8\n\n\nimport sys\n\nfileName = sys.argv[1] if(len(sys.argv) >= 2) else \"nope\"\nverbose = True \nkmerCount = {}\nkmerList = []\nadapters = {}\n\n\ndef haveOverlap(seq1, seq2):\n minOverlap = min(len(seq1), len(seq2)) - 1\n if(seq1[-minOverlap:] == seq2[:minOverlap]):\n return(seq1 + seq2[minOverlap:])\n else:\n return(\"\")\n\n\ndef updateCount(kCounter, key):\n if(key in kCounter.keys()):\n kCounter[key] += 1\n else:\n kCounter[key] = 1\n return(kCounter)\n\n\nwith open(fileName, 'r') as f:\n loop = True\n ident = \"\"\n seq = \"\"\n next(f) # skipping first line\n while(loop):\n try:\n line = next(f)\n except StopIteration:\n loop = False\n else:\n noErr, oneErr, twoErr, km = line.rstrip(\"\\n\").split(\"\\t\")\n kmerList.append(km)\n kmerCount[km] = (noErr, oneErr, twoErr)\n\nfor km in kmerList:\n ov = km\n found = True\n used = []\n while(found):\n found = False\n for km2 in kmerList:\n if(km2 not in used):\n direct = haveOverlap(ov, km2)\n reverse = haveOverlap(km2, ov)\n\n if(direct != \"\" and reverse == \"\"):\n ov = direct\n found = True\n used.append(km2)\n break\n elif(reverse != \"\" and direct == \"\"):\n ov = reverse\n found = True\n used.append(km2)\n break\n if(verbose):\n pass\n if(verbose):\n #print(\"FINAL OVERLAP\")\n print(ov)\n adapters = updateCount(adapters, ov)\nfor value, adapt in sorted([(v, k) for k, v in adapters.items()])[::-1]:\n print(adapt, value)\n","sub_path":"simplOverlap.py","file_name":"simplOverlap.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"460646158","text":"'''\n实现一个用户管理系统,可以与管理员用户进行交互(本次先不实现验证密码之类的),\n根据用户输入的指令(增删改查),可以进行相应的操作:\n 比如 1.输入delete,则让用户输入”用户名”格式字符串,根据用户名查找内存里面的数据,\n 若存在数据则将该数据移除,若用户数据不存在,则提示不存在;\n 2.输入update,则让用户输入”用户名:年龄:联系方式”格式字符串,并使用:分隔用户数据,\n 根据用户名查找内存中数据,若存在数据则将改数据更新数据,若用户数据不存在,\n 则提示不存在;\n 3. 
用户输入find,则让用户输入”用户名”格式字符串,根据用户名查找内存中数据包\n 含输入字符串的用户信息,并打印;\n 4.用户输入list,则打印所有用户信息;打印用户第一个行数据为用户信息描述,从第二行开始为用户数据;\n 5.用户输入exit,则提示用户并且保存已经修改的用户信息,退出程序;\n 注意:首次运行时候或者用户为0的时候,需提示用户先添加数据。\n'''\nUname = []\nUage = []\nUcontact = [] # 用户的三个信息分别建个列表\nuser_counts = 0 # 这个用来方便打印insert信息,及统计用户总数\nuser_id = 0\nif Uname == []: # 如果列表里没有数据,让用户输入信息并分别追加到列表\n while True:\n print(\"Please add data>>>\")\n Uname.append(input('UserName:'))\n Uage.append(input('UserAge:'))\n Ucontact.append(input('UserContact:'))\n print('A data insert>> {}:{}:{}'.format(Uname[user_counts], Uage[user_counts], Ucontact[user_counts]))\n user_counts += 1\n choice = input('continue insert? (y/n)') # 如果用户输入Y,则继续插入下个用户信息,否则退出\n if choice == 'y':\n pass\n else:\n break\n# 列表有数据后提示用户键入命令\nprint('start your service>>>')\n\nwhile True:\n print('options: delete/update/find/list/exit')\n command = input('cmd# ')\n# ________________________\n if command == 'delete':\n print('enter the user you want to delete>>>')\n tmp_ = input('delete# ') # 临时变量存着用户要删除的用户\n if tmp_ in Uname: # 如果用户输入的名字在列表,则依次删除用户信息,用户数-1\n tmp_id = Uname.index(tmp_)\n del Uname[tmp_id]\n del Uage[tmp_id]\n del Ucontact[tmp_id]\n user_counts -= 1\n else:\n print('user not exist')\n\n\n if command == 'update':\n print('enter this format: username:age:contact >>>')\n tmp_ = input('update# ')\n tmp_name = tmp_.split(':')[0] # 取出用户名看其是否存在,若存在,更新另外两项\n if tmp_name in Uname:\n tmp_id = Uname.index(tmp_name)\n Uage[tmp_id] = tmp_.split(':')[1]\n Ucontact[tmp_id] = tmp_.split(':')[2]\n print('user updated successfully')\n else:\n print('user not exist ')\n\n\n if command == 'find': # 同上\n print('input a user name>>>')\n tmp_ = input('find# ')\n if tmp_ in Uname:\n tmp_id = Uname.index(tmp_)\n print('{}:{}:{}'.format(Uname[tmp_id], Uage[tmp_id], Ucontact[tmp_id]))\n else:\n print('no such user')\n\n\n if command == 'list':\n print('__{} {:>5} {:>10}__'.format('user', 'age', 'contact'))\n lst_len = len(Uname) # 列表的长度即用户总数\n for i in range(lst_len): # 
做循环依次打印几个列表的数据\n print(' {} {:>5} {:>10}'.format(Uname[i], Uage[i], Ucontact[i]))\n\n if command == 'exit':\n break\n# 逻辑上没有什么,想想如何用一个while 实现呢?","sub_path":"P17074_YuJinZhao/first_week.py","file_name":"first_week.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119101883","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom forms import CommentForm\nfrom itertools import ifilter\nfrom models import *\nimport bitcoinrpc\nconn = bitcoinrpc.connect_to_local() #need to have a configuration file specified and need site_packages/bitcoinrpc/read_default_config() to look at config file\n#how do i connect to a remote wallet (to pay users)\n'''\nadd posts\nways to vote\nways to sort posts by 'upvotes'\ncreate a user\nchange/update password\n\n'''\n\ndef logout_view(request):\n logout(request)\n return redirect('myapp.views.index')\n \n\ndef profile(request):\n if request.user.is_authenticated():\n logged_in=True\n account_string=str(request.user)\n '''\n #shows balance on my local wallet\n bal_msg= \"Your balance is %f\" % (conn.getbalance(),)\n #validating an address\n btc_add=\"17Aze6KsuZasYk2XtRBX1ZtpVzhFZs9nU6\" #some coinbase add\n rv = conn.validateaddress(btc_add)\n if rv.isvalid:\n val_msg= \"The address that you provided is valid\"\n else:\n val_msg= \"The address that you provided is invalid, please correct\"\n #send bitcoin from your address\n if request.method == \"POST\":\n conn.sendtoaddress(btc_add, 0.0002)\n '''\n #https://en.bitcoin.it/wiki/Accounts_explained\n #1) Get account Address for the user. 
User clicks on the button, gets an address (on your wallet) that he can send BTC to and use to buy stuff with\n if request.method == \"POST\":\n new_add=conn.getnewaddress(account_string) #getaccountaddress gets same address each time, getnewaddress new address eachtime\n account_balance=conn.getbalance(account_string)\n account_addresses=conn.getaddressesbyaccount(account_string)\n else:\n logged_in=False\n return render(request, 'myapp/profile.html', locals())\n\ndef index(request):\n top_post = Post.objects.order_by('-post_date')[0]\n second_row = Post.objects.order_by('-post_date')[1:4]\n third_row = Post.objects.order_by('-post_date')[4:7]\n fourth_row = Post.objects.order_by('-post_date')[7:10]\n if request.user.is_authenticated():\n logged_in=True\n else:\n logged_in=False\n return render(request, 'myapp/index.html', locals())\n\ndef article(request,art_id,location):\n post=Post.objects.get(pk=art_id)\n if request.user.is_authenticated():\n logged_in=True\n form = CommentForm(request.POST or None)\n if request.method == \"POST\":\n if form.is_valid():\n temp = form.save(commit=False)\n parent = form['parent'].value()\n temp.user=request.user\n temp.post=post\n if parent == '':\n #Set a blank path then save it to get an ID\n temp.path = []\n temp.save()\n temp.path = [temp.id]\n location=temp.id\n else:\n #Get the parent node\n node = Comment.objects.get(id=parent)\n location=node.id\n temp.depth = node.depth + 1\n '''\n temp.path = node.path\n #Store parents path then apply comment ID\n temp.save()\n temp.path.append(temp.id)\n '''\n s = str(node.path) #modded for mysql\n temp.path = eval(s)\n #store parents path than apply comment ID\n temp.save()\n id= int(temp.id)\n temp.path.append(id)\n #Final save for parents and children\n temp.save()\n return redirect('myapp.views.article', art_id,location)\n #Retrieve all comments and sort them by path\n comment_tree = Comment.objects.filter(post=post).order_by('-path')\n else:\n logged_in=False\n comment_tree = 
Comment.objects.filter(post=post).order_by('-path')\n return render(request, 'myapp/article.html', locals())\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"216610123","text":"\nclass CuentaHandler(object):\n\n def __init__(self,sucesor):\n self.sucesor = sucesor\n\n def set_sucesor(self,sucesor):\n pass\n\n def pagar(self,monto):\n pass\n\n#se crean tres cuentas con capacidades de pagar distintas\nclass cuentaUno(CuentaHandler):\n def __init__(self):\n self.capacity = 150\n\n def pagar(self,monto):\n if monto == self.capacity:\n return f'Tu pago se realizo correctamente'\n elif monto != self.capacity:\n self.set_sucesor(cuentaDos)\n print(\"Tu pago no se realizo\")\n return monto\n\n\nclass cuentaDos(CuentaHandler):\n def __init__(self):\n self.capacity = 250\n\n def pagar(self,monto):\n if monto == self.capacity:\n return f'Tu pago se realizo correctamente'\n elif monto != self.capacity:\n self.set_sucesor(cuentaTres)\n print(\"Tu pago no se realizo\")\n return monto\n\n\nclass cuentaTres(CuentaHandler):\n def __init__(self):\n self.capacity = 200\n def pagar(self, monto):\n if monto == self.capacity:\n return f'Tu pago se realizo correctamente'\n elif monto != self.capacity:\n return f'No quedan mas formas de pago'\n\n\nif __name__ == \"__main__\":\n #se crean instancias de las cuentas\n c1 = cuentaUno()\n c2 = cuentaDos()\n c3 = cuentaTres()\n\n #se llama al def pagar y se delega la responsabilidad hasta la tercer cuenta\n pagar1 = c1.pagar(200)\n pagar2 = c2.pagar(pagar1)\n pagar3 = c3.pagar(pagar2)\n\n #pruebas\n print(pagar1)\n print(pagar2)\n 
print(pagar3)\n\n\n\n\n","sub_path":"Ene-Jun-2020/almaguer-constante-juan-de-dios/Parcial1/Practica3/Ejercicio3/chain-of-responsibility.py","file_name":"chain-of-responsibility.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"396173445","text":"#!/usr/bin/env python\n#coding:utf-8\n#\n# original code from package: gameobjects\n# Home-page: http://code.google.com/p/gameobjects/\n# Author: Will McGugan\n# Download-URL: http://code.google.com/p/gameobjects/downloads/list\n#\n# Adaptation for package geoalg, API is not compatible to gameobjects.Matrix44\n# Author: mozman\n# Created: 19.04.2010\n# Purpose: 4x4 matrix math\n# module belongs to package geoalg\n# License: MIT License\n\nfrom math import sin, cos, tan\nfrom array import array\n\nclass Matrix44(object):\n _identity = (\n 1.0, 0.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 1.0\n )\n __slots__ = ('matrix',)\n\n def __init__(self, *args):\n \"\"\"\n If no parameteres are given, the Matrix44 is initialised to identity.\n If 1 parameter is given it should be an iterable with the 16 components\n of the matrix.\n If 4 parameters are given they should be 4 sequences of up to 4 values.\n Missing values in each row are padded out with values from the identity matix\n (so you can use Vector3's or tuples of 3 values).\n \"\"\"\n self.matrix = array('d', Matrix44._identity)\n nargs = len(args)\n if nargs == 0:\n return\n elif nargs == 1:\n self.matrix = array('d', args[0])\n elif nargs == 4:\n for index, row in enumerate(args):\n self.set_row(index, row)\n else:\n raise ValueError(\"Invalid count of arguments (4 row vectors \" \\\n \"or one list with 16 values).\")\n\n def __repr__(self):\n def format_row(row):\n return \"(%s)\" % \", \".join(str(value) for value in row )\n return \"Matrix44(%s)\" % \\\n \", \".join(format_row(row) for row in self.rows())\n\n def get_row(self, row):\n index = 
row * 4\n return tuple(self.matrix[index:index+4])\n\n def set_row(self, row, values):\n index = row * 4\n self.matrix[index:index+len(values)] = array('d', values)\n\n def get_col(self, col):\n \"\"\"Returns a column as a tuple of 4 values.\"\"\"\n m = self.matrix\n return (m[col], m[col+4], m[col+8], m[col+12])\n\n def set_col(self, col, values):\n \"\"\"Sets the values in a column.\"\"\"\n m = self.matrix\n a, b, c, d = values\n m[col] = float(a)\n m[col + 4] = float(b)\n m[col + 8] = float(c)\n m[col + 12] = float(d)\n\n def get_numpy_matrix_string(self):\n def format_row(row):\n return \" \".join([str(value) for value in row])\n return \"; \".join([format_row(row) for row in self.rows()])\n\n def to_list(self):\n \"\"\"Converts the matrix in to a list of values, suitable for using\n with glLoadMatrix*\n \"\"\"\n return self.matrix[:]\n\n def set(self, row0, row1, row2, row3):\n \"\"\"Sets all four rows of the matrix.\"\"\"\n for index, row in enumerate((row0, row1, row2, row3)):\n self.set_row(index, row)\n\n @classmethod\n def from_iter(cls, iterable):\n \"\"\"Creates a Matrix44 from an iterable of 16 values.\"\"\"\n matrix = cls.__new__(cls, object)\n matrix.matrix = array('d', iterable)\n if len(matrix.matrix) != 16:\n raise ValueError(\"Iterable must have 16 values\")\n return matrix\n\n def copy(self):\n return self.from_iter(self.matrix)\n __copy__ = copy\n\n @classmethod\n def identity(cls):\n \"\"\"Creates and identity Matrix44.\"\"\"\n return Matrix44()\n\n @classmethod\n def scale(cls, scale_x, scale_y=None, scale_z=None):\n \"\"\"Creates a scale Matrix44.\n If one parameter is given the scale is uniform, if three parameters are\n give the scale is different (potentialy) on each x axis.\n \"\"\"\n if scale_y is None:\n scale_y = scale_x\n if scale_z is None:\n scale_z = scale_x\n\n return cls.from_iter([\n float(scale_x), 0., 0., 0.,\n 0., float(scale_y), 0., 0.,\n 0., 0., float(scale_z), 0.,\n 0., 0., 0., 1.\n ])\n\n @classmethod\n def 
translate(cls, x, y, z):\n \"\"\"Creates a translation Matrix44 to (x, y, z).\"\"\"\n return cls.from_iter([\n 1., 0., 0., 0.,\n 0., 1., 0., 0.,\n 0., 0., 1., 0.,\n float(x), float(y), float(z), 1.\n ])\n\n @classmethod\n def x_rotate(cls, angle):\n \"\"\"Creates a Matrix44 that does a rotation about the x axis.\n\n angle -- Angle of rotation (in radians)\n \"\"\"\n cos_a = cos(angle)\n sin_a = sin(angle)\n return cls.from_iter([\n 1., 0., 0., 0.,\n 0., cos_a, sin_a, 0.,\n 0., -sin_a, cos_a, 0.,\n 0., 0., 0., 1.\n ])\n\n @classmethod\n def y_rotate(cls, angle):\n \"\"\"Creates a Matrix44 that does a rotation about the y axis.\n angle -- Angle of rotation (in radians)\n \"\"\"\n cos_a = cos(angle)\n sin_a = sin(angle)\n return cls.from_iter([\n cos_a, 0., -sin_a, 0.,\n 0., 1., 0., 0.,\n sin_a, 0., cos_a, 0.,\n 0., 0., 0., 1.\n ])\n\n @classmethod\n def z_rotate(cls, angle):\n \"\"\"Creates a Matrix44 that does a rotation about the z axis.\n\n angle -- Angle of rotation (in radians)\n \"\"\"\n cos_a = cos(angle)\n sin_a = sin(angle)\n return cls.from_iter([\n cos_a, sin_a, 0., 0.,\n -sin_a, cos_a, 0., 0.,\n 0., 0., 1., 0.,\n 0., 0., 0., 1.\n ])\n\n @classmethod\n def axis_rotate(cls, axis, angle):\n \"\"\"Creates a Matrix44 that does a rotation about an axis.\n\n axis -- A vector of the axis\n angle -- Angle of rotation\n \"\"\"\n c = cos(angle)\n s = sin(angle)\n omc = 1. 
- c\n x, y, z = axis\n return cls.from_iter([\n x*x*omc+c, y*x*omc+z*s, x*z*omc-y*s, 0.,\n x*y*omc-z*s, y*y*omc+c, y*z*omc+x*s, 0.,\n x*z*omc+y*s, y*z*omc-x*s, z*z*omc+c, 0.,\n 0., 0., 0., 1.\n ])\n\n @classmethod\n def xyz_rotate(cls, angle_x, angle_y, angle_z):\n \"\"\"Creates a Matrix44 that does a rotation about each axis.\n\n angle_x -- Angle of rotation, about x\n angle_y -- Angle of rotation, about y\n angle_z -- Angle of rotation, about z\n \"\"\"\n cx = cos(angle_x)\n sx = sin(angle_x)\n cy = cos(angle_y)\n sy = sin(angle_y)\n cz = cos(angle_z)\n sz = sin(angle_z)\n\n sxsy = sx*sy\n cxsy = cx*sy\n\n return cls.from_iter([\n cy*cz, sxsy*cz+cx*sz, -cxsy*cz+sx*sz, 0.,\n -cy*sz, -sxsy*sz+cx*cz, cxsy*sz+sx*cz, 0.,\n sy, -sx*cy, cx*cy, 0.,\n 0., 0., 0., 1.])\n\n @classmethod\n def perspective_projection(cls, left, right, top, bottom, near, far):\n \"\"\"Creates a Matrix44 that projects points in to 2d space.\n\n left -- Coordinate of left of screen\n right -- Coordination of right of screen\n top -- Coordination of the top of the screen\n bottom -- Coordination of the borrom of the screen\n near -- Coordination of the near clipping plane\n far -- Coordinate of the far clipping plane\n \"\"\"\n return cls.from_iter([\n (2.*near)/(right-left), 0., 0., 0.,\n 0., (2.*near)/(top-bottom), 0., 0.,\n (right+left)/(right-left), (top+bottom)/(top-bottom), -((far+near)/(far-near)), -1.,\n 0., 0., -((2.*far*near)/(far-near)), 0.\n ])\n\n\n @classmethod\n def perspective_projection_fov(cls, fov, aspect, near, far):\n \"\"\"Creates a Matrix44 that projects points in to 2d space\n\n fov -- The field of view (in radians)\n aspect -- The aspect ratio of the screen (width / height)\n near -- Coordinate of the near clipping plane\n far -- Coordinate of the far clipping plane\n \"\"\"\n vrange = near*tan(fov/2.)\n left = -vrange*aspect\n right = vrange*aspect\n bottom = -vrange\n top = vrange\n return cls.perspective_projection(left, right, bottom, top, near, far)\n\n 
@staticmethod\n def chain(*matrices):\n \"\"\"Compose a transformation matrix from <*matrices>.\"\"\"\n transformation = Matrix44()\n for matrix in matrices:\n transformation *= matrix\n return transformation\n\n def __hash__(self):\n \"\"\"Allows matrices to be used as keys in a dictionary.\"\"\"\n return self.matrix.__hash__()\n\n def __setitem__(self, coord, value):\n \"\"\"Set element in the Matrix44.\n is a tuple of (row, column)\n \"\"\"\n row, col = coord\n self.matrix[row * 4 + col] = float(value)\n\n def __getitem__(self, coord):\n \"\"\"Get element in the Matrix44.\n is a tuple of (row, column)\n \"\"\"\n row, col = coord\n return self.matrix[row * 4 + col]\n\n def __iter__(self):\n \"\"\"Iterates over all 16 values in the Matrix44.\"\"\"\n return iter(self.matrix)\n\n def __mul__(self, other):\n \"\"\"Returns the result of multiplying this Matrix44 by another, called\n by the * (multiply) operator.\"\"\"\n res_matrix = self.copy()\n res_matrix.__imul__(other)\n return res_matrix\n\n def __imul__(self, other):\n \"\"\"Multiplies this Matrix44 by another, called by the *= operator.\"\"\"\n m1 = self.matrix\n m2 = other.matrix\n self.matrix = array('d', [\n m1[0] * m2[0] + m1[1] * m2[4] + m1[2] * m2[8] + m1[3] * m2[12],\n m1[0] * m2[1] + m1[1] * m2[5] + m1[2] * m2[9] + m1[3] * m2[13],\n m1[0] * m2[2] + m1[1] * m2[6] + m1[2] * m2[10] + m1[3] * m2[14],\n m1[0] * m2[3] + m1[1] * m2[7] + m1[2] * m2[11] + m1[3] * m2[15],\n\n m1[4] * m2[0] + m1[5] * m2[4] + m1[6] * m2[8] + m1[7] * m2[12],\n m1[4] * m2[1] + m1[5] * m2[5] + m1[6] * m2[9] + m1[7] * m2[13],\n m1[4] * m2[2] + m1[5] * m2[6] + m1[6] * m2[10] + m1[7] * m2[14],\n m1[4] * m2[3] + m1[5] * m2[7] + m1[6] * m2[11] + m1[7] * m2[15],\n\n m1[8] * m2[0] + m1[9] * m2[4] + m1[10] * m2[8] + m1[11] * m2[12],\n m1[8] * m2[1] + m1[9] * m2[5] + m1[10] * m2[9] + m1[11] * m2[13],\n m1[8] * m2[2] + m1[9] * m2[6] + m1[10] * m2[10] + m1[11] * m2[14],\n m1[8] * m2[3] + m1[9] * m2[7] + m1[10] * m2[11] + m1[11] * m2[15],\n\n 
m1[12] * m2[0] + m1[13] * m2[4] + m1[14] * m2[8] + m1[15] * m2[12],\n m1[12] * m2[1] + m1[13] * m2[5] + m1[14] * m2[9] + m1[15] * m2[13],\n m1[12] * m2[2] + m1[13] * m2[6] + m1[14] * m2[10] + m1[15] * m2[14],\n m1[12] * m2[3] + m1[13] * m2[7] + m1[14] * m2[11] + m1[15] * m2[15]\n ])\n return self\n\n def fast_mul(self, other):\n \"\"\"Multiplies this matrix by .\n\n Assumes that both matrices have a right column of (0, 0, 0, 1). This is\n true for matrices composed of rotations, translations and scales.\n fast_mul is approximately 25% quicker than the *= operator.\n \"\"\"\n m1 = self.matrix\n m2 = other.matrix\n self.matrix = array('d', [\n m1[0] * m2[0] + m1[1] * m2[4] + m1[2] * m2[8],\n m1[0] * m2[1] + m1[1] * m2[5] + m1[2] * m2[9],\n m1[0] * m2[2] + m1[1] * m2[6] + m1[2] * m2[10],\n 0.0,\n\n m1[4] * m2[0] + m1[5] * m2[4] + m1[6] * m2[8],\n m1[4] * m2[1] + m1[5] * m2[5] + m1[6] * m2[9],\n m1[4] * m2[2] + m1[5] * m2[6] + m1[6] * m2[10],\n 0.0,\n\n m1[8] * m2[0] + m1[9] * m2[4] + m1[10] * m2[8],\n m1[8] * m2[1] + m1[9] * m2[5] + m1[10] * m2[9],\n m1[8] * m2[2] + m1[9] * m2[6] + m1[10] * m2[10],\n 0.0,\n\n m1[12] * m2[0] + m1[13] * m2[4] + m1[14] * m2[8] + m2[12],\n m1[12] * m2[1] + m1[13] * m2[5] + m1[14] * m2[9] + m2[13],\n m1[12] * m2[2] + m1[13] * m2[6] + m1[14] * m2[10] + m2[14],\n 1.0\n ])\n return self\n\n def rows(self):\n \"\"\"Returns an iterator for the rows in the Matrix44.\"\"\"\n return (self.get_row(index) for index in (0, 1, 2, 3))\n\n def columns(self):\n \"\"\"Returns an iterator for the columns in the Matrix44.\"\"\"\n return (self.get_col(index) for index in (0, 1, 2, 3))\n\n def transform(self, vector):\n \"\"\"Transforms a Vector3 and returns the result as a tuple.\"\"\"\n m = self.matrix\n x, y, z = vector\n return ( x * m[0] + y * m[4] + z * m[8] + m[12],\n x * m[1] + y * m[5] + z * m[9] + m[13],\n x * m[2] + y * m[6] + z * m[10] + m[14] )\n\n def multi_transform(self, vectors):\n \"\"\"Transform multiple vectors.\"\"\"\n return 
[self.transform(vec) for vec in vectors]\n\n def transpose(self):\n \"\"\"Swaps the rows for columns.\"\"\"\n m00, m01, m02, m03, \\\n m10, m11, m12, m13, \\\n m20, m21, m22, m23, \\\n m30, m31, m32, m33 = self.matrix\n\n self.matrix = array('d', [\n m00, m10, m20, m30,\n m01, m11, m21, m31,\n m02, m12, m22, m32,\n m03, m13, m23, m33\n ])\n\n def get_transpose(self):\n \"\"\"Returns a Matrix44 that is a copy of this, but with rows and\n columns swapped.\n \"\"\"\n matrix = self.copy()\n matrix.transpose()\n return matrix\n\n def determinant(self):\n e11, e12, e13, e14, \\\n e21, e22, e23, e24, \\\n e31, e32, e33, e34, \\\n e41, e42, e43, e44 = self.matrix\n return e11*e22*e33*e44 - e11*e22*e34*e43 + e11*e23*e34*e42 - e11*e23*e32*e44 \\\n + e11*e24*e32*e43 - e11*e24*e33*e42 - e12*e23*e34*e41 + e12*e23*e31*e44 \\\n - e12*e24*e31*e43 + e12*e24*e33*e41 - e12*e21*e33*e44 + e12*e21*e34*e43 \\\n + e13*e24*e31*e42 - e13*e24*e32*e41 + e13*e21*e32*e44 - e13*e21*e34*e42 \\\n + e13*e22*e34*e41 - e13*e22*e31*e44 - e14*e21*e32*e43 + e14*e21*e33*e42 \\\n - e14*e22*e33*e41 + e14*e22*e31*e43 - e14*e23*e31*e42 + e14*e23*e32*e41\n\n def inverse(self):\n \"\"\"Calculates the inverse of the matrix.\n\n Raises ZeroDivisionError if matrix has no inverse.\n \"\"\"\n det = self.determinant()\n f = 1./det # catch ZeroDivisionError by caller\n m00, m01, m02, m03, \\\n m10, m11, m12, m13, \\\n m20, m21, m22, m23, \\\n m30, m31, m32, m33 = self.matrix\n self.matrix = array('d', ( \\\n (m12*m23*m31 - m13*m22*m31 + m13*m21*m32 - m11*m23*m32 - m12*m21*m33 + m11*m22*m33)*f,\n (m03*m22*m31 - m02*m23*m31 - m03*m21*m32 + m01*m23*m32 + m02*m21*m33 - m01*m22*m33)*f,\n (m02*m13*m31 - m03*m12*m31 + m03*m11*m32 - m01*m13*m32 - m02*m11*m33 + m01*m12*m33)*f,\n (m03*m12*m21 - m02*m13*m21 - m03*m11*m22 + m01*m13*m22 + m02*m11*m23 - m01*m12*m23)*f,\n (m13*m22*m30 - m12*m23*m30 - m13*m20*m32 + m10*m23*m32 + m12*m20*m33 - m10*m22*m33)*f,\n (m02*m23*m30 - m03*m22*m30 + m03*m20*m32 - m00*m23*m32 - m02*m20*m33 + 
m00*m22*m33)*f,\n (m03*m12*m30 - m02*m13*m30 - m03*m10*m32 + m00*m13*m32 + m02*m10*m33 - m00*m12*m33)*f,\n (m02*m13*m20 - m03*m12*m20 + m03*m10*m22 - m00*m13*m22 - m02*m10*m23 + m00*m12*m23)*f,\n (m11*m23*m30 - m13*m21*m30 + m13*m20*m31 - m10*m23*m31 - m11*m20*m33 + m10*m21*m33)*f,\n (m03*m21*m30 - m01*m23*m30 - m03*m20*m31 + m00*m23*m31 + m01*m20*m33 - m00*m21*m33)*f,\n (m01*m13*m30 - m03*m11*m30 + m03*m10*m31 - m00*m13*m31 - m01*m10*m33 + m00*m11*m33)*f,\n (m03*m11*m20 - m01*m13*m20 - m03*m10*m21 + m00*m13*m21 + m01*m10*m23 - m00*m11*m23)*f,\n (m12*m21*m30 - m11*m22*m30 - m12*m20*m31 + m10*m22*m31 + m11*m20*m32 - m10*m21*m32)*f,\n (m01*m22*m30 - m02*m21*m30 + m02*m20*m31 - m00*m22*m31 - m01*m20*m32 + m00*m21*m32)*f,\n (m02*m11*m30 - m01*m12*m30 - m02*m10*m31 + m00*m12*m31 + m01*m10*m32 - m00*m11*m32)*f,\n (m01*m12*m20 - m02*m11*m20 + m02*m10*m21 - m00*m12*m21 - m01*m10*m22 + m00*m11*m22)*f))\n","sub_path":"geoalg/matrix44.py","file_name":"matrix44.py","file_ext":"py","file_size_in_byte":16070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"167750900","text":"# This script is going to create the skelton of our LTER\n# database and begin to populate it with raw data\n# Note this is the newer version of the schema\n# that was based on the meeting that tool place on\n# December 21, 2015 with Aldo, Tom, and myself\n# In addition rather than using psycopg2 as a module\n# to populate the database, we are strictly using\n# sqlalchemy with a psycog\n# interpreter as our module for talking to postgresql\n# THIS ASSUMES YOU HAVE THE DATABASE ALREADY CREATED\n# IN POSTGRESQL\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import *\nfrom sqlalchemy.dialects.postgresql import *\nimport pandas as pd\nimport sys, os\nif sys.platform == \"darwin\":\n rootpath = (\n \"/Users/bibsian/Desktop/git/database-development/\")\n end = \"/\"\n\nelif sys.platform == \"win32\":\n rootpath = (\n 
\"C:\\\\Users\\MillerLab\\\\Desktop\\\\database-development\\\\\")\n end = \"\\\\\"\n\nlterex = pd.read_csv(\n\trootpath + 'db' + end + 'lter_table_test.csv')\nltertablename = 'lter_table'\n\n\n# Here we are using the packageage sqlalchemy\n# connecting to the lter databse\n# by specifying the title of the database\n# and the user name we will be working under.\n# Note, postgres is the super user and can do\n# everything possible (CREATE,INSERT, MANIPULATE, etc.)\n#create_engine = create_engine(\n# 'postgresql+psycopg2:///',\n# echo=True)\n#conn = create_engine.connect()\n#conn.execute(\"commit\")\n#conn.execute(\"CREATE DATABASE popler_3\")\n#conn.close()\n#create_engine.dispose()\n\nengine = create_engine(\n 'postgresql+psycopg2://--/popler_test',\n echo=True)\n\n\n# Note that the relationships in the database (i.e. entity-relation\n#-ship diagram or ED diagram) can be visualized after all the tables\n# have been created. So, the first step to setting up the skeleton\n# of out LTER database\n# is going to be to create all tables, and attributes within each,\n# and then use our open source database software manager (DBeaver)\n# to visualize the layout\n\n########################\n# Table creation\n#########################\n\n# Now we're going to begin to create the tables and\n# specify their attributes/attribute classes\n\n# The first step of this process is to create a database\n# metadata catalog. With the object title 'metadata', we\n# can create all the tables, their columns, primary, and foreign\n# keys from the 'Table' command and use the 'metadata' object\n# to compile all the information. 
Then it can be written\n# to the postegresql database with a special method called\n# 'create_all'\nmetadata = MetaData()\n\n\n# Raw climate station data\nclimate_raw_table = Table(\n 'climate_raw_table', metadata,\n Column('metarecordid_', Integer, primary_key=True),\n Column('title', TEXT),\n Column('stationid', None, ForeignKey(\n 'climate_station_table.stationid', ondelete=\"CASCADE\")),\n Column('year', NUMERIC),\n Column('month', NUMERIC),\n Column('day', NUMERIC),\n # Terrestiral environmental variables\n Column('avetempobs', NUMERIC),\n Column('avetempmeasure', VARCHAR(50)),\n Column('mintempobs', NUMERIC),\n Column('mintempmeasure', VARCHAR(50)),\n Column('maxtempobs', NUMERIC),\n Column('maxtempmeasure', VARCHAR(50)),\n Column('aveprecipobs', NUMERIC),\n Column('aveprecipmeasure', VARCHAR(50)),\n Column('minprecipobs', NUMERIC),\n Column('minprecipmeasure', VARCHAR(50)),\n Column('maxprecipobs', NUMERIC),\n Column('maxprecipmeasure', NUMERIC),\n Column('avewindobs', NUMERIC),\n Column('avewindmeasure', VARCHAR(50)),\n Column('minwindobs', NUMERIC),\n Column('minwindmeasure', VARCHAR(50)),\n Column('maxwindobs', NUMERIC),\n Column('maxwindmeasure', NUMERIC),\n Column('avelightobs', NUMERIC),\n Column('avelightmeasure', VARCHAR(50)),\n Column('minlightobs', NUMERIC),\n Column('minlightmeasure', VARCHAR(50)),\n Column('maxlightobs', NUMERIC),\n Column('maxlightmeasure', NUMERIC),\n # Aquatic environmental vairables\n Column('avewatertempobs', NUMERIC),\n Column('avewatertempmeasure', VARCHAR(50)),\n Column('minwatertempobs', NUMERIC),\n Column('minwatertempmeasure', VARCHAR(50)),\n Column('maxwatertempobs', NUMERIC),\n Column('maxwatertempmeasure', VARCHAR(50)),\n Column('avephobs', NUMERIC),\n Column('avephmeasure', VARCHAR(50)),\n Column('minphobs', NUMERIC),\n Column('minphmeasure', VARCHAR(50)),\n Column('maxphobs', NUMERIC),\n Column('maxphmeasure', NUMERIC),\n Column('avecondobs', NUMERIC),\n Column('avecondmeasure', VARCHAR(50)),\n 
Column('mincondobs', NUMERIC),\n Column('mincondmeasure', VARCHAR(50)),\n Column('maxcondobs', NUMERIC),\n Column('maxcondmeasure', VARCHAR(50)),\n Column('aveturbidityobs', NUMERIC),\n Column('aveturbiditymeasure', VARCHAR(50)),\n Column('minturbidityobs', NUMERIC),\n Column('minturbiditymeasure', VARCHAR(50)),\n Column('maxturbidityobs', NUMERIC),\n Column('maxturbiditymeasure', VARCHAR(50)),\n Column('covariates', TEXT),\n Column('knbid_', VARCHAR(200)),\n Column('metalink_', VARCHAR(200)),\n Column('authors_', VARCHAR(200)),\n Column('authors_contact_', VARCHAR(200)))\n\n# climate_site_table: This is the initial table\n# that will contain information\n# regarding the LTER sites themselves. Column names\n# should be self explanatory.\nclimate_station_table = Table(\n 'climate_station_table', metadata,\n Column('stationid', VARCHAR(200), primary_key=True),\n Column('lterid', None,\n ForeignKey('lter_table.lterid')),\n Column('lat_climate', NUMERIC),\n Column('lng_climate', NUMERIC),\n Column('descript', TEXT))\n\n\n#lter_table will be the link from climate data to study data\nlter_table = Table(\n 'lter_table', metadata,\n Column('lterid', VARCHAR(10), primary_key=True),\n Column('lter_name', TEXT),\n Column('lat_lter', NUMERIC),\n Column('lng_lter', NUMERIC),\n Column('currently_funded', VARCHAR(50)),\n Column('current_principle_investigator', VARCHAR(200)),\n Column('current_contact_email', VARCHAR(200)),\n Column('alt_contact_email', VARCHAR(200)),\n Column('homepage', VARCHAR(200)))\n\n\n# site_info: Table regarding site information for within each\n# individual study. The table will be related to lter_info and\n# the 'foreign key'= lterid/'lterid'\n# (i.e. 
no entries are allowed in this table unless the site\n# information originates at a given lter_id)\nstudy_site_table = Table(\n 'study_site_table', metadata,\n Column('study_site_key', VARCHAR(200), primary_key=True),\n Column('lter_table_fkey', VARCHAR(10),\n ForeignKey('lter_table.lterid')),\n Column('lat_study_site', NUMERIC),\n Column('lng_study_site', NUMERIC),\n Column('descript', TEXT))\n\n\n\nproject_table = Table(\n 'project_table', metadata,\n # This column is the unique index that we created\n # in order to keep track of all the datasets that\n # will be uploaded\n Column('proj_metadata_key', INTEGER, primary_key=True),\n Column('lter_project_fkey', VARCHAR(10),\n ForeignKey('lter_table.lterid')),\n Column('title', TEXT),\n\n # META: This column specifies the type of information\n # about the sampling organisms life stage\n # ie. adult, juvenile, size, etc\n Column('samplingunits', VARCHAR(50)),\n\n # META: This column specifies the type of data that was\n # collected (i.e. count, biomass, percent cover, etc.)\n Column('datatype', VARCHAR(50)),\n\n # META: This column specifies the type of information\n # about the sampling organisms life stage\n # ie. size, age, life-stage \n Column('structured_type_1', VARCHAR(50)),\n Column('structured_type_1_units', VARCHAR(50)),\n Column('structured_type_2', VARCHAR(50)),\n Column('structured_type_2_units', VARCHAR(50)),\n Column('structured_type_3', VARCHAR(50)),\n Column('structured_type_3_units', VARCHAR(50)),\n Column('structured_type_4', VARCHAR(50)),\n Column('structured_type_4_units', VARCHAR(50)),\n\n \n Column('studystartyr', NUMERIC),\n Column('studyendyr', NUMERIC),\n Column('duration_years', Integer),\n \n # META: This column relates to the frequency of sampling\n # i.e. 
seasonal, monthly, month:yr, season:yr, daily, etc.\n Column('samplefreq', TEXT),\n\n # META: This column list whether the study was observational\n # or experimental (which includes historic experiemental\n # events)\n Column('studytype', VARCHAR(50)),\n\n # META: This column indicates whether the study contained\n # community level data (i.e. data over multiple\n # taxonomic groups\n Column('community', VARCHAR(50)),\n\n # Spatial replicate informatoin\n # META:\n # sp_repX_ext: columns describes the\n # spatial extent sample at that level of spatial\n # replication\n\n # sp_repX_ext_units: column describes the unit\n # of measurement corresponding to that level of spatial\n # replication\n\n # sp_repX_label: describes the labeling scheme used\n # by the study\n\n #Derived:\n # sp_repX_uniquelevels: count of the number of unique\n # levels within that replicate level for a given site;\n # encompassed all time and taxa units.\n \n Column('spatial_replication_level_1_extent', NUMERIC),\n Column('spatial_replication_level_1_extent_units', VARCHAR(200)),\n Column('spatial_replication_level_1_label', VARCHAR(200)),\n Column('spatial_replication_level_1_number_of_unique_reps', INTEGER),\n \n Column('spatial_replication_level_2_extent', NUMERIC),\n Column('spatial_replication_level_2_extent_units', VARCHAR(200)),\n Column('spatial_replication_level_2_label', VARCHAR(200)),\n Column('spatial_replication_level_2_number_of_unique_reps', INTEGER),\n \n Column('spatial_replication_level_3_extent', NUMERIC),\n Column('spatial_replication_level_3_extent_units', VARCHAR(200)),\n Column('spatial_replication_level_3_label', VARCHAR(200)),\n Column('spatial_replication_level_3_number_of_unique_reps', INTEGER),\n \n Column('spatial_replication_level_4_extent', NUMERIC),\n Column('spatial_replication_level_4_extent_units', VARCHAR(200)),\n Column('spatial_replication_level_4_label', VARCHAR(200)),\n Column('spatial_replication_level_4_number_of_unique_reps', INTEGER),\n\n 
Column('spatial_replication_level_5_extent', NUMERIC),\n Column('spatial_replication_level_5_extent_units', VARCHAR(200)),\n Column('spatial_replication_level_5_label', VARCHAR(200)),\n Column('spatial_replication_level_5_number_of_unique_reps', INTEGER),\n \n # Columns regarding treatments\n Column('treatment_type_1', VARCHAR(200)),\n Column('treatment_type_2', VARCHAR(200)),\n Column('treatment_type_3', VARCHAR(200)),\n Column('control_group', VARCHAR(200)),\n\n Column('derived', VARCHAR(200)),\n \n # Columns relating to author, metadata, other sources\n Column('authors', TEXT),\n Column('authors_contact', VARCHAR(200)),\n Column('metalink', TEXT),\n Column('knbid', VARCHAR(200)))\n\n\n\n# main: Table describing the raw data that was collected\n# for each individual project\n# 'foreign key' ='siteid'\n# This is in case there is a project that does not give a specific\n# 'siteid' that can be used in the schema and to ensure that\n# any site data entered comes from\n\nsite_in_project_table = Table(\n 'site_in_project_table', metadata,\n Column(\n 'site_in_project_key',\n Integer, primary_key=True),\n Column('study_site_table_fkey', None,\n ForeignKey(\n 'study_site_table.study_site_key')),\n\n Column('project_table_fkey', None,\n ForeignKey('project_table.proj_metadata_key')),\n\n # DERIVED: start year of data collection for\n # a particular site\n Column('sitestartyr', NUMERIC),\n\n # DERIVED: end year of data collection for\n # a particular site\n Column('siteendyr', NUMERIC),\n\n # DERIVED: This will be the total observation\n # related to this project. THis includes\n # all temporal and spatial levels and\n # all taxa units\n Column('totalobs', NUMERIC),\n\n # DERIVED: calculates the number of unique\n # taxonomic units from raw data\n Column('uniquetaxaunits', NUMERIC))\n\n# taxa: Table regarding taxanomic information. 
Change from\n# last time involves the forgein key and the addition of\n# a column for species code (in case raw table information, does\n# not contain a key for translation).\n# 'foreign key' = site_info/'siteid'\ntaxa_table = Table(\n 'taxa_table', metadata,\n Column('taxa_table_key', Integer, primary_key=True),\n Column('site_in_project_taxa_key', None, ForeignKey(\n 'site_in_project_table.site_in_project_key', ondelete=\"CASCADE\")),\n Column('sppcode', VARCHAR(100)),\n Column('kingdom', VARCHAR(100)),\n Column('subkingdom', VARCHAR(100)),\n Column('infrakingdom', VARCHAR(100)),\n Column('superdivision', VARCHAR(100)),\n Column('division', VARCHAR(100)),\n Column('subdivision', VARCHAR(100)),\n Column('superphylum', VARCHAR(100)),\n Column('phylum', VARCHAR(100)),\n Column('subphylum', VARCHAR(100)),\n Column('clss', VARCHAR(100)),\n Column('subclass', VARCHAR(100)),\n Column('ordr', VARCHAR(100)),\n Column('family', VARCHAR(100)),\n Column('genus', VARCHAR(100)),\n Column('species', VARCHAR(100)),\n Column('common_name', VARCHAR(100)),\n Column('authority', VARCHAR(100)),\n Column('metadata_taxa_key', Integer))\n\ntaxa_accepted_table = Table(\n 'taxa_accepted_table', metadata,\n Column('taxa_accepted_table_key', Integer, primary_key=True),\n Column('taxa_original_fkey', None, ForeignKey(\n 'taxa_table.taxa_table_key', ondelete=\"CASCADE\")),\n Column('sppcode', VARCHAR(100)),\n Column('kingdom_accepted', VARCHAR(100)),\n Column('subkingdom_accepted', VARCHAR(100)),\n Column('infrakingdom_accepted', VARCHAR(100)),\n Column('superdivision_accepted', VARCHAR(100)),\n Column('division_accepted', VARCHAR(100)),\n Column('superphylum_accepted', VARCHAR(100)),\n Column('phylum_accepted', VARCHAR(100)),\n Column('subphylum_accepted', VARCHAR(100)),\n Column('subdivision_accepted', VARCHAR(100)),\n Column('clss_accepted', VARCHAR(100)),\n Column('subclass_accepted', VARCHAR(100)),\n Column('ordr_accepted', VARCHAR(100)),\n Column('family_accepted', VARCHAR(100)),\n 
Column('genus_accepted', VARCHAR(100)),\n Column('species_accepted', VARCHAR(100)),\n Column('common_name_accepted', VARCHAR(100)),\n Column('authority', VARCHAR(100)))\n\n\n\n# Count table\ncount_table = Table(\n 'count_table', metadata,\n Column('count_table_key', Integer, primary_key=True),\n Column('taxa_count_fkey', None, ForeignKey(\n 'taxa_table.taxa_table_key', ondelete=\"CASCADE\")),\n Column('site_in_project_count_fkey', None, ForeignKey(\n 'site_in_project_table.site_in_project_key', ondelete=\"CASCADE\")),\n Column('year', NUMERIC),\n Column('month', NUMERIC),\n Column('day', NUMERIC),\n Column('spatial_replication_level_1', VARCHAR(50)),\n Column('spatial_replication_level_2', VARCHAR(50)),\n Column('spatial_replication_level_3', VARCHAR(50)),\n Column('spatial_replication_level_4', VARCHAR(50)),\n Column('spatial_replication_level_5', VARCHAR(50)),\n Column('treatment_type_1', VARCHAR(200)),\n Column('treatment_type_2', VARCHAR(200)),\n Column('treatment_type_3', VARCHAR(200)),\n Column('structure_type_1', VARCHAR(200)),\n Column('structure_type_2', VARCHAR(200)),\n Column('structure_type_3', VARCHAR(200)),\n Column('structure_type_4', VARCHAR(50)),\n Column('count_observation', NUMERIC),\n Column('covariates', TEXT),\n Column('metadata_count_key', Integer))\n\n\n# Biomass Table\nbiomass_table = Table(\n 'biomass_table', metadata,\n Column('biomass_table_key', Integer, primary_key=True),\n Column('taxa_biomass_fkey', None, ForeignKey(\n 'taxa_table.taxa_table_key', ondelete=\"CASCADE\")),\n Column('site_in_project_biomass_fkey', None, ForeignKey(\n 'site_in_project_table.site_in_project_key', ondelete=\"CASCADE\")),\n Column('year', NUMERIC),\n Column('month', NUMERIC),\n Column('day', NUMERIC),\n Column('spatial_replication_level_1', VARCHAR(50)),\n Column('spatial_replication_level_2', VARCHAR(50)),\n Column('spatial_replication_level_3', VARCHAR(50)),\n Column('spatial_replication_level_4', VARCHAR(50)),\n Column('spatial_replication_level_5', 
VARCHAR(50)),\n Column('treatment_type_1', VARCHAR(200)),\n Column('treatment_type_2', VARCHAR(200)),\n Column('treatment_type_3', VARCHAR(200)),\n Column('structure_type_1', VARCHAR(200)),\n Column('structure_type_2', VARCHAR(200)),\n Column('structure_type_3', VARCHAR(200)),\n Column('structure_type_4', VARCHAR(50)),\n Column('biomass_observation', NUMERIC),\n Column('covariates', TEXT),\n Column('metadata_biomass_key', Integer))\n\n\n# Density Table\ndensity_table = Table(\n 'density_table', metadata,\n Column('density_table_key', Integer, primary_key=True),\n Column('taxa_density_fkey', None, ForeignKey(\n 'taxa_table.taxa_table_key', ondelete=\"CASCADE\")),\n Column('site_in_project_density_fkey', None, ForeignKey(\n 'site_in_project_table.site_in_project_key', ondelete=\"CASCADE\")),\n Column('year', NUMERIC),\n Column('month', NUMERIC),\n Column('day', NUMERIC),\n Column('spatial_replication_level_1', VARCHAR(50)),\n Column('spatial_replication_level_2', VARCHAR(50)),\n Column('spatial_replication_level_3', VARCHAR(50)),\n Column('spatial_replication_level_4', VARCHAR(50)),\n Column('spatial_replication_level_5', VARCHAR(50)),\n Column('treatment_type_1', VARCHAR(200)),\n Column('treatment_type_2', VARCHAR(200)),\n Column('treatment_type_3', VARCHAR(200)),\n Column('structure_type_1', VARCHAR(200)),\n Column('structure_type_2', VARCHAR(200)),\n Column('structure_type_3', VARCHAR(200)),\n Column('structure_type_4', VARCHAR(50)),\n Column('density_observation', NUMERIC),\n Column('covariates', TEXT),\n Column('metadata_density_key', Integer))\n\n# Percent Cover Table\npercent_cover_table = Table(\n 'percent_cover_table', metadata,\n Column('percent_cover_table_key', Integer, primary_key=True),\n Column('taxa_percent_cover_fkey', None, ForeignKey(\n 'taxa_table.taxa_table_key', ondelete=\"CASCADE\")),\n Column('site_in_project_percent_cover_fkey', None, ForeignKey(\n 'site_in_project_table.site_in_project_key', ondelete=\"CASCADE\")),\n Column('year', 
NUMERIC),\n Column('month', NUMERIC),\n Column('day', NUMERIC),\n Column('spatial_replication_level_1', VARCHAR(50)),\n Column('spatial_replication_level_2', VARCHAR(50)),\n Column('spatial_replication_level_3', VARCHAR(50)),\n Column('spatial_replication_level_4', VARCHAR(50)),\n Column('spatial_replication_level_5', VARCHAR(50)),\n Column('treatment_type_1', VARCHAR(200)),\n Column('treatment_type_2', VARCHAR(200)),\n Column('treatment_type_3', VARCHAR(200)),\n Column('structure_type_1', VARCHAR(200)),\n Column('structure_type_2', VARCHAR(200)),\n Column('structure_type_3', VARCHAR(200)),\n Column('structure_type_4', VARCHAR(50)),\n Column('percent_cover_observation', NUMERIC),\n Column('covariates', TEXT),\n Column('metadata_percent_cover_key', Integer))\n\n\n# Individual Table\nindividual_table = Table(\n 'individual_table', metadata,\n Column('individual_table_key', Integer, primary_key=True),\n Column('taxa_individual_fkey', None, ForeignKey(\n 'taxa_table.taxa_table_key', ondelete=\"CASCADE\")),\n Column('site_in_project_individual_fkey', None, ForeignKey(\n 'site_in_project_table.site_in_project_key', ondelete=\"CASCADE\")),\n Column('year', NUMERIC),\n Column('month', NUMERIC),\n Column('day', NUMERIC),\n Column('spatial_replication_level_1', VARCHAR(50)),\n Column('spatial_replication_level_2', VARCHAR(50)),\n Column('spatial_replication_level_3', VARCHAR(50)),\n Column('spatial_replication_level_4', VARCHAR(50)),\n Column('spatial_replication_level_5', VARCHAR(50)),\n Column('treatment_type_1', VARCHAR(200)),\n Column('treatment_type_2', VARCHAR(200)),\n Column('treatment_type_3', VARCHAR(200)),\n Column('structure_type_1', VARCHAR(200)),\n Column('structure_type_2', VARCHAR(200)),\n Column('structure_type_3', VARCHAR(200)),\n Column('structure_type_4', VARCHAR(50)),\n Column('individual_observation', NUMERIC),\n Column('covariates', TEXT),\n Column('metadata_individual_key', Integer))\n\n\n# This command takes all the information that was stored in 
the\n# metadata catalog and uses it to populate the database that\n# we connected to with our engine (user=postgres, databse=LTER)\nmetadata.create_all(engine)\n\n\nlterex.to_sql(\n ltertablename, con=engine, if_exists=\"append\", index=False)\n","sub_path":"db/dbcreationv3-migrated.py","file_name":"dbcreationv3-migrated.py","file_ext":"py","file_size_in_byte":20646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"123886852","text":"\r\nfrom socket import *\r\nimport time\r\nfrom _thread import *\r\nfrom random import randint\r\n\r\ndef ini():\r\n host = input(\"Server Address: \")\r\n port = int(input(\"Port: \"))\r\n return host, port\r\n\r\ndef crearSocket():\r\n s = socket(AF_INET, SOCK_STREAM)\r\n return s\r\n\r\ndef conectarse (host, port, s):\r\n s.connect((host, port))\r\n\r\ndef intentoConexion(host, port, s):\r\n\r\n while True:\r\n print(\"\\nIntentando conectarse a:\", host + \":\" + str(port))\r\n try:\r\n conectarse(host, port, s)\r\n break\r\n except:\r\n print(\"No hay un servidor en:\", host + \":\" + str(port))\r\n print(\"Intentando en 5 segs\\n\")\r\n time.sleep(5)\r\n\r\ndef enviar(s):\r\n\r\n while True:\r\n\r\n global exit\r\n\r\n try:\r\n msg = input(\"\")\r\n msg = jugador +\": \" + msg\r\n if msg == jugador+\": salir\":\r\n exit = True\r\n msg = \"El \"+jugador+\" Jugador se ha ido\"\r\n s.send(msg.encode(\"UTF-8\"))\r\n s.close\r\n break\r\n else:\r\n s.send(msg.encode(\"UTF-8\"))\r\n start_new_thread(recibir,(s,))\r\n\r\n\r\n except:\r\n print(\"Algo ocurrió\\n\")\r\n print(\"Reintentando en 5 segs\")\r\n time.sleep(5)\r\n\r\ndef recibir(s):\r\n while True:\r\n\r\n try:\r\n reply = s.recv(2048)\r\n print(reply.decode(\"UTF-8\"))\r\n break\r\n\r\n\r\n except:\r\n print(\"No se puede recibir respuesta\\n\")\r\n print(\"Reintentando en 5 segs\")\r\n time.sleep(5)\r\n\r\ndef recibirEspecial(s):\r\n global jugador\r\n jugador = s.recv(2048).decode(\"UTF-8\")\r\n\r\nexit=False # Si el 
jugador envia salir, exit se pone en true y el\r\n # el programa termina\r\njugador = \"\"\r\n\r\ndef main():\r\n\r\n\r\n host, port = ini()\r\n s = crearSocket()\r\n intentoConexion(host,port,s)\r\n recibirEspecial(s)\r\n print(\"\\nConexión al servidor establecida!\\nEl servidor es:\", host+\":\"+str(port)+\"\\n\")\r\n #############CODIGO DEL JUEGO###################\r\n n = 5\r\n board = []\r\n\r\n for x in range(n):\r\n board.append([\"O\"] * n)\r\n\r\n def print_board(board):\r\n for row in board:\r\n print((\" \").join(row))\r\n\r\n print(\"JUGUEMOS A 'ENCUENTRA MI BARCO'!\")\r\n jugador1=input('Nombre del jugador 1... ')\r\n jugador2=input('Nombre del jugador 2... ')\r\n\r\n print(str(n)+\"x\"+str(n))\r\n print(\"El tablero empieza con 0,0 hasta \"+str(n-1)+\",\"+str(n-1))\r\n print(\"\"\"\r\n fila -> -\r\n columna -> |\r\n \"\"\")\r\n print_board(board)\r\n\r\n def random_row(board):\r\n return randint(0, len(board) - 1)\r\n def random_col(board):\r\n return randint(0, len(board[0]) - 1)\r\n\r\n ship_row = random_row(board)\r\n ship_col = random_col(board)\r\n\r\n for turn in range(1,1000):\r\n\r\n if turn % 2 != 0:\r\n print (\"\\nTurno\", turn)\r\n print(\"Turno de\", jugador1 )\r\n guess_row = int(input(\"\\nAdivina la fila:\"))\r\n guess_col = int(input(\"Adivina la columna:\"))\r\n winner1 = True\r\n winner2 = False\r\n\r\n else:\r\n print (\"\\nTurno\", turn)\r\n print(\"Turno de\", jugador2 )\r\n guess_row = int(input(\"\\nAdivina la fila:\"))\r\n guess_col = int(input(\"Adivina la columna:\"))\r\n winner1 = False\r\n winner2 = True\r\n\r\n if guess_row == ship_row and guess_col == ship_col:\r\n board[guess_row][guess_col] = \"+\"\r\n\r\n if winner1:\r\n print(\"\\nFELICIDADES \"+jugador1+\" HUNDISTE MI BARCO QUE ESTABA EN \" +str(ship_row)+\",\"+str(ship_col)+\"!\")\r\n print(jugador1+\", ¡HAS GANADO EL JUEGO!\")\r\n print_board(board)\r\n break\r\n elif winner2:\r\n print(\"\\nFELICIDADES \"+jugador2+\" HUNDISTE MI BARCO QUE ESTABA EN \" 
+str(ship_row)+\",\"+str(ship_col)+\"!\")\r\n print(jugador2+\", ¡HAS GANADO EL JUEGO!\")\r\n break\r\n print_board(board)\r\n\r\n else:\r\n if (guess_row < 0 or guess_row > n-1) or (guess_col < 0 or guess_col > n-1):\r\n print(\"\\nLo siento, eso ni estuvo en el oceano.\")\r\n elif(board[guess_row][guess_col] == \"X\"):\r\n print(\"\\nYa habías dicho ese.\")\r\n else:\r\n print(\"\\nFallaste mi barco!\\n\")\r\n board[guess_row][guess_col] = \"X\"\r\n turn =+ 1\r\n print_board(board)\r\n #############CODIGO DEL JUEGO###################\r\n print(\"Escribe tus mensajes\\n\")\r\n start_new_thread(enviar,(s,))\r\n\r\n while exit!=True: # Necesarios para que los hilos no mueran\r\n pass\r\n\r\n print(\"\\nLo sentimos, algo salió mal! Has perdido la conexión al servidor.:(\")\r\n print(\"Cerrando las ventanas en 5 seg\")\r\n time.sleep(10)\r\n\r\nmain()\r\n","sub_path":"ene-jun-2018/Juan Carlos Sleiman/Game/client-side/BSclient.py","file_name":"BSclient.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"647010609","text":"# Тут - искать возможное кол-во фиолетовых чисел в сотне\n\nimport quadsave\n\nf = open('.violist.txt', 'r')\n\nqmax = quadsave.get_quad()\nst = quadsave.get_last()\nvl = []\nk = 0\nnfd = False\nfor line in f:\n el = int(line)\n if el > st:\n vl.append(el)\n if el < vl[0] + 100:\n k += 1\n elif el >= vl[0] + 100:\n quadsave.save_last(vl[0])\n if k > qmax:\n quadsave.save_quad(k)\n qmax = k\n k += 1\n while vl[0] + 100 <= el:\n vl.pop(0)\n k -= 1\n if nfd and len(vl) == 1:\n print(vl[0])\n quadsave.save_zero(vl[0])\n nfd = False\n\n","sub_path":"quadsearch.py","file_name":"quadsearch.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369900012","text":"import paho.mqtt.client as mqtt\n\ndef on_message_msgs(mosq, obj, msg):\n print(int(msg.payload))\n return 
str(msg.payload)\n\n\nmqttc = mqtt.Client()\nmqttc.message_callback_add(\"home/orbitrack/steps\", on_message_msgs)\nmqttc.connect(\"kotok.asuscomm.com\", 1883, 60)\nmqttc.subscribe(\"home/orbitrack/steps\", 0)\n#mqttc.loop_forever()\n\n#if __name__==\"__main__\":\nprint(on_message_msgs)","sub_path":"libs/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"587829680","text":"# ! this program is too heavy when use pred_per !\nfrom sklearn.externals import joblib\nimport numpy as np\nfrom collections import defaultdict\nimport pickle\nfrom knock72 import pn_list,stem_list\n\ndef sig(x):\n s = 1 / (1 + np.exp(-x))\n return s\n\n#print(len(pn_list))\nlr = joblib.load('lr.pkl')\nweight = lr.coef_\nbias = lr.intercept_\n\nwith open('word_ids.pkl','rb') as ids:\n word_ids = pickle.load(ids)\n\npl_list = []\n\n\nfor i,line in enumerate(stem_list):\n sent_word_id = [[0] * len(word_ids)]\n for word in stem_list[i]:\n sent_word_id[0][word_ids[word]] += 1\n\n pred_label = lr.predict(sent_word_id)\n pl_list.append(pred_label[0])\n# print(pred_label)\n# print(pl_list)\nif __name__ == ' __main__':\n for i,line in enumerate(stem_list):\n sent_word_id = [[0] * len(word_ids)]\n for word in stem_list[i]:\n sent_word_id[0][word_ids[word]] += 1\n\n# print(lr.coef_[0][i]) # 推定値、偏回帰係数\n# print(lr.intercept_) # 切片\n# print(lr.predict(sent_word_id)[0]) # 予想ラベル\n# 必要:正解ラベル、予想ラベル、予想確率 = ax + b形\n ans = pn_list\n pred_label = lr.predict(sent_word_id)\n# pred_per = sig(np.dot(weight.T,sent_word_id) + bias)\n print('correct:{}\\tpred:{}'.format(pn_list[i],pred_label[0]))\n#,pred_per[0][0]\n","sub_path":"yohta/chapter08/knock76.py","file_name":"knock76.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"644277244","text":"# -*- coding: utf-8 -*-\n'''\nBeacon to fire events at 
login of users as registered in the wtmp file\n\n.. code-block:: yaml\n\n beacons:\n wtmp: {}\n'''\n\n# Import Python libs\nfrom __future__ import absolute_import\nimport os\nimport struct\n\n# Import 3rd-party libs\nfrom salt.ext.six.moves import range\n\n# Import salt libs\nimport salt.utils\n\n__virtualname__ = 'wtmp'\nWTMP = '/var/log/wtmp'\nFMT = ' 0:\n\n # Stores the current number in the current unsolved square\n curr_num = solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]]\n\n # Stores the next number that will be tried in the current unsolved square\n possible_next_num = possible_nums[\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] % len(possible_nums)]\n\n # Makes sure that the code doesn't get stuck trying the same combos\n if try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] == len(possible_nums):\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Makes sure that the code doesn't get stuck on trying the same number\n if possible_next_num == curr_num:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Sets the unsolved square to the next number that is to be tried\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = possible_next_num\n\n # Changes which index will be used to find a different number if the new number does not work\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] += 1\n\n # if there are no possible numbers for the current square, it backtracks to the last number that can change\n else:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Determines if there is still an empty unsolved square left\n if sudoku.has_next_emtpy_node(nodes[len(nodes) - 1]):\n nodes.append(sudoku.find_next_empty_node(nodes[len(nodes) - 1]))\n else:\n 
update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n done = True\n\n\nclass SudokuGraphics:\n\n def __init__(self, sudoku, s):\n pygame.init()\n\n pygame.font.init()\n\n myfont = pygame.font.SysFont('Times New Roman', 30)\n\n screen = pygame.display.set_mode((540, 600))\n\n orig_board = sudoku.board\n\n solve_board = deepcopy(orig_board)\n\n solved = s\n\n strikes = [0, 0, 0]\n\n grid = [[0 for i in range(9)] for j in range(9)]\n\n clicked_grid = [[False for i in range(9)] for j in range(9)]\n\n clicked = None\n\n curr_num = 0\n\n for row in range(9):\n for col in range(9):\n pygame.draw.rect(screen, (255, 255, 255), (col * 60, row * 60, 60, 60))\n if orig_board[row][col] != 0:\n textsurface = myfont.render(str(orig_board[row][col]), True, (0, 0, 0))\n screen.blit(textsurface, (col*60+23, row*60+15))\n grid[row][col] = Node(row*60+60, col*60+60)\n\n draw_lines(screen, strikes)\n\n run = True\n while run:\n pygame.time.delay(50)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1:\n curr_num = 1\n elif event.key == pygame.K_2:\n curr_num = 2\n elif event.key == pygame.K_3:\n curr_num = 3\n elif event.key == pygame.K_4:\n curr_num = 4\n elif event.key == pygame.K_5:\n curr_num = 5\n elif event.key == pygame.K_6:\n curr_num = 6\n elif event.key == pygame.K_7:\n curr_num = 7\n elif event.key == pygame.K_8:\n curr_num = 8\n elif event.key == pygame.K_9:\n curr_num = 9\n elif event.key == pygame.K_0:\n curr_num = 0\n elif event.key == pygame.K_SPACE:\n sudoku.board = solve_board\n solve_soduku(sudoku, screen)\n if clicked is not None:\n if curr_num == solved[clicked[0]][clicked[1]]:\n solve_board[clicked[0]][clicked[1]] = curr_num\n update_grid(screen, clicked, solve_board, myfont)\n draw_lines(screen, strikes)\n else:\n run = strike(strikes)\n draw_lines(screen, strikes)\n\n if 
pygame.mouse.get_pressed()[0] == 1:\n clicked = find_clicked_node(grid, pygame.mouse.get_pos(), clicked_grid, orig_board)\n\n if clicked is not None and orig_board[clicked[0]][clicked[1]] == 0:\n update_grid(screen, clicked, solve_board, myfont)\n draw_lines(screen, strikes)\n\n pygame.display.update()\n\n pygame.quit()\n","sub_path":"SudokuGraphics.py","file_name":"SudokuGraphics.py","file_ext":"py","file_size_in_byte":9350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"497696405","text":"#!/usr/bin/env python\n\nimport logging\nimport tornado\nimport tornado.web\nfrom tornado import httpserver\nfrom tornado import ioloop\nfrom tornado import websocket\n\nimport os\nimport sys\nimport json\nimport webbrowser\nimport nbformat\nfrom queue import Queue\n\nfrom execute import ThreadedExecutor\nfrom cells import Notebook\n\n\nSTATIC_PATH = os.path.join(os.path.split(__file__)[0], '..', 'client')\n\n\ndef serialize_binary_message(msg):\n \"\"\"serialize a message as a binary blob\n Header:\n 4 bytes: number of msg parts (nbufs) as 32b int\n 4 * nbufs bytes: offset for each buffer as integer as 32b int\n Offsets are from the start of the buffer, including the header.\n Returns\n -------\n The message serialized to bytes.\n \"\"\"\n from jupyter_client.jsonutil import date_default\n import struct\n # don't modify msg or buffer list in-place\n msg = msg.copy()\n buffers = list(msg.pop('buffers'))\n if sys.version_info < (3, 4):\n buffers = [x.tobytes() for x in buffers]\n bmsg = json.dumps(msg, default=date_default).encode('utf8')\n buffers.insert(0, bmsg)\n nbufs = len(buffers)\n offsets = [4 * (nbufs + 1)]\n for buf in buffers[:-1]:\n offsets.append(offsets[-1] + len(buf))\n offsets_buf = struct.pack('!' 
+ 'I' * (nbufs + 1), nbufs, *offsets)\n buffers.insert(0, offsets_buf)\n return b''.join(buffers)\n\n\nclass PeriodicOutputCallback(object):\n \"\"\"\n Sets up a periodic callback to push output to cells by polling from\n the queue pushed to by the ThreadedExecutor.\n \"\"\"\n\n def __init__(self, server, notebook, period=20):\n self.server = server\n self.notebook = notebook\n self.period = period\n\n\n def start(self):\n self.callback = ioloop.PeriodicCallback(self.__call__, self.period)\n self.callback.start()\n\n def stop(self):\n self.callback.stop()\n\n def __call__(self):\n \"Processes queue pushed to by ThreadedExecutor\"\n try:\n val = self.server.queue.get_nowait()\n self.server.queue.task_done()\n result, status = val\n except:\n return\n\n connection = (self.server.BROWSER_CONNECTIONS[0]\n if self.server.BROWSER_CONNECTIONS else None)\n\n if connection and (status == 'comm_open'):\n print(\"REQUEST TO OPEN COMM FOR JS: %s\" % result) # TODO: buffers\n self.notebook.message(connection, 'comm_open', result['content'])\n # e.g:\n # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc',\n # 'target_name': 'ZOO', 'target_module': None}\n return\n elif connection and (status == 'comm_msg'):\n buffers = result['buffers']\n if buffers == []:\n self.notebook.message(connection, 'comm_msg', # FIXME: redundant 'comm_msg'\n {'msg_type': 'comm_msg',\n 'content': result['content']})\n else:\n msg = {'msg_type': 'comm_msg',\n 'content': result['content']}\n connection.write_message(serialize_binary_message(# FIXME: use message method\n {'cmd':'comm_msg', 'args':msg, 'buffers': buffers}), binary=True)\n return\n\n\n else:\n outnode, execution_count = result, status\n\n if connection:\n cell = self.notebook.find_cell(execution_count)\n if cell is None: return # There may be no cell if running a silent execution\n position = self.notebook.cell_position(cell)\n\n if execution_count is None:\n return # silent execution before *any* output\n if outnode is None and 
(cell.prompt == execution_count):\n return # no need to update prompt for silent execution\n self.notebook.update_cell_outputs(connection, position, outnode)\n\n\nclass LabServer(websocket.WebSocketHandler):\n\n BROWSER_CONNECTIONS = []\n\n NOTEBOOK = None\n\n def open(self):\n self.queue = Queue()\n\n # Note that there are multiple LabServer instances and we want only one notebook!\n # (for now)\n if LabServer.NOTEBOOK is None:\n LabServer.NOTEBOOK = Notebook(ThreadedExecutor(\"threaded-kernel\", self.queue))\n LabServer.NOTEBOOK.STATIC_PATH = STATIC_PATH\n\n self.output_callback = PeriodicOutputCallback(self, LabServer.NOTEBOOK)\n self.output_callback.start()\n logging.info(\"Connection opened\")\n\n\n def on_message(self, message):\n \"Websocket on_message handler. Tracks connection type.\"\n logging.info(u\"Received message: {0}\".format(message))\n try:\n payload = json.loads(message)\n except Exception as e:\n logging.info('JSON parse exception: %s' % str(e))\n return\n\n if payload.get('init',False):\n if payload['init'] == 'browser':\n self.BROWSER_CONNECTIONS.append(self)\n logging.info('Added browser client connection')\n if len(LabServer.NOTEBOOK.cells) > 0:\n logging.info(\"Restart with previously opened notebook\")\n LabServer.NOTEBOOK.reload(self)\n # If you hit reload in the browser, the CSS needs to be re-sent\n LabServer.NOTEBOOK.update_style(self, css=None)\n return\n\n # SOME COMMANDS (e.g mirroring) should happen even without a browser tab open!\n connection = self.BROWSER_CONNECTIONS[0] if len(self.BROWSER_CONNECTIONS) else None\n LabServer.NOTEBOOK.dispatch(connection, payload)\n\n\n def check_origin(self, origin):\n return True\n\n def on_close(self):\n logging.info(\"ON_CLOSE\")\n if self in self.BROWSER_CONNECTIONS:\n self.BROWSER_CONNECTIONS.remove(self)\n\n self.output_callback.stop()\n\nif __name__ == \"__main__\":\n import tornado.options\n tornado.options.parse_command_line()\n\n\n html_handler = (r'/(.*)', 
tornado.web.StaticFileHandler,\n {'path': STATIC_PATH})\n\n\n tornado.web.Application([html_handler]).listen(8000)\n ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", LabServer)]))\n ws_server.listen(9999, \"127.0.0.1\")\n logging.info(\"STARTED: Server start listening\")\n ioloop.IOLoop.instance().start()\n","sub_path":"server/labmode.py","file_name":"labmode.py","file_ext":"py","file_size_in_byte":6360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"469660384","text":"import sys, re\n\ndef main():\n\twith open(sys.argv[1]) as f:\n\t\tfor line in f:\n\t\t\tnumber, res = line.rstrip().split(';')\n\t\t\tnumber = substitute(number, res.split(','))\n\t\t\tprint(number)\n\ndef substitute(number, res):\n\tfor i in range(0, len(res), 2):\n\t\tprint(res[i], res[i + 1], number)\n\t\tnumber = re.sub(res[i], res[i + 1], number)\n\t\tprint(number)\n\treturn number\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Codeeval/String Substitution/P.py","file_name":"P.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"269222587","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 11:37:38 2018\n\n@author: beleaf\n\"\"\"\n\ndef run_formula(dv, param = None):\n defult_param = {'t1':60}\n if not param:\n param = defult_param\n \n AD_hl60 = dv.add_formula('AD_hl60', \n \"Ts_Mean(Sign(Delta(close,1))*(high-low)*volume,%s)\"%(param['t1']),\n is_quarterly=False)\n \n return AD_hl60","sub_path":"因子创造py文件/AD_hl60.py","file_name":"AD_hl60.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"554868915","text":"Rillaboom = {\n \"name\": \"Rillaboom\",\n \"nature\": \"Adamant\",\n \"ability\": \"Grassy Surge\",\n \"item\": \"Assault Vest\",\n \"attack\": [\"Grassy Glide\", \"Wood Hammer\", \"Knock Off\", \"High 
Horsepower\"],\n \"ev\": [\n '252', # HP\n '252', # ATK\n '0', # DEF\n '0', # SPA\n '0', # SPD\n '0' # SPE\n ]\n}\n\nTogekiss = {\n \"name\": \"Togekiss\",\n \"nature\": \"Modest\",\n \"ability\": \"Super Luck\",\n \"item\": \"Scope Lens\",\n \"attack\": [\"Dazzling Gleam\", \"Air Slash\", \"Heat Wave\"],\n \"ev\": [\n '252', # HP\n '0', # ATK\n '0', # DEF\n '252', # SPA\n '0', # SPD\n '0' # SPE\n ]\n}\n\nTyranitar = {\n \"name\": \"Tyranitar\",\n \"nature\": \"Adamant\",\n \"ability\": \"Sand Stream\",\n \"item\": \"Weakness Policy\",\n \"attack\": [\"Rock Slide\", \"Lash Out\", \"Superpower\"],\n \"ev\": [\n '252', # HP\n '252', # ATK\n '0', # DEF\n '0', # SPA\n '0', # SPD\n '0' # SPE\n ]\n}\n\nCinderace = {\n \"name\": \"Cinderace\",\n \"nature\": \"Jolly\",\n \"ability\": \"Libero\",\n \"item\": \"Life Orb\",\n \"attack\": [\"Pyro Ball\", \"High Jump Kick\", \"Iron Head\", \"Sucker Punch\"],\n \"ev\": [\n '4', # HP\n '252', # ATK\n '0', # DEF\n '0', # SPA\n '0', # SPD\n '252' # SPE\n ]\n}\n\nCharizard = {\n \"name\": \"Charizard\",\n \"nature\": \"Timid\",\n \"ability\": \"Solar Power\",\n \"item\": \"Life Orb\",\n \"attack\": [\"Heat Wave\", \"Air Slash\", \"Solar Beam\", \"Scorching Sands\"],\n \"ev\": [\n '4', # HP\n '0', # ATK\n '0', # DEF\n '252', # SPA\n '0', # SPD\n '252' # SPE\n ]\n}\n\nVenusaur = {\n \"name\": \"Venusaur\",\n \"nature\": \"Timid\",\n \"ability\": \"Chlorophyll\",\n \"item\": \"Life Orb\",\n \"attack\": [\"Giga Drain\", \"Sludge Bomb\", \"Earth Power\"],\n \"ev\": [\n '4', # HP\n '0', # ATK\n '0', # DEF\n '252', # SPA\n '0', # SPD\n '252' # SPE\n ]\n}\n\nIndeedee = {\n \"name\": \"Indeedee\",\n \"nature\": \"Modest\",\n \"ability\": \"Psychic Surge\",\n \"item\": \"Focus Sash\",\n \"attack\": [\"Expanding Force\"],\n \"ev\": [\n '4', # HP\n '0', # ATK\n '0', # DEF\n '252', # SPA\n '0', # SPD\n '252' # SPE\n ]\n}\n\natk_pokemon = [Rillaboom, Togekiss, Tyranitar, Cinderace, Charizard, Venusaur, Indeedee]\n\n# 
print(atk_pokemon[1]['ev'])\n","sub_path":"Pokemon_Attacker.py","file_name":"Pokemon_Attacker.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504075676","text":"from pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LAParams, LTTextBox, LTTextLine\nfrom tikapp import TikaApp\nimport PyPDF2, subprocess, os, sys\n\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom common.recursive_folders import recursive_folders\n\nclass pdf_to_text():\n \"\"\"Converts pdf to text with pdfminer\"\"\"\n def __init__(self):\n pass\n \n def convert_pdfminer(self, fname):\n fp = open(fname, 'rb')\n parser = PDFParser(fp)\n doc = PDFDocument(parser)\n rsrcmgr = PDFResourceManager()\n laparams = LAParams()\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n text = ''\n for page in PDFPage.create_pages(doc):\n interpreter.process_page(page)\n layout = device.get_result()\n for lt_obj in layout:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n text += lt_obj.get_text()\n return text\n\n def convert_PyPDF2(self,fname):\n pdfFileObj = open(fname,'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n text = ''\n for i in range(pdfReader.numPages):\n pageObj = pdfReader.getPage(i)\n text += pageObj.extractText() + '\\n'\n return text\n\n def convert_Tika(self,fname):\n tika_client = TikaApp(file_jar=os.getcwd()+'/tika-app-1.20.jar')\n return tika_client.extract_only_content(fname)\n\nif __name__ == '__main__':\n path = sys.argv[1]\n p = pdf_to_text()\n r = recursive_folders()\n for arq in r.find_files(path):\n if arq[-3:] == 'pdf' or arq[-3:] == 'doc' or arq[-4:] == 'docx':\n texto = 
p.convert_Tika(arq)\n arq = open(arq.replace('pdf','txt').replace('docx','txt').replace('doc','txt'),'w')\n arq.write(texto)\n arq.close()","sub_path":"common_nlp/pdf_to_text.py","file_name":"pdf_to_text.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"601789286","text":"# n=input('enter the string:')\n# l=int(len(n))\n# for i in range(int(l/2+1)):\n# if n[i]==n[-i-1]:\n# print('palindrome')\n# break\n# else:\n# print('not')\n\n\ndef reverse(str1):\n if len(str1)==0:\n return(str1)\n return reverse(str1[1:])+str1[0]\na=reverse('hello')\nprint(a)\n\n\n\n","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66148297","text":"import time\nstart_time = time.time()\nfile_data = open('hmmmodel.txt', 'r').read()\ndata_split_list = file_data.split(\"\\n\")\ntags_words_dict = eval(data_split_list[0])\ntags_dict = eval(data_split_list[1])\ndistinct_tags = eval(data_split_list[2])\ndistinct_words = eval(data_split_list[3])\nlen_transition = eval(data_split_list[4])\nlen_emission = eval(data_split_list[5])\nwords_dict = eval(data_split_list[6])\ntarget = open('hmmoutput.txt', 'w')\ntry:\n for sent in open('catalan_corpus_dev_raw.txt'):\n sentence = sent.split()\n sentlen = len(sentence)\n viterbi = []\n backpointer = []\n first_viterbi = {}\n first_backpointer = {}\n if sentence[0] in distinct_words:\n tag_list = words_dict[sentence[0]]\n else:\n tag_list = distinct_tags\n for tag in tag_list:\n if tag == \"START\": continue\n if sentence[0] in distinct_words:\n first_viterbi[tag] = float(tags_dict['START'][tag] + 1)/float(len(tags_dict['START']) + len_transition['START']) * float(tags_words_dict[tag][sentence[0]])/float(len_emission[tag])\n else:\n first_viterbi[tag] = float(tags_dict['START'][tag] + 
1)/float(len(tags_dict['START']) + len_transition['START'])\n first_backpointer[ tag ] = \"START\"\n\n viterbi.append(first_viterbi)\n backpointer.append(first_backpointer)\n\n max = 0.0\n currbest = first_viterbi.keys()[0]\n for tag in first_viterbi.keys():\n if first_viterbi[tag] >= max:\n max = first_viterbi[tag]\n currbest = tag\n\n for wordindex in range(1, len(sentence)):\n this_viterbi = { }\n this_backpointer = { }\n prev_viterbi = viterbi[-1]\n if sentence[wordindex] in distinct_words:\n tag_list = words_dict[sentence[wordindex]]\n else:\n tag_list = distinct_tags\n for tag in tag_list:\n if tag == \"START\": continue\n\n if sentence[wordindex] in distinct_words:\n\n best_previous = prev_viterbi.keys()[0]\n max = 0.0\n for prevtag in prev_viterbi.keys():\n val = prev_viterbi[ prevtag ] * float(tags_dict[prevtag][tag] + 1)/float(len(tags_dict[prevtag]) + len_transition[prevtag]) * float(tags_words_dict[tag][sentence[wordindex]])/float(len_emission[tag])\n if val >= max:\n max = val\n best_previous = prevtag\n\n this_viterbi[ tag ] = prev_viterbi[ best_previous] * \\\n float(tags_dict[best_previous][tag] + 1)/float(len(tags_dict[best_previous]) + len_transition[best_previous]) * float(tags_words_dict[tag][sentence[wordindex]])/float(len_emission[tag])\n else:\n\n best_previous = prev_viterbi.keys()[0]\n max = 0.0\n for prevtag in prev_viterbi.keys():\n val = prev_viterbi[ prevtag ] * float(tags_dict[prevtag][tag] + 1)/float(len(tags_dict[prevtag]) + len_transition[prevtag])\n if val >= max:\n max = val\n best_previous = prevtag\n\n this_viterbi[ tag ] = prev_viterbi[ best_previous] * \\\n float(tags_dict[best_previous][tag] + 1)/float(len(tags_dict[best_previous]) + len_transition[best_previous])\n this_backpointer[ tag ] = best_previous\n\n max = 0.0\n currbest = this_viterbi.keys()[0]\n for tag in this_viterbi.keys():\n if this_viterbi[tag] >= max:\n max = this_viterbi[tag]\n currbest = tag\n\n viterbi.append(this_viterbi)\n 
backpointer.append(this_backpointer)\n prev_viterbi = viterbi[-1]\n best_previous = prev_viterbi.keys()[0]\n max = 0.0\n for prevtag in prev_viterbi.keys():\n val = prev_viterbi[ prevtag ] * float(tags_dict[prevtag][currbest] + 1)/float(len(tags_dict[prevtag]) + len_transition[prevtag])\n if val >= max:\n max = val\n best_previous = prevtag\n\n prob_tagsequence = prev_viterbi[ best_previous ] * float(tags_dict[best_previous][currbest] + 1)/float(len(tags_dict[best_previous]) + len_transition[best_previous])\n\n best_tagsequence = [ best_previous ]\n backpointer.reverse()\n current_best_tag = best_previous\n for bp in backpointer:\n best_tagsequence.append(bp[current_best_tag])\n current_best_tag = bp[current_best_tag]\n\n best_tagsequence.reverse()\n best_tagsequence.pop(0)\n count = 1\n for w, t in zip(sentence, best_tagsequence):\n if count < len(sentence):\n target.write(w+\"/\"+t+\" \")\n count += 1\n else:\n target.write(w+\"/\"+t)\n target.write(\"\\n\")\nfinally:\n target.close()\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","sub_path":"hmmdecode.py","file_name":"hmmdecode.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"426393468","text":"import discogs_client\n\nfrom .apikey import ds_key, ds_secret, ds_token\n\nd = discogs_client.Client('test_muxrb')\nd.set_consumer_key(ds_key, ds_secret)\nd = discogs_client.Client('test_muxrb', user_token=ds_token)\nme = d.identity()\n\n\ndef get_disc(song):\n data = d.search(song.encode('utf-8'), type='relase')\n if data is None:\n return\n year = 0\n try:\n first = data[0]\n except:\n return\n try:\n if first.title:\n r_album = first.title\n except:\n return\n r_genre = first.genres\n r_style = first.styles\n try:\n r_country = first.country\n except:\n r_country = \"None\"\n try:\n r_image = first.images[0]['uri']\n except:\n r_image = \"None\"\n try:\n r_thumb = first.thumb\n except:\n r_thumb = 
\"None\"\n try:\n r_labels = first.labels\n except:\n r_labels = \"None\"\n for yy in data:\n try:\n rok = yy.year\n except:\n rok = 0\n if int(rok) > 1900:\n year = yy.year\n break\n return r_album, r_labels, r_genre, r_style, r_country, r_image, r_thumb, year\n","sub_path":"radioszpieg/scrobbell/management/commands/_discogs.py","file_name":"_discogs.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"235864898","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nimport base64\nimport contextlib\nimport io\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.tools.misc import xlwt\n\nclass QuantValuationExport(models.TransientModel):\n _name = \"quant.valuation.export\"\n\n name = fields.Char('File Name', readonly=True)\n data = fields.Binary('File', readonly=True)\n state = fields.Selection([('choose', 'choose'), ('get', 'get')], default='choose')\n\n @api.multi\n def _get_headers(self):\n return [\n 'Producto',\n 'Tienda',\n 'Cantidad',\n 'Valor',\n ]\n \n @api.multi\n def act_getfile(self):\n this = self[0]\n domain = [('product_id.type', '=', 'product'),('location_id.usage', '=', 'internal')]\n quant_data = self.env['stock.quant'].read_group(domain,fields=['product_id','location_id','quantity'], groupby=['product_id','location_id'])\n locations = self.env['stock.location'].search([('usage', '=', 'internal')])\n print (locations)\n location_qty = {'product_id': '', 'quantity': 0.0, 'inventory_value': 0.0}\n location_header = {}\n for location in locations:\n warehouse = self.env['stock.warehouse'].search([('lot_stock_id', '=', location.id)], limit=1)\n location_qty.setdefault(location.id, 0.0)\n location_header.setdefault(location.id, warehouse.name if warehouse else location.name)\n quant_result = []\n for qd in quant_data:\n product = self.env['product.product'].browse(qd['product_id'][0])\n 
quant_item = location_qty.copy()\n quant_item.update({'product_id': qd['product_id'][1], 'quantity': qd['quantity'], 'inventory_value': product.stock_value})\n if '__domain' in qd:\n quant_item.update({'product_id': qd['product_id'][1], 'inventory_value': product.stock_value})\n quant_location = self.env['stock.quant'].read_group(qd['__domain'],fields=['location_id','quantity'], groupby=['location_id'])\n for ql in quant_location:\n quant_item.update({ql['location_id'][0]: ql['quantity']})\n quant_result.append(quant_item)\n with contextlib.closing(io.BytesIO()) as buf:\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet('Reporte Valoración Inventario')\n header_bold = xlwt.easyxf(\"font: bold on; pattern: pattern solid, fore_colour gray25;\")\n\n worksheet.write(0,0,'Producto',header_bold)\n key_locations = []\n col = 1\n for key,value in location_header.items():\n worksheet.write(0,col,value,header_bold)\n key_locations.append(key)\n col += 1\n worksheet.write(0,col,'Cantidad Total',header_bold)\n worksheet.write(0,col + 1,'Valor',header_bold)\n\n row = 1\n col = 1\n total_value = 0.0\n for qr in quant_result:\n cell_num = xlwt.easyxf(num_format_str=\"#,##0.00\")\n worksheet.write(row,0,qr['product_id'])\n col = 1\n for key_location in key_locations:\n worksheet.write(row,col,qr[key_location],cell_num)\n col += 1\n worksheet.write(row,col,qr['quantity'],cell_num)\n worksheet.write(row,col + 1,qr['inventory_value'],cell_num)\n total_value += qr['inventory_value']\n row += 1\n cell_num = xlwt.easyxf(\"font: bold on; pattern: pattern solid, fore_colour gray25;\", num_format_str=\"#,##0.00\")\n worksheet.write(row,col,\"Total:\",cell_num)\n worksheet.write(row,col + 1,total_value,cell_num)\n\n workbook.save(buf)\n out = base64.encodestring(buf.getvalue())\n\n filename = 'Reporte valor de Inventario'\n extension = 'xls'\n name = \"%s.%s\" % (filename, extension)\n this.write({'state': 'get', 'data': out, 'name': name})\n return {\n 'type': 
'ir.actions.act_window',\n 'res_model': 'quant.valuation.export',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'target': 'new',\n }\n","sub_path":"sale_premiumpaint/wizard/export_quant_line.py","file_name":"export_quant_line.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"366011153","text":"import cv2\nimport numpy as np\nimport face_recognition\nimport os\nimport pickle\nfrom PIL import Image, ImageDraw, ImageFont\nfrom constant import RESIZE_SCALE\nimport DatabaseUtils\nfrom datetime import datetime\n\nFONT_PATH = './arial.ttf'\nfont = ImageFont.truetype(FONT_PATH, 32)\ndef attendance(name):\n with open('attendance.csv', 'r+') as f:\n myDataList = f.readlines()\n nameList = []\n for line in myDataList:\n entry = line.split(',')\n nameList.append(entry[0])\n if name not in nameList:\n now = datetime.now()\n datetimeString = now.strftime('%H:%M:%S')\n # f.writelines(f'\\n{name}, {datetimeString}')\n\nfacesData = DatabaseUtils.getAllFaceData()\n# print(facesData[1].StudentID)\n# print(pickle.loads(facesData[1].EncodedImage))\n\n\nvideo_capture = cv2.VideoCapture(0)\n\nwhile True:\n success, img = video_capture.read()\n resizedImg = cv2.resize(img, (0, 0), None, 1 / RESIZE_SCALE, 1 / RESIZE_SCALE)\n resizedImg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n facesCurrentLocation = face_recognition.face_locations(resizedImg)\n encodedCurrentFaces = face_recognition.face_encodings(resizedImg, facesCurrentLocation)\n for currentFace, location in zip(encodedCurrentFaces, facesCurrentLocation):\n minDistance = 0\n id = \"\"\n name = \"Unknown\"\n # knownFace = pickle.loads(facesData[0].EncodedImage)\n # test = face_recognition.compare_faces([knownFace], encodedCurrentFaces[0])\n # print(test)\n for i, face in enumerate(facesData):\n faceEncodedData = pickle.loads(face.EncodedImage)\n if 
face_recognition.compare_faces([faceEncodedData],currentFace):\n print('run')\n faceDistance = face_recognition.face_distance([faceEncodedData], currentFace);\n if i == 0:\n minDistance = faceDistance\n id = face.StudentID\n elif faceDistance < minDistance:\n minDistance = faceDistance\n id = face.StudentID\n y1, x2, y2, x1 = location\n cv2.rectangle(img, (x1 - 5, y1 - 5), (x2 + 5, y2 + 5), (0, 255, 0), 2)\n if id != \"\" and minDistance < 0.5:\n studentData = DatabaseUtils.getStudentNameById(id)\n # cv2.rectangle(img, (x1, y2-35), (x2, y2), (0,255,0), cv2.FILLED)\n\n name = studentData.FullName +\" - \"+id\n attendance(studentData.FullName)\n imagePIL = Image.fromarray(img)\n draw = ImageDraw.Draw(imagePIL)\n draw.text((x1, y2 + 25), name, font = font, fill=(255, 255, 255, 0), )\n img = np.array(imagePIL)\n cv2.imshow(\"Cam\", img)\n if cv2.waitKey(1) == ord('q'):\n break;\nvideo_capture.release()\ncv2.destroyAllWindows()","sub_path":"AttendanceTest.py","file_name":"AttendanceTest.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"242140250","text":"# Create a Book class with instance Library_name, book_name, author, pages?class Employee():\nclass Book:\n def details(self ,library_name,book_name,auther,page):\n self.library_name= library_name\n self.book_name= book_name\n self.auther=auther\n self.page=page\n print(\"libraryname=\", self.library_name)\n print(\"bookname=\", self.book_name)\n print(\"auther=\", self.auther)\n print(\"page=\", self.page)\n\n\nob=Book()\nob.details(\"alarvadi\", \"Mathil\",\"vhgh\",125)","sub_path":"Exam2/Book.py","file_name":"Book.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"377173713","text":"'''\nbitcoind fork of jmcorgan, branch addrindex-0.9.2:\nhttps://github.com/jmcorgan/bitcoin/tree/addrindex-0.9.2\n'''\nimport logging\nimport 
binascii\nimport hashlib\nimport json\n\nfrom lib import config, util, util_bitcoin\n\ndef is_multisig(address):\n array = address.split('_')\n return (len(array) > 1)\n\ndef search_raw_transactions(address):\n result = util.call_jsonrpc_api('search_raw_transactions', {'address': address})\n return result['result']\n\ndef get_unspent_txouts(address, return_confirmed=False):\n result = util.call_jsonrpc_api('get_unspent_txouts', {'address': address, 'return_confirmed': return_confirmed})\n return result['result']\n\ndef get_block_count():\n return int(util.bitcoind_rpc('getblockcount', None))\n\ndef check():\n pass\n\ndef getinfo():\n return {\n \"info\": {\n \"blocks\": get_block_count()\n }\n }\n\ndef listunspent(address):\n outputs = get_unspent_txouts(address)\n utxo = []\n for txo in outputs:\n newtxo = {\n 'address': address,\n 'txid': txo['txid'],\n 'vout': txo['vout'],\n 'ts': 0,\n 'scriptPubKey': txo['scriptPubKey'],\n 'amount': float(txo['amount']),\n 'confirmations': txo['confirmations'],\n 'confirmationsFromCache': False\n }\n utxo.append(newtxo)\n return utxo\n\ndef getaddressinfo(address):\n outputs = get_unspent_txouts(address, return_confirmed=True)\n balance = sum(out['amount'] for out in outputs['confirmed'])\n unconfirmed_balance = sum(out['amount'] for out in outputs['all']) - balance\n \n if is_multisig(address):\n array = address.split('_')\n # TODO: filter transactions\n raw_transactions = reversed(search_raw_transactions(array[1:-1][1]))\n else:\n raw_transactions = reversed(search_raw_transactions(address))\n\n transactions = []\n for tx in raw_transactions:\n if 'confirmations' in tx and tx['confirmations'] > 0:\n transactions.append(tx['txid'])\n\n return {\n 'addrStr': address,\n 'balance': balance,\n 'balanceSat': balance * config.UNIT,\n 'unconfirmedBalance': unconfirmed_balance,\n 'unconfirmedBalanceSat': unconfirmed_balance * config.UNIT,\n 'transactions': transactions\n }\n \n return None\n\ndef gettransaction(tx_hash):\n tx = 
util.bitcoind_rpc('getrawtransaction', [tx_hash, 1])\n valueOut = 0\n for vout in tx['vout']:\n valueOut += vout['value']\n return {\n 'txid': tx_hash,\n 'version': tx['version'],\n 'locktime': tx['locktime'],\n 'confirmations': tx['confirmations'] if 'confirmations' in tx else 0,\n 'blocktime': tx['blocktime'] if 'blocktime' in tx else 0,\n 'blockhash': tx['blockhash'] if 'blockhash' in tx else 0,\n 'time': tx['time'] if 'time' in tx else 0,\n 'valueOut': valueOut,\n 'vin': tx['vin'],\n 'vout': tx['vout']\n }\n return None\n\ndef get_pubkey_from_transactions(address, raw_transactions):\n #for each transaction we got back, extract the vin, pubkey, go through, convert it to binary, and see if it reduces down to the given address\n for tx in raw_transactions:\n #parse the pubkey out of the first sent transaction\n for vin in tx['vin']:\n scriptsig = vin['scriptSig']\n asm = scriptsig['asm'].split(' ')\n pubkey_hex = asm[1]\n try:\n if util_bitcoin.pubkey_to_address(pubkey_hex) == address:\n return pubkey_hex\n except:\n pass\n return None\n\ndef get_pubkey_for_address(address):\n if is_multisig(address):\n array = address.split('_')\n addresses = array[1:-1]\n else:\n addresses = [address]\n \n pubkeys = []\n\n for address in addresses:\n raw_transactions = search_raw_transactions(address)\n pubkey = get_pubkey_from_transactions(address, raw_transactions)\n if pubkey: pubkeys.append(pubkey)\n\n return pubkeys\n","sub_path":"lib/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338622612","text":"from argg_hdl import *\nfrom argg_hdl.examples import *\n\nclass Counter(v_clk_entity):\n def __init__(self, clk , InputType=v_slv(32)):\n super().__init__(clk)\n self.Data_out = port_Stream_Master(axisStream( InputType))\n self.architecture()\n\n @architecture\n def architecture(self):\n data = v_slv(32)\n data2 = v_slv(32)\n data_out = 
get_handle(self.Data_out)\n @rising_edge(self.clk)\n def proc():\n data << data + 1\n if data_out and data > 10:\n data_out << data2\n data2 << data2 + 1\n data << 0\n\n end_architecture()\n\nclass tb(v_entity):\n def __init__(self):\n super().__init__()\n self.architecture()\n\n\n\n @architecture\n def architecture(self):\n clkgen = v_create(clk_generator())\n cnt = v_create(Counter(clkgen.clk))\n\n cnt_out = get_handle(cnt.Data_out)\n data = v_slv(32)\n data2 = v_slv(32)\n opt_data = optional_t()\n @rising_edge(clkgen.clk)\n def proc():\n cnt_out >> data\n cnt_out >> opt_data \n \n \n\n end_architecture()\n\ntb1 = v_create(tb())\n\n#run_simulation(tb1, 30000,\"optional_t.vcd\")\nconvert_to_hdl(tb1,\"ex3\")","sub_path":"argg_hdl/tests/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633752995","text":"import numpy as np\nimport torch\nimport torchvision.transforms as trans\nfrom torch.utils.data import DataLoader, random_split\n\n\ndef load_raw(path, latent_filter=None):\n data_zip = np.load(path, allow_pickle=True)\n\n imgs = data_zip['imgs']\n latents_values = data_zip['latents_values']\n latents_classes = data_zip['latents_classes']\n\n if latent_filter is not None:\n idx = latent_filter(latents_values)\n\n imgs = imgs[idx]\n latents_values = latents_classes[idx]\n latents_classes = latents_classes[idx]\n\n imgs = torch.from_numpy(imgs).to(dtype=torch.float32)\n latents_values = torch.from_numpy(latents_values).to(dtype=torch.float32)\n latents_classes = torch.from_numpy(latents_classes).to(dtype=torch.float32)\n\n return imgs, latents_classes, latents_values\n\n\nclass BatchGenerator:\n def __init__(self, data, batch_size, shuffle=True, random_state=None):\n if random_state is None or isinstance(random_state, int):\n random_state = np.random.RandomState(random_state)\n\n self.imgs, self.latent_values, self.classes = data\n 
self.batch_size = batch_size\n self.shuffle = shuffle\n self.random_state = random_state\n\n def __len__(self):\n return len(self.imgs) // self.batch_size\n\n def __iter__(self):\n order = np.arange(len(self.imgs))\n if self.shuffle:\n self.random_state.shuffle(order)\n\n for i in range(len(self)):\n idx = np.arange(i * self.batch_size, (i + 1) * self.batch_size)\n yield self._get_batch(order[idx])\n\n def _get_batch(self, idx):\n imgs = self.imgs[idx]\n latent_values = self.latent_values[idx]\n latent_classes = self.classes[idx]\n\n return imgs, latent_values, latent_classes\n\n\nclass UnsupervisedLoader(BatchGenerator):\n def _get_batch(self, idx):\n imgs, latent_values, latent_classes = super()._get_batch(idx)\n imgs = imgs.reshape(-1, 64 * 64)\n return imgs, imgs\n\n\nclass SupervisedLoader(BatchGenerator):\n def _get_batch(self, idx):\n imgs, latent_values, latent_classes = super()._get_batch(idx)\n imgs = imgs.reshape(-1, 64 * 64)\n return imgs, latent_classes\n\n\nclass SemiSupervisedLoader(BatchGenerator):\n def _get_batch(self, idx):\n imgs, latent_values, latent_classes = super()._get_batch(idx)\n imgs = imgs.reshape(-1, 64 * 64)\n return imgs, (latent_classes, imgs)\n\n\nclass ValidationWrapper:\n def __init__(self, generator, n_batches):\n self.generator = generator\n self.n_batches = n_batches\n\n def __len__(self):\n return self.n_batches\n\n def __iter__(self):\n for i, batch in enumerate(self.generator):\n if i >= self.n_batches:\n break\n yield batch\n\n\ndef get_loader(setting):\n if setting == 'unsupervised':\n return UnsupervisedLoader\n elif setting == 'supervised':\n return SupervisedLoader\n elif setting == 'semi-supervised':\n return SemiSupervisedLoader\n raise ValueError('Unrecognized setting \"{}\"'.format(setting))\n\n\ndef load_dsprites(path, setting, batch_size=32, data_filters=(None, None), \n val_ratio=0.2, shuffle=True, random_state=None):\n train_filter, test_filter = data_filters\n\n train_data = load_raw(path, train_filter)\n 
test_data = train_data if test_filter is None else load_raw(path,test_filter)\n val_n_batches = np.ceil(len(train_data) * val_ratio / batch_size)\n\n loader = get_loader(setting)\n\n train_data = loader(train_data, batch_size, True, random_state)\n test_data = loader(test_data, batch_size, True, random_state)\n val_data = ValidationWrapper(train_data, val_n_batches)\n\n return train_data, test_data, val_data\n","sub_path":"src/dataset/dsprites.py","file_name":"dsprites.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588041833","text":"## Reference\n# 513 https://leetcode.com/problems/find-bottom-left-tree-value/#/description\n\n## Tags - Medium\n# BinaryTree; DFS; BFS\n\n## Description\n# Given a binary tree, find the leftmost value in the last row of the tree.\n\n# Example:\n# input 213, output1\n\n## Analysis\n# DFS: while + stack\n# binary tree => divide and conquer: how about the left subtree; how about right subtree\n# how about their relationship.\n# we need to compare left, right subtree level, therefore, there should be a helper\n# to return level and value, and at the same time, we need maintain level during dfs\n\n# BFS: while + queue(deque)\n# we should maintain a leftvalue, and we needn't keep others\n# but just make sure the first value of the level will be updated, others won't\n# AND be careful that, the left value may be 0. 
Therefore, pay attention to \n# the judegement when checking its existence.\n\n\n## Solution:\n# definition of binary tree\nclass TreeNode(Object):\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\nclass Solution(Object):\n # DFS: recursion(divide and conquer)\n # @params: root of the binary tree\n # @return: left most value\n def findBottomLeftValue1(self, root):\n # corner case\n if not root:\n return None\n _, val = self.dfshelper(root, 0)\n return val\n # @params: root of the binary tree; level of current root\n # @return: left most level and value \n def dfshelper(self, root, level):\n # base case\n if not root:\n return -1, None\n if not root.left and not root.right:\n return level, root.val\n # divide and conquer\n llevel, lval = self.dfshelper(root.left, level + 1)\n rlevel, rval = self.dfshelper(root.right, level + 1)\n if llevel >= rlevel:\n return llevel, lval\n return rlevel, rval\n\n # BFS: while + queue(deque)\n def findBottomLeftValue2(self, root):\n if not root:\n return None\n from collections import deque\n q = deque([root])\n result = root.val\n while q:\n leftval = None\n qlen = len(q)\n for i in xrange(qlen):\n current = q.popleft()\n if leftval is None:\n leftval = current.val\n seq = (current.left, current.right)\n for node in seq:\n if node:\n q.append(node)\n result = leftval\n return result\n\n\n\n","sub_path":"data_structures/tree/binary_tree/find_bottom_left_value.py","file_name":"find_bottom_left_value.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"592107682","text":"__author__ = 'ELITE BOOK'\n\nimport MessageController\n\nclass TestMessageController():\n\n msgController = None\n\n def __init__(self):\n self.msgController = MessageController()\n self.testCheckMessage()\n\n def testCheckMessage(self):\n checkSplit = self.msgController.checkMessage(\"0000/username/fileID/chunkID/Data...dbchbdovhuscd\")\n 
print(\"Data from the string is: \", checkSplit)\n\n\nif __name__ == '__main__':\n TestMessageController()\n","sub_path":"Serendipity/Serendipity/Serendipity/Storage Server 3/UnitTests/TestMessageController.py","file_name":"TestMessageController.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"24525650","text":"from pyp5js import *\r\nfrom random import choice\r\n\r\n\r\nPEQUENA, MEDIA, GRANDE, MARACANA = 200, 300, 600, 900\r\nNUM_FATIAS = [4, 8, 12, 16]\r\n\r\n\r\ndef polar_coordinate(x0, y0, r, angle):\r\n x = x0 + r * cos(angle)\r\n y = y0 + r * sin(angle)\r\n\r\n return x, y\r\n\r\n\r\ndef area_pizza(tamanho):\r\n return PI * (tamanho / 2) ** 2\r\n\r\n\r\nclass Pizza():\r\n\r\n def __init__(self, tamanho):\r\n self.x, self.y = width / 2, height / 2\r\n self.tamanho = tamanho\r\n self.raio = tamanho / 2\r\n self.proporcao = area_pizza(self.tamanho) / area_pizza(GRANDE)\r\n\r\n def massa(self):\r\n fill(255, 199, 0)\r\n ellipse(width / 2, height / 2, self.tamanho, self.tamanho)\r\n\r\n def adiciona_ingrediente(self, ingrediente, quantidade):\r\n for i in range(int(quantidade * self.proporcao)):\r\n x = random(width)\r\n y = random(height)\r\n while dist(x, y, self.x, self.y) > (self.raio - 10):\r\n x = random(width)\r\n y = random(height)\r\n\r\n ingrediente(x, y)\r\n\r\n def fatiada(self, pedacos=8):\r\n noFill()\r\n stroke(27, 27, 27, 60)\r\n strokeWeight(2)\r\n angle_inc = TWO_PI / pedacos\r\n\r\n for i in range(1, pedacos + 1):\r\n arc(\r\n self.x,\r\n self.y,\r\n self.tamanho,\r\n self.tamanho,\r\n angle_inc * (i - 1),\r\n angle_inc * i,\r\n PIE\r\n )\r\n\r\n def borda_catupiry(self):\r\n strokeWeight(12 * self.proporcao)\r\n stroke(230)\r\n for i in range(360):\r\n angle = radians(i)\r\n r = self.raio + map(sin(i), -1, 1, -15, 5)\r\n x, y = polar_coordinate(self.x, self.y, r, angle)\r\n point(x, y)\r\n\r\n def borda_tradicional(self):\r\n noFill()\r\n 
strokeWeight(12 * self.proporcao)\r\n stroke(150, 100, 19)\r\n ellipse(self.x, self.y, self.tamanho, self.tamanho)\r\n\r\n\r\ndef muzzarella(x, y):\r\n noStroke()\r\n fill(255, 255, 0)\r\n rect(x, y, 5, 5)\r\n\r\ndef calabresa(x, y):\r\n noStroke()\r\n fill(255, 0, 0)\r\n ellipse(x, y, 30, 30)\r\n\r\ndef oregano(x, y):\r\n dir = p5.Vector.random2D()\r\n dir.mult(5)\r\n strokeWeight(1)\r\n stroke(2)\r\n line(x, y, x + dir.x, y + dir.y)\r\n\r\ndef alho(x, y):\r\n dir = p5.Vector.random2D()\r\n dir.mult(8)\r\n fill(27)\r\n triangle(x, y, x + dir.x, y, x, y + dir.y)\r\n\r\ndef azeitona(x, y):\r\n noFill()\r\n stroke(142, 0, 92)\r\n strokeWeight(5)\r\n ellipse(x, y, 20, 20)\r\n\r\ndef setup():\r\n createCanvas(950, 950)\r\n background(250)\r\n\r\n\r\ndef draw():\r\n pizza = Pizza(GRANDE)\r\n pizza.massa()\r\n pizza.adiciona_ingrediente(muzzarella, 5000)\r\n\r\n ingredientes_extras = [\r\n (calabresa, 100),\r\n (oregano, 2000),\r\n (alho, 1500),\r\n (azeitona, 50)\r\n ]\r\n for ingrediente, quantidade in ingredientes_extras:\r\n if random(1) > 0.5:\r\n pizza.adiciona_ingrediente(ingrediente, quantidade)\r\n\r\n pizza.fatiada(choice(NUM_FATIAS))\r\n\r\n if random(1) > 0.5:\r\n pizza.borda_tradicional()\r\n else:\r\n pizza.borda_catupiry()\r\n\r\n noLoop()\r\n","sub_path":"s_245/pyzza.py","file_name":"pyzza.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"564610535","text":"# Author: Luke Mei\n# Student Number : A01075487\n# Created time : 2021/1/25 11:08 \n# File Name: catalogue.py\n\nfrom library_item import LibraryItem\nimport difflib\nfrom library_item_generator import LibraryItemGenerator\n\n\nclass Catalogue:\n \"\"\"\n Implement a Catalogue class will now be responsible for maintaining a list of books.\n Move all the methods and code related to searching, adding and removing books to this new class\n \"\"\"\n\n def __init__(self, item_list: list[LibraryItem]) -> 
None:\n \"\"\"\n Initialize the library with a list of books.\n :param item_list: a sequence of book objects.\n \"\"\"\n self._item_list = item_list\n\n @property\n def item_list(self) -> list[LibraryItem]:\n \"\"\"\n Property for item list.\n\n :return: the property for item list\n \"\"\"\n return self._item_list\n\n def find_items(self, title: str) -> list:\n \"\"\"\n Find books with the same and similar title.\n :param title: a string\n :return: a list of titles.\n \"\"\"\n title_list = [library_book.get_title() for library_book in self._item_list]\n results = difflib.get_close_matches(title.title(), title_list, cutoff=0.2)\n return results\n\n def add_item(self) -> None:\n \"\"\"\n Add a brand new book to the library with a unique call number.\n \"\"\"\n new_item = LibraryItemGenerator.create_library_item()\n\n found_item = self.retrieve_item_by_call_number(new_item.call_number)\n if found_item:\n print(f\"Could not add Item with call number \"\n f\"{new_item.call_number}. It already exists. \")\n else:\n self._item_list.append(new_item)\n print(\"Item added successfully! 
Item details:\")\n print(new_item)\n\n def remove_item(self, call_number: str) -> None:\n \"\"\"\n Remove an existing book from the library\n :param call_number: a string\n :precondition call_number: a unique identifier\n \"\"\"\n found_book = self.retrieve_item_by_call_number(call_number)\n if found_book:\n self._item_list.remove(found_book)\n print(f\"Successfully removed {found_book.get_title()} with \"\n f\"call number: {call_number}\")\n else:\n print(f\"book with call number: {call_number} not found.\")\n\n def reduce_item_count(self, call_number) -> bool:\n \"\"\"\n Decrement the book count for an book with the given call number\n in the library.\n :param call_number: a string\n :precondition call_number: a unique identifier\n :return: True if the book was found and count decremented, false\n otherwise.\n \"\"\"\n library_book = self.retrieve_item_by_call_number(call_number)\n if library_book:\n library_book.decrement_number_of_copies()\n return True\n return False\n\n def increment_item_count(self, call_number) -> bool:\n \"\"\"\n Increment the book count for an book with the given call number\n in the library.\n :param call_number: a string\n :precondition call_number: a unique identifier\n :return: True if the book was found and count incremented, false\n otherwise.\n \"\"\"\n library_book = self.retrieve_item_by_call_number(call_number)\n if library_book:\n library_book.increment_number_of_copies()\n return True\n return False\n\n def return_item(self, call_number) -> None:\n \"\"\"\n Return an book with the given call number from the library.\n :param call_number: a string\n :precondition call_number: a unique identifier\n \"\"\"\n status = self.increment_item_count(call_number)\n if status:\n print(\"book returned successfully!\")\n else:\n print(f\"Could not find book with call number {call_number}\"\n f\". 
Return failed.\")\n\n def retrieve_item_by_call_number(self, call_number: str) -> LibraryItem or None:\n \"\"\"\n A private method that encapsulates the retrieval of an book with\n the given call number from the library.\n :param call_number: a string\n :return: book object if found, None otherwise\n \"\"\"\n for library_item in self._item_list:\n if library_item.call_number == call_number:\n return library_item\n return None","sub_path":"Labs/Lab3/catalogue.py","file_name":"catalogue.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"563006305","text":"from collections import Counter\n\na = int(input())\nb = []\nfor i in range(a):\n c = str(input())\n b.append(c)\nb = sorted(b, reverse=True)\nd = Counter(b).most_common()\nprint('%s %s' % (d[0][0], d[0][1]))\n\n\n\n","sub_path":"9612 Maximum Word Frequency.py","file_name":"9612 Maximum Word Frequency.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"520507036","text":"import re\nimport sqlite3\nimport json\n\nFILE_QandQ_NAME = 'law-Q&A-001'\n\nquestionPattern = re.compile(\"(.+?)\\(A\\)(.+?)\\(B\\)(.+?)\\(C\\)(.+?)\\(D\\)(.+)\")\n\nwith open(FILE_QandQ_NAME + '.txt', 'r') as f:\n # f.write(docText)\n i = 1\n questionsList = []\n answers = dict(json.loads(f.readline()))\n answers = {str(int(k)): v for k, v in answers.items()}\n answers[200] = 'A'\n print(answers)\n for q in f:\n # print(q)\n q = questionPattern.search(q).groups()\n q = [i.strip() for i in q]\n # print(len(q))\n questionsList.append((q[0], q[1], q[2], q[3], q[4], answers[str(i)]))\n i += 1\n\nconn = sqlite3.connect('ncbexQandA.db')\nc = conn.cursor()\n# Create table\ntableName = '`NBE SAMPLE TEST ' + FILE_QandQ_NAME + '`'\ncreateColumns = 'id integer primary key, question text, propA text, propB text, propC text, propD text, answer text'\ninsertColumns = 
'question, propA, propB, propC, propD, answer'\ncommand = 'CREATE TABLE {} ({})'.format(tableName, createColumns)\nc.execute('DROP TABLE IF EXISTS {}'.format(tableName))\nc.execute(command)\n# Insert a row of data\nc.executemany(\"INSERT INTO {} ({}) VALUES (?,?,?,?,?,?)\".format(tableName, insertColumns), questionsList)\n# Save (commit) the changes\nconn.commit()\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\nconn.close()\n","sub_path":"write-in-db.py","file_name":"write-in-db.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"280406314","text":"import io\nimport re\nimport random\n\nimport discord\nfrom discord.ext import commands\nfrom PIL import Image, ImageOps, ImageFilter\n\n\nclass Images:\n \"\"\"Image related commands\"\"\"\n\n @commands.command()\n async def invert(self, ctx, *, member: discord.Member = None):\n \"\"\"Inverts an image.\"\"\"\n\n member = member or ctx.author\n\n async with ctx.session.get(member.avatar_url_as(format='png')) as r:\n with io.BytesIO(await r.read()) as f:\n file = await ctx.bot.loop.run_in_executor(None, self.invert_image, f)\n\n await ctx.send(file=discord.File(file, f'inverted.png'))\n\n def invert_image(self, file):\n with Image.open(file) as image:\n if image.mode == 'RGBA':\n r, g, b, a = image.split()\n r, g, b = map(lambda image: image.point(lambda p: 255 - p), (r, g, b))\n inverted_image = Image.merge(image.mode, (r, g, b, a))\n else:\n inverted_image = ImageOps.invert(image)\n\n f = io.BytesIO()\n inverted_image.save(f, format='png')\n f.seek(0)\n\n return f\n\n @commands.command()\n async def needsmorejpeg(self, ctx, *, member: discord.Member = None):\n \"\"\"Lowers the quality of an image to its minimum.\"\"\"\n\n member = member or ctx.author\n\n async with ctx.session.get(member.avatar_url_as(format='png')) as r:\n with 
Image.open(io.BytesIO(await r.read())) as image:\n file = io.BytesIO()\n image.save(file, format='jpeg', quality=1)\n file.seek(0)\n\n await ctx.send(file=discord.File(file, 'needsmore.jpeg'))\n\n @commands.command()\n async def edge(self, ctx, *, member: discord.Member = None):\n member = member or ctx.author\n\n async with ctx.session.get(member.avatar_url_as(format='png')) as r:\n with io.BytesIO(await r.read()) as f:\n file = await ctx.bot.loop.run_in_executor(None, self.make_edge, f)\n\n await ctx.send(file=discord.File(file, 'edge.png'))\n\n def make_edge(self, file):\n with Image.open(file).convert('RGB') as image:\n horizontal = image.filter(ImageFilter.Kernel((3, 3), [-1, 0, 1, -1, 0, 1, -1, 0, 1], 1.0))\n vertical = image.filter(ImageFilter.Kernel((3, 3), [-1, -1, -1, 0, 0, 0, 1, 1, 1], 1.0))\n modified = Image.blend(horizontal, vertical, 0.5)\n\n f = io.BytesIO()\n modified.save(f, format='png')\n f.seek(0)\n\n return f\n\n @commands.command()\n async def retro(self, ctx, line_1: str, line_2: str = '', *, line_3: str = ''):\n if not re.fullmatch(r'[A-Za-z0-9 ]+', line_1):\n return await ctx.send('First line only supports alphanumerical characters.')\n\n data = {\n 'bcg': random.randint(1, 5),\n 'txt': random.randint(1, 4),\n 'text1': line_1,\n 'text2': line_2,\n 'text3': line_3,\n }\n\n async with ctx.session.post('https://photofunia.com/effects/retro-wave', data=data) as r:\n txt = await r.text()\n\n link = re.search(r'(https?.+?\\.jpg\\?download)', txt)\n async with ctx.session.get(link.group(1)) as r:\n await ctx.send(file=discord.File(io.BytesIO(await r.read()), 'retro.jpg'))\n\n @commands.command()\n async def moom(self, ctx, *, member: discord.Member = None):\n member = member or ctx.author\n\n await self.mirror(ctx, member.avatar_url_as(format='png'))\n\n @commands.command()\n async def dood(self, ctx, *, member: discord.Member = None):\n member = member or ctx.author\n\n await self.mirror(ctx, member.avatar_url_as(format='png'))\n\n async def 
mirror(self, ctx, link):\n async with ctx.session.get(link) as r:\n with io.BytesIO(await r.read()) as f:\n file = await ctx.bot.loop.run_in_executor(None, self.mirror_image, f, ctx.command.name)\n\n await ctx.send(file=discord.File(file, f'{ctx.command.name}.png'))\n\n def mirror_image(self, file, command_name):\n with Image.open(file) as image:\n width, height = image.size\n\n if command_name == 'dood':\n left = image.crop((0, 0, width / 2, height))\n else:\n left = ImageOps.mirror(image.crop((width / 2, 0, width, height)))\n right = ImageOps.mirror(left)\n final = Image.new('RGB', image.size)\n final.paste(left)\n final.paste(right, (int(width / 2), 0))\n\n f = io.BytesIO()\n final.save(f, format='png')\n f.seek(0)\n\n return f\n\n\ndef setup(bot):\n bot.add_cog(Images())\n","sub_path":"cogs/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"392312654","text":"import time\nimport torch\nimport torch.nn.functional as F\n\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader\nfrom util.config import DATASET_PARAMETERS, NETWORKS_PARAMETERS\nfrom util.parse_dataset import csv_to_list\nfrom network import restore_train, get_network\nfrom utils import Meter, cycle, save_model, get_collate_fn, Logger\nfrom dataset import VoiceDataset, FaceDataset, Voice_Face_Dataset\n\n# dataset and dataloader\nprint('Parsing your dataset...')\nvoice_list, face_list, id_class_num, emotion_class_num = csv_to_list(DATASET_PARAMETERS)\nprint('voice samples num = %d, face samples num = %d' %(len(voice_list),len(face_list)))\nprint('Preparing the datasets...')\n# voice_face_dataset = Voice_Face_Dataset(voice_list,face_list,DATASET_PARAMETERS['nframe_range'])\nvoice_dataset = VoiceDataset(voice_list,DATASET_PARAMETERS['nframe_range'])\nface_dataset = FaceDataset(face_list)\n\nprint('Preparing the dataloaders...')\ncollate_fn = 
get_collate_fn(DATASET_PARAMETERS['nframe_range'])\n\nvoice_loader = DataLoader(voice_dataset, shuffle=True, drop_last=True,\n batch_size=DATASET_PARAMETERS['batch_size'],\n num_workers=DATASET_PARAMETERS['workers_num'], # 使用多进程加载的进程数\n collate_fn=collate_fn\n ) # 如何将多个样本数据拼接成一个batch\nface_loader = DataLoader(face_dataset, shuffle=True, drop_last=True,\n batch_size=DATASET_PARAMETERS['batch_size'],\n num_workers=DATASET_PARAMETERS['workers_num'])\n\nvoice_iterator = iter(cycle(voice_loader))\nface_iterator = iter(cycle(face_loader))\n\nprint('Initializing networks...')\nNETWORKS_PARAMETERS['e']['output_channel'] = id_class_num\ne_net, e_optimizer = get_network('e', NETWORKS_PARAMETERS, test=True) # 部分训练\nNETWORKS_PARAMETERS['g']['input_channel'][1] = emotion_class_num\ng_net, g_optimizer = get_network('g', NETWORKS_PARAMETERS, train=True)\n# NETWORKS_PARAMETERS['d1-condition']['input_channel'][1] = emotion_class_num\nd1_net, d1_optimizer = get_network('d0', NETWORKS_PARAMETERS, train=True)\nd2_net, d2_optimizer = get_network('d0', NETWORKS_PARAMETERS, train=True)\nf1_net, f1_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)\nf2_net, f2_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)\n\nNETWORKS_PARAMETERS['c']['output_channel'] = id_class_num\nc1_net, c1_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)\nNETWORKS_PARAMETERS['c']['output_channel'] = emotion_class_num\nc2_net, c2_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)\n\n\n\n# 接力训练,载入已有的模型\nif NETWORKS_PARAMETERS['finetune']:\n restore_train(g_net, d1_net, f1_net, f2_net)\n\n# label for real/fake faces\nreal_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 1)\nfake_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 0)\nD_loss_positive = torch.tensor(1, dtype=torch.float)\nD_loss_negative = D_loss_positive * -1\n\n# Meters for recording the training status 日志模块 #\nwriter = SummaryWriter(\"./models/log\")\nlogger = 
Logger(DATASET_PARAMETERS['log_dir'], time.strftime(\"%Y-%m-%d,%H,%M\"))\niteration = Meter('Iter', 'sum', ':5d')\ndata_time = Meter('Data', 'sum', ':4.2f')\nbatch_time = Meter('Time', 'sum', ':4.2f')\nD_real = Meter('D_real', 'avg', ':4.3f')\nD_fake = Meter('D_fake', 'avg', ':4.3f')\nC1_real = Meter('C1_real', 'avg', ':4.3f')\nC2_real= Meter('C2_real', 'avg', ':4.3f')\nC1_fake = Meter('C1_fake', 'avg', ':4.3f')\nC2_fake= Meter('C2_fake', 'avg', ':4.3f')\nGD_fake = Meter('G_D_fake', 'avg', ':4.3f')\n\ncriterionL1 = torch.nn.L1Loss()\nprint('Training models...')\nfor it in range(600000+1):\n # data\n start_time = time.time()\n # voice, face, voice_identity_label, voice_emotion_label = next(voice_face_iterator)\n # face_identity_label, face_emotion_label = voice_identity_label, voice_emotion_label\n voice, voice_identity_label, voice_emotion_label = next(voice_iterator)\n face, face_identity_label, face_emotion_label = next(face_iterator)\n\n noise = 0.05*torch.randn(DATASET_PARAMETERS['batch_size'], 64, 1, 1) # 标准正态分布\n\n # use GPU or not\n if NETWORKS_PARAMETERS['GPU']:\n voice, voice_identity_label, voice_emotion_label = voice.cuda(), voice_identity_label.cuda(), voice_emotion_label.cuda()\n face, face_identity_label, face_emotion_label = face.cuda(), face_identity_label.cuda(), face_emotion_label.cuda()\n real_label, fake_label = real_label.cuda(), fake_label.cuda()\n noise = noise.cuda()\n D_loss_positive, D_loss_negative = D_loss_positive.cuda(), D_loss_negative.cuda()\n\n\n # get embeddings and generated faces\n embeddings = e_net(voice)\n embeddings = F.normalize(embeddings)\n # introduce some permutations\n embeddings = embeddings + noise\n embeddings = F.normalize(embeddings)\n embeddings = embeddings.squeeze() # 压缩维度从64,128,1,1 --> 64,128\n\n # 扩展维度从64,1 --> 64, 8, 128, 128 , nn.Embedding(emotion_class_num,emotion_class_num)\n face_EM_label_ = torch.zeros((DATASET_PARAMETERS['batch_size'], emotion_class_num)).scatter_(1, 
face_emotion_label.type(torch.LongTensor).unsqueeze(1), 1)\n face_EM_label_ = face_EM_label_.unsqueeze(2).unsqueeze(3).expand(DATASET_PARAMETERS['batch_size'], emotion_class_num, face.size(2), face.size(3))\n face_EM_label_ = face_EM_label_.cuda()\n voice_EM_label_ = torch.zeros((DATASET_PARAMETERS['batch_size'], emotion_class_num)).scatter_(1, voice_emotion_label.type(torch.LongTensor).unsqueeze(1), 1)\n voice_EM_label_ = voice_EM_label_.unsqueeze(2).unsqueeze(3).expand(DATASET_PARAMETERS['batch_size'], emotion_class_num, face.size(2), face.size(3))\n voice_EM_label_ = voice_EM_label_.cuda()\n\n fake_face = g_net(embeddings.unsqueeze(2).unsqueeze(3)) # G条件输入\n\n \"\"\" --------------------update Discriminators and classifers-------------------------- \"\"\"\n f1_optimizer.zero_grad()\n f2_optimizer.zero_grad()\n d1_optimizer.zero_grad()\n d2_optimizer.zero_grad()\n c1_optimizer.zero_grad()\n c2_optimizer.zero_grad()\n\n # ------- 真实样本score------- #\n real_score_out_1 = d1_net(f1_net(face)) # D1无条件输入\n real_score_out_2 = d2_net(f2_net(face)) # D2无条件输入\n D1_real_loss= F.binary_cross_entropy(torch.sigmoid(real_score_out_1), real_label) # BCE loss\n D2_real_loss = F.binary_cross_entropy(torch.sigmoid(real_score_out_2), real_label) # BCE loss\n D_real_loss = 1*D1_real_loss+0*D2_real_loss\n\n # ------- 生成样本score------- #\n fake_score_out_1 = d1_net(f1_net(fake_face.detach())) # D1无条件输入\n fake_score_out_2 = d2_net(f2_net(fake_face.detach())) # D2无条件输入\n D1_fake_loss = F.binary_cross_entropy(torch.sigmoid(fake_score_out_1), fake_label)\n D2_fake_loss = F.binary_cross_entropy(torch.sigmoid(fake_score_out_2), fake_label)\n D_fake_loss = 1*D1_fake_loss+0*D2_fake_loss\n\n real_id_label_out = c1_net(f1_net(face)) # 计算 c1,c2 loss\n real_emotion_label_out = c2_net(f2_net(face)) # 计算 c1,c2 loss\n C1_real_loss = F.nll_loss(F.log_softmax(real_id_label_out, dim=1), face_identity_label)\n C2_real_loss = F.nll_loss(F.log_softmax(real_emotion_label_out, dim=1), face_emotion_label)\n\n 
(D_fake_loss + D_real_loss + 1*C1_real_loss + 1*C2_real_loss).backward()\n\n f1_optimizer.step()\n f2_optimizer.step()\n d1_optimizer.step()\n d2_optimizer.step()\n c1_optimizer.step()\n c2_optimizer.step()\n # ---------------------------------------------\n D_real.update(D_real_loss.item())\n D_fake.update(D_fake_loss.item())\n C1_real.update(C1_real_loss.item())\n C2_real.update(C2_real_loss.item())\n # ---------------------------------------------\n\n \"\"\" --------------------------------update Generator --------------------------------------\"\"\"\n g_optimizer.zero_grad()\n\n fake_id_label_out = c1_net(f1_net(fake_face))\n fake_emotion_label_out = c2_net(f2_net(fake_face))\n fake_score_out_1 = d1_net(f1_net(fake_face)) # D无条件输入\n fake_score_out_2 = d2_net(f2_net(fake_face))\n\n GD_fake_loss1 = F.binary_cross_entropy(torch.sigmoid(fake_score_out_1), real_label)\n GD_fake_loss2 = F.binary_cross_entropy(torch.sigmoid(fake_score_out_2), real_label)\n GD_fake_loss = 1*GD_fake_loss1 +0*GD_fake_loss2\n\n GC1_fake_loss = F.nll_loss(F.log_softmax(fake_id_label_out, dim=1), voice_identity_label) # 用真实标签替代随机标签?\n GC2_fake_loss = F.nll_loss(F.log_softmax(fake_emotion_label_out, dim=1), voice_emotion_label)\n # loss_G_L1_1 = criterionL1(fake_face, face) * 100\n # GD_fake_loss = fake_score_out.mul(-1).mean() # hing loss\n (GD_fake_loss + 1*GC1_fake_loss + 1*GC2_fake_loss).backward()\n\n g_optimizer.step()\n\n # ---------------------------------------------\n GD_fake.update(GD_fake_loss.item())\n C1_fake.update(GC1_fake_loss.item())\n C2_fake.update(GC2_fake_loss.item())\n batch_time.update(time.time() - start_time)\n # ---------------------------------------------\n\n # print status\n if it % 10 == 0:\n logger.info([iteration.__str__() + batch_time.__str__() +\n D_real.__str__() + D_fake.__str__() + C1_real.__str__() +C2_real.__str__()+C1_fake.__str__()+C2_fake.__str__()+\n GD_fake.__str__() ])\n\n writer.add_scalars('data/scalar_group', {\"D_real\": D_real_loss,\n 
\"D_fake\": D_fake_loss,\n \"C1_real_loss\":C1_real_loss,\n \"C2_real_loss\":C2_real_loss,\n \"C1_fake_loss\": GC1_fake_loss,\n \"C2_fake_loss\": GC2_fake_loss,\n \"GD_fake_loss\":GD_fake_loss}, it)\n\n # info = {'image/real_images': real_images(face, 8), 'image/generated_images': generate_img(fake_face, 8)}\n # writer.add_images('image/generated_images', generate_img(fake_face, 8), it)\n batch_time.reset()\n D_real.reset()\n D_fake.reset()\n C1_real.reset()\n C2_real.reset()\n C1_fake.reset()\n C2_fake.reset()\n GD_fake.reset()\n\n # snapshot\n if it % 2000 == 0:\n s_time = time.strftime(\"%m-%d,%H,%M\") + '-' + str(it) + '-'\n # save_model(e_net, 'models/voice_embedding/{}voice_embedding.pth'.format(s_time))\n save_model(g_net, 'models/generator/{}generator.pth'.format(s_time))\n # save_model(d1_net, 'models/discriminator/{}discriminator.pth'.format(s_time))\n # save_model(f1_net, 'models/face_embedding/{}face_embedding1.pth'.format(s_time))\n # save_model(f2_net, 'models/face_embedding/{}face_embedding2.pth'.format(s_time))\n\n iteration.update(1)\n# writer.export_scalars_to_json(\"./models/log/all_scalars.json\")\n# writer.close()\n\n\n","sub_path":"gan_train_v2.py","file_name":"gan_train_v2.py","file_ext":"py","file_size_in_byte":11070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"584263844","text":"import sys\n\ntry:\n import boto3\n\n print(\"imported boto3 successfully\")\nexcept Exception as e:\n print(e)\n sys.exit(1)\nsource_region = 'eu-west-1'\ndestination_region = 'eu-west-2'\nsession = boto3.session.Session(profile_name='cooper')\nec2_source_client = session.client(service_name='ec2', region_name=source_region)\nsts_client = session.client(service_name='sts', region_name=source_region)\naccount_id = sts_client.get_caller_identity().get('Account')\nprint(account_id)\nall_snaps = []\nf_bkp = {\"Name\": \"tag:env\", \"Values\": [\"dev\"]}\nfor each_snap in 
ec2_source_client.describe_snapshots(OwnerIds=[account_id], Filters=[f_bkp]).get('Snapshots'):\n all_snaps.append(each_snap.get('SnapshotId'))\n print(all_snaps)\n\nec2_des_client = session.client(service_name='ec2', region_name=destination_region)\nfor each_source_snap in all_snaps:\n print(\"taking backup for id of {} into of {}\".format(each_source_snap, destination_region))\n ec2_des_client.copy_snapshot(\n Description=\"Disaster recovery\",\n SourceRegion=source_region,\n DestinationRegion=destination_region,\n SourceSnapshotId=each_source_snap,\n )\nprint(\"EBS Snapshot copy destination region completed\")\nprint(\"Modifying tags\")\nfor each_source_snap in f_bkp:\n ec2_source_client.delete_tags(\n Resources=[each_source_snap],\n Tags=[\n {\n 'Key': 'env',\n 'Value': 'dev'\n }\n ]\n )\n print(\"Creating new tags for {}\".format(each_source_snap))\n ec2_source_client.create_tags(\n Resources=[each_source_snap],\n Tags=[\n {\n 'Key': 'env',\n 'Value': 'dev-copy'\n }\n ]\n )\n","sub_path":"lambda/cross-region-ebs-auto-snap-copy.py","file_name":"cross-region-ebs-auto-snap-copy.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"559513399","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n####\n# 10/2010 Bernd Schlapsi \n#\n# This script is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# gPodder is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n# Dependencies:\n# * python-eyed3 (eyeD3 python library - http://eyed3.nicfit.net/)\n# * steghide (steganography program - http://steghide.sourceforge.net/)\n#\n# The script extract the shownotes from the \"Tin Foil Hat\" podcast\n# You can find the instructions how to extract shownotes for the\n# \"Tin Foil Hat\" podcast here:\n# http://cafeninja.blogspot.com/2010/10/tin-foil-hat-show-episode-001.html\n\nimport gpodder\nimport os\nimport shlex\nimport subprocess\nimport tempfile\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntry:\n import eyeD3\nexcept:\n logger.error( '(tfh shownotes hook) Could not find eyeD3')\n\n\nTFH_URL='http://feeds.feedburner.com/TinFoilHat'\nSTEGHIDE_CMD='steghide extract -f -p %(pwd)s -sf %(img)s -xf %(file)s'\n\n\ndef extract_image(filename):\n \"\"\"extract image from the podcast file\"\"\"\n imagefile = None\n try:\n if eyeD3.isMp3File(filename):\n tag = eyeD3.Mp3AudioFile(filename).getTag()\n images = tag.getImages()\n if images:\n tempdir = tempfile.gettempdir()\n img = images[0]\n imagefile = img.getDefaultFileName()\n img.writeFile(path=tempdir, name=imagefile)\n imagefile = \"%s/%s\" % (tempdir, imagefile)\n else:\n logger.info(u'No image found in %s' % filename)\n except:\n pass\n\n return imagefile\n\n\ndef extract_shownotes(imagefile, remove_image=True):\n \"\"\"extract shownotes from the FRONT_COVER.jpeg\"\"\"\n shownotes = None\n password = 'tinfoilhat'\n shownotes_file = '/tmp/shownotes.txt'\n\n if not os.path.exists(imagefile):\n return shownotes\n\n cmd = STEGHIDE_CMD % {\n 'pwd': password,\n 'img': imagefile,\n 'file': shownotes_file\n }\n myprocess = subprocess.Popen(shlex.split(cmd),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (stdout, stderr) = myprocess.communicate()\n\n if remove_image:\n os.remove(imagefile)\n\n if myprocess.returncode == 0:\n #read shownote file\n f = open(shownotes_file, 'r')\n shownotes = unicode(f.read(), \"utf-8\")\n f.close()\n else:\n 
logger.error(u'Error extracting shownotes from the image file %s' % imagefile)\n\n return shownotes\n\n\nclass gPodderHooks(object):\n def __init__(self):\n logger.info('\"Tin Foil Hat\" shownote extractor extension is initializing.')\n\n def on_episode_downloaded(self, episode):\n if episode.channel.url == TFH_URL:\n filename = episode.local_filename(create=False, check_only=True)\n if filename is None:\n return\n \n imagefile = extract_image(filename)\n if imagefile is None:\n return\n\n shownotes = extract_shownotes(imagefile)\n if shownotes is None:\n return\n\n # save shownotes in the database\n if episode.description.find(shownotes) == -1:\n episode.description = \"%s\\n\\n
%s
\" % (episode.description, shownotes)\n episode.save()\n episode.db.commit()\n logger.info(u'updated shownotes for podcast: (%s/%s)' % (episode.channel.title, episode.title))\n","sub_path":"tfh_shownotes_hook.py","file_name":"tfh_shownotes_hook.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"111596587","text":"import requests\n\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import FormView, TemplateView\n\nfrom .forms import TokenForm\nfrom .models import Project\n\n\nclass HomeView(TemplateView):\n template_name = \"project_admin/home.html\"\n\n def get(self, request, *args, **kwargs):\n token = None\n self.member_data = None\n\n if 'master_access_token' in request.session:\n token = request.session['master_access_token']\n self.member_data = self.token_for_memberlist(token)\n if not self.member_data:\n del request.session['master_access_token']\n\n if self.member_data:\n return super().get(request, *args, **kwargs)\n else:\n return redirect('login')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['member_data'] = self.member_data\n return context\n\n def token_for_memberlist(self, token):\n req_url = ('https://www.openhumans.org/api/direct-sharing/project/'\n 'members/?access_token={}'.format(token))\n req = requests.get(req_url)\n if req.status_code == 200:\n return req.json()\n else:\n messages.error(self.request, 'Token not valid. 
Maybe a fresh one is needed?')\n return None\n\n\nclass LoginView(FormView):\n template_name = 'project_admin/login.html'\n form_class = TokenForm\n success_url = reverse_lazy('home')\n\n def form_valid(self, form):\n token = form.cleaned_data['token']\n req_url = (\"https://www.openhumans.org/api/direct-sharing/project/?access_token={}\".format(token))\n params = {'token': token}\n r = requests.get(req_url, params=params).json()\n try:\n Project.objects.update_or_create(id=r['id'], defaults=r)\n self.request.session['master_access_token'] = token\n except Exception as e:\n # Handle expired master tokens, or serve error message\n if 'Expired token' in r['detail']:\n messages.error(self.request, 'Token has expired. Refresh your token in the project management interface.')\n else:\n messages.error(self.request, e)\n \n return redirect('home')\n","sub_path":"project_admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"238675206","text":"from starlette.endpoints import HTTPEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import RedirectResponse\nfrom starlette.templating import Jinja2Templates\n\nfrom psion.webtools import urlencode\n\nfrom example.provider import provider\nfrom example.settings import BASEDIR\n\n\ntemplates = Jinja2Templates(BASEDIR / \"templates\")\n\n\nclass AuthorizationEndpoint(HTTPEndpoint):\n async def get(self, request: Request):\n if not request.user:\n url = urlencode(str(request.url_for(\"auth:login\")), next=str(request.url))\n return RedirectResponse(url, 303)\n\n request = await provider.create_request(request)\n response = await provider.authorize(request)\n return await provider.create_response(response)\n\n\nclass ErrorEndpoint(HTTPEndpoint):\n async def get(self, request: Request):\n error_description = request.query_params.get(\"error_description\")\n error = 
request.query_params.get(\"error\")\n\n return templates.TemplateResponse(\n \"error.j2\",\n {\n \"request\": request,\n \"title\": \"Error\",\n \"error_description\": error_description,\n \"error\": error,\n },\n )\n\n\nclass RevocationEndpoint(HTTPEndpoint):\n async def post(self, request: Request):\n request = await provider.create_request(request)\n response = await provider.revoke(request)\n return await provider.create_response(response)\n\n\nclass TokenEndpoint(HTTPEndpoint):\n async def post(self, request: Request):\n request = await provider.create_request(request)\n response = await provider.token(request)\n return await provider.create_response(response)\n","sub_path":"example/views/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"529310696","text":"\nimport numpy as np\nfrom math import sqrt, isnan\nfrom obspy.signal.cross_correlation import xcorr\nfrom scipy.signal.signaltools import fftconvolve\nimport warnings\n\ndef my_centered(arr, newsize):\n # get the center portion of a 1-dimensional array\n n = len(arr)\n i0 = (n - newsize) // 2\n if n%2 == 0:\n i0 += 1\n i1 = i0 + newsize\n return arr[i0:i1]\n\ndef classic_xcorr(trace1, trace2, max_lag_samples):\n \n x_corr = xcorr(trace1.data, trace2.data,\\\n max_lag_samples, True)[2]\n \n return x_corr\n\ndef get_correlation_params(data1,data2):\n\n if len(data1) == 0 or len(data2) == 0:\n return(0,0,0,0,0,0)\n # Get the signal energy; most people normalize by the square root of that\n ren1 = np.correlate(data1,data1,mode='valid')[0]\n ren2 = np.correlate(data2,data2,mode='valid')[0]\n\n # Get the window rms\n \n rms1 = sqrt(ren1 / len(data1))\n \n rms2 = sqrt(ren2 / len(data2)) \n \n \n # A further parameter to 'see' impulsive events: range of standard deviations\n nsmp = int(len(data1)/4)\n\n std1 = [0,0,0,0]\n std2 = [0,0,0,0]\n\n with warnings.catch_warnings():\n 
warnings.simplefilter(\"ignore\")\n for i in range(4):\n \n \n std1[i] = np.std(data1[i*nsmp:(i+1)*nsmp])\n if isnan(std1[i]):\n return(0,0,0,0,0,0)\n std2[i] = np.std(data2[i*nsmp:(i+1)*nsmp])\n if isnan(std1[i]):\n return(0,0,0,0,0,0)\n \n # Add small value not to divide by zero\n tol = np.max(std1) * 1e-6 \n if tol != 0:\n rng1 = max(std1) / (min(std1) + tol)\n rng2 = max(std2) / (min(std2) + tol)\n else:\n rng1 = 0\n rng2 = 0\n\n return(rms1,rms2,ren1,ren2,rng1,rng2)\n\n \ndef cross_covar(data1, data2, max_lag_samples, normalize, params=False):\n \n #ToDo: deal with params\n \n# remove mean and normalize; this should have no effect on the energy-normalized \n#correlation result, but may avoid precision issues if trace values are very small\n #if normalize:\n # scale1 = 1./np.max(np.abs(data1))\n # scale2 = 1./np.max(np.abs(data2))\n # data1*=scale1\n # data2*=scale2\n \n if len(data1) == 0 or len(data2) == 0:\n return([],[])\n\n \n data1-=np.mean(data1)\n data2-=np.mean(data2)\n \n \n # Make the data more convenient for C function np.correlate\n\n data1 = np.ascontiguousarray(data1, np.float32)\n data2 = np.ascontiguousarray(data2, np.float32)\n \n if params:\n params = get_correlation_params(data1,data2)\n ren1, ren2 = params[2:4]\n else:\n ren1 = np.correlate(data1,data1,mode='valid')[0]\n ren2 = np.correlate(data2,data2,mode='valid')[0]\n\n if ren1 == 0.0 or ren2 == 0.0 and normalize == True:\n return([],[])\n\n\n\n # scipy.fftconvolve is way faster than np.correlate, and zeropads for non-circular convolution\n ccv = fftconvolve(data1[::-1],data2,mode='same')\n \n if normalize:\n ccv /= ( sqrt(ren1) * sqrt(ren2) )\n\n return my_centered(ccv,2*max_lag_samples+1),params\n\n\n\n\n","sub_path":"ants_2/tools/correlations.py","file_name":"correlations.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"279378600","text":"from PyQt5.QtCore import QObject, 
QThreadPool\n\nfrom neanno.utils.list import get_set_of_list_and_keep_sequence, not_none\nfrom neanno.utils.threading import ParallelWorker, ParallelWorkerSignals\nfrom neanno.utils.text import extract_annotations_as_list, annotate_text\n\n\nclass PredictionPipeline(QObject):\n \"\"\" Predicts different annotations for a text.\"\"\"\n\n _predictors = {}\n _threadpool = QThreadPool()\n\n def add_predictor(self, predictor):\n self._predictors[predictor.name] = predictor\n\n def remove_predictor(self, name):\n del self._predictors[name]\n\n def has_predictor(self, name):\n return name in self._predictors\n\n def has_predictors(self):\n return len(self._predictors) > 0\n\n def get_predictor(self, name):\n return self._predictors[name]\n\n def get_all_predictors(self):\n return self._predictors.values()\n\n def get_all_prediction_enabled_predictors(self):\n return [\n predictor\n for predictor in self._predictors.values()\n if predictor.is_prediction_enabled\n ]\n\n def invoke_predictors(self, function_name, *args, **kwargs):\n for predictor in self.get_all_predictors():\n if hasattr(predictor, function_name):\n getattr(predictor, function_name)(*args, **kwargs)\n\n def collect_from_predictors(\n self, function_name, make_result_distinct, filter_none_values, *args, **kwargs\n ):\n result = []\n for predictor in self.get_all_predictors():\n if hasattr(predictor, function_name):\n predictor_response = getattr(predictor, function_name)(*args, **kwargs)\n if predictor_response:\n result = result.extend(\n getattr(predictor, function_name)(*args, **kwargs)\n )\n if filter_none_values:\n result = not_none(result)\n if make_result_distinct:\n result = get_set_of_list_and_keep_sequence(result)\n return result\n\n def learn_from_annotated_text(self, annotated_text, language):\n self.invoke_predictors(\"learn_from_annotated_text\", annotated_text, language)\n\n def learn_from_annotated_dataset_async(\n self,\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n 
categories_column,\n categories_to_train,\n entity_codes_to_train,\n signal_slots=ParallelWorkerSignals.default_slots(),\n ):\n parallel_worker = ParallelWorker(\n self.invoke_predictors,\n signal_slots,\n \"learn_from_annotated_dataset\",\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n categories_column,\n categories_to_train,\n entity_codes_to_train,\n )\n self._threadpool.start(parallel_worker)\n\n def learn_from_annotated_dataset(\n self,\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n categories_column,\n categories_to_train,\n entity_codes_to_train,\n signal_slots=ParallelWorkerSignals.default_slots(),\n ):\n # call the async version of this method\n self.learn_from_annotated_dataset_async(\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n categories_column,\n categories_to_train,\n entity_codes_to_train,\n signal_slots,\n )\n # wait for done\n # note: this waits until the entire threadpool is done\n # TODO: check if there is a way to wait only for this worker\n self._threadpool.waitForDone()\n\n def predict_inline_annotations(self, text, language=\"en-US\"):\n if not text:\n return \"\"\n annotations = []\n for predictor in self.get_all_prediction_enabled_predictors():\n annotations_by_predictor = extract_annotations_as_list(\n predictor.predict_inline_annotations(text, language)\n )\n annotations.extend(annotations_by_predictor)\n return annotate_text(text, annotations)\n\n def predict_text_categories(self, text, language=\"en-US\"):\n if not text:\n return \"\"\n result = []\n for predictor in self.get_all_prediction_enabled_predictors():\n new_text_categories = predictor.predict_text_categories(text, language)\n result.extend(new_text_categories)\n result = get_set_of_list_and_keep_sequence(result)\n return result\n\n def get_parent_terms_for_named_entity(self, term, entity_code):\n return \", \".join(\n not_none(\n self.collect_from_predictors(\n \"get_parent_terms_for_named_entity\", True, 
True, term, entity_code\n )\n )\n )\n\n def mark_key_term_for_removal(self, key_term):\n self.invoke_predictors(\"mark_key_term_for_removal\", key_term)\n\n def reset_key_terms_marked_for_removal(self):\n self.invoke_predictors(\"reset_key_terms_marked_for_removal\")\n\n def mark_named_entity_term_for_removal(self, term, entity_code):\n self.invoke_predictors(\"mark_named_entity_term_for_removal\", term, entity_code)\n\n def reset_named_entity_terms_marked_for_removal(self):\n self.invoke_predictors(\"reset_named_entity_terms_marked_for_removal\")\n","sub_path":"neanno/prediction/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"431575194","text":"\"\"\"\nGiven two arrays arr1 and arr2, the elements of arr2 are distinct, and all elements in arr2 are also\nin arr1.\n\nSort the elements of arr1 such that the relative ordering of items in arr1 are the same as in arr2.\nElements that don't appear in arr2 should be placed at the end of arr1 in ascending order.\n\"\"\"\nclass Solution(object):\n def relativeSortArray(self, arr1, arr2):\n \"\"\"\n :type arr1: List[int]\n :type arr2: List[int]\n :rtype: List[int]\n \"\"\"\n d = {v: i for i, v in enumerate(arr2)}\n return sorted(arr1, key=lambda i: d.get(i, 1000 + i)) # how to explain this\n\n def relativeSortArray2(self, arr1, arr2):\n import collections\n ans, cnt = [], collections.Counter(arr1) # Count each number in arr1\n for i in arr2:\n if cnt[i]: ans.extend([i] * cnt.pop(i)) # Sort the common numbers in both arrays by the order of arr2.\n for i in range(1001):\n if cnt[i]: ans.extend([i] * cnt.pop(i)) # Sort the numbers only in arr1.\n return ans\n\n def relativeSortArray3(self, arr1, arr2):\n # Count sort\n cnt = [0] * 1001\n for n in arr1:\n cnt[n] += 1\n i = 0\n for n in arr2:\n while cnt[n] > 0:\n arr1[i] = n\n i += 1\n cnt[n] -= 1\n for n in range(len(cnt)):\n while cnt[n] > 
0:\n arr1[i] = n\n i += 1\n cnt[n] -= 1\n return arr1\n\n\narr1, arr2 = [2,3,1,3,2,4,6,7,9,2,19], [2,1,4,3,9,6]\nprint(Solution().relativeSortArray3(arr1, arr2))\n\n","sub_path":"1122RelativeSortArr.py","file_name":"1122RelativeSortArr.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"265082105","text":"#! /usr/bin/env python\n\n\nfrom __future__ import print_function\n\nimport rospy\nimport sys\nimport copy\nimport moveit_commander\nimport moveit_msgs.msg\nimport geometry_msgs.msg\nimport actionlib\nimport math\n\nfrom pkg_vb_sim.srv import vacuumGripper\nfrom pkg_vb_sim.srv import vacuumGripperRequest\nfrom pkg_vb_sim.srv import vacuumGripperResponse\n\nfrom pkg_vb_sim.srv import conveyorBeltPowerMsg\nfrom pkg_vb_sim.srv import conveyorBeltPowerMsgRequest\nfrom pkg_vb_sim.srv import conveyorBeltPowerMsgResponse\n\n\nfrom hrwros_gazebo.msg import LogicalCameraImage\npkg1 = []\npkg2 = []\npkg3 = []\n\nclass task3:\n\n def __init__(self):\n\n rospy.init_node('node_task3', anonymous=True)\n\n self._planning_group = \"ur5_1_planning_group\"\n self._commander = moveit_commander.roscpp_initialize(sys.argv)\n self._robot = moveit_commander.RobotCommander()\n self._scene = moveit_commander.PlanningSceneInterface()\n self._group = moveit_commander.MoveGroupCommander(self._planning_group)\n self._display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=1)\n\n self._exectute_trajectory_client = actionlib.SimpleActionClient('execute_trajectory', moveit_msgs.msg.ExecuteTrajectoryAction)\n self._exectute_trajectory_client.wait_for_server()\n\n self._planning_frame = self._group.get_planning_frame()\n self._eef_link = self._group.get_end_effector_link()\n self._group_names = self._robot.get_group_names()\n\n\n rospy.loginfo('\\033[94m' + \"Planning Group: {}\".format(self._planning_frame) + '\\033[0m')\n 
rospy.loginfo('\\033[94m' + \"End Effector Link: {}\".format(self._eef_link) + '\\033[0m')\n rospy.loginfo('\\033[94m' + \"Group Names: {}\".format(self._group_names) + '\\033[0m')\n rospy.loginfo('\\033[94m' + \" >>> Ur5Moveit init done.\" + '\\033[0m')\n\n\n self.box_name = \"package\"\n\n\n def set_joint_angles(self, arg_list_joint_angles):\n\n list_joint_values = self._group.get_current_joint_values()\n rospy.loginfo('\\033[94m' + \">>> Current Joint Values:\" + '\\033[0m')\n rospy.loginfo(list_joint_values)\n\n self._group.set_joint_value_target(arg_list_joint_angles)\n self._group.plan()\n flag_plan = self._group.go(wait=True)\n\n list_joint_values = self._group.get_current_joint_values()\n rospy.loginfo('\\033[94m' + \">>> Final Joint Values:\" + '\\033[0m')\n rospy.loginfo(list_joint_values)\n\n pose_values = self._group.get_current_pose().pose\n rospy.loginfo('\\033[94m' + \">>> Final Pose:\" + '\\033[0m')\n rospy.loginfo(pose_values)\n\n if flag_plan == True:\n rospy.loginfo(\n '\\033[94m' + \">>> set_joint_angles() Success\" + '\\033[0m')\n else:\n rospy.logerr(\n '\\033[94m' + \">>> set_joint_angles() Failed.\" + '\\033[0m')\n\n return flag_plan\n\n\n\n def wait_for_state_update(self, box_is_known=False, box_is_attached=False, timeout=4):\n\n start = rospy.get_time()\n seconds = rospy.get_time()\n\n while (seconds - start < timeout) and not rospy.is_shutdown():\n attached_objects = self._scene.get_attached_objects([self.box_name])\n\n is_attached = len(attached_objects.keys()) > 0\n\n is_known = self.box_name in self._scene.get_known_object_names()\n\n if(box_is_attached == is_attached) and (box_is_known == is_known):\n return True\n\n rospy.sleep(0.1)\n self._seconds = rospy.get_time()\n\n return False\n\n\n def update_data(self, data):\n global pkg1 #to store details of packagen1\n global pkg2 #to store details of packagen2\n global pkg3 #to store details of packagen3\n del pkg1[:]\n del pkg2[:]\n del pkg3[:]\n try:\n if data.models[0].type == 
'packagen1':\n pkg1.append(data.models[0].pose.position.x)\n pkg1.append(data.models[0].pose.position.y)\n pkg1.append(data.models[0].pose.position.z)\n elif data.models[1].type == 'packagen1':\n pkg1.append(data.models[1].pose.position.x)\n pkg1.append(data.models[1].pose.position.y)\n pkg1.append(data.models[1].pose.position.z)\n elif data.models[2].type == 'packagen1':\n pkg1.append(data.models[2].pose.position.x)\n pkg1.append(data.models[2].pose.position.y)\n pkg1.append(data.models[2].pose.position.z)\n elif data.models[3].type == 'packagen1':\n pkg1.append(data.models[3].pose.position.x)\n pkg1.append(data.models[3].pose.position.y)\n pkg1.append(data.models[3].pose.position.z)\n except:\n '''\n packagen1 is not yet reached to the logical camera\n '''\n try:\n if data.models[0].type == 'packagen2':\n pkg2.append(data.models[0].pose.position.x)\n pkg2.append(data.models[0].pose.position.y)\n pkg2.append(data.models[0].pose.position.z)\n elif data.models[1].type == 'packagen2':\n pkg2.append(data.models[1].pose.position.x)\n pkg2.append(data.models[1].pose.position.y)\n pkg2.append(data.models[1].pose.position.z)\n elif data.models[2].type == 'packagen2':\n pkg2.append(data.models[2].pose.position.x)\n pkg2.append(data.models[2].pose.position.y)\n pkg2.append(data.models[2].pose.position.z)\n elif data.models[3].type == 'packagen2':\n pkg2.append(data.models[3].pose.position.x)\n pkg2.append(data.models[3].pose.position.y)\n pkg2.append(data.models[3].pose.position.z)\n except:\n '''\n packagen2 is not yet reached to the logical camera\n '''\n try:\n if data.models[0].type == 'packagen3':\n pkg3.append(data.models[0].pose.position.x)\n pkg3.append(data.models[0].pose.position.y)\n pkg3.append(data.models[0].pose.position.z)\n elif data.models[1].type == 'packagen3':\n pkg3.append(data.models[1].pose.position.x)\n pkg3.append(data.models[1].pose.position.y)\n pkg3.append(data.models[1].pose.position.z)\n elif data.models[2].type == 'packagen3':\n 
pkg3.append(data.models[2].pose.position.x)\n pkg3.append(data.models[2].pose.position.y)\n pkg3.append(data.models[2].pose.position.z)\n elif data.models[3].type == 'packagen3':\n pkg3.append(data.models[3].pose.position.x)\n pkg3.append(data.models[3].pose.position.y)\n pkg3.append(data.models[3].pose.position.z)\n except:\n '''\n packagen3 is not yet reached to the logical camera\n '''\n\n\n def ee_cartesian_translation(self, trans_x, trans_y, trans_z):\n # 1. Create a empty list to hold waypoints\n waypoints = []\n\n # 2. Add Current Pose to the list of waypoints\n waypoints.append(self._group.get_current_pose().pose)\n\n # 3. Create a New waypoint\n wpose = geometry_msgs.msg.Pose()\n wpose.position.x = waypoints[0].position.x + (trans_x)\n wpose.position.y = waypoints[0].position.y + (trans_y)\n wpose.position.z = waypoints[0].position.z + (trans_z)\n # # This to keep EE parallel to Ground Plane\n # wpose.orientation.x = -0.5\n # wpose.orientation.y = -0.5\n # wpose.orientation.z = 0.5\n # wpose.orientation.w = 0.5\n\n\n # 4. Add the new waypoint to the list of waypoints\n waypoints.append(copy.deepcopy(wpose))\n\n\n # 5. Compute Cartesian Path connecting the waypoints in the list of waypoints\n (plan, fraction) = self._group.compute_cartesian_path(\n waypoints, # waypoints to follow\n 0.01, # Step Size, distance between two adjacent computed waypoints will be 1 cm\n 0.0) # Jump Threshold\n rospy.loginfo(\"Path computed successfully. Moving the arm.\")\n\n # The reason for deleting the first two waypoints from the computed Cartisian Path can be found here,\n # https://answers.ros.org/question/253004/moveit-problem-error-trajectory-message-contains-waypoints-that-are-not-strictly-increasing-in-time/?answer=257488#post-id-257488\n num_pts = len(plan.joint_trajectory.points)\n if num_pts >= 3:\n del plan.joint_trajectory.points[0]\n del plan.joint_trajectory.points[1]\n\n # 6. 
Make the arm follow the Computed Cartesian Path\n self._group.execute(plan)\n\n\n\n def add_box(self, xc, yc, zc, timeout=4): #To add the box to the planning scene in RViz\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = \"/world\"\n box_pose.pose.position.x = xc\n box_pose.pose.position.y = yc\n box_pose.pose.position.z = zc\n box_pose.pose.orientation.x = 0.0\n box_pose.pose.orientation.y = 0.0\n box_pose.pose.orientation.z = 0.0\n box_pose.pose.orientation.w = 1.0\n\n self._scene.add_box(self.box_name, box_pose, size=(0.15, 0.15, 0.15))\n return self.wait_for_state_update(box_is_known=True, timeout=timeout)\n\n\n\n def attach_box(self, timeout=4):\n grasping_group = \"ur5_1_planning_group\"\n touch_links = self._robot.get_link_names(group=grasping_group)\n self._scene.attach_box(self._eef_link, self.box_name, touch_links=touch_links)\n return self.wait_for_state_update(box_is_attached=True, box_is_known=False, timeout=timeout)\n\n\n\n def detach_box(self, timeout=4):\n self._scene.remove_attached_object(self._eef_link, name=self.box_name)\n return self.wait_for_state_update(box_is_known=True, box_is_attached=False, timeout=timeout)\n\n\n\n def remove_box(self, timeout=4):\n self._scene.remove_world_object(self.box_name)\n return self.wait_for_state_update(box_is_attached=False, box_is_known=False, timeout=timeout)\n\n\n\n def vacuum_on_off(self, val_tf):\n rospy.wait_for_service('/eyrc/vb/ur5_1/activate_vacuum_gripper')\n try:\n handle_vacuum = rospy.ServiceProxy('/eyrc/vb/ur5_1/activate_vacuum_gripper', vacuumGripper)\n final_res = handle_vacuum(val_tf)\n return final_res.result\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)\n\n def conveyor_belt_control(self, pwr):\n rospy.wait_for_service('/eyrc/vb/conveyor/set_power')\n try:\n handle_conveyor = rospy.ServiceProxy('/eyrc/vb/conveyor/set_power', conveyorBeltPowerMsg)\n final_res = handle_conveyor(pwr)\n return final_res\n except rospy.ServiceException as 
e:\n print(\"service call failed %s\"%e)\n\n\n def __del__(self):\n moveit_commander.roscpp_shutdown()\n rospy.loginfo('\\033[94m' + \"Object of class Ur5Moveit Deleted.\" + '\\033[0m')\n\n\n\ndef main():\n\n t3 = task3()\n\n global pkg1\n global pkg2\n global pkg3\n handle_sub_cam = rospy.Subscriber('/eyrc/vb/logical_camera_2', LogicalCameraImage, t3.update_data)\n rospy.sleep(1)\n\n try:\n intial_joint_angles = [math.radians(172),\n math.radians(-40),\n math.radians(58),\n math.radians(-108),\n math.radians(-89),\n math.radians(-100)]\n\n green_box_joint_angles = [math.radians(169),\n math.radians(-58),\n math.radians(90),\n math.radians(-121),\n math.radians(-87),\n math.radians(-100)]\n\n blue_box_joint_angles = [math.radians(174),\n math.radians(-24),\n math.radians(27),\n math.radians(-93),\n math.radians(-87),\n math.radians(-100)]\n\n red_bin_joint_angles = [math.radians(75),\n math.radians(-60),\n math.radians(70),\n math.radians(-107),\n math.radians(-91),\n math.radians(-100)]\n\n green_bin_joint_angles = [math.radians(-1),\n math.radians(-60),\n math.radians(70),\n math.radians(-107),\n math.radians(-91),\n math.radians(-100)]\n\n blue_bin_joint_angles = [math.radians(100),\n math.radians(-125),\n math.radians(-79),\n math.radians(-65),\n math.radians(87),\n math.radians(11)]\n\n rospy.sleep(4)\n t3.set_joint_angles(intial_joint_angles)\n\n #pkg1,pkg2,pkg3 is a list containing x,y,z coordinates of packagen1,packagen2,and packagen3 respectively as seen in logical camera.\n #till package1 is not seen by logical camera\n while len(pkg1) == 0:\n t3.conveyor_belt_control(100)\n #when packagen1 is in visible range of logical camera\n while pkg1[1] > 0.080080:\n t3.conveyor_belt_control(80)\n t3.conveyor_belt_control(0)\n t3.add_box(-0.800369, -0.001243, 0.995000)\n t3.vacuum_on_off(True)\n t3.attach_box()\n t3.set_joint_angles(red_bin_joint_angles)\n t3.detach_box()\n t3.vacuum_on_off(False)\n t3.remove_box()\n #packagen1 DONE\n\n\n 
t3.conveyor_belt_control(50)\n t3.set_joint_angles(intial_joint_angles)\n #till packagen2 is not seen by logical camera\n while len(pkg2) == 0:\n t3.conveyor_belt_control(100)\n #when packagen1 is in visible range of logical camera\n while pkg2[1] > 0.080080:\n t3.conveyor_belt_control(80)\n t3.conveyor_belt_control(0)\n t3.set_joint_angles(green_box_joint_angles)\n t3.add_box(-0.660569, 0.008008, 0.995000)\n t3.vacuum_on_off(True)\n t3.attach_box()\n t3.set_joint_angles(green_bin_joint_angles)\n t3.detach_box()\n t3.vacuum_on_off(False)\n t3.remove_box()\n #packagen2 DONE\n\n\n t3.conveyor_belt_control(50)\n t3.set_joint_angles(intial_joint_angles)\n #till packagen3 is not seen by logical camera\n while len(pkg3) == 0:\n t3.conveyor_belt_control(100)\n #when packagen1 is in visible range of logical camera\n while pkg3[1] > 0.07880:\n t3.conveyor_belt_control(80)\n t3.conveyor_belt_control(0)\n t3.set_joint_angles(blue_box__joint_angles)\n t3.add_box(-0.900779, -0.022788, 0.995000)\n t3.vacuum_on_off(True)\n t3.attach_box()\n t3.set_joint_angles(blue_bin_joint_angles)\n t3.detach_box()\n t3.vacuum_on_off(False)\n t3.remove_box()\n t3.set_joint_angles(intial_joint_angles)\n #packagen3 DONE\n\n except rospy.ROSInterruptException:\n return\n except KeyboardInterrupt:\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pkg_task3/scripts/node_task3.py","file_name":"node_task3.py","file_ext":"py","file_size_in_byte":15589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"393440494","text":"#Ilan Ackelsberg\n#George Hawk\n#Julie Bodian\n#CS374\n#12/17/19\n\nimport sys\nimport math\nimport random\nimport csv\nimport operator\nimport numpy as np\n\n\n'''\n\nThis helper function reads in our dataset, randomizes it, and outputs 100 instances\nto a csv file for use in our evaluation. 
\n\n'''\n\n\ndef readChords(fileName):\n with open(fileName, mode = 'r') as csv_file:\n reader = csv.reader(csv_file)\n bigData = [] #init big list which will hold whole dataset\n for row in reader:\n song = list(row) #grab each row\n progression = [] #init sublist\n for chord in song[:16]:\n if chord != '': #get rid of commas\n progression.append(chord) #add each chord to list of chord progression instance\n bigData.append(progression) #add chord progressiont to dataset \n \n #print(progression)\n \n #print(bigData)\n random.shuffle(bigData)\n print(bigData)\n \n with open(\"theirChords1.csv\",\"w\") as f:\n wr = csv.writer(f)\n for row in bigData[:100]:\n #print(row)\n wr.writerow(row) \n \n return bigData\n\ndef main():\n readChords(\"data2.csv\")\n \n \nif __name__ == '__main__':\n main()","sub_path":"dataprocess.py","file_name":"dataprocess.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"570861065","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/event_mark1/vehicle_messages.py\nfrom gui.Scaleform.daapi.view.battle.shared import messages\nfrom gui.battle_control import g_sessionProvider\n\nclass Mark1VehicleMessages(messages.VehicleMessages):\n\n def _addGameListeners(self):\n super(Mark1VehicleMessages, self)._addGameListeners()\n vehicleCtrl = g_sessionProvider.shared.vehicleState\n if vehicleCtrl is not None:\n vehicleCtrl.onRespawnBaseMoving += self.__onRrespawnBaseMoving\n return\n\n def _removeGameListeners(self):\n vehicleCtrl = g_sessionProvider.shared.vehicleState\n if vehicleCtrl is not None:\n vehicleCtrl.onRespawnBaseMoving -= self.__onRrespawnBaseMoving\n super(Mark1VehicleMessages, self)._removeGameListeners()\n return\n\n def __onRrespawnBaseMoving(self):\n 
self.clear()\n","sub_path":"PythonFiles/WOT/res/scripts/client/gui/Scaleform/daapi/view/battle/event_mark1/vehicle_messages.py","file_name":"vehicle_messages.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455762601","text":"from ...common.decorators import *\nfrom ...common.errors import *\nfrom enum import IntEnum\nfrom .data_element import DataElement\nfrom decimal import *\nimport json\nimport dateutil.parser\n\nclass CorrelationQueryValue:\n def __init__(self, value=None, datatype=None):\n if type(value) == DataElement:\n self.dataelement = value\n else:\n self.dataelement = DataElement(value=value, datatype=datatype)\n\n @staticmethod\n def from_dict(dct):\n val = None\n try:\n de = DataElement.from_dict(dct)\n val = InActionQueryValue(value=de)\n except Exception as e:\n raise DeserializationError(CorrelationQueryValue, 'dict', dct) from e\n return val\n\n def to_dict(self):\n dct = {}\n try:\n dct = self.dataelement.to_dict()\n except Exception as e:\n raise SerializationError(CorrelationQueryValue, 'dict') from e\n return dct\n\n\nclass CorrelationQueryResult(IntEnum):\n INSTANCE = 1\n ACTION = 2\n TOPOLOGY = 4\n\n def to_string_list(val):\n lst = []\n if (val&CorrelationQueryResult.INSTANCE) == CorrelationQueryResult.INSTANCE:\n lst.append(\"Instance\")\n if (val&CorrelationQueryResult.ACTION) == CorrelationQueryResult.ACTION:\n lst.append(\"Action\")\n if (val&CorrelationQueryResult.TOPOLOGY) == CorrelationQueryResult.TOPOLOGY:\n lst.append(\"Topology\")\n return lst\n\n\n @staticmethod\n def from_string_list(lst):\n val = 0\n for e in lst:\n if e == \"Instance\":\n val |= CorrelationQueryResult.INSTANCE\n elif e == \"Action\":\n val |= CorrelationQueryResult.ACTION\n elif e == \"Topology\":\n val |= CorrelationQueryResult.TOPOLOGY\n return val\n\n\nclass CorrelationQuery:\n def __init__(self, elements=None, 
query_results=CorrelationQueryResult.INSTANCE):\n self.elements = elements\n self.query_results = query_results\n\n @staticmethod\n def from_dict(dct):\n retval = None\n try:\n if type(dct).__name__ == 'list':\n retval = []\n for d in dct:\n retval.append(CorrelationQuery.from_dct(d))\n else:\n elements = list(map(CorrelationQueryValue.from_dict, dct['data'])) if 'data' in dct else None\n query_result = CorrelationQueryResult.from_string_list(dct.get('link', []))\n retval = CorrelationQuery(elements, query_results)\n except Exception as e:\n raise DeserializationError(CorrelationQuery, 'dict', dct) from e\n return retval\n\n @staticmethod\n def from_json(json_string):\n query = None\n try:\n data = json.loads(json_string)\n query = InActionQuery.from_dict(data)\n except Exception as e:\n raise DeserializationError(InActionQuery, 'json', json_string) from e\n return query\n\n def to_dict(self):\n dct = None\n try:\n dct = {}\n if self.and_set is not None:\n dct['and'] = list(map(lambda o: o.to_dict(), self.and_set))\n if self.or_set is not None:\n dct['or'] = list(map(lambda o: o.to_dict(), self.or_set))\n if self.not_set is not None:\n dct['not'] = list(map(lambda o: o.to_dict(), self.not_set))\n if self.action_filter is not None:\n dct['action_id'] = self.action_filter\n if self.topology_filter is not None:\n dct['topology_id'] = self.topology_filter\n except Exception as e:\n raise SerializationError(InActionQuery, 'dict') from e\n return dct\n\n def to_json(self):\n json_string = \"\"\n try:\n dct = self.to_dict()\n json_string = json.dumps(dct)\n except Exception as e:\n raise SerializationError(InActionQuery, 'json') from e\n return json_string\n\n\n","sub_path":"dyna/dynizer/types/correlation_query.py","file_name":"correlation_query.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243005886","text":"import configparser\nfrom abc import ABC\nfrom typing import 
AnyStr, List, Union, Any\n\n\ndef _type_cast(variable: AnyStr) -> Union[AnyStr, int, float, bool]:\n variable = variable.strip()\n if variable.lower() == 'true':\n return True\n if variable.lower() == 'false':\n return False\n try:\n return int(variable)\n except ValueError:\n try:\n return float(variable)\n except ValueError:\n return variable\n\n\nclass ConfigModule(ABC):\n def __init__(self, config_files: Union[AnyStr, List[AnyStr]] = None):\n self._config = configparser.ConfigParser()\n self._config.read(config_files or [])\n # NOTE: configure文件中的变量并不区分大小写\n\n def read_config(self,\n section: AnyStr,\n field: AnyStr = None,\n fallback: Any = None\n ) -> Any:\n if field is None:\n section, field = 'default', section\n if section.lower() != 'default' and fallback is None:\n if 'default' in self._config and field in self._config['default']:\n fallback = _type_cast(self._config.get('default', field))\n if section not in self._config or field not in self._config[section]:\n return fallback\n return _type_cast(self._config.get(section, field))\n","sub_path":"test_parallel/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"435097115","text":"import functools\nimport logging\nimport time\n\n\ndef logged(log='trace'):\n def wrap(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n logger = logging.getLogger(log)\n logger.debug(\"Calling function '{}' with args={} kwargs={}\"\n .format(function.__name__, args, kwargs))\n try:\n response = function(*args, **kwargs)\n except Exception as error:\n logger.debug(\"Function '{}' raised {} with error '{}'\"\n .format(function.__name__,\n error.__class__.__name__,\n str(error)))\n raise error\n logger.debug(\"Function '{}' returned {}\"\n .format(function.__name__,\n response))\n return response\n return wrapper\n return wrap\n\ndef slow_down(func):\n 
@functools.wraps(func)\n def wrapper_slow_down(*args, **kwargs):\n time.sleep(1)\n return func(*args, **kwargs)\n return wrapper_slow_down\n\n\ndef debug(func):\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n return wrapper_debug\n\n\n\ndef timer(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.perf_counter()\n value = func(*args, **kwargs)\n end_time = time.perf_counter()\n run_time = end_time - start_time\n print(f\"Finished {func.__name__!r} in {run_time:.4f} secs\")\n return value\n return wrapper_timer","sub_path":"ProjektyStudentow/2018_MP/Decorator.py","file_name":"Decorator.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622998500","text":"import os\nfrom HorseRace.common import *\nfrom pathlib import Path\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nfrom datetime import timedelta\nimport numpy as np\nfrom urllib import parse\nimport csv\n\ndef get_pages(url):\n\tres = request.urlopen(url)\n\tsoup = BeautifulSoup(res,\"html.parser\")\n\tres.close()\n\treturn soup\n\ndef get_tables(url):\n\tdata = pd.read_html(url)\n\treturn data\n\ndef get_driver(opt=None):\n\t# dp = r\"C:\\driver\\96\\chromedriver.exe\"\n\toptions = Options()\n\tif opt:\n\t\toptions.add_argument('--headless')\n\treturn webdriver.Chrome(ChromeDriverManager().install(), options=options)\n\t# return webdriver.Chrome(dp, options=options)\n\n\ndef 
set_login(d):\n\td.get(r\"https://regist.netkeiba.com/account/?pid=login\")\n\t# d.switch_to_window(d.window_handles[1])\n\tloginid = d.find_element_by_name(\"login_id\")\n\tpswd = d.find_element_by_name('pswd')\n\tlogbtn = d.find_element_by_class_name('loginBtn__wrap').find_element_by_tag_name('input')\n\tloginid.send_keys(os.environ['email'])\n\tpswd.send_keys(os.environ['passwd'])\n\tlogbtn.click()\n\treturn d\n\n\ndef categorize_races(rname):\n\tf = open(r\"D:\\horserace\\data\\race\\mst\\race.csv\",encoding='utf8')\n\tif not f.exists():\n\t\tf = open(r\"E:\\horserace\\data\\race\\mst\\race.csv\",encoding='utf8')\n\ttxt = csv.reader(f)\n\ttext = list(txt)\n\tG1,G2,G3=[],[],[]\n\tfor t in text:\n\t\tif t[0] == 'G1':\n\t\t\tG1.append(t[1])\n\t\telif t[0] == 'G2':\n\t\t\tG2.append(t[1])\n\t\telif t[0] == 'G3':\n\t\t\tG3.append(t[1])\n\tcategory = ''\n\tif 'G1' in rname or rname in G1:\n\t\tcategory = '01.G1'\n\telif 'G2' in rname or rname in G2:\n\t\tcategory = '02.G2'\n\telif 'G3' in rname or rname in G3:\n\t\tcategory = '03.G3'\n\telif 'OP' in rname or 'S' in rname or 'ステークス' in rname or '賞' in rname or '杯' in rname or '特別' in rname :\n\t\tcategory = '04.OP'\n\telif 'L' in rname:\n\t\tcategory = '05.L'\n\telif '1600万下' in rname:\n\t\tcategory = '06.1600万下'\n\telif '1000万下' in rname:\n\t\tcategory = '07.1000万下'\n\telif '500万下' in rname:\n\t\tcategory = '08.500万下'\n\telif '3勝クラス' in rname:\n\t\tcategory = '09.3勝クラス'\n\telif '2勝クラス' in rname:\n\t\tcategory = '10.2勝クラス'\n\telif '1勝クラス' in rname:\n\t\tcategory = '11.1勝クラス'\n\telif '2歳未勝利' in rname:\n\t\tcategory = '12.2歳未勝利'\n\telif '3歳未勝利' in rname:\n\t\tcategory = '13.3歳未勝利'\n\telif '3歳以上未勝利' in rname:\n\t\tcategory = '13.3歳未勝利'\n\telif '3歳新馬' in rname:\n\t\tcategory = '14.3歳新馬'\n\telif '2歳新馬' in rname:\n\t\tcategory = '15.2歳新馬'\n\telse:\n\t\tprint(rname, 'OP?')\n\t\tcategory = '04.OP'\n\tf.close()\n\treturn category\n\ndef categorize_races_bk(rname):\n\tcategory = ''\n\tif '(G1)' in rname:\n\t\tcategory = 
'01.G1'\n\telif '(G2)' in rname:\n\t\tcategory = '02.G2'\n\telif '(G3)' in rname:\n\t\tcategory = '03.G3'\n\telif '(OP)' in rname:\n\t\tcategory = '04.OP'\n\telif '(L)' in rname:\n\t\tcategory = '05.L'\n\telif '1600万下' in rname:\n\t\tcategory = '06.1600万下'\n\telif '1000万下' in rname:\n\t\tcategory = '07.1000万下'\n\telif '500万下' in rname:\n\t\tcategory = '08.500万下'\n\telif '3勝クラス' in rname:\n\t\tcategory = '09.3勝クラス'\n\telif '2勝クラス' in rname:\n\t\tcategory = '10.2勝クラス'\n\telif '1勝クラス' in rname:\n\t\tcategory = '11.1勝クラス'\n\telif '2歳未勝利' in rname:\n\t\tcategory = '12.2歳未勝利'\n\telif '3歳未勝利' in rname:\n\t\tcategory = '13.3歳未勝利'\n\telif '3歳新馬' in rname:\n\t\tcategory = '14.3歳新馬'\n\telif '2歳新馬' in rname:\n\t\tcategory = '15.2歳新馬'\n\treturn category\n\n\n\ndef split_distance(df):\n\tprint(type(df),df['距離'])\n\tdtype = ''\n\tif isinstance(df['距離'], str):\n\t\tif df['距離'][0] == '芝':\n\t\t\tdtype = '芝'\n\t\telif df['距離'][0] == 'ダ':\n\t\t\tdtype = 'ダ'\n\t\telif df['距離'][0] == '障':\n\t\t\tdtype = '障'\n\telse:\n\t\tif df['距離'].str[0] == '芝':\n\t\t\tdtype = '芝'\n\t\telif df['距離'].str[0] == 'ダ':\n\t\t\tdtype = 'ダ'\n\t\telif df['距離'].str[0] == '障':\n\t\t\tdtype = '障'\n\t\telse:\n\t\t\tprint('不明', df['距離'])\n\t\t\treturn ''\n\tdf['距離'] = df['距離'].replace('芝','').replace('ダ','').replace('障','')\n\tdf['タイプ'] = dtype\n\treturn df\n\ndef convert_time(time):\n\tm = int(time[0])\n\ts = int(time[2:4])\n\tss = int(time[5])*100000\n\ttd = timedelta(minutes=m,seconds=s,microseconds=ss).total_seconds()\n\treturn td\n\ndef convert_time2(time):\n\tif type(time) is str:\n\t\tm = int(str(time[0]))\n\t\ts = int(str(time[2:4]))\n\t\tss = int(str(time[5]))*100000\n\t\ttd = timedelta(minutes=m,seconds=s,microseconds=ss).total_seconds()\n\telse:\n\t\tprint(type(time),time)\n\t\ttd = None\n\treturn td\n\ndef convert_time3(time):\n\tif type(time) is str:\n\t\t# m = int(str(time[0]))\n\t\ts = int(str(time[0:2]))\n\t\tss = int(str(time[3:4]))*100000\n\t\ttd = 
timedelta(seconds=s,microseconds=ss).total_seconds()\n\telse:\n\t\tprint(type(time),time)\n\t\ttd = None\n\treturn td\n\ndef get_all_horse_data():\n\thp = Path(r\"D:\\horserace\\data\\horse\")\n\tif not hp.exists():\n\t\thp = Path(r\"E:\\horserace\\data\\horse\")\n\thds = hp.glob('*.csv')\n\tfor i,hd in enumerate(hds):\n\t\thdd = pd.read_csv(hd)\n\t\thdd['hid'] = hd.name[:10]\n\t\tif i == 0:\n\t\t\thdd2 = hdd.copy()\n\t\telse:\n\t\t\thdd2 = hdd2.append(hdd)\n\n\n\t# Index(['Unnamed: 0', '日付', '開催', '天気', 'レース名', '距離', '頭数', '枠番', '馬番', '人気',\n # '着順', '着差', '騎手', '斤量', 'タイム', '通過', 'ペース', '上り', '馬体重', '勝馬', 'hid',\n # 'Unnamed: 0.1'],\n # dtype='object')\n\thdd2 = hdd2.dropna(how='any')\n\thdd2['開催'] = hdd2['開催'].replace('[0-9]','',regex=True)\n\thdd2['グレード'] = hdd2['レース名'].apply(categorize_races)\n\thdd2 = hdd2.apply(lambda x: split_distance(x), axis=1)\n\t# 距離 intに\n\thdd2['time2'] = hdd2['タイム'].apply(convert_time)\n\treturn hdd2\n\ndef select_rid(url):\n\tpu = parse.urlparse(url)\n\tridd = pu.query.replace('race_id=','')[:12]\n\tprint(ridd)\n\treturn ridd\n\ndef select_racename(r):\n lpos = r.find('R ')\n rpos = r.find(' 結果')\n return r[lpos+3:rpos]\n\ndef select_hid(url):\n\tpu = parse.urlparse(url)\n\thidd = pu.query.replace('id=','')[:10]\n\tprint(hidd)\n\treturn hidd\n","sub_path":"scraping/HorseRace/HorseRace/common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"39588904","text":"# The based unit of graph convolutional networks.\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass LinearOnGraph(nn.Module):\n r\"\"\"Applies a linear transformation to the incoming data: :math:`y = xA^T + b`\n\n Args:\n adjacency: the initial value of the learnable weights of the module of shape\n bias: If set to False, the layer will not learn an additive bias.\n Default: ``True``\n\n Shape:\n - Input: 
:math:`(N, *, \\text{in\\_features})` where :math:`*` means any number of\n additional dimensions\n - Output: :math:`(N, *, \\text{out\\_features})` where all but the last dimension\n are the same shape as the input.\n\n Attributes:\n weight: the learnable weights of the module of shape\n :math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are\n initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.\n If :attr:`bias` is ``True``, the values are initialized from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n\n \"\"\"\n\n def __init__(self, adjacency, bias=False):\n super(LinearOnGraph, self).__init__()\n self.in_features = adjacency.size(0)\n self.out_features = adjacency.size(1)\n self.adjacency = nn.Parameter(adjacency, requires_grad=False)\n self.weight = nn.Parameter(adjacency)\n if bias:\n self.bias = nn.Parameter(torch.Tensor(self.out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.size(1))\n #self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input):\n output = input.matmul(self.weight+self.adjacency)\n if self.bias is not None:\n output += torch.jit._unwrap_optional(self.bias)\n return output\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.bias is not None\n )\n\n\nclass NormLinearOnGraph(nn.Module):\n r\"\"\"Applies a linear transformation to the incoming data: :math:`y = xA^T + b`\n\n Args:\n adjacency: the initial value of the learnable weights of the module of shape\n bias: If set to False, the layer will not learn an additive bias.\n Default: ``True``\n\n Shape:\n - Input: :math:`(N, *, \\text{in\\_features})` where :math:`*` means any number of\n additional dimensions\n - Output: :math:`(N, *, \\text{out\\_features})` where all but the last dimension\n are the same shape as the input.\n\n Attributes:\n weight: the learnable weights of the module of shape\n :math:`(\\text{out\\_features}, \\text{in\\_features})`. 
The values are\n initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.\n If :attr:`bias` is ``True``, the values are initialized from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n\n \"\"\"\n\n def __init__(self, adjacency, bias=False):\n super(NormLinearOnGraph, self).__init__()\n self.in_features = adjacency.size(0)\n self.out_features = adjacency.size(1)\n #self.weight = nn.Parameter(adjacency)\n self.weight = nn.Parameter(torch.where(adjacency>0.0, adjacency, torch.Tensor([-100.0])))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(self.out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input):\n #output = input.matmul(F.relu(self.weight))\n #adjsum = torch.sum(F.relu(self.weight), 0, keepdim=True)\n #output = output.div(adjsum)\n output = input.matmul(F.softmax(self.weight, dim=0))\n if self.bias is not None:\n output += torch.jit._unwrap_optional(self.bias)\n return output\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.bias is not None\n )\n\n\n\nclass ConvTemporalGraphical(nn.Module):\n\n r\"\"\"The basic module for applying a graph convolution.\n\n Args:\n in_channels (int): Number of channels in the input sequence data\n out_channels (int): Number of channels produced by the convolution\n t_kernel_size (int): Size of the temporal convolving kernel\n t_stride (int, optional): Stride of the temporal convolution. Default: 1\n t_padding (int, optional): Temporal zero-padding added to both sides of\n the input. 
Default: 0\n t_dilation (int, optional): Spacing between temporal kernel elements.\n Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the output.\n Default: ``True``\n\n Shape:\n - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format\n - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format\n - Output[0]: Outpu graph sequence in :math:`(N, out_channels, T_{out}, V)` format\n - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format\n\n where\n :math:`N` is a batch size,\n :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,\n :math:`T_{in}/T_{out}` is a length of input/output sequence,\n :math:`V` is the number of graph nodes. \n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n adjacency,\n t_kernel_size=1,\n t_stride=1,\n t_padding=0,\n t_dilation=1,\n bias=True):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n\n self.conv = nn.Conv2d(\n in_channels,\n out_channels*kernel_size,\n kernel_size=(t_kernel_size, 1),\n padding=(t_padding, 0),\n stride=(t_stride, 1),\n dilation=(t_dilation, 1),\n bias=bias)\n self.linear = LinearOnGraph(\n adjacency,\n bias=False)\n\n def forward(self, x, imp):\n x = self.conv(x)\n\n x = torch.cat(torch.split(x, self.out_channels, dim=1), dim=3)\n\n n, c, t, v = x.size()\n x = x.view(n*c*t, v)\n x = self.linear(x)\n nct, w = x.size()\n x = x.view(n, c, t, w)\n\n return x.contiguous(), imp\n","sub_path":"net/utils/tgcn2.py","file_name":"tgcn2.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"17782522","text":"while True:\n a,b,c = map(int, input().split())\n\n v = a * b * c\n\n aresta = v ** (1/3)\n\n if a == 0 and b == 0 and c == 0 :\n break\n \n 
print(str(int(aresta)))\n\n","sub_path":"uoj/topicos/1573.py","file_name":"1573.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300070153","text":"import pymysql.cursors\n\nDB_HOST = \"127.0.0.1\"\nDB_USER = \"root\"\n\ncnx = pymysql.connect(host=DB_HOST, user=DB_USER)\n\nclass Database:\n\n\t@staticmethod\n\tdef one(sql):\n\t\tcursor = Database.query(sql)\n\t\tif cursor.rowcount > 0:\n\t\t\tone = cursor.fetchone()\n\t\t\tcursor.close()\n\t\t\treturn one\n\t\telse:\n\t\t\treturn None\n\n\t@staticmethod\n\tdef all(sql):\n\t\tcursor = Database.query(sql)\n\t\tif cursor.rowcount > 0:\n\t\t\tmany = cursor.fetchall()\n\t\t\tcursor.close()\n\t\t\treturn many\n\t\telse:\n\t\t\treturn []\n\n\tdef insert(sql):\n\t\tDatabase.query(sql)\n\n\t@staticmethod\n\tdef query(query):\n\t\ttry:\n\t\t\twith cnx.cursor() as cursor:\n\t\t\t\tcursor.execute(query)\n\t\t\t\tcnx.commit()\n\t\t\t\treturn cursor\n\t\texcept Exception as inst:\n\t\t\traise inst\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"239253718","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport cms.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0012_auto_20150607_2207'),\n ('staff', '0003_auto_20150825_1126'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='staffmember',\n name='contact',\n field=cms.models.fields.PlaceholderField(related_name='contact', slotname=b'staff_contact', editable=False, to='cms.Placeholder', null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='staffmember',\n name='bio',\n field=cms.models.fields.PlaceholderField(related_name='bio', slotname=b'staff_bio', editable=False, 
to='cms.Placeholder', null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"staff/migrations/0004_auto_20150901_1033.py","file_name":"0004_auto_20150901_1033.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"223392869","text":"from asapy.result.Result import Result\nfrom asapy.result.Chunk import Chunk\nfrom asapy.result.Morph import Morph\n\n# 解析結果を出力するためのクラス\n\n\nclass Output():\n\n def outputAll(self, result: Result) -> None:\n print(\"sentence: \" + result.surface)\n for chunk in result.chunks:\n self.__outputChunk(chunk)\n\n def __outputChunk(self, chunk: Chunk) -> None:\n print(\"ID: \" + str(chunk.id) + \" \" + chunk.surface)\n print(\"\\tlink: \" + str(chunk.link))\n print(\"\\ttype: \" + chunk.ctype)\n if chunk.main:\n print(\"\\tmain: \" + chunk.main)\n if chunk.part:\n print(\"\\tpart: \" + chunk.part)\n if chunk.category:\n print(\"\\tcategory: \" + chunk.category[0])\n if chunk.semrole:\n print(\"\\tsemrole: \" + \"|\".join(chunk.semrole))\n if chunk.arg:\n print(\"\\targ: \" + \"|\".join(chunk.arg))\n if chunk.similar > 0.0:\n print(\"\\tscore: \" + str(round(chunk.similar, 6)))\n if chunk.semantic:\n print(\"\\tsemantic: \" + chunk.semantic)\n modchunks = chunk.modifiedchunks\n if modchunks:\n frame = []\n for modchunk in modchunks:\n if modchunk.semrole:\n frame_line = str(modchunk.id) + \"-\" + \"|\".join(modchunk.semrole) + \"-\" + \"|\".join(modchunk.arg)\n if frame_line not in frame:\n frame.append(frame_line)\n else:\n frame_line = str(modchunk.id) + \"-\" + modchunk.ctype\n if frame_line not in frame:\n frame.append(frame_line)\n print(\"\\tframe: \" + \",\".join(frame))\n if chunk.idiom:\n print(\"\\tidiom: \" + chunk.idiom)\n print(\"\\tfilter: \" + str(chunk.idiom_score))\n ids = sorted([str(morph.chunk.id) + \"-\" + str(morph.id) for morph in chunk.idiom_morph])\n print(\"\\tidiom_id: \" + \",\".join(ids))\n\n if (chunk.phrase):\n 
print(\"\\tphrase: \" + \",\".join(chunk.phrase))\n if (chunk.voice):\n print(\"\\tvoice: \" + chunk.voice)\n if (chunk.tense):\n print(\"\\ttense: \" + chunk.tense)\n if (chunk.sentelem):\n print(\"\\tsentelem: \" + chunk.sentelem)\n if (chunk.polarity):\n print(\"\\tpolarity: \" + chunk.polarity)\n if (chunk.mood):\n print(\"\\tmood: \" + chunk.mood)\n for morph in chunk.morphs:\n self.__outputMorph(morph)\n if (chunk.noun_agentiveL):\n print(\"\\tnoun_adjective: \" + chunk.noun_agentiveL)\n if (chunk.noun_arg):\n print(\"\\tnoun_arg: \" + chunk.noun_arg)\n if (chunk.noun_semantic):\n print(\"\\tnoun_semantic: \" + chunk.noun_semantic)\n if (chunk.noun_semrole):\n print(\"\\tnoun_semrole: \" + chunk.noun_semrole)\n if (chunk.noun_semantic):\n frame = []\n for modchunk in modchunks:\n if modchunk.noun_arg and modchunk.noun_agentiveRole:\n frame.append(str(modchunk.id) + \"-\" + modchunk.noun_arg + \"-\" + modchunk.noun_agentiveRole)\n elif modchunk.noun_arg:\n frame.append(str(modchunk.id) + \"-\" + modchunk.noun_arg)\n print(\"\\tnoun_agentiveRole: \" + \",\".join(frame))\n\n def __outputMorph(self, morph: Morph) -> None:\n morphs = [str(morph.id), morph.surface, morph.read, morph.base, morph.pos, morph.cform, morph.ctype, morph.ne]\n print(\"\\t\\t\" + \"\\t\".join(morphs))\n\n","sub_path":"asapy/output/Output.py","file_name":"Output.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156456014","text":"# 순열 사이클\n#\nimport sys\nfor _ in range(int(sys.stdin.readline())):\n N = int(sys.stdin.readline())\n permutation = list(map(int, sys.stdin.readline().split()))\n permutationList = [(i+1, item) for i, item in enumerate(permutation)]\n\n visitedList = []\n cycleCount = 0\n for node, nextNode in permutationList:\n firstNode = node\n cycleList = []\n if node not in visitedList:\n cycleList.extend([node, nextNode])\n while True:\n innerNextNode = cycleList[-1]\n if 
innerNextNode not in visitedList:\n visitedList.append(innerNextNode)\n if innerNextNode == firstNode:\n cycleCount += 1\n break\n cycleList.append(permutation[innerNextNode-1])\n\n print(cycleCount)\n","sub_path":"SsangWoo/python/baekjoon/7567.py","file_name":"7567.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"132566029","text":"#! /usr/bin/env python3\n\nimport requests\nimport json\nfrom colorama import Fore, Style\n\n\nAPI_URL = 'https://googledictionaryapi.eu-gb.mybluemix.net/'\nLANGUAGE_CODE = 'en'\n\n\nclass ConnectionError(Exception):\n pass\n\n\nclass word():\n def __init__(self, word_str):\n word.word_str = word_str\n\n request_url = API_URL + '?define=' + word_str + '&lang' + LANGUAGE_CODE\n response = requests.get(request_url)\n\n if response.status_code == 500:\n raise ConnectionError('Cannot connect to the server')\n if response.status_code == 404:\n raise ValueError('Word not found')\n\n word_data = json.loads(response.content.decode('utf-8'))[0]\n word.phonetic = word_data['phonetic']\n word.meaning = word_data['meaning']\n\n @staticmethod\n def print_data_with_indentation(data, level=0):\n if type(data) == str:\n print(' ' * level, data)\n return\n\n if type(data) == dict:\n for key, value in data.items():\n print(' ' * level, '-', Fore.GREEN, key.strip(),\n Style.RESET_ALL, ':', end='')\n if type(value) == str:\n print(' ', value)\n else:\n print()\n word.print_data_with_indentation(value, level + 1)\n else:\n for key in data:\n word.print_data_with_indentation(key, level)\n\n def print_phonetic(self):\n print('Phonetic: {}'.format(self.phonetic))\n\n def print_meaning(self):\n word.print_data_with_indentation(self.meaning, level=0)\n\n\nif __name__ == '__main__':\n w = word('hello')\n 
w.print_meaning()\n","sub_path":"gdict/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243086556","text":"import cv2\nimport PIL.Image as Image\nimport math\nimport numpy as np\nimport time\nimport argparse\nimport yaml\nimport os\nimport csv\n\nimport torch\nfrom torchvision import models\nimport torch.nn as nn\nfrom torchvision import transforms\n\n\nimport sys\nsys.path.append('../')\nfrom common import dnn_network\n\nclass FrameInferEval:\n def __init__(self,CFG):\n print(\"Eval Frame Infer\")\n\n self.frame_infer_log_top_path = CFG[\"frame_infer_log_top_path\"]\n self.frame_infer_log_file_name = CFG[\"frame_infer_log_file_name\"]\n\n self.dataset_data_top_path = CFG[\"dataset_data_top_path\"]\n self.dataset_data_file_name = CFG[\"dataset_data_file_name\"]\n\n self.saved_log_csv_top_path = CFG[\"saved_log_csv_top_path\"]\n self.saved_log_csv_file_name = CFG[\"saved_log_csv_file_name\"]\n\n self.loop_period = CFG[\"loop_period\"]\n\n self.bookmark_list = []\n\n self.do_eval()\n self.save_result_csv()\n\n def save_result_csv(self):\n result_csv_path = os.path.join(self.saved_log_csv_top_path, self.saved_log_csv_file_name)\n csv_file = open(result_csv_path, 'w')\n csv_w = csv.writer(csv_file)\n\n for row in self.bookmark_list:\n csv_w.writerow(row)\n \n csv_file.close()\n\n def do_eval(self):\n log_path = os.path.join(self.frame_infer_log_top_path, self.frame_infer_log_file_name)\n dataset_path = os.path.join(self.dataset_data_top_path, self.dataset_data_file_name)\n\n log_list = []\n with open(log_path) as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n log_list.append(row)\n\n dataset_list = []\n with open(dataset_path) as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n dataset_list.append(row)\n\n loop_bar = zip(log_list, dataset_list)\n \n for row_log, row_dataset in loop_bar:\n #pic_path = 
os.path.join(dataset_data_top_path, row_log[5])\n log_pic = cv2.imread(row_log[3])\n \n log_x = float(row_log[0])\n log_y = float(row_log[1])\n log_z = float(row_log[2])\n log_var = row_log[3]\n log_epistemic = row_log[4]\n\n data_x = float(row_dataset[0])/9.8\n data_y = float(row_dataset[1])/9.8\n data_z = float(row_dataset[2])/9.8\n\n print(log_x, log_y, log_z)\n print(data_x, data_y, data_z)\n\n print(\"\\n\")\n\n diff_x = abs(float(log_x) - float(data_x))\n diff_y = abs(float(log_y) - float(data_y))\n diff_z = abs(float(log_z) - float(data_z))\n\n tmp_bookmark_list = [row_log[3], log_x, log_y, log_z, diff_x, diff_y, diff_z]\n\n print(\"diff_x : \", diff_x)\n print(\"diff_y : \", diff_y)\n print(\"diff_z : \", diff_z)\n \n print(\"Do you want to save this picture's data? answer in y/n .\")\n print(\"If you want to exit, press q key\")\n\n cv2.imshow('image_log',log_pic)\n answer = cv2.waitKey(0)\n \n if answer == ord('y'):\n self.bookmark_list.append(tmp_bookmark_list)\n print(\"Save picture and data\\n\")\n elif answer == ord('q'):\n print(\"Stop evaluation\")\n cv2.destroyAllWindows()\n break\n else:\n print(\"\\n\")\n\n cv2.destroyAllWindows()\n print(\"\\n\")\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\"./eval_frame_infer.py\")\n\n parser.add_argument(\n '--eval_frame_infer_config', '-efic',\n type=str,\n required=False,\n default='/home/ros_catkin_ws/src/dnn_attitude_predictor_with_image/config/eval_frame_infer_config.yaml',\n help='Eval frame infer config yaml file',\n )\n\n FLAGS, unparsed = parser.parse_known_args()\n\n #load yaml file\n try:\n print(\"Opening frame infer config file %s\", FLAGS.eval_frame_infer_config)\n CFG = yaml.safe_load(open(FLAGS.eval_frame_infer_config, 'r'))\n except Exception as e:\n print(e)\n print(\"Error opening frame infer config file %s\", FLAGS.eval_frame_infer_config)\n quit()\n\n frame_infer_eval = 
FrameInferEval(CFG)","sub_path":"pysrc/regression/eval_frame_infer.py","file_name":"eval_frame_infer.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"563353994","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\ntry:\n from mpl_toolkits.mplot3d import Axes3D as _Axes3D\nexcept:\n _Axes3d = None\n\n\n\n# some useful kwarg dictionaries for different plot layouts\nkwargs_mono = dict(mc='k',\n lc='.5',\n hllc='k',\n hlmc='k',\n hlms=7,\n strlc='k')\n\n\ndef _ax_map2d_fast(ax, sensor_net, proj='default', \n m='x', mew=.5, mc='b', ms=3,):\n if hasattr(sensor_net, 'sensors'):\n sensor_net = sensor_net.sensors\n \n locs = sensor_net.getLocs2d(proj=proj)\n h = plt.plot(locs[:,0], locs[:,1], m, color=mc, ms=ms, markeredgewidth=mew)\n \n return h\n\n \ndef _ax_map2d(ax, sensor_net, proj='default', hl=[], \n labels='name', lc='k', ls=8, l_dist=.01, # labels, l colors, l size\n m='x', mew=.5, mc='b', ms=3, # marker, m edge width, m color, m size,\n strm=None, strc=None, strms=None, strlc='r', # ...same for string labels; None -> same as digits\n hlm='*', hlmc='r', hlms=5, hllc='r'): # ...same for highlight\n # in case sensor_net parent is submitted\n if hasattr(sensor_net, 'sensors'):\n sensor_net = sensor_net.sensors\n \n if strm == None:\n strm = m\n if strc == None:\n strmc = mc\n if strms == None:\n strms = ms\n if strlc == None:\n strlc = lc\n \n ax.set_aspect('equal')\n ax.set_frame_on(False)\n ax.set_axis_off()\n \n locs = sensor_net.getLocs2d(proj=proj)\n # labels\n# if labels == 'name':\n# labels = []\n# markers\n# colorList = []\n# for s in sensor_net:\n# label = s.name\n# if label.isdigit():\n# label = r'$'+label+'$'\n# colorList.append([])\n# else:\n# colorList.append('r')\n# labels.append(label)\n# elif labels == 'id':\n# labels = range(sensor_net.n)\n# colorList = ['k'] * len(labels)\n# elif labels== 'legend':\n# separator=':'\n# labels = 
[r\"$%s$%s%s\"%(i, separator, s.name) for i, s \\\n# in enumerate(sensor_net) ]\n# colorList = ['k']*len(labels)\n# else:\n# colorList = labels = [None]*sensor_net.n\n# # markers\n# markers = np.array([marker] * sensor_net.n, dtype='S2')\n# markers[highlight] = highlightMarker\n #transOffset = plt.offset_copy(plt.gca().transData, fig=fig, x = 0.05, y=0.10, units='inches')\n for i in range(sensor_net.n):\n x = locs[i,0]\n y = locs[i,1]\n # label\n if labels is None:\n label = None\n elif labels == 'id':\n label = label_for_c = str(i)\n elif labels == 'legend':\n separator=':'\n label_for_c = sensor_net.names[i]\n label = r\"$%s$%s%s\"%(i, separator, label_for_c)\n else:\n label = label_for_c = sensor_net.names[i]\n # properties\n if i in hl:\n marker, marker_c, marker_s, label_c, label_s = hlm, hlmc, hlms, hllc, ls\n elif (label!=None) and label_for_c.isdigit():\n marker, marker_c, marker_s, label_c, label_s = m, mc, ms, lc, ls\n else:\n marker, marker_c, marker_s, label_c, label_s = strm, strmc, strms, strlc, ls\n plt.plot([x],[y], marker, color=marker_c, ms=marker_s, markeredgewidth=mew)#,label=label)\n if label != None:\n plt.text(x, y+l_dist, label, fontsize=label_s,# style='oblique', \n horizontalalignment='center', verticalalignment='bottom', \n color=label_c)\n\n\n\ndef map2d(sensor_net, figsize=(5,5), frame=.01, **kwargs):\n \"\"\"\n Arguments\n ---------\n \n ax: mpl.axes or ``None``\n target axes; a new fiigure is created if ``None``\n \n figsize:\n mpl figsize\n \n highlight: = []\n sensors which should be highlighted \n \n labels: \n how the sensors should be labelled: ``'name'``, ``'id'``, ``'legend'`` \n (names and id), ``None``. 
Labels can be custmized with the following \n additional arguments: ``lc='k'`` (label color), ``ls=8`` (label \n font size), and ``ldist`` (distance from the marker).\n \n markers: \n markers can be customized with the following arguments: ``m='x'`` \n (marker symbol), ``mc='b'`` (color), ``ms=3`` (size) and ``mew=0.5`` \n (marker edge width).\n \n proj:\n Transform to apply to 3 dimensional sensor coordinates for plotting \n locations in a plane\n \n \"\"\" \n # figure\n fig = plt.figure(figsize=figsize, facecolor='w')\n ax = plt.axes([frame, frame, 1 - 2 * frame, 1 - 2 * frame])\n # the following does not make the plot\n# fig = mpl.figure.Figure(figsize=figsize, facecolor='w')\n# ax = fig.add_axes([0,0,1,1])\n _ax_map2d(ax, sensor_net, **kwargs)\n \n return fig\n\n\n\n\ndef map3d(sensor_net, marker='c*', labels=False, headBall=0):\n \"\"\"not very helpful...\"\"\"\n if _Axes3D is None:\n raise ImportError(\"mpl_toolkits.mplot3d.Axes3D could not be imported\")\n \n if hasattr(sensor_net, 'sensors'):\n sensor_net = sensor_net.sensors\n locs = sensor_net.locs3d\n fig = plt.gcf()\n ax = _Axes3D(fig)\n ax.scatter(locs[:,0], locs[:,1], locs[:,2])\n # plot head ball\n if headBall>0:\n u = np.linspace(0, 1 * np.pi, 10)\n v = np.linspace(0, np.pi, 10)\n \n x = 5 * headBall * np.outer( np.cos(u), np.sin(v))\n z = 10 * (headBall * np.outer( np.sin(u), np.sin(v)) -.5) # vertical\n y = 5 * headBall * np.outer( np.ones(np.size(u)), np.cos(v)) # axis of the sphere\n ax.plot_surface(x, y, z, rstride=1, cstride=1, color='w')\n #n = 100\n #for c, zl, zh in [('r', -50, -25), ('b', -30, -5)]:\n #xs, ys, zs = zip(*\n # [(random.randrange(23, 32),\n # random.randrange(100),\n # random.randrange(zl, zh)\n # ) for i in range(n)])\n #ax.scatter(xs, ys, zs, c=c)\n","sub_path":"eelbrain/plot/sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"237930868","text":"import numpy as np\nimport os\nimport SimpleITK as sitk\n# import nibabel as nib\nimport pandas as pd\nimport copy\nimport PySimpleGUI as gui\nimport matplotlib.pyplot as plt\nimport glob\nimport sys\nfrom myutil.myutil import load_itk, get_gdth_pred_names, one_hot_encode_3d\n\n\n# %%\ndef show_itk(itk, idx):\n ref_surface_array = sitk.GetArrayViewFromImage(itk)\n plt.figure()\n plt.imshow(ref_surface_array[idx])\n plt.show()\n\n return None\n\n\ndef computeQualityMeasures(lP, lT, spacing, metrics_type=None):\n \"\"\"\n\n :param lP: prediction, shape (x, y, z)\n :param lT: ground truth, shape (x, y, z)\n :param spacing: shape order (x, y, z)\n :return: quality: dict contains metircs\n \"\"\"\n quality = {}\n labelPred = sitk.GetImageFromArray(lP, isVector=False)\n labelPred.SetSpacing(spacing)\n labelTrue = sitk.GetImageFromArray(lT, isVector=False)\n labelTrue.SetSpacing(spacing) # spacing order (x, y, z)\n\n voxel_metrics = ['dice', 'jaccard', 'precision', 'recall', 'fpr', 'fnr', 'vs']\n distance_metrics = ['hd', 'hd95', 'msd', 'mdsd', 'stdsd']\n metrics_type = set([]) if metrics_type is None else set(metrics_type)\n # to save time, we need to determine which metrics we need to compute\n if set(voxel_metrics).intersection(metrics_type) or not metrics_type:\n pred = lP.astype(int) # float data does not support bit_and and bit_or\n gdth = lT.astype(int) # float data does not support bit_and and bit_or\n fp_array = copy.deepcopy(pred) # keep pred unchanged\n fn_array = copy.deepcopy(gdth)\n gdth_sum = np.sum(gdth)\n pred_sum = np.sum(pred)\n intersection = gdth & pred\n union = gdth | pred\n intersection_sum = np.count_nonzero(intersection)\n union_sum = np.count_nonzero(union)\n\n tp_array = intersection\n\n tmp = pred - gdth\n fp_array[tmp < 1] = 0\n\n tmp2 = gdth - pred\n fn_array[tmp2 < 1] = 0\n\n tn_array = np.ones(gdth.shape) - union\n\n tp, fp, fn, tn = np.sum(tp_array), np.sum(fp_array), np.sum(fn_array), np.sum(tn_array)\n\n 
smooth = 0.001\n precision = tp / (pred_sum + smooth)\n recall = tp / (gdth_sum + smooth)\n\n false_positive_rate = fp / (fp + tn + smooth)\n false_negtive_rate = fn / (fn + tp + smooth)\n\n jaccard = intersection_sum / (union_sum + smooth)\n dice = 2 * intersection_sum / (gdth_sum + pred_sum + smooth)\n\n dicecomputer = sitk.LabelOverlapMeasuresImageFilter()\n dicecomputer.Execute(labelTrue > 0.5, labelPred > 0.5)\n\n quality[\"dice\"] = dice\n quality[\"jaccard\"] = jaccard\n quality[\"precision\"] = precision\n quality[\"recall\"] = recall\n quality[\"false_negtive_rate\"] = false_negtive_rate\n quality[\"false_positive_rate\"] = false_positive_rate\n quality[\"volume_similarity\"] = dicecomputer.GetVolumeSimilarity()\n\n if set(distance_metrics).intersection(metrics_type) or not metrics_type:\n slice_idx = 300\n # Surface distance measures\n signed_distance_map = sitk.SignedMaurerDistanceMap(labelTrue > 0.5, squaredDistance=False,\n useImageSpacing=True) # It need to be adapted.\n # show_itk(signed_distance_map, slice_idx)\n\n ref_distance_map = sitk.Abs(signed_distance_map)\n # show_itk(ref_distance_map, slice_idx)\n\n ref_surface = sitk.LabelContour(labelTrue > 0.5, fullyConnected=True)\n # show_itk(ref_surface, slice_idx)\n ref_surface_array = sitk.GetArrayViewFromImage(ref_surface)\n\n statistics_image_filter = sitk.StatisticsImageFilter()\n statistics_image_filter.Execute(ref_surface > 0.5)\n\n num_ref_surface_pixels = int(statistics_image_filter.GetSum())\n\n signed_distance_map_pred = sitk.SignedMaurerDistanceMap(labelPred > 0.5, squaredDistance=False,\n useImageSpacing=True)\n # show_itk(signed_distance_map_pred, slice_idx)\n\n seg_distance_map = sitk.Abs(signed_distance_map_pred)\n # show_itk(seg_distance_map, slice_idx)\n\n seg_surface = sitk.LabelContour(labelPred > 0.5, fullyConnected=True)\n # show_itk(seg_surface, slice_idx)\n seg_surface_array = sitk.GetArrayViewFromImage(seg_surface)\n\n seg2ref_distance_map = ref_distance_map * 
sitk.Cast(seg_surface, sitk.sitkFloat32)\n # show_itk(seg2ref_distance_map, slice_idx)\n\n ref2seg_distance_map = seg_distance_map * sitk.Cast(ref_surface, sitk.sitkFloat32)\n # show_itk(ref2seg_distance_map, slice_idx)\n\n statistics_image_filter.Execute(seg_surface > 0.5)\n\n num_seg_surface_pixels = int(statistics_image_filter.GetSum())\n\n seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])\n seg2ref_distances = seg2ref_distances + list(np.zeros(num_seg_surface_pixels - len(seg2ref_distances)))\n ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])\n ref2seg_distances = ref2seg_distances + list(np.zeros(num_ref_surface_pixels - len(ref2seg_distances))) #\n\n all_surface_distances = seg2ref_distances + ref2seg_distances\n quality[\"mean_surface_distance\"] = np.mean(all_surface_distances)\n quality[\"median_surface_distance\"] = np.median(all_surface_distances)\n quality[\"std_surface_distance\"] = np.std(all_surface_distances)\n quality[\"95_surface_distance\"] = np.percentile(all_surface_distances, 95)\n quality[\"Hausdorff\"] = np.max(all_surface_distances)\n\n return quality\n\n\ndef get_metrics_dict_all_labels(labels, gdth, pred, spacing, metrics_type=None):\n \"\"\"\n\n :param metrics_type:\n :param labels: not include background, e.g. 
[4,5,6,7,8] or [1]\n :param gdth: shape: (x, y, z, channels), channels is equal to len(labels) or equal to len(labels)+1 (background)\n :param pred: the same as above\n :param spacing: spacing order should be (x, y, z) !!!\n :return: metrics_dict_all_labels a dict which contain all metrics\n \"\"\"\n metrics_parameters_dict = {}\n\n Hausdorff_list = []\n Dice_list = []\n Jaccard_list = []\n Volume_list = []\n mean_surface_dis_list = []\n median_surface_dis_list = []\n std_surface_dis_list = []\n nine5_surface_dis_list = []\n precision_list = []\n recall_list = []\n false_positive_rate_list = []\n false_negtive_rate_list = []\n\n for i, label in enumerate(labels):\n print('start get metrics for label: ', label)\n pred_per = pred[..., i] # select onlabel\n gdth_per = gdth[..., i]\n\n metrics = computeQualityMeasures(pred_per, gdth_per, spacing=spacing, metrics_type=metrics_type)\n print(metrics)\n\n Dice_list.append(metrics[\"dice\"])\n Jaccard_list.append(metrics[\"jaccard\"])\n precision_list.append(metrics[\"precision\"])\n recall_list.append(metrics[\"recall\"])\n false_negtive_rate_list.append(metrics[\"false_negtive_rate\"])\n false_positive_rate_list.append(metrics[\"false_positive_rate\"])\n Volume_list.append(metrics[\"volume_similarity\"])\n\n mean_surface_dis_list.append(metrics[\"mean_surface_distance\"])\n median_surface_dis_list.append(metrics[\"median_surface_distance\"])\n std_surface_dis_list.append(metrics[\"std_surface_distance\"])\n nine5_surface_dis_list.append(metrics[\"95_surface_distance\"])\n Hausdorff_list.append(metrics[\"Hausdorff\"])\n\n metrics_dict_all_labels = {'dice': Dice_list,\n 'jaccard': Jaccard_list,\n 'precision': precision_list,\n 'recall': recall_list,\n 'fpr': false_positive_rate_list,\n 'fnr': false_negtive_rate_list,\n 'vs': Volume_list,\n 'hd': Hausdorff_list,\n 'msd': mean_surface_dis_list,\n 'mdsd': median_surface_dis_list,\n 'stdsd': std_surface_dis_list,\n 'hd95': nine5_surface_dis_list}\n\n metrics_dict = {k: v for k, 
v in metrics_dict_all_labels.items() if v} # remove empty values\n\n return metrics_dict\n\n\ndef write_metrics(labels, gdth_path, pred_path, csv_file, metrics=None):\n \"\"\"\n\n :param labels: exclude background\n :param gdth_path: a absolute directory path or file name\n :param pred_path: a absolute directory path or file name\n :param csv_file: filename to save the metrics\n :return: metrics_dict_all_labels: a dict which save metrics\n \"\"\"\n print('start calculate all metrics (volume and distance) and write them to csv')\n if '/' not in gdth_path.split('.')[-1]: # gdth is a file instead of a directory\n gdth_names, pred_names = [gdth_path], [pred_path]\n else:\n gdth_names, pred_names = get_gdth_pred_names(gdth_path, pred_path)\n\n for gdth_name, pred_name in zip(gdth_names, pred_names):\n gdth, gdth_origin, gdth_spacing = load_itk(gdth_name)\n pred, pred_origin, pred_spacing = load_itk(pred_name)\n\n gdth = one_hot_encode_3d(gdth, labels=labels)\n pred = one_hot_encode_3d(pred, labels=labels)\n print('start calculate all metrics for image: ', pred_name)\n metrics_dict_all_labels = get_metrics_dict_all_labels(labels, gdth, pred, spacing=gdth_spacing[::-1],\n metrics_type=metrics)\n metrics_dict_all_labels['filename'] = pred_name # add a new key to the metrics\n data_frame = pd.DataFrame(metrics_dict_all_labels)\n data_frame.to_csv(csv_file, mode='a', header=not os.path.exists(csv_file), index=False)\n\n return metrics_dict_all_labels\n\n\ndef main():\n labels = [0, 4, 5, 6, 7, 8]\n gdth_path = 'data/gdth'\n pred_path = 'data/pred'\n csv_file = 'metrics.csv'\n\n write_metrics(labels=labels[1:], # exclude background\n gdth_path=gdth_path,\n pred_path=pred_path,\n csv_file=csv_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"seg_metrics/seg_metrics.py","file_name":"seg_metrics.py","file_ext":"py","file_size_in_byte":10328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"110247693","text":"import os, datetime, time, argparse\nimport math, cv2, numpy as np\nimport data, model\nimport torch\nimport torch.nn.functional as F\nimport misc.flow_vis as fv\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--test_name', type=str, required=True)\nparser.add_argument('--data_path', type=str, required=True)\nparser.add_argument('--model_path', type=str, required=True)\nparser.add_argument('--batch_size', type=int, default=1)\nparser.add_argument('--hr_size', type=int, default=720)\nparser.add_argument('--lr_size', type=int, default=180)\nparser.add_argument('--blur', action='store_true')\nparser.add_argument('--save_lr', action='store_true')\nparser.add_argument('--save_hr', action='store_true')\nparser.add_argument('--save_flows', action='store_true')\nparser.add_argument('--save_warps', action='store_true')\nargs = parser.parse_args()\n\n## MODEL LOADING\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ncp_dict = torch.load(args.model_path, map_location=device)\nflow, inter = cp_dict['flow_net'], cp_dict['inter_net']\ndelta = cp_dict['delta']\nC = cp_dict['channels']\n\nflow_c_in = 2 * C\nflow_c_out = 2\ninter_c_in = 4 * C * delta + 2 * C + 4 * delta + 2\ninter_c_out = C\n\nflow_net = getattr(__import__('models.' + flow, fromlist=[flow]), flow)(flow_c_in, flow_c_out)\ninter_net = getattr(__import__('models.' 
+ inter, fromlist=[inter]), inter)(inter_c_in, inter_c_out)\nsr_model = model.SuperResModel(delta, flow_net, inter_net)\nsr_model.load_state_dict(cp_dict['state_dict'])\n\nif torch.cuda.device_count() > 1:\n\tsr_model = torch.nn.DataParallel(sr_model)\nsr_model.to(device)\n\ngridX, gridY = np.meshgrid(np.arange(args.lr_size), np.arange(args.lr_size))\nnum_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1\ngridX = torch.tensor(gridX, requires_grad=False, device=device).repeat(num_devices, 1, 1)\ngridY = torch.tensor(gridY, requires_grad=False, device=device).repeat(num_devices, 1, 1)\n\n## DATA LOADING\n\nprint('Loading test data...')\n\ntestset = data.SuperRes(args.data_path, 'convert', delta, 0, 0, C)\ntestloader = torch.utils.data.DataLoader(testset, batch_size = args.batch_size, shuffle = False, num_workers = 4)\n\nprint('Loaded ' + str(len(testset)) + ' test examples')\n\n## TEST LOOP\n\nif args.save_lr:\n\tos.makedirs(os.path.join('converted', args.test_name, 'lr'), exist_ok=True)\nif args.save_hr:\n\tos.makedirs(os.path.join('converted', args.test_name, 'hr'), exist_ok=True)\nif args.save_flows:\n\tfor i in range(2 * delta + 1):\n\t\tos.makedirs(os.path.join('converted', args.test_name, 'flows_%02d' % i), exist_ok=True)\nif args.save_warps:\n\tfor i in range(2 * delta + 1):\n\t\tos.makedirs(os.path.join('converted', args.test_name, 'warps_%02d' % i), exist_ok=True)\nos.makedirs(os.path.join('converted', args.test_name, 'pred'), exist_ok=True)\n\ndef gaussian_kernel(size, sigma=2., dim=2, channels=3):\n kernel_size = 2*size + 1\n kernel_size = [kernel_size] * dim\n sigma = [sigma] * dim\n kernel = 1\n meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2)\n kernel = kernel / torch.sum(kernel)\n kernel = kernel.view(1, 1, 
*kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n return kernel\n\ngaussian_filter = gaussian_kernel(1, sigma=1.5, channels=C).to(device)\n\ndef write_data(test_frames, output, test_index, C):\n\tfor i in range(output.shape[0]):\n\t\tif args.save_lr:\n\t\t\timg_lr = np.moveaxis(input[i, delta].cpu().numpy(), 0, 2) * 255\n\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'lr', 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), img_lr)\n\t\tif args.save_hr:\n\t\t\timg_hr = np.moveaxis(test_frames[i, delta].cpu().numpy(), 0, 2) * 255\n\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'hr', 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), img_hr)\n\t\timg_pred = np.moveaxis(output[i, :C].cpu().numpy(), 0, -1) * 255\n\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'pred', 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), img_pred)\n\t\tif args.save_flows:\n\t\t\tfor f in range(2 * delta + 1):\n\t\t\t\tuv = np.moveaxis(output[i, C + f * 2 : C + (f + 1) * 2].cpu().numpy(), 0, -1)\n\t\t\t\tflow_img = fv.flow_to_color(uv, convert_to_bgr=True)\n\t\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'flows_%02d' % f, 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), flow_img)\n\t\tif args.save_warps:\n\t\t\tfirst_c = C if not args.save_flows else (C + 2 * (2 * delta + 1))\n\t\t\tfor f in range(2 * delta + 1):\n\t\t\t\twarp_img = np.moveaxis(output[i, first_c + f * C : first_c + (f + 1) * C].cpu().numpy(), 0, -1) * 255\n\t\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'warps_%02d' % f, 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), warp_img)\n\nprint('Converting...')\n\nwith torch.no_grad():\n\tfor test_index, test_frames in enumerate(testloader, 0):\n\t\tif test_index % int(len(testloader) / 100) == 0:\n\t\t\tprint('.', end='', flush=True)\n\t\tB, NF, C, H_hr, W_hr = test_frames.shape\n\t\ttest_frames = test_frames.to(device)\n\t\tinput = 
test_frames.view(B * NF, C, H_hr, W_hr)\n\t\tif args.blur:\n\t\t\tinput = F.conv2d(input, gaussian_filter, padding=1, groups=C)\n\t\tinput = F.interpolate(input, size=args.lr_size, mode='area')\n\t\tinput = input.view(B, NF, C, args.lr_size, args.lr_size)\n\t\toutput = sr_model(input, args.hr_size, gridX, gridY, out_flows=args.save_flows, out_warps=args.save_warps)\n\t\twrite_data(test_frames, output, test_index, C)\n","sub_path":"convert_video.py","file_name":"convert_video.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389349564","text":"#-*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.qda import QDA\nfrom sklearn.metrics import accuracy_score\n\"\"\"\nQuadratic Discriminant Analysis\nMeta-parameters:\n NONE\n\"\"\"\n\ndef train_classifier(xTrain_s, yTrain_s, kwargs):\n \"\"\"\n Train a naive baise classifier on xTrain and yTrain and return the trained\n classifier\n \"\"\"\n if type(xTrain_s) != list:\n classifier_s = QDA(**kwargs)\n classifier_s.fit(xTrain_s, yTrain_s)\n\n else:\n classifier_s = train_classifier_8(xTrain_s, yTrain_s, kwargs)\n\n return classifier_s\n\ndef train_classifier_8(xsTrain_s, yTrain_s, kwargs):\n \"\"\"\n performs the training and returns the predictors\n \"\"\"\n # If we work with the splitted dataset:\n\n classifier_s = []\n\n for n in range(len(xsTrain_s)):\n # Training:\n classifier = train_classifier(xsTrain_s[n], yTrain_s[n], kwargs)\n classifier_s.append(classifier)\n\n return classifier_s\n\ndef predict_proba(classifier_s, dataset_s):\n \"\"\"\n Given a dataset and a classifier, compute the proba prediction\n This function can be use for validation as well as for the test.\n \"\"\"\n if type(classifier_s) != list:\n # Probability of being in each label\n proba_predicted_s = classifier_s.predict_proba(dataset_s) #[:,1]\n\n else:\n proba_predicted_s = predict_proba_8(classifier_s, dataset_s)\n\n return 
proba_predicted_s\n\ndef predict_proba_8(classifier_s, dataset_s):\n \"\"\"\n Predict the output of this classifier on the the dataset divided in 8 groups\n \"\"\"\n\n # If we work with the splitted dataset:\n proba_predicted_s = []\n\n for n in range(len(dataset_s)):\n proba_predicted = predict_proba(classifier_s[n], dataset_s[n])\n proba_predicted_s.append(proba_predicted)\n\n return proba_predicted_s\n\n\ndef get_classification_error(y_predicted_s, y_true_s, normalize= True):\n\n if type(y_predicted_s) == list:\n prediction_error_s = []\n\n for n in range(len(y_predicted_s)):\n prediction_error_s.append(accuracy_score(y_true_s[n],\n y_predicted_s[n],\n normalize=normalize))\n else:\n prediction_error_s = accuracy_score(y_true_s, y_predicted_s,\n normalize=normalize)\n\n return prediction_error_s\n\n\n","sub_path":"Analyses/qda.py","file_name":"qda.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"283078459","text":"from unittest.mock import MagicMock\n\nimport pytest\nfrom PySide2.QtTest import QTest\n\nfrom node_launcher.constants import (\n BITCOIN_MAINNET_PEER_PORT,\n BITCOIN_MAINNET_RPC_PORT,\n TARGET_BITCOIN_RELEASE\n)\nfrom node_launcher.gui.menu.manage_bitcoind import BitcoindPortsLayout\n\n\n@pytest.fixture\ndef bitcoind_ports_layout() -> BitcoindPortsLayout:\n bitcoin = MagicMock()\n bitcoin.node_port = BITCOIN_MAINNET_PEER_PORT\n bitcoin.rpc_port = BITCOIN_MAINNET_RPC_PORT\n bitcoin.zmq_block_port = 18500\n bitcoin.zmq_tx_port = 18501\n bitcoin.zmq_tx_port = 18501\n bitcoin.software.release_version = TARGET_BITCOIN_RELEASE\n layout = BitcoindPortsLayout(bitcoin)\n return layout\n\n\nclass TestBitcoindConfigurationTab(object):\n def test_bitcoin_network_port(self,\n bitcoind_ports_layout,\n qtbot: QTest):\n assert bitcoind_ports_layout.bitcoin_network_port.text().endswith(\n str(BITCOIN_MAINNET_PEER_PORT)\n )\n\n def test_rpc_port(self,\n 
bitcoind_ports_layout,\n qtbot: QTest):\n assert bitcoind_ports_layout.rpc_port.text().endswith(\n str(BITCOIN_MAINNET_RPC_PORT)\n )\n\n def test_zmq_ports(self,\n bitcoind_ports_layout,\n qtbot: QTest):\n assert bitcoind_ports_layout.zmq_ports.text().endswith('18500/18501')\n\n","sub_path":"tests/test_gui/test_menu/test_manage_bitcoind/test_bitcoind_ports_layout.py","file_name":"test_bitcoind_ports_layout.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"39053513","text":"from ChallengeClient import challengeinterface\r\nimport base64\r\n\r\n#############################################################\r\n# Function declarations\r\n#############################################################\r\nC_UPPER = range(ord('A'),ord('Z')+1) #goddammed +1 is required: fucked me up for an hour >_<\r\nC_LOWER = range(ord('a'),ord('z')+1)\r\n\r\n#rot-n function\r\ndef rot_n(n, msg):\r\n decoded = ''\r\n for c in msg:\r\n rotted = ord(c) + n\r\n if ord(c) in C_UPPER:\r\n if rotted > ord('Z'):\r\n rotted = (rotted - ord('Z')) + ord('A') - 1\r\n decoded += chr(rotted)\r\n else:\r\n decoded += chr(rotted)\r\n elif ord(c) in C_LOWER:\r\n if rotted > ord('z'):\r\n rotted = (rotted - ord('z')) + ord('a') - 1\r\n decoded += chr(rotted)\r\n else:\r\n decoded += chr(rotted)\r\n else:\r\n decoded += c\r\n\r\n return decoded\r\n\r\ndef all_casings(s):\r\n if not s:\r\n yield \"\"\r\n else:\r\n first = s[:1]\r\n if first.lower() == first.upper():\r\n for sub_casing in all_casings(s[1:]):\r\n yield first + sub_casing\r\n else:\r\n for sub_casing in all_casings(s[1:]):\r\n yield first.lower() + sub_casing\r\n yield first.upper() + sub_casing\r\n\r\n# select_rline\r\n# Takes the full challenge text as input and trims it down to\r\n# the line that you input, counting from the end of the string\r\n# e.g. 
if you input line=2, it will return the second last line\r\ndef select_rline(fulltext, rline):\r\n lines = fulltext.rsplit(\"\\n\")\r\n problemtext = lines[len(lines) - rline]\r\n return problemtext\r\n\r\n# solve_problem\r\n# Solve the problem in this function\r\ndef solve_problem(problemtext):\r\n # split string into fragments\r\n # 4 b64 characters represents 3 ascii characters\r\n i = 0\r\n frag = []\r\n while (i < len(problemtext)):\r\n frag.append(problemtext[i:i + 4])\r\n i += 4\r\n\r\n possible_rots = []\r\n for n in range(0, 26):\r\n possible_rots.append(n)\r\n\r\n found = {}\r\n for f in frag:\r\n perms = all_casings(f)\r\n #print(\"frag\",f)\r\n breakout = False\r\n for p in perms:\r\n if (breakout):\r\n breakout = False\r\n break\r\n #print(\"frag\",f,\"perm\",p)\r\n for n in possible_rots:\r\n new_r = rot_n(n, p) + \"===\"\r\n new_b = base64.b64decode(new_r)\r\n try:\r\n new_s = new_b.decode('ascii')\r\n good = True\r\n for c in new_s:\r\n if (not (ord(c) in C_UPPER or ord(c) == ord(\" \"))):\r\n good = False\r\n #print(c, ord(c), \"is not a good char\")\r\n break\r\n #else:\r\n #print(c,ord(c),\"is a good char\")\r\n if (good):\r\n #print(p, n, \"->\", new_r, \"=>\", new_s)\r\n if (n not in found):\r\n found[n] = []\r\n found[n].append(p)\r\n #breakout = True\r\n #break\r\n except UnicodeDecodeError:\r\n #print(\"decode error\",n,new_r)\r\n continue\r\n\r\n #print(found)\r\n #possible_rots = [*found.keys()]\r\n #print(possible_rots)\r\n\r\n #find longest array in found, which will give us the rot\r\n maxlen = 0\r\n index = -1\r\n for i in found:\r\n if (len(found[i]) > maxlen):\r\n maxlen = len(found[i])\r\n index = i\r\n coded = rot_n(index, \"\".join(found[index]))\r\n coded2 = base64.b64decode(coded)\r\n answer = coded2.decode('ascii')\r\n return answer\r\n\r\n#############################################################\r\n# Main code starts here\r\nif __name__ == \"__main__\":\r\n level = '6'\r\n serverip = \"15.223.13.29\"\r\n challengeport = 
8001\r\n\r\n # start the challenge game\r\n challenge = challengeinterface(serverip, challengeport)\r\n print(challenge.start())\r\n\r\n # choose the level to run\r\n challengetext = challenge.select_level(level)\r\n print('\\nChallenge Text is:\\n' + challengetext)\r\n\r\n # trim the text down to the problem statement\r\n problemtext = select_rline(challengetext, 2)\r\n print('\\nProblem Text is:\\n' + problemtext)\r\n\r\n # solve the problem\r\n solution = solve_problem(problemtext)\r\n print('\\nYour solution is:\\n' + solution)\r\n\r\n # submit the answer\r\n result = challenge.submit_answer(solution)\r\n print('\\n Result is:\\n' + result)\r\n\r\n # close the socket at the end of the program\r\n challenge.exit()","sub_path":"Solutions/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"450751814","text":"#!/usr/bin/env python3\nimport math\nimport client as ct\nimport ast\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom hierarchy_pos import hierarchy_pos\nimport time\n\nimport time\n\n\nclass State:\n \"\"\"Represents a node in the minimax decision tree and a possible state of the game at a given (hypothetical)\n time\"\"\"\n\n instances = 0\n\n action_offset = {\n \"stay\": (0, 0),\n \"north\": (0, -1),\n \"south\": (0, 1),\n \"east\": (1, 0),\n \"west\": (-1, 0)\n }\n\n goal_positions = None\n max_rounds = None\n obstacle_matrix = None\n\n @classmethod\n def set_obstacle_matrix(cls, obstacle_matrix):\n \"\"\"Defines the obstacle matrix, where at indexes [x][y] 1 means there is an obstacle at position at (x,y),\n 0 otherwise. 
Also defines the dimensions of the board\"\"\"\n cls.obstacle_matrix = obstacle_matrix\n cls.columns = len(obstacle_matrix)\n cls.rows = len(obstacle_matrix[0])\n\n @classmethod\n def set_goal_positions(cls, goal_positions):\n \"\"\"Defines the goal positions for the round that will be examined by the algorithm\"\"\"\n cls.goal_positions = goal_positions\n\n @classmethod\n def set_max_rounds(cls, max_rounds):\n \"\"\"Defines the duration of the game, which is also the depth of the search tree\"\"\"\n cls.max_rounds = max_rounds\n\n def __init__(self, is_max_turn, min_pos, max_pos, previous_round, graph=None, previous_name=None, action=None):\n \"\"\"Defines the state's attributes and, if visualization is enabled (work in progress), adds a corresponding node\n and edge to the tree graph.\"\"\"\n\n self.is_max_turn = is_max_turn\n self.min_pos = min_pos\n self.max_pos = max_pos\n self.round = previous_round + (1 if is_max_turn else 0)\n self.graph = graph\n self.name = \"root\"\n self.action = action\n\n # TODO: fix graph issues\n if self.graph is not None and (previous_name is not None):\n self.name = previous_name + str(\"Max\" if self.is_max_turn else \"Min\") + \\\n str(self.max_pos if self.is_max_turn else self.min_pos)\n graph.add_node(self.name) # NetworkX\n graph.add_edge(previous_name, self.name, action=action) # NetworkX\n\n State.instances += 1\n\n def result(self, action):\n \"\"\"Defines the state that results from doing a certain action in the state.\n In other words, this function is the transition model.\n\n Parameters:\n action (string): action description string\n\n Returns:\n (State): the resulting state\n \"\"\"\n if self.is_max_turn:\n new_max_pos = (self.max_pos[0] + self.action_offset[action][0],\n self.max_pos[1] + self.action_offset[action][1])\n new_min_pos = self.min_pos\n else:\n new_min_pos = (self.min_pos[0] + self.action_offset[action][0],\n self.min_pos[1] + self.action_offset[action][1])\n new_max_pos = self.max_pos\n return State(not 
self.is_max_turn, new_min_pos, new_max_pos, self.round,\n graph=self.graph, previous_name=self.name, action=action)\n\n def utility(self):\n \"\"\"Utility function (or payoff function). Defines the final numeric value for the game that ends in the state.\n Since the state knows which player is next and the game is a two-player game, there is no need to pass the\n player as an argument.\n\n Returns:\n (int) 0 if the minimizing player wins, 1 if the maximizing player wins\n \"\"\"\n u = 0 if self.min_pos in self.goal_positions else 1\n if self.graph is not None:\n self.graph.nodes[self.name][\"value\"] = u\n return u\n\n def is_terminal(self):\n \"\"\"Checks whether or not the game is over. Returns True if so, False otherwise.\n In other words, checks this state is a terminal state.\n\n Returns:\n (bool): whether or the state is a terminal state.\n \"\"\"\n return self.min_pos in State.goal_positions or self.round >= State.max_rounds\n\n def is_legal(self, action):\n \"\"\"Checks whether or not the next player can perform a certain action.\n\n Parameters:\n action (str): the action description string\n\n Returns:\n (bool): whether or not the next player can perform the given action\n \"\"\"\n player_pos = self.max_pos if self.is_max_turn else self.min_pos\n other_pos = self.min_pos if self.is_max_turn else self.max_pos\n offset = self.action_offset[action]\n new_x = (player_pos[0] + offset[0]) % self.columns\n new_y = (player_pos[1] + offset[1]) % self.rows\n return all((\n State.obstacle_matrix[new_x][new_y] == 0,\n (new_x, new_y) != other_pos,\n (not self.is_max_turn or (self.is_max_turn and (new_x, new_y) not in State.goal_positions))))\n\n @staticmethod\n def manhattan_distance(pos1, pos2):\n \"\"\" The Manhattan distance between two positions\n\n Parameters:\n pos1 (tuple or list): a position\n pos2 (tuple or list): another position\n\n Returns:\n (int or float): the Manhattan distance between pos1 and pos2\n \"\"\"\n return abs(pos2[0] - pos1[0]) + abs(pos2[1] 
- pos1[1])\n\n def killer_moves(self, action):\n \"\"\"Heuristic ordering function for the actions, based on the Manhattan distance to the goal or related tiles.\n\n Parameters:\n action (str): the action to rank\n\n Returns:\n (int): if it's the maximizing player's turn, returns the Manhattan distance from the position resulting\n from the action to the goal-adjacent tile that is closest to the minimizing player. If it's the\n minimizing player's turn, returns the Manhattan distance from position that results from the action to\n the goal.\n \"\"\"\n closest_goal = min(self.goal_positions, key=lambda x: self.manhattan_distance(x, self.min_pos))\n if not self.is_max_turn:\n hypothetical_pos = (self.max_pos[0] + self.action_offset[action][0],\n self.max_pos[1] + self.action_offset[action][1])\n dx, dy = closest_goal[0]-self.min_pos[0], closest_goal[1]-self.min_pos[1]\n if dx >= 0 and dx >= abs(dy):\n return self.manhattan_distance(hypothetical_pos, (closest_goal[0] + 1, closest_goal[1]))\n if dx <= 0 and dx >= abs(dy):\n return self.manhattan_distance(hypothetical_pos, (closest_goal[0] - 1, closest_goal[1]))\n if dy >= 0 and dx < abs(dy):\n return self.manhattan_distance(hypothetical_pos, (closest_goal[0], closest_goal[1] + 1))\n if dy <= 0 and dx < abs(dy):\n return self.manhattan_distance(hypothetical_pos, (closest_goal[0], closest_goal[1] - 1))\n else:\n hypothetical_pos = (self.min_pos[0] + self.action_offset[action][0],\n self.min_pos[1] + self.action_offset[action][1])\n return self.manhattan_distance(hypothetical_pos, closest_goal)\n\n\n def actions(self):\n \"\"\"Returns all actions that the next player is allowed to perform in its turn.\n\n Returns:\n (list) list of strings representing all legal actions (\"north\", \"south\", \"east\", \"west\", \"stay\"),\n ordered by the killer-moves heuristic\n \"\"\"\n return sorted([action for action in State.action_offset if self.is_legal(action)], key=self.killer_moves)\n #return [action for action in 
State.action_offset if self.is_legal(action)]\n\n def max_value(self, alpha, beta, action):\n \"\"\"Explores, in a tree-like fashion, the outcomes of all possible actions in the state from the perspective of\n the minimizing player, without ever exploring the outcomes that could have no influence on the final decision.\n\n Parameters:\n alpha (int): the value of the best choice found so far in the path for the maximizing player\n beta (int): the value of the best choice found so far in the path for the minimizing player\n action (str): the action that resulted in the state that the function is given\n\n Returns:\n (str): the action that resulted in the value to assign to this state\n (int): the value to assign to this state, according to the minimax algorithm, from the perspective of\n the minimizing player.\n \"\"\"\n\n if self.is_terminal():\n return action, self.utility()\n\n value = -1000\n for a in self.actions():\n action, value = max((action, value),\n (a, self.result(a).min_value(alpha, beta, a)[1]),\n key=lambda x: x[1])\n if value >= beta:\n return action, value\n alpha = min(alpha, value)\n\n if self.graph is not None:\n self.graph.nodes[self.name][\"value\"] = value\n\n return action, value\n\n def min_value(self, alpha, beta, action):\n \"\"\"Explores, in a tree-like fashion, the outcomes of all possible actions in the state from the perspective of\n the minimizing player, without ever exploring the outcomes that could have no influence on the final decision.\n\n Parameters:\n alpha (int): the value of the best choice found so far in the path for the maximizing player\n beta (int): the value of the best choice found so far in the path for the minimizing player\n action (str): the action that resulted in the state that the function is given\n\n Returns:\n (str): the action that resulted in the value to assing to this state\n (int): the value to assign to this state, according to the minimax algorithm, from the perspective of the\n minimizing player.\n 
\"\"\"\n\n if self.is_terminal():\n return action, self.utility()\n\n value = 1000\n for a in self.actions():\n action, value = min((action, value),\n (a, self.result(a).max_value(alpha, beta, a)[1]),\n key=lambda x: x[1])\n if value <= alpha:\n return action, value\n beta = min(beta, value)\n\n if self.graph is not None:\n self.graph.nodes[self.name][\"value\"] = value\n return action, value\n\n\nclass Agent:\n \"\"\"Describes an adversarial agent\"\"\"\n\n def __init__(self):\n \"\"\"Simply initializes the agent\"\"\"\n self.current_state = None\n\n def set_state(self, state_description):\n \"\"\"Defines the current state of the game from a state description dictionary provided by the Agent1 server\n\n Parameters:\n state_description (dict): the state description dictionary\n\n \"\"\"\n self.current_state = State(state_description[\"agent_id\"] == 0,\n state_description[\"agents\"][0],\n state_description[\"agents\"][1],\n state_description[\"round\"],\n None, # nx.Graph(),\n None,\n \"root\")\n\n def alpha_beta_search(self):\n \"\"\"Returns the action description string that corresponds to the best action the agent can execute, that is, to\n the action that leads to the outcome with the best utility for the agent, assuming the adversary wants to\n minimize it.\n This search is optimized using a technique called alpha-beta pruning, a technique that prevents the minimax\n algorithm from exploring outcomes that have no possible influence on the final decision\n\n Returns:\n (str): the action description string.\n \"\"\"\n a, v = self.current_state.max_value(-1000, 1000, \"stay\")\n\n if self.current_state.graph is not None:\n labels = nx.get_node_attributes(self.current_state.graph, \"value\")\n edge_labels = nx.get_edge_attributes(self.current_state.graph, \"action\")\n # print(edge_labels)\n plt.figure(figsize=(20, 20))\n\n pos = hierarchy_pos(self.current_state.graph, \"root\", width=2 * math.pi, xcenter=0)\n new_pos = {u: (r * math.cos(theta), r * 
math.sin(theta)) for u, (theta, r) in pos.items()}\n\n nx.draw(self.current_state.graph, new_pos, node_size=20, alpha=0.5, node_color=\"blue\", labels=labels)\n nx.draw_networkx_edge_labels(self.current_state.graph, new_pos, edge_labels=edge_labels)\n plt.axis(\"equal\")\n plt.show()\n\n return a\n\ndef parse_last_dict(bad_string):\n \"\"\"Returns the string that corresponds to the last open and closed curly brackets.\n This is necessary due to the way the server/client interaction works, so as to identify the last server response.\n Ideally, this would be unnecessary, but this had to be implemented due to time restrictions that prevented the group\n from exploring the software.\n\n Parameters:\n bad_string (str): the string to parse\n\n Returns:\n (str): the resulting good string\n\n \"\"\"\n return bad_string[bad_string.rindex(\"{\"):]\n\n\ndef main(rounds):\n \"\"\"Game loop. Creates two clients and cycles between them.\n The first client is the minimizing player, or the human,\n and so the program waits for user input and sends the corresponding action value pair to the server.\n The second\n client is the maximizing player, and so the program calculates the best possible decision based on the minimax algorithm and sends\n the corresponding action value pair to the server.\n\n Parameters:\n rounds (int): the number of game rounds\n\n Returns:\n None\n \"\"\"\n client_min = ct.Client('127.0.0.1', 50000)\n client_max = ct.Client('127.0.0.1', 50000)\n res_min = client_min.connect()\n res_max = client_max.connect()\n if all(res != -1 for res in (res_min, res_max)):\n\n agent = Agent()\n State.set_max_rounds(rounds)\n\n while True:\n command, action = input(\"Min > \").split(\" \")\n client_min.execute(command, action)\n\n state = ast.literal_eval(parse_last_dict(client_max.receiveData()))\n State.set_goal_positions(state[\"goals\"])\n State.set_obstacle_matrix(state[\"obstacles\"])\n agent.set_state(state)\n start = time.perf_counter()\n action = 
agent.alpha_beta_search()\n stop = time.perf_counter()\n print(\"Max > command\", action)\n print(\"Elapsed time:\", stop - start, \"Generated nodes:\", State.instances)\n State.instances = 0\n client_max.execute(\"command\", action)\n\n if agent.current_state.result(action).is_terminal():\n input(\"O jogo terminou.\")\n break\n\n\nif __name__ == \"__main__\":\n main(rounds=5)\n","sub_path":"client/alphabeta_oo.py","file_name":"alphabeta_oo.py","file_ext":"py","file_size_in_byte":15086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"436765993","text":"\"\"\"\nThis module provides signals, which are a simple dispatching system that allows any number of interested parties\nto subscribe to events (\"signals\").\n\nThis is similar to the Blinker library (https://pypi.org/project/blinker/), with the following changes:\n - provides only a small subset of Blinker's functionality\n - supports type hints\n - supports async receivers.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport inspect\nimport weakref\nfrom collections.abc import Awaitable\nfrom collections.abc import Callable\nfrom typing import Any\nfrom typing import cast\nfrom typing import Generic\nfrom typing import ParamSpec\nfrom typing import TypeVar\n\n\nP = ParamSpec(\"P\")\nR = TypeVar(\"R\")\n\n\ndef make_weak_ref(obj: Any) -> weakref.ReferenceType:\n \"\"\"\n Like weakref.ref(), but using weakref.WeakMethod for bound methods.\n \"\"\"\n if hasattr(obj, \"__self__\"):\n return cast(weakref.ref, weakref.WeakMethod(obj))\n else:\n return weakref.ref(obj)\n\n\n# We're running into https://github.com/python/mypy/issues/6073 here,\n# which is why the base class is a mixin and not a generic superclass.\nclass _SignalMixin:\n def __init__(self) -> None:\n self.receivers: list[weakref.ref[Callable]] = []\n\n def connect(self, receiver: Callable) -> None:\n \"\"\"\n Register a signal receiver.\n\n The signal will only hold a weak reference 
to the receiver function.\n \"\"\"\n receiver = make_weak_ref(receiver)\n self.receivers.append(receiver)\n\n def disconnect(self, receiver: Callable) -> None:\n self.receivers = [r for r in self.receivers if r() != receiver]\n\n def notify(self, *args, **kwargs):\n cleanup = False\n for ref in self.receivers:\n r = ref()\n if r is not None:\n yield r(*args, **kwargs)\n else:\n cleanup = True\n if cleanup:\n self.receivers = [r for r in self.receivers if r() is not None]\n\n\nclass _SyncSignal(Generic[P], _SignalMixin):\n def connect(self, receiver: Callable[P, None]) -> None:\n assert not asyncio.iscoroutinefunction(receiver)\n super().connect(receiver)\n\n def disconnect(self, receiver: Callable[P, None]) -> None:\n super().disconnect(receiver)\n\n def send(self, *args: P.args, **kwargs: P.kwargs) -> None:\n for ret in super().notify(*args, **kwargs):\n assert ret is None or not inspect.isawaitable(ret)\n\n\nclass _AsyncSignal(Generic[P], _SignalMixin):\n def connect(self, receiver: Callable[P, Awaitable[None] | None]) -> None:\n super().connect(receiver)\n\n def disconnect(self, receiver: Callable[P, Awaitable[None] | None]) -> None:\n super().disconnect(receiver)\n\n async def send(self, *args: P.args, **kwargs: P.kwargs) -> None:\n await asyncio.gather(\n *[\n aws\n for aws in super().notify(*args, **kwargs)\n if aws is not None and inspect.isawaitable(aws)\n ]\n )\n\n\n# noinspection PyPep8Naming\ndef SyncSignal(receiver_spec: Callable[P, None]) -> _SyncSignal[P]:\n \"\"\"\n Create a synchronous signal with the given function signature for receivers.\n\n Example:\n\n s = SyncSignal(lambda event: None) # all receivers must accept a single \"event\" argument.\n def receiver(event):\n print(event)\n\n s.connect(receiver)\n s.send(\"foo\") # prints foo\n s.send(event=\"bar\") # prints bar\n\n def receiver2():\n ...\n\n s.connect(receiver2) # mypy complains about receiver2 not having the right signature\n\n s2 = SyncSignal(lambda: None) # this signal has no 
arguments\n s2.send()\n \"\"\"\n return cast(_SyncSignal[P], _SyncSignal())\n\n\n# noinspection PyPep8Naming\ndef AsyncSignal(receiver_spec: Callable[P, Awaitable[None] | None]) -> _AsyncSignal[P]:\n \"\"\"\n Create an signal that supports both regular and async receivers:\n\n Example:\n\n s = AsyncSignal(lambda event: None)\n async def receiver(event):\n print(event)\n s.connect(receiver)\n await s.send(\"foo\") # prints foo\n \"\"\"\n return cast(_AsyncSignal[P], _AsyncSignal())\n","sub_path":"mitmproxy/utils/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"487566172","text":"#!/usr/bin/env python3\n\nimport sys\nimport nfc\n\n# PaSoRi RC-S380\n#PASORI_S380_PATH = 'usb:001:004' # usb:bus:device rerative\nPASORI_S380_PATH = 'usb:054c:06c3' # usb:vendorID abusolute identifier\n\ndef sc_from_raw(sc):\n return nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f)\n\ndef on_startup(targets):\n return targets\n\ndef on_connect(tag):\n print(\"[*] connected:\", tag)\n sc1 = sc_from_raw(0x200B)\n bc1 = nfc.tag.tt3.BlockCode(0, service=0)\n bc2 = nfc.tag.tt3.BlockCode(1, service=0)\n block_data = tag.read_without_encryption([sc1], [bc1, bc2])\n print(\"Student ID: \" + block_data[1:9].decode(\"utf-8\"))\n print(\"Shizudai ID: \" + block_data[24:32].decode(\"utf-8\"))\n return True\n\ndef on_release(tag):\n print(\"[*] released: \", tag)\n\ndef main(args):\n with nfc.ContactlessFrontend(PASORI_S380_PATH) as clf:\n while clf.connect(rdwr={\n 'on-startup': on_startup,\n 'on-connect': on_connect,\n 'on-release': on_release,\n }):\n pass\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"mysrc/copipe/orig_IDparser.py","file_name":"orig_IDparser.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26708869","text":"'''\nDados 2 arrays (assumindo 
def rotacao(lista1, lista2):
    """Return True if lista2 is a rotation of lista1 (no duplicate values assumed).

    A rotation has the same size and elements but a different (or equal)
    starting index. Runs in O(n): one pass to locate the anchor element,
    one pass to compare element by element.
    """
    if len(lista1) != len(lista2):
        return False
    if not lista1:
        return True  # two empty lists are trivially rotations of each other

    chave = lista1[0]
    try:
        # BUG FIX: the original treated "anchor found at index 0" (identical
        # lists, a valid rotation by 0) the same as "anchor not found" and
        # wrongly returned False for identical lists.
        indice_chave = lista2.index(chave)
    except ValueError:
        return False  # anchor element not present at all

    for x in range(len(lista1)):
        if lista1[x] != lista2[(indice_chave + x) % len(lista1)]:
            return False
    return True


def read_images(image_paths):
    """Load the images referenced by *image_paths* (array-like of relative
    paths) from the data/ directory into a (N, 160, 320, 3) array.

    NOTE(review): scipy.misc.imread was removed in scipy >= 1.2 — consider
    migrating to imageio.imread.
    """
    images = np.empty([image_paths.shape[0], 160, 320, 3])
    for i, path in enumerate(image_paths):
        images[i] = imread('data/' + path)
    return images


def preprocess(images):
    """Resize each image to the model input shape.

    NOTE(review): shape = (200, 66) means height=200 / width=66 here —
    confirm the intended orientation (the NVIDIA model uses 66x200).
    """
    shape = (200, 66, 3)
    height, width, channels = shape
    images_resized = np.empty([images.shape[0], height, width, channels])
    for i, img in enumerate(images):
        images_resized[i] = imresize(img, shape)
    return images_resized


def augment(images, angles):
    """Randomly mirror half of the images, negating the steering angle of
    each mirrored sample. Returns new (images, angles) arrays."""
    new_images = np.empty_like(images)
    new_angles = np.empty_like(angles)
    for i, (img, angle) in enumerate(zip(images, angles)):
        if np.random.choice(2):
            new_images[i] = np.fliplr(img)
            new_angles[i] = angle * -1
        else:
            new_images[i] = img
            new_angles[i] = angle
    return new_images, new_angles


def get_samples_per_epoch(array_size, batch_size):
    """Return a sample count divisible by *batch_size*, covering *array_size*."""
    num_batches = array_size / batch_size
    # return value must be a number that can be divided by batch_size
    samples_per_epoch = math.ceil((num_batches / batch_size) * batch_size)
    return samples_per_epoch * batch_size


def get_batch(images, angles, batch_size):
    """Generator yielding endless random (images, angles) training batches,
    sampled with replacement, loaded from disk and augmented on the fly."""
    samples = len(images)
    while True:
        selected = np.random.choice(samples, batch_size)
        images_batch = read_images(images[selected])
        angles_batch = angles[selected].astype(float)
        images_batch, angles_batch = augment(preprocess(images_batch), angles_batch)
        yield images_batch, angles_batch


feature_selected = ['Danceability',
                    'Energy',
                    'Speechiness',
                    'Acousticness',
                    'Instrumentalness',
                    'Liveness',
                    'Valence',
                    'Loudness',
                    'Tempo',
                    'Artist_Score']


def main():
    """Train a linear SVM on the selected audio features and report accuracy.

    Moved out of module scope so importing this file no longer reads the
    Excel files as a side effect.
    """
    # Local imports: pandas/sklearn are only needed when actually training.
    import pandas as pd
    from sklearn import svm

    train_set = pd.read_excel('./train_set/train.xlsx')
    Xtrain = np.array(train_set[feature_selected])
    Ytrain = np.array(train_set['label'], dtype=float)
    test_set = pd.read_excel('./test_set/test.xlsx')
    Xtest = np.array(test_set[feature_selected])
    Ytest = np.array(test_set['label'], dtype=float)

    clf = svm.SVC(kernel='linear')
    clf.fit(Xtrain, Ytrain)

    train_predict = clf.predict(Xtrain)
    print("Train accuracy:", (train_predict == Ytrain).mean())

    test_predict = clf.predict(Xtest)
    print("Test accuracy:", (test_predict == Ytest).mean())


if __name__ == "__main__":
    print(rotacao([1, 2, 3, 4, 5, 6, 7], [4, 5, 6, 7, 1, 2, 3]))
    print(rotacao([1, 2, 3, 4], [3, 4, 1, 2]))
    main()
@dataset_registry(dataset_type="bert", framework="pytorch", dataset_format='')
class PytorchBertDataset(Dataset):
    """PyTorch dataset used for the Bert model.

    This Dataset is constructed from a Bert TensorDataset and is not a full
    implementation from yaml config. The original repo link is:
    https://github.com/huggingface/transformers.
    When you want to use this Dataset, you should add it before you initialize
    your DataLoader.
    (TODO) add end-to-end support for easy config by yaml by adding the method of
    load examples and process method.

    Args: dataset (list): list of data.
        task (str): the task of the model, support "classifier", "squad".
        model_type (str, default='bert'): model type, support 'distilbert', 'bert',
            'xlnet', 'xlm'.
        transform (transform object, default=None): transform to process input data.
        filter (Filter objects, default=None): filter out examples according
            to specific conditions.

    Examples::

        dataset = [[
            [101,2043,2001],
            [1,1,1],
            [[0,0,0,0,0,0,0],
             [0,0,0,0,0,0,0],
             [0,0,0,0,0,0,0]],
            [1,1,1],
            [1,1,1],
            [[0,0,0,0,0,0,0],
             [0,0,0,0,0,0,0],
             [0,0,0,0,0,0,0]]
        ]]
        dataset = PytorchBertDataset(dataset=dataset, task='classifier', model_type='bert',
                                     transform=preprocess, filter=filter)
    """

    def __init__(self, dataset, task, model_type='bert', transform=None, filter=None):
        """Initialize the attributes of class."""
        self.dataset = dataset
        # __getitem__ only understands the classification and extractive-QA layouts.
        assert task in ("classifier", "squad"), "Bert task support only classifier squad"
        self.task = task
        self.transform = transform
        self.model_type = model_type

    def __len__(self):
        """Length of the dataset."""
        return len(self.dataset)

    def __getitem__(self, index):
        """Magic method.

        x[i] is roughly equivalent to type(x).__getitem__(x, index).
        Repackages a raw sample row into the (inputs dict, target) pair the
        model expects; the dict keys depend on task and model_type.
        """
        sample = self.dataset[index]
        if self.transform is not None:
            sample = self.transform(sample)
        if self.task == 'classifier':
            inputs = {
                'input_ids': sample[0],
                'attention_mask': sample[1],
                'labels': sample[3]}

            if self.model_type != 'distilbert':
                # XLM, DistilBERT and RoBERTa don't use segment_ids
                if self.model_type in ['bert', 'xlnet']:
                    inputs['token_type_ids'] = sample[2]
            sample = (inputs, inputs['labels'])

        elif self.task == 'squad':
            inputs = {
                'input_ids': sample[0],
                'attention_mask': sample[1], }
            if self.model_type != 'distilbert':
                # XLM, DistilBERT and RoBERTa don't use segment_ids
                inputs['token_type_ids'] = sample[2] if self.model_type in [
                    'bert', 'xlnet'] else None
            if self.model_type in ['xlnet', 'xlm']:
                # XLNet/XLM additionally need cls_index and p_mask for SQuAD.
                inputs.update({'cls_index': sample[4], 'p_mask': sample[5]})
            example_indices = sample[3]
            sample = (inputs, example_indices)
        return sample


@dataset_registry(dataset_type="GLUE", framework="onnxrt_qlinearops, \
                  onnxrt_integerops", dataset_format='')
class ONNXRTBertDataset(Dataset):
    """ONNXRT dataset used for the Bert model.

    Args: data_dir (str): The input data dir.
        model_name_or_path (str): Path to pre-trained student model or shortcut name.
        max_seq_length (int, default=128): The maximum length after tokenization.
            Sequences longer than this will be truncated,
            sequences shorter will be padded.
        do_lower_case (bool, default=True): Whether to lowercase the input when tokenizing.
        task (str, default=mrpc): The name of the task to fine-tune.
            Choices include mrpc, qqp, qnli, rte, sts-b, cola, mnli, wnli.
        model_type (str, default='bert'): model type, support 'distilbert', 'bert',
            'mobilebert', 'roberta'.
        dynamic_length (bool, default=False): Whether to use fixed sequence length.
        evaluate (bool, default=True): Whether do evaluation or training.
        transform (transform object, default=None): transform to process input data.
        filter (Filter objects, default=None): filter out examples according
            to specific conditions.

    Examples::

        dataset = ONNXRTBertDataset(data_dir=data_dir, model_name_or_path='bert-base-uncase',
                                    transform=preprocess, filter=filter)
    """

    def __init__(self, data_dir, model_name_or_path, max_seq_length=128,\
                 do_lower_case=True, task='mrpc', model_type='bert', dynamic_length=False,\
                 evaluate=True, transform=None, filter=None):
        """Initialize the attributes of class."""
        task = task.lower()
        model_type = model_type.lower()
        assert task in ['mrpc', 'qqp', 'qnli', 'rte', 'sts-b', 'cola', \
            'mnli', 'wnli'], 'Unsupported task type'
        assert model_type in ['distilbert', 'bert', 'mobilebert', 'roberta'], 'Unsupported \
            model type'

        self.dynamic_length = dynamic_length
        self.model_type = model_type
        self.max_seq_length = max_seq_length
        # The tokenizer matching the pretrained checkpoint is required to build features.
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path,
                                                               do_lower_case=do_lower_case)
        self.dataset = load_and_cache_examples(data_dir, model_name_or_path, \
            max_seq_length, task, model_type, tokenizer, evaluate)

    def __len__(self):
        """Length of the dataset."""
        return len(self.dataset)

    def __getitem__(self, index):
        """Magic method.

        x[i] is roughly equivalent to type(x).__getitem__(x, index).
        """
        return self.dataset[index]
def load_and_cache_examples(data_dir, model_name_or_path, max_seq_length, task, \
                            model_type, tokenizer, evaluate):
    """Load and cache the examples.

    Helper Function for ONNXRTBertDataset: builds (or reloads from the
    ./dataset_cached directory) the tokenized GLUE features and packs them
    into a torch TensorDataset.
    """
    from torch.utils.data import TensorDataset

    processor = transformers.glue_processors[task]()
    output_mode = transformers.glue_output_modes[task]
    # Load data features from cache or dataset file
    if not os.path.exists("./dataset_cached"):
        os.makedirs("./dataset_cached")
    # One cache file per (split, model, sequence length, task) combination.
    cached_features_file = os.path.join("./dataset_cached", 'cached_{}_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, model_name_or_path.split('/'))).pop(),
        str(max_seq_length),
        str(task)))
    if os.path.exists(cached_features_file):
        logger.info("Load features from cached file {}.".format(cached_features_file))
        features = torch.load(cached_features_file)
    else:
        logger.info("Create features from dataset file at {}.".format(data_dir))
        label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and model_type in ['roberta']:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = processor.get_dev_examples(data_dir) if evaluate else \
            processor.get_train_examples(data_dir)
        features = convert_examples_to_features(examples,
                                                tokenizer,
                                                task=task,
                                                label_list=label_list,
                                                max_length=max_seq_length,
                                                output_mode=output_mode,
                                                )
        logger.info("Save features into cached file {}.".format(cached_features_file))
        torch.save(features, cached_features_file)
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, \
        all_seq_lengths, all_labels)
    return dataset


def convert_examples_to_features(
    examples,
    tokenizer,
    max_length=128,
    task=None,
    label_list=None,
    output_mode="classification",
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
):
    """Convert examples to features.

    Helper function for load_and_cache_examples: tokenizes each example pair,
    pads/truncates to *max_length*, and records the pre-padding sequence
    length so dynamic-length inference remains possible.
    """
    processor = transformers.glue_processors[task]()
    if label_list is None:
        label_list = processor.get_labels()
        logger.info("Use label list {} for task {}.".format(label_list, task))
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        inputs = tokenizer.encode_plus(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            return_token_type_ids=True,
            truncation=True,
        )
        input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        seq_length = len(input_ids)
        padding_length = max_length - len(input_ids)

        input_ids = input_ids + ([pad_token] * padding_length)
        attention_mask = attention_mask + \
            ([0 if mask_padding_with_zero else 1] * padding_length)
        token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_length, \
            "Error with input_ids length {} vs {}".format(
                len(input_ids), max_length)
        assert len(attention_mask) == max_length, \
            "Error with attention_mask length {} vs {}".format(
                len(attention_mask), max_length
            )
        assert len(token_type_ids) == max_length, \
            "Error with token_type_ids length {} vs {}".format(
                len(token_type_ids), max_length
            )
        if output_mode == "classification":
            label = label_map[example.label]
        elif output_mode == "regression":
            label = float(example.label)
        else:
            raise KeyError(output_mode)

        feats = InputFeatures(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            label=label,
            seq_length=seq_length,
        )
        features.append(feats)
    return features
@dataclass(frozen=True)
class InputFeatures:
    """Single set of features of data.

    Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED,
            ``0`` for MASKED (padded) tokens.
        token_type_ids: (Optional) Segment token indices to indicate first and second
            portions of the inputs. Only some models use them.
        label: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems.
        seq_length: (Optional) The length of input sequence before padding.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    seq_length: Optional[List[int]] = None

    def to_json_string(self):
        """Serialize this instance to a JSON string (newline-terminated)."""
        return json.dumps(dataclasses.asdict(self)) + "\n"


@dataset_registry(dataset_type="bert", framework="tensorflow, tensorflow_itex", dataset_format='')
class TensorflowBertDataset(Dataset):
    """Tensorflow dataset used for the Bert model.

    This dataset supports tfrecord data; please refer to the Guide to create
    a tfrecord file first.

    Args: root (str): path of dataset.
        label_file (str): path of label file.
        task (str, default='squad'): task type of model.
        model_type (str, default='bert'): model type, support 'bert'.
        transform (transform object, default=None): transform to process input data.
        filter (Filter objects, default=None): filter out examples according
            to specific conditions
    """

    def __init__(self, root, label_file, task='squad',
                 model_type='bert', transform=None, filter=None):
        """Initialize the attributes of class."""
        import json
        with open(label_file) as lf:
            label_json = json.load(lf)
        # Only the SQuAD 1.1 label layout is understood.
        assert label_json['version'] == '1.1', 'only support squad 1.1'
        self.label = label_json['data']
        self.root = root
        self.transform = transform
        self.filter = filter

    def __getitem__(self, index):
        """Magic method.

        x[i] is roughly equivalent to type(x).__getitem__(x, index).
        Always returns the (tfrecord path, labels) pair; indexing is nominal.
        """
        return self.root, self.label

    def __len__(self):
        """Length of the dataset (a single tfrecord path/label pair)."""
        return 1


class ParseDecodeBert():
    """Helper function for TensorflowModelZooBertDataset.

    Parse the features from a serialized tf.Example sample.
    """

    def __call__(self, sample):
        """Parse the sample data.

        Args:
            sample: serialized tf.Example to be parsed.

        Returns:
            (input_ids, input_mask, segment_ids) dense value tensors.
        """
        import tensorflow as tf
        # Dense features in Example proto.
        feature_map = {
            'input_ids':
                tf.compat.v1.VarLenFeature(dtype=tf.int64),
            'input_mask':
                tf.compat.v1.VarLenFeature(dtype=tf.int64),
            'segment_ids':
                tf.compat.v1.VarLenFeature(dtype=tf.int64),
        }

        features = tf.io.parse_single_example(sample, feature_map)

        input_ids = features['input_ids'].values
        input_mask = features['input_mask'].values
        segment_ids = features['segment_ids'].values

        return (input_ids, input_mask, segment_ids)


@dataset_registry(dataset_type="mzbert", framework="tensorflow, tensorflow_itex", dataset_format='')
class TensorflowModelZooBertDataset(Dataset):
    """Tensorflow dataset for three-input Bert in tf record format.

    Root is a full path to the tfrecord file, which contains the file name.
    Please use the Resize transform when batch_size > 1.
    Args: root (str): path of dataset.
        label_file (str): path of label file.
        task (str, default='squad'): task type of model.
        model_type (str, default='bert'): model type, support 'bert'.
        transform (transform object, default=None): transform to process input data.
        filter (Filter objects, default=None): filter out examples according.
    """

    def __init__(self, root, label_file, task='squad',
                 model_type='bert', transform=None, filter=None, num_cores=28):
        """Initialize the attributes of class."""
        import json
        with open(label_file) as lf:
            label_json = json.load(lf)
        assert label_json['version'] == '1.1', 'only support squad 1.1'
        self.label = label_json['data']
        import tensorflow as tf
        # Inspect the first record to validate the expected three-feature layout.
        record_iterator = tf.compat.v1.python_io.tf_record_iterator(root)
        example = tf.train.SequenceExample()
        for element in record_iterator:
            example.ParseFromString(element)
            break
        feature = example.context.feature
        if len(feature['input_ids'].int64_list.value) == 0 \
                and len(feature['input_mask'].int64_list.value) == 0:
            raise ValueError("Tfrecord format is incorrect, please refer\
               'https://github.com/tensorflow/models/blob/master/research/\
               object_detection/dataset_tools/' to create correct tfrecord")
        # pylint: disable=no-name-in-module
        from tensorflow.python.data.experimental import parallel_interleave
        tfrecord_paths = [root]
        ds = tf.data.TFRecordDataset.list_files(tfrecord_paths)
        ds = ds.apply(
            parallel_interleave(tf.data.TFRecordDataset,
                                cycle_length=num_cores,
                                block_length=5,
                                sloppy=True,
                                buffer_output_elements=10000,
                                prefetch_input_elements=10000))
        if transform is not None:
            # Decoding must run before any user-supplied transforms.
            transform.transform_list.insert(0, ParseDecodeBert())
        else:
            transform = ParseDecodeBert()
        ds = ds.map(transform, num_parallel_calls=None)
        if filter is not None:
            ds = ds.filter(filter)
        ds = ds.prefetch(buffer_size=1000)
        from ..dataloaders.tensorflow_dataloader import TFDataDataLoader
        ds = TFDataDataLoader(ds)
        # Materialize the whole pipeline eagerly; samples are then served from memory.
        self.root = []
        for inputs in ds:
            self.root.append(inputs)
        self.transform = transform
        self.filter = filter

    def __getitem__(self, index):
        """Magic method.

        x[i] is roughly equivalent to type(x).__getitem__(x, index).
        """
        return self.root[index], self.label

    def __len__(self):
        """Length of the dataset."""
        return len(self.root)
request.POST['phone']\n message = request.POST['message']\n id_user = request.POST['id_user']\n realtor_email = request.POST['realtor_email']\n\n if request.user.is_authenticated:\n id_user = request.user.id\n has_contactred = Contacts.objects.all().filter(id_user=id_user,listing_id=listing_id)\n if has_contactred:\n messages.error(request,'You have alredy made')\n return redirect('listing')\n\n\n contact =Contacts(listing=listing,listing_id=listing_id,name=name,phone=phone,email=email,message=message,id_user=id_user,realtor_email=realtor_email)\n\n contact.save()\n send_mail('property','there aye','richard.black96@mail.ru',[realtor_email,'techgueinfo@mail.ru'],fail_silently=False)\n messages.success(request,'Yourrequest has beensubmited')\n return redirect('listings')\n","sub_path":"btre/contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622577438","text":"from pygame.sprite import Group\r\nimport pygame, sys\r\nimport game_function as g_f\r\nfrom background import Background\r\nfrom ship import Ship\r\nfrom settings import Settings\r\ndef init_game():\r\n pygame.init()\r\n game_settings = Settings()\r\n screan = pygame.display.set_mode((game_settings.screan_width, game_settings.screan_height))\r\n ship=Ship(screan)\r\n bullets=Group()\r\n aliens=Group()\r\n g_f.create_fleet(game_settings,screan,aliens,ship)\r\n background=Background(screan)\r\n\r\n pygame.display.set_caption(\"Dota 3\")\r\n while True:\r\n g_f.check_events(game_settings,screan,ship,bullets)\r\n g_f.update_screan(background,ship,bullets,aliens)\r\n ship.update()\r\n bullets.update()\r\n\r\n for bullet in bullets.copy():\r\n if bullet.rect.bottom<=0:\r\n 
bullets.remove(bullet)\r\n\r\ninit_game()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"362964109","text":"__author__ = \"Paul Schultz, Jobst Heitzig\"\n__date__ = \"Dec 15, 2016\"\n__version__ = \"v2.2\"\n\n# This file is based on the network creation algorithm published in:\n#\n# A Random Growth Model for Power Grids and Other Spatially Embedded Infrastructure Networks\n# Paul Schultz, Jobst Heitzig, and Juergen Kurths\n# Eur. Phys. J. Special Topics on \"Resilient power grids and extreme events\" (2014)\n# DOI: 10.1140/epjst/e2014-02279-6\n#\n\n\n# TODOs:\n# reparamterize ns\n\n\nimport numpy as np\nimport scipy.spatial.distance as sp\nfrom igraph import Graph\nfrom rtree.index import Index as rtree # install via conda install --channel https://conda.anaconda.org/IOOS rtree\n\n\nclass RpgAlgorithm(object):\n\n\n def __init__(self, L):\n\n # parameters for the algorithm\n self.L = L\n\n self.w = [.945,.05,.005] # JH: list of relative frequencies of nodes by level n_\\phi^l = w^l * n_\\phi\n self.n = [100,100,99800] #[2000,2000,60] this is basically n_\\phi summed over all levels\n self.n0 = [100,100,100] #[1250,250,50]\n self.p = [0,.1,.3]\n self.q = [0,.075,.075]\n self.s = [.2,.05,.0]\n self.r = [0., 0.7, 1.4]\n self.u = [0., 2., 2.]\n\n self.sampling = \"clotty\"\n self.alpha = 0.5\n self.beta = 0.95\n self.gamma = 0.5\n\n # counters\n self.levnodes = [[] for l in range(self.L)] # list of nodes in level\n self.cumnodes = [[] for l in range(self.L)] # list of nodes in level or higher\n self.added_nodes = [0 for l in range(self.L)]\n self.added_edges = [0 for l in range(self.L)]\n self.noffset = 0 # total no. 
nodes added so far\n\n # node coordinates\n self.lon = []\n self.lat = []\n self.lev = [] # level of node\n self.density = [] # distance to closest\n\n # CHANGE WITH CAUTION!\n self.distance_measure = \"euclidean\"\n self.debug = False\n\n \n def __str__(self):\n print(\"----------\")\n #print self.graph.num_vertices(), \"nodes and\", self.graph.num_edges(), \"edges\"\n for attr in vars(self):\n if attr in [\"identifier\", \"added_nodes\", \"n\", \"n0\", \"p\", \"q\", \"r\", \"s\", \"L\", \"w\"]:\n print((attr, \":\", str(getattr(self, attr))))\n return \"----------\"\n\n\n ###############################################################################\n # ## PUBLIC FUNCTIONS ## #\n ###############################################################################\n\n\n def set_params(self, **kwargs):\n for key in kwargs:\n if not hasattr(self, key):\n print((\"ERROR: There is no parameter called:\", key))\n print(\"Possible choices: n,n0,p,q,r,s,u\")\n continue\n else:\n if self._validation(key, kwargs[key]):\n setattr(self, key, kwargs[key])\n else:\n print((\"ERROR: invalid parameter value for\", key, kwargs[key]))\n\n\n def prepare(self, data=None):\n \"\"\"this should be called after set_params\"\"\"\n self.totaln = np.sum(self.n)\n\n self.setup_locations(sampling=self.sampling, locations=data)\n\n self.mst_edges = [None for l in range(self.L)]\n self.init_edges = [None for l in range(self.L)]\n\n #TODO: find way to dynamically add nodes without index problems instead of having a Graph of size totaln\n # see also the comment in _update_graphs()\n\n self.levgraph = [Graph(self.totaln) for l in range(self.L)] # one graph per level, storing only that level's edges\n self.cumgraph = [Graph(self.totaln) for l in range(self.L)] # one graph per level, storing that and all higher levels' edges\n\n self.levrtree = [None for l in range(self.L)] # one RTree per level, storing coordinates of level's nodes\n self.cumrtree = [None for l in range(self.L)] # one RTree per level, 
storing coordinates of this and higher level's nodes\n\n\n def initialise(self, l): # JH: l = level to initialise\n assert self.n[l] >= self.n0[l]\n\n # step I1: draw random locations from density and add nodes\n #######################################################\n self._get_locations(l, self.noffset, self.noffset + self.n0[l], init=True)\n\n # step I2: construct minimum spanning tree\n ##########################################\n edge_mask = self._initial_mst(l) # will store only nodes and edges on this level\n\n if np.sum(self.u) > 0:\n # extend and recompute density list:\n self.density += [0 for i in range(self.n0[l])]\n for node in range(self.noffset):\n ds = self._get_distances([node], list(range(self.noffset)))[0, :]\n ds[node] = 1e100\n self.density[node] = (1. / ds**2).sum()\n\n if self.debug:\n print((\"I2\", edge_mask))\n\n # step I3: add redundant links\n ##############################\n # CAUTION: logic has changed from original version! now it's simply the same as step G4, i.e., a little less optimal!\n \n m = min(int(np.floor(self.n0[l] * (1 - self.s[l]) * (self.p[l] + self.q[l]))), self.n0[l] * (self.n0[l] - 1) / 2 - (self.n0[l] - 1))\n\n for dummy in range(m):\n self._G34(l, np.random.choice(self.levnodes[l]))\n\n # assert self.added_edges[l] == (len(self.adjacency[l].keys()) / 2)\n assert self.added_edges[l] == self.levgraph[l].ecount()\n assert self.added_nodes[l] == len(self.levnodes[l])\n\n # label initial edges\n self.init_edges[l] = self.levgraph[l].get_edgelist()\n\n # step I4: add one link from first of level l added to nearest of level l-1:\n ############################################################################\n if l>0:\n node = self.noffset - self.n0[l]\n lat,lon = self.lat[node],self.lon[node]\n target = list(self.levrtree[l-1].nearest((lat,lon,lat,lon),1))[0]\n\n # update graphs:\n d = self._get_distances([node], [target])[0, 0]\n self._update_graphs(l-1, edges=[(target, node)], weights=[d])\n\n if self.debug:\n 
print((\"I4\", (node, target)))\n \n \n def grow(self, lmax):\n \"\"\"adds total no. of n[lmax] nodes to levels 0 <= l <= lmax\"\"\"\n\n new_nodes = list(range(self.noffset, self.n[lmax] - self.n0[lmax] + self.noffset))\n\n # draw level for node:\n # TODO: think about w, do we really need it? use n_phi^l instead?\n levels = np.random.choice(list(range(lmax + 1)), p=self.w[:lmax + 1] / np.sum(self.w[:lmax + 1]), size=len(new_nodes))\n #levels = np.repeat(range(lmax+1), repeats=self.n[:lmax+1])\n #np.random.shuffle(levels)\n\n\n\n for l, node in zip(levels, new_nodes):\n\n self.lev.append(l)\n\n # register new node\n self._update_graphs(l, nodes=[node])\n\n if self.debug:\n print(\"---------\")\n print((\"adding node\", node, \"to level\", l))\n\n # step G5: split random link at midpoint\n ########################################\n if (np.random.random() < self.s[l]) and self.levgraph[l].ecount() > 0:\n self._G5(l, node)\n\n else:\n # step G2: link to nearest\n ##########################\n self._G2(l, node)\n\n # step G3: add optimal redundant link to node\n #############################################\n if np.random.random() < self.p[l]:\n self._G34(l, node)\n\n # step G4: add another optimal redundant link to random node\n ############################################################\n if np.random.random() < self.q[l]:\n self._G34(l, np.random.choice(self.levnodes[l]))\n\n\n def cleanup(self):\n \"\"\" remove objects from memory\"\"\"\n del self.levrtree\n del self.cumrtree\n for level in range(self.L):\n del self.levgraph[level]\n del self.cumgraph[level]\n\n\n def setup_locations(self, sampling=\"uniform\", locations = None, centre=None, boundaries=None):\n \"\"\"\n setup function that returns locations, either randomly or from data\n :param sampling:\n :param locations:\n :param centre:\n :param boundaries:\n :return:\n \"\"\"\n if locations is not None:\n assert len(locations) == np.sum(self.n[:self.L])\n self.locations = locations\n self.counter = 0\n\n 
self.sampling = sampling\n self.centre = centre\n self.boundaries = boundaries\n\n # JH: docstring says this returns locations, but it returns nothing??\n \n\n ###############################################################################\n # ## PRIVATE FUNCTIONS ## #\n ###############################################################################\n\n\n def _get_coords(self, sampling=None, centre=None, boundaries=None):\n\n if sampling is not None:\n # override default sampling method\n self.sampling = sampling\n\n if self.sampling == \"uniform\":\n return self._uniformunitsquare(centre, boundaries)\n elif self.sampling == \"data\":\n pos0 = np.array(self.locations[self.counter])\n self.counter += 1\n pos1 = self.alpha * pos0 + (1 - self.alpha) * np.random.uniform(low=-1, high=1, size=2)\n pos2 = self.beta * pos0 + (1 - self.beta) * np.random.uniform(low=-.5, high=.5, size=2)\n return tuple(pos1 if np.random.random() < self.gamma else pos2)\n elif self.sampling == \"clotty\":\n l = len(self.lat)\n if l==0: return (0,0)\n i = np.random.choice(list(range(l)))\n pos0 = np.array([self.lat[i], self.lon[i]])\n pos1 = self.alpha * pos0 + (1 - self.alpha) * np.random.uniform(low=-1,high=1,size=2)\n pos2 = self.beta * pos0 + (1 - self.beta) * np.random.uniform(low=-.5, high=.5, size=2)\n return tuple(pos1 if np.random.random() < self.gamma else pos2)\n else:\n print(\"ERROR: Not implemented yet.\")\n exit(1)\n\n\n def _get_distances(self, sources, targets):\n \"\"\"\n return array of distances from nodes \"sources\" to list of nodes \"targets\"\n \"\"\"\n x = np.c_[np.array(self.lon)[sources], np.array(self.lat)[sources]]\n y = np.c_[np.array(self.lon)[targets], np.array(self.lat)[targets]]\n return sp.cdist(x, y, metric=self.distance_measure)\n \n\n def _uniformunitsquare(self, centre=None, boundaries=None):\n \"\"\"\n return point drawn uniformly at random\n\n :param centre: centre distribution around this point\n :param boundaries: array containing [width, height]\n 
:return: coordinate tuple\n \"\"\"\n\n if centre is None:\n centre = -1.\n if boundaries is None:\n boundaries = -1.\n\n return (.5 - np.random.uniform(size=2)) * np.array(boundaries) + np.array(centre)\n\n\n def _G2(self, l, node):\n # only now get one new location and nearest earlier node:\n target = self._get_locations(l, node, node+1)\n\n if target is not None:\n # update graphs:\n d = self._get_distances([node], [target])[0, 0]\n self._update_graphs(l, edges=[(target, node)], weights=[d])\n\n if self.debug:\n print((\"G2\", (node, target)))\n\n # update density:\n if np.sum(self.u) > 0:\n ds = self._get_distances([node], list(range(node)))[0, :]\n self.density = list(np.array(self.density) + 1. / ds**2) \n self.density.append((1. / ds**2).sum())\n \n\n def _G34(self, l, node):\n targets = list(set(self.cumnodes[l]).difference(self.cumgraph[l].neighbors(node)).difference([node]))\n if len(targets):\n dists = self._get_distances([node], targets)[0, :]\n prices = dists \n if self.r[l]>0: prices /= (dists + self.cumgraph[l].shortest_paths_dijkstra(node, targets)[0])**self.r[l]\n if self.u[l]>0: prices /= np.array(self.density)[targets] ** self.u[l]\n best = np.argmin(prices)\n a, b = self._s((targets[best], node))\n\n # update graphs:\n d = dists[best]\n self._update_graphs(l, edges=[(a, b)], weights=[d])\n\n if self.debug:\n print((\"G3/4\", (a, b)))\n\n\n def _G5(self, l, node):\n # choose link at random:\n elist = self.levgraph[l].get_edgelist()\n a, b = elist[np.random.choice(list(range(len(elist))))]\n\n # NOTE: CHANGED BEHAVIOUR: now split somewhere, not in middle:\n pos = np.random.random() # 0:a, 1:b\n\n # add node at midpoint and calc distances:\n lat = (1 - pos) * self.lat[a] + pos * self.lat[b]\n lon = (1 - pos) * self.lon[a] + pos * self.lon[b]\n self.lat.append(lat)\n self.lon.append(lon)\n\n # update graphs and rtrees:\n\n eid = self.levgraph[l].get_eid(a, b)\n d = self.levgraph[l].es[\"weight\"][eid]\n\n self.levrtree[l].insert(node, (lat, lon, lat, 
lon))\n for l2 in range(l + 1):\n self.cumrtree[l2].insert(node, (lat, lon, lat, lon))\n\n self._update_graphs(l, edges=[(a, b)], delete_edges=True)\n self._update_graphs(l, edges=[(a, node), (b, node)], weights=[pos * d, (1 - pos) * d])\n\n # update density:\n if np.sum(self.u) > 0:\n ds = self._get_distances([node], list(range(node)))[0, :]\n self.density = list(np.array(self.density) + 1. / ds**2) \n self.density.append((1. / ds**2).sum())\n\n if self.debug:\n print((\"G5\", (int(a), int(b))))\n\n\n def _validation(self, attr, value):\n value = np.array(value)\n if attr == \"n0\" or attr == \"n\":\n if any(value < 1):\n return False\n else:\n return True\n elif attr in [\"r\", \"u\"]:\n if any(value < 0):\n return False\n else:\n return True\n elif attr in [\"p\", \"q\", \"s\", \"w\"]:\n if any(value < 0) or any(value > 1):\n return False\n else:\n return True\n elif attr == \"L\":\n if value < 1:\n return False\n else:\n return True\n elif attr in [\"alpha\", \"beta\", \"gamma\"]:\n if value < 0 or value > 1:\n return False\n else:\n return True\n\n\n def _initial_mst(self, l):\n\n self.lev += [l for i in range(self.n0[l])]\n nodes = list(range(self.noffset, self.noffset+self.n0[l]))\n self.mst_edges[l] = elist = self._get_mst(l)\n self._update_graphs(l, nodes=nodes, edges=elist)\n\n return elist\n\n\n def _get_mst(self, l):\n nodes = list(range(self.noffset, self.noffset + self.n0[l]))\n distmatrix = self._get_distances(nodes, nodes)\n full_graph = Graph.Full(self.n0[l])\n factor = 1e5 # since small weights lead to MST problems\n weights = [factor * distmatrix[i,j] for (i,j) in full_graph.get_edgelist()]\n G = full_graph.spanning_tree(weights).as_undirected()\n return [self._s((i+self.noffset,j+self.noffset)) for (i,j) in G.get_edgelist()]\n\n\n def _get_locations(self, l, offset, _m, init=False):\n m = int(_m)\n poss = np.zeros((m,2))\n for i in range(offset, m):\n poss[i,:] = pos = self._get_coords(self.sampling, self.centre, self.boundaries)\n 
self.lat.append(pos[0])\n self.lon.append(pos[1])\n # update earlier rtree spatial indices:\n for l2 in range(l):\n self.cumrtree[l2].insert(i, (pos[0],pos[1],pos[0],pos[1]))\n if not init: # otherwise en bulk (below)\n nearest = list(self.cumrtree[l].nearest((pos[0],pos[1],pos[0],pos[1]),1))[0] if m > 0 else None # query before adding!\n self.levrtree[l].insert(i, (pos[0],pos[1],pos[0],pos[1]))\n self.cumrtree[l].insert(i, (pos[0],pos[1],pos[0],pos[1])) \n# self._update_distance(offset, m, m)\n if init: # bulk insert: # TODO: CAUTION: must only be used at initialization of level!\n # set up additional rtree spatial indices:\n def f():\n for i in range(offset, m):\n yield (i, (poss[i,0],poss[i,1],poss[i,0],poss[i,1]),None)\n self.levrtree[l] = lrt = rtree(f())\n self.cumrtree[l] = crt = rtree(f()) # sadly, rtrees cannot be cloned yet\n else:\n return nearest\n\n\n def _update_counters(self, level, nodes=0, edges=0):\n self.added_nodes[level] += nodes\n self.noffset += nodes\n self.added_edges[level] += edges\n\n\n def _update_graphs(self, level, nodes=[], edges=[], weights=[], delete_edges=False):\n if delete_edges:\n eid = self.levgraph[level].get_eids(edges)\n self.levgraph[level].delete_edges(eid)\n\n for l in range(level + 1):\n eid = self.cumgraph[l].get_eids(edges)\n self.cumgraph[l].delete_edges(eid)\n\n self._update_counters(level, edges=-len(edges))\n else:\n if nodes:\n # PS: this is not necessary, as all Graphs are created with size totaln.\n # otherwise, the difference between index and name is going to cause many problems\n # self.levgraph[level].add_vertices(nodes)\n # for l in range(level + 1):\n # self.cumgraph[l].add_vertices(nodes)\n\n self.levnodes[level].extend(nodes)\n for l in range(level + 1):\n self.cumnodes[l].extend(nodes)\n\n if edges:\n if not weights:\n weights = [self._get_distances([i],[j])[0,0] for (i,j) in edges]\n\n for idx, (i, j) in enumerate(edges):\n # level graphs do not contain links between levels,\n #if self.lev[i] == 
self.lev[j]:\n self.levgraph[level].add_edge(i, j, weight=weights[idx])\n\n for l in range(level + 1):\n self.cumgraph[l].add_edge(i, j, weight=weights[idx])\n\n self._update_counters(level, nodes=len(nodes), edges=len(edges))\n\n\n def _s(self, tuple):\n if tuple[0] < tuple[1]:\n return tuple\n else:\n return (tuple[1], tuple[0])\n\n\n#######################################################################################################################\n#######################################################################################################################\n#######################################################################################################################\n\n\ndef calc(name=\"test\", debug=False, layer_plots=True):\n\n #np.random.seed(0)\n\n # initialise algorithm\n g = RpgAlgorithm(L=3)\n assert(isinstance(g, RpgAlgorithm))\n\n # for detailed output set \n g.debug = debug\n\n# branching = np.array([6084.,84.,2.])\n\n # set desired parameters and perform algorithm\n# g.set_params(n=[100,50,9850],\n# n0=[100,50,25],\n# w=[.945,.05,.005],\n# p=[0,.1,.3],\n# q=[0, .075, .075],\n# r=[0., 0.75, 1.5],\n# s=[.2, .05, .0],\n# u=[0.,.05,.1],\n# gamma=0.95\n# )\n\n g.set_params(n=np.array([500,250,49250], dtype=int)/50,\n n0=np.array([500,250,100], dtype=int)/50,\n w=[.945,.05,.005],\n p=[0,.1,.3],\n q=[0, .075, .075],\n r=[0., 0.75, 1.5],\n s=[.2, .05, .0],\n u=[0.,.05,.1],\n beta=0.95\n )\n # use predefined locations ...\n # g.setup_locations(sampling=\"data\", locations=np.random.random([g.n, 2]))\n\n g.prepare()\n for l in range(g.L):\n g.initialise(l)\n g.grow(l)\n\n print(g)\n print(np.array(np.triu(np.tensordot(np.array(g.w), np.array(g.n), axes=0)), dtype=np.int))\n\n # if layer_plots:\n # for b in range(g.L):\n # F = g.levgraph[b].copy()\n # elist = np.array(F.get_edgelist())\n # F.es['level'] = map(lambda (a, b): min(g.lev[a], g.lev[b]), elist)\n # F.vs[\"level\"] = g.lev\n # F.vs[\"lat\"] = g.lat\n # F.vs[\"lon\"] = g.lon\n # 
F.vs[\"density\"] = g.density\n # plot(G=F, name=\"output_layer\"+str(b+1), groups=True)\n # del F\n\n G = g.cumgraph[0].copy()\n elist = np.array(G.get_edgelist())\n G.es['level'] = [min(g.lev[a_b[0]], g.lev[a_b[1]]) for a_b in elist]\n G.vs[\"level\"] = g.lev\n G.vs[\"lat\"] = g.lat\n G.vs[\"lon\"] = g.lon\n G.vs[\"density\"] = g.density\n\n G.write_pickle(name)\n G.write_graphml(name + \".graphml\")\n\n return G\n\n\ndef plot(G=None, name=\"output\", groups=False):\n\n if G is None:\n G = Graph.Read_Pickle(name)\n\n cols = {0: \"grey\", 1: \"blue\", 2: \"red\"}\n weights = {0: 1, 1: 1.5, 2: 2}\n sizes = weights\n\n G.vs['color'] = [cols[y] for y in G.vs[\"level\"]]\n G.es['color'] = [cols[y] for y in G.es[\"level\"]]\n G.es['width'] = [10. * weights[y] for y in G.es[\"level\"]]\n G.vs['size'] = [20. * sizes[y] for y in G.vs[\"level\"]]\n\n\n print((\"connected graph:\", G.is_connected()))\n\n from igraph import plot, Layout\n l = [(xy[0], xy[1]) for xy in np.array([G.vs[\"lat\"], G.vs[\"lon\"]]).T]\n\n w = 100 * np.sqrt(G.vcount())\n if groups:\n comp = G.clusters()\n sort = np.argsort([len(c) for c in comp])[::-1]\n comp = [comp[i] for i in sort]\n cmap = np.tile([\"grey\", \"blue\", \"red\", \"yellow\"], 3)\n group_markers = []\n\n print((\"components:\", len(comp)))\n for i, c in enumerate(comp):\n if i >= len(cmap):\n break\n print((i, len(c), cmap[i]))\n group_markers.append((c, cmap[i]))\n\n plot(G, name + \".pdf\",\n bbox=(w, w),\n layout=Layout(coords=l),\n vertex_order=np.argsort(G.vs[\"level\"]),\n mark_groups=group_markers\n )\n else:\n plot(G, name + \".pdf\",\n bbox=(w, w),\n layout=Layout(coords=l),\n vertex_order=np.argsort(G.vs[\"level\"])\n )\n # plot(G, name + \".png\",\n # bbox=(w, w),\n # layout=Layout(coords=l),\n # vertex_order=np.argsort(G.vs[\"level\"])\n # )\n\ndef collect_data(G=None, name=\"test\"):\n import pandas as pd\n\n if G is None:\n G = Graph.Read_Pickle(name)\n assert isinstance(G, Graph)\n\n df_edges = 
pd.DataFrame({\"length\": G.es[\"weight\"],\n \"loglength\": np.log10(G.es[\"weight\"]),\n \"level\": G.es[\"level\"]})\n\n # print \"aspl\", G.average_path_length(), \"transitivity\", G.transitivity_undirected()\n\n df_nodes = pd.DataFrame({\"level\": G.vs[\"level\"],\n \"degree\": G.vs.degree(),\n \"clust\": G.transitivity_local_undirected(),\n \"betw\": 2. * np.array(G.betweenness()) / (G.vcount() * (G.vcount() - 1.)),\n \"density\": G.vs[\"density\"],\n })\n\n return df_nodes, df_edges\n\ndef hist(df_nodes=None, df_edges=None, name=\"test\"):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n if df_nodes is None:\n df_nodes = pd.read_pickle(name+\".nodedata\")\n assert isinstance(df_nodes, pd.DataFrame)\n\n if df_edges is None:\n df_edges = pd.read_pickle(name+\".edgedata\")\n assert isinstance(df_edges, pd.DataFrame)\n\n df_edges.pivot(columns=\"level\").loglength.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(r\"$\\log_{10}$ length\")\n plt.savefig(name + \"_loglength_dist.pdf\")\n\n plt.figure()\n for i in range(3):\n no = np.where(df_nodes.level==i)[0]\n plt.plot(df_nodes.degree[no]+0.5*np.random.random(size=no.size),df_nodes.density[no],\"b.\",alpha=0.2*(i+1),ms=8*(i+1),color={0:\"grey\",1:\"blue\",\n 2:\"red\"}[i])\n plt.xlabel(\"degree\")\n plt.ylabel(\"density\")\n plt.savefig(name + \"_degree_vs_density.pdf\")\n\n df_nodes = df_nodes.pivot(columns=\"level\")\n\n df_nodes.degree.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(\"degree\")\n plt.savefig(name + \"_degree_dist.pdf\")\n\n df_nodes.clust.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(\"local transitivity\")\n plt.savefig(name + \"_clust_dist.pdf\")\n\n df_nodes.betw.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(\"shortest path betweenness\")\n plt.savefig(name + \"_betw_dist.pdf\")\n\n\nif __name__ == \"__main__\":\n name = \"test\"\n g = calc(name=name)\n plot(g, 
name=name)\n\n\n\n\n # from pandas import concat, read_pickle, merge\n # import time as t\n # name = \"ilg\"\n\n # nodes = []\n # edges = []\n # for sample in range(50):\n # s = t.time()\n # G = calc(name=name, debug=False)\n # print t.time() - s\n # dfn, dfe = collect_data(G=G, name=name)\n # nodes.append(dfn)\n # edges.append(dfe)\n #\n # #plot(G, groups=False, name=name)\n # df_nodes = concat(nodes)\n # df_edges = concat(edges)\n #\n # df_edges.to_pickle(name + \".edgedata\")\n # df_nodes.to_pickle(name + \".nodedata\")\n #\n # hist(df_nodes, df_edges, name=name)\n\n # hist(name=name)\n\n\n\n\n","sub_path":"src/rpgm_neonet.py","file_name":"rpgm_neonet.py","file_ext":"py","file_size_in_byte":25957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66812432","text":"#!/usr/bin/env python3\nimport sys\nimport urllib.request\nimport shutil\n\nif __name__ == '__main__':\n if len (sys.argv) < 3:\n print (\"Usage: download.py \")\n exit (1)\n url = sys.argv[1]\n dst = sys.argv[2]\n with urllib.request.urlopen (url) as response, open (dst, 'wb') as out_file:\n shutil.copyfileobj (response, out_file)\n","sub_path":"tools/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"11123434","text":"##Chris Barker\n#!/usr/bin/env python\n\n\"\"\"\nA simple test of the GridBagSizer\n\nhttp://wiki.wxpython.org/index.cgi/WriteItYourself\n\n\"\"\"\n\nimport wx\n\nclass MyFrame(wx.Frame):\n def __init__(self, parent, ID, title):\n wx.Frame.__init__(self, parent, ID, title, wx.DefaultPosition)\n\n Buttons = []\n for i in range(6):\n Buttons.append(wx.Button(self,-1, \"Button %i\"%(i)))\n\n sizer = wx.GridBagSizer(9, 9)\n sizer.Add(Buttons[0], (0, 0), wx.DefaultSpan, wx.ALL, 5)\n sizer.Add(Buttons[1], (1, 1), (1,7), wx.EXPAND)\n sizer.Add(Buttons[2], (6, 6), (3,3), wx.EXPAND)\n sizer.Add(Buttons[3], (3, 
0), (1,1), wx.ALIGN_CENTER)\n sizer.Add(Buttons[4], (4, 0), (1,1), wx.ALIGN_LEFT)\n sizer.Add(Buttons[5], (5, 0), (1,1), wx.ALIGN_RIGHT)\n\n sizer.AddGrowableRow(6)\n sizer.AddGrowableCol(6)\n\n self.SetSizerAndFit(sizer)\n self.Centre()\n\n\nclass MyApp(wx.App):\n def OnInit(self):\n frame = MyFrame(None, -1, \"wx.gridbagsizer.py\")\n frame.Show(True)\n self.SetTopWindow(frame)\n return True\n\nif __name__ == \"__main__\":\n app = MyApp(0)\n app.MainLoop()\n\n","sub_path":"docs/sphinx/rest_substitutions/snippets/python/contrib/GridBagSizer.1.py","file_name":"GridBagSizer.1.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82370717","text":"#!/usr/bin/env python\r\n# license removed for brevity\r\nimport os\r\nimport sys\r\ncurrent_folder = os.path.dirname(os.path.realpath(__file__))\r\nsys.path.append(current_folder)\r\n\r\nimport numpy as np\r\nimport time\r\nimport csv\r\nfrom console_formatter import Console_Formatter\r\n\r\nclass DATA_READER:\r\n #PUBLIC\r\n file_data_path = None\r\n\r\n\r\n #PRIVATE\r\n consoler_ = Console_Formatter(__name__)\r\n fid_file_ = None\r\n data_list = []\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def __del__(self):\r\n self.close_file()\r\n\r\n def load_file(self, data_path=None):\r\n self.file_data_path = self.file_data_path if data_path == None else data_path\r\n if not self.check_path(data_path):\r\n #print(self.consoler_.WARN(\"index file : \\\"{}\\\" not found !\".format(self.data_path)))\r\n print(self.consoler_.WARN(\"Loading file : {} not found !\".format(self.file_data_path)))\r\n return None\r\n #print(self.consoler_.INFO(\"loading index file ...\"))\r\n print(self.consoler_.INFO(\"Loading file : {} ...\".format(self.file_data_path)))\r\n self.close_file()\r\n self.fid_file_ = open(self.file_data_path, 'r+')\r\n #print(self.consoler_.INFO(\"Loading file : {} ok !\".format(self.file_data_path)))\r\n self.data_list = 
self.fid_file_.readlines()\r\n return self.data_list\r\n '''\r\n data_list = []\r\n with open(data_path, 'r') as fid:\r\n infile_cursor = csv.reader(fid, delimiter=',')\r\n \r\n for row in infile_cursor:\r\n data_list = np.append(data_list, row)# [row[0], row[1]])\r\n return data_list\r\n '''\r\n\r\n def write_file(self, data, recursive_search=False):\r\n if self.fid_file_ == None:\r\n print(self.consoler_.WARN(\"File not load !\"))\r\n return False\r\n it_ = iter(self.data_list)\r\n while recursive_search:\r\n try:\r\n if next(it_).strip() == data.strip():\r\n return False\r\n except StopIteration:\r\n break\r\n \r\n data = \"{}\\n\".format(data)\r\n self.data_list = np.append(self.data_list, data)\r\n print(self.consoler_.INFO(\"Writing file : {} ...\".format(self.file_data_path)))\r\n self.fid_file_.write(data)\r\n #print(self.consoler_.INFO(\"Writing file : {} ok !\".format(self.file_data_path)))\r\n return True\r\n\r\n def close_file(self, fid=None):\r\n fid = self.fid_file_ if fid == None else fid\r\n if fid != None:\r\n fid.close()\r\n self.fid_file_ = None\r\n\r\n def check_path(self, path):\r\n return os.path.exists(path)\r\n\r\n def current_time(self):\r\n return time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) #%Y-%m-%d %H:%M:%S\r\n\r\n def current_time_stamp(self):\r\n return time.time()\r\n\r\n\r\n\r\n","sub_path":"utils/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254176715","text":"#!/usr/bin/env python\nfrom PIL import Image\nimport numpy as np\nimport sys\nimport os\nimport subprocess\n\n\ndef image2dat(img_name,ndisk,radius,mask_name=None,factor=100):\n ndisk=int(ndisk)\n radius=int(radius)\n if ( factor != '100' ):\n cmd='convert -resize '+str(factor)+'% '+ str(img_name)+' img_temp'\n subprocess.call(cmd,shell=True)\n img_name='img_temp'\n\n #open file in fileList:\n img_file = 
Image.open(img_name)\n\n\n # get original image parameters...\n height, width = img_file.size\n\n \n data_mask=np.zeros([width,height])\n data_mask[:,:]=255\n\n check = 2\n \n for i in range(ndisk) :\n x_center=int(np.random.rand()*width)\n y_center=int(np.random.rand()*height)\n \n for ip in range( x_center - radius - check, x_center + radius + check):\n for jp in range( y_center - radius - check, y_center + radius + check) :\n if ( ip 1:\n image2dat(*sys.argv[1:])\n else:\n raise SystemExit(\"usage: python image2dat image data [grid]\")\n","sub_path":"python_scripts/network_impainting/makemask.py","file_name":"makemask.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"72458113","text":"\"\"\"\nDefines the common utility functions used in our applications.\n\"\"\"\n\nfrom django.apps import apps as django_apps\nfrom django.contrib.auth.models import Group\nfrom django.core.cache import cache\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.http import HttpRequest\nfrom django.utils.cache import get_cache_key\nfrom django.utils.module_loading import import_string\nfrom organizations.models import Organization\nfrom rest_framework import exceptions, serializers\n\nfrom backend import settings\n\n\ndef field_not_found_error():\n \"\"\"Generates error message for when a field does not exist.\"\"\"\n return \"Field not found.\"\n\n\ndef field_with_id_not_found_error(field_id):\n \"\"\"Generates error message for when a field does not exist.\"\"\"\n return \"Field with id={} not found.\".format(field_id)\n\n\ndef field_required_error():\n \"\"\"Generates error message for when a field is required.\"\"\"\n return \"This is a required field.\"\n\n\ndef field_invalid_error():\n \"\"\"Generates error message for when a field is invalid.\"\"\"\n return \"Invalid field.\"\n\n\ndef get_user_from_serializer(serializer, raise_exception=False):\n \"\"\"\n 
Returns the user from serializer context. Raises permission denied error\n if user is not found.\n \"\"\"\n\n # get user requesting for a new registration\n request_user = None\n request = serializer.context.get(\"request\")\n if request and hasattr(request, \"user\"):\n request_user = request.user\n else:\n if raise_exception:\n # raise unauthorized error if user is not found\n # most probably this will never get called\n raise exceptions.PermissionDenied()\n return request_user\n\n\ndef filter_queryset_by_lookup_list(query_set, lookup_list, lookup_param):\n \"\"\"\n Filters a queryset by th given list of lookup params\n \"\"\"\n query = '{}__in'.format(lookup_param)\n return query_set.filter(**{query: lookup_list})\n\n\ndef exclude_queryset_by_lookup_list(query_set, lookup_list, lookup_param):\n \"\"\"\n Filters a queryset by excluding the given list of lookup params\n \"\"\"\n query = '{}__in'.format(lookup_param)\n return query_set.exclude(**{query: lookup_list})\n\n\ndef filter_objects_by_lookup_list(objects, lookup_list, lookup_param):\n \"\"\"\n Filters the object by lookup list. 
If all lookup params in the list do not\n match, a not found exception is raised.\n \"\"\"\n try:\n filtered_objects = filter_queryset_by_lookup_list(\n objects, lookup_list, lookup_param)\n\n # make sure all the given ids are inside filtered objects,\n # otherwise raise a validation error\n if len(filtered_objects) != len(lookup_list):\n raise exceptions.NotFound(\n {\n 'id': 'The following requested ids are invalid: {}'.format(\n exclude_queryset_by_lookup_list(\n objects, lookup_list, lookup_param).values_list(\n lookup_param, flat=True))\n })\n return filtered_objects\n except:\n raise exceptions.ValidationError(\n detail={lookup_param: \"Invalid list of parameters.\"})\n\n\ndef get_lookup_list(request, lookup_param):\n \"\"\"\n Returns the list of ids from API request.\n \"\"\"\n return request.query_params.getlist(lookup_param)\n\n\ndef get_user_group_model():\n try:\n return django_apps.get_model(settings.USER_GROUP_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"USER_GROUP_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"USER_GROUP_MODEL refers to model '%s' that has not been installed\" % settings.USER_GROUP_MODEL\n )\n\n\ndef get_organization_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_MODEL refers to model '%s' that has not been installed\" % settings.ORGANIZATION_MODEL\n )\n\n\ndef get_organization_user_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_USER_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_USER_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_USER_MODEL refers to model 
'%s' that has not been installed\" % settings.ORGANIZATION_USER_MODEL\n )\n\n\ndef get_organization_owner_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_OWNER_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_OWNER_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_OWNER_MODEL refers to model '%s' that has not been installed\" % settings.ORGANIZATION_OWNER_MODEL\n )\n\n\ndef get_organization_group_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_GROUP_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_GROUP_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_GROUP_MODEL refers to model '%s' that has not been installed\" % settings.ORGANIZATION_GROUP_MODEL\n )\n\n\ndef get_organization_auth_backend():\n auth_backend = import_string(settings.ORGANIZATION_USER_AUTH_BACKEND)\n if not auth_backend:\n raise ImproperlyConfigured(\n 'No authentication backends have been defined. 
Does '\n 'ORGANIZATION_USER_AUTH_BACKEND contain anything?'\n )\n return auth_backend\n","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"204360718","text":"# -*- coding: utf-8 -*-\n# filename: ahnd_url_crawler.py\n\nimport ssl\nimport traceback\nfrom urllib.request import Request, urlopen\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nclass ChannelCrawler(object):\n\n def __init__(self, parser=None, debug=False):\n \"\"\"\n depth: how many time it will bounce from page one (optional)\n cache: a basic cache controller (optional)\n \"\"\"\n self.channel_map = {}\n self.init_url = ''\n self.channel_parser = parser\n self.debug = debug\n\n def crawl(self, url):\n \"\"\"\n url: where we start crawling, should be a complete URL like\n 'http://www.intel.com/news/'\n no_cache: function returning True if the url should be refreshed\n \"\"\"\n self.init_url = url\n print(\" to crawl url:%s\" % url)\n\n html = self.curl(url)\n\n self.channel_parser.parse(html)\n self.channel_map = self.channel_parser.channel_map\n\n if self.debug or len(self.channel_map) == 0:\n\n if len(self.channel_map) == 0:\n print(\"crawl url:{} return empty\".format(url))\n print(html)\n\n def curl(self, url):\n \"\"\"\n return content at url.\n return empty string if response raise an HTTPError (not found, 500...)\n \"\"\"\n try:\n print(\"retrieving url... 
%s\" % (url))\n # req = Request('%s://%s%s' % (self.scheme, self.domain, url))\n req = Request(url)\n\n req.add_header('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.i/605.1.15')\n\n response = urlopen(req, timeout=10)\n\n # print(response.encoding)\n\n if response.url != req.full_url:\n return response.url\n\n charset = self.url_parser.charset\n\n if response.headers.get_content_charset():\n charset = response.headers.get_content_charset()\n return response.read().decode(charset, 'ignore')\n except Exception as e:\n traceback.print_exc()\n print(\"error %s: %s\" % (url, e))\n return ''\n\n @staticmethod\n def curl(url, charsert='utf-8'):\n \"\"\"\n return content at url.\n return empty string if response raise an HTTPError (not found, 500...)\n \"\"\"\n charset_local = charsert\n try:\n print(\"retrieving url... %s\" % (url))\n # req = Request('%s://%s%s' % (self.scheme, self.domain, url))\n req = Request(url)\n\n req.add_header('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.i/605.1.15')\n\n response = urlopen(req, timeout=10)\n\n # print(response.encoding)\n\n if response.url != req.full_url:\n return response.url\n\n if response.headers.get_content_charset():\n charset_local = response.headers.get_content_charset()\n return response.read().decode(charset_local, 'ignore')\n except Exception as e:\n print(\"error %s: %s\" % (url, e))\n return ''\n","sub_path":"backend/app/channel/channel_crawler.py","file_name":"channel_crawler.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243570559","text":"\"\"\"\n题目描述\n 编写一段程序来查找第 n 个超级丑数。\n 超级丑数是指其所有质因数都是长度为 k 的质数列表 primes 中的正整数。\n\"\"\"\n\n\ndef nthSuperUglyNumber(n, primes):\n \"\"\"\n :type n: int\n :type primes: List[int]\n :rtype: int\n \"\"\"\n if n < 0:\n return False\n t = 
len(primes) * [0]\n res = [1]\n while len(res) < n:\n m = pow(2, 32)\n for i in range(len(primes)):\n temp = res[t[i]] * primes[i]\n if temp < m:\n m = temp\n for j in range(len(primes)):\n if m == res[t[j]] * primes[j]:\n t[j] += 1\n res.append(m)\n return res[-1]\n\n\nnn = int(input())\np = list(map(int, input().split(\",\")))\nprint(nthSuperUglyNumber(nn, p))","sub_path":"Code/CodeRecords/2116/60782/291927.py","file_name":"291927.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"23444549","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n#\n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n 
\"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other maze, the\n sequence of moves will be incorrect, so only use this for tinyMaze.\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n #### variables\n startState = problem.getStartState()\n goal = None\n stack = Stack() #basically keeps track of our depth first search\n parentDict = {} #our dictionary adds successor states as keys, each mapping to their parent states\n actions = [] #RETURN THIS\n currentState = None #this will be set to goal, and then updated by a loop until it becomes start. 
so when\n # currentState = (startState, None, None) then we have successfully reached start from\n # goal and we are almost done\n\n explored = set() #the 'explored' set variable prevents infinite loop by preventing us from appending (to stack) successor\n # states of a state whose successor states are already in stack. in other words, prevents us from\n # re-exploring previously explored branches\n explored.add(startState)\n\n #this loop is executed once. our dictionary adds each successor of startState as a key mapping to startState\n for successor in problem.getSuccessors(startState):\n parentDict[successor] = (startState, None, None)\n stack.push(successor)\n\n #this while loop and its contained for loop allow us to move through with our depth first search\n while not stack.isEmpty():\n popped = stack.pop()\n if popped[0] in explored:\n continue #exits the current iteration\n if problem.isGoalState(popped[0]):\n goal = popped\n break #exits entire while loop\n explored.add(popped[0])\n for successor in problem.getSuccessors(popped[0]): #similar to the above for loop\n parentDict[successor] = popped\n stack.push(successor)\n currentState = goal\n #print(\"goal: \")\n #print(goal)\n\n #from all the above code we've been able to assemble a dictionary mapping successor states as keys to their\n # parent states. now our currentState variable will use a while loop to move through this dictionary from\n # goal to start and append the direction values to our 'actions' list. 
the reverse of this list is our answer\n while currentState != (startState, None, None):\n actions.append(currentState[1])\n currentState = parentDict[currentState]\n actions.reverse()\n return actions\n\n\ndef breadthFirstSearch(problem):\n \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n #the first attempt didn't work for problem 5\n \"\"\"\n from util import Queue\n #### variables\n startState = problem.getStartState()\n goal = None\n q = Queue()\n parentDict = {}\n actions = []\n currentState = None\n explored = set()\n explored.add(startState)\n\n #this loop is executed once. our dictionary adds each successor of startState as a key mapping to startState\n for successor in problem.getSuccessors(startState):\n parentDict[successor] = (startState, None, None)\n q.push(successor)\n\n #this while loop and its contained for loop allow us to move through with our depth first search\n while not q.isEmpty():\n popped = q.pop()\n if popped[0] in explored:\n continue #exits the current iteration\n if problem.isGoalState(popped[0]):\n goal = popped\n break #exits entire while loop\n explored.add(popped[0])\n for successor in problem.getSuccessors(popped[0]): #similar to the above for loop\n parentDict[successor] = popped\n q.push(successor)\n currentState = goal\n\n #from all the above code we've been able to assemble a dictionary mapping successor states as keys to their\n # parent states. now our currentState variable will use a while loop to move through this dictionary from\n # goal to start and append the direction values to our 'actions' list. 
the reverse of this list is our answer\n while currentState != (startState, None, None):\n actions.append(currentState[1])\n currentState = parentDict[currentState]\n actions.reverse()\n return actions\n \"\"\"\n #cleaned up version that works with problem 5\n from util import Queue\n q = Queue()\n explored = set()\n q.push((problem.getStartState(), [], 0))\n while not q.isEmpty():\n popped = q.pop()\n if popped[0] in explored:\n continue\n explored.add(popped[0])\n if problem.isGoalState(popped[0]):\n return popped[1]\n for successor in problem.getSuccessors(popped[0]):\n if successor[0] not in explored:\n #push successor, where actions and cost are cumulative on its parent (popped)\n q.push((successor[0], popped[1] + [successor[1]], popped[2] + successor[2]))\n return popped[1]\n\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n #### variables\n pq = PriorityQueue() #basically keeps track of our uniform cost search\n startState = problem.getStartState()\n explored = [] #this list will stop us from revisiting states unnecessarily. 
But it will let us revisit\n ##a state if we have found a way to get there in lower cost\n\n pq.push([startState, [], 0], 0)\n current = pq.pop()\n while not problem.isGoalState(current[0]):\n exploreThis = True\n totalCost = problem.getCostOfActions(current[1])\n for e in explored:\n if current[0] == e[0] and totalCost >= e[1]:\n #if current's state has already been explored and the cost is not being lowered:\n exploreThis = False\n if exploreThis:\n explored.append([current[0], totalCost])\n for successor in problem.getSuccessors(current[0]):\n (state, action, cost) = successor\n pq.push([state, current[1] + [action], problem.getCostOfActions(current[1] + [action])],\n problem.getCostOfActions(current[1] + [action]))\n current = pq.pop()\n return current[1]\n\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n #### variables\n pq = PriorityQueue() #basically keeps track of our uniform cost search\n startState = problem.getStartState()\n explored = [] #this list will stop us from revisiting states unnecessarily. 
But it will let us revisit\n ##a state if we have found a way to get there in lower cost\n\n pq.push([startState, [], 0], 0)\n current = pq.pop()\n while not problem.isGoalState(current[0]):\n exploreThis = True\n totalCost = problem.getCostOfActions(current[1]) + heuristic(current[0], problem)\n for e in explored:\n if current[0] == e[0] and totalCost >= e[1]:\n #if current's state has already been explored and the cost is not being lowered:\n exploreThis = False\n if exploreThis:\n explored.append([current[0], totalCost])\n for successor in problem.getSuccessors(current[0]):\n (state, action, cost) = successor\n ucsDistance = problem.getCostOfActions(current[1] + [action])\n heur = heuristic(state, problem)\n pq.push([state, current[1] + [action], ucsDistance + heur], ucsDistance + heur)\n current = pq.pop()\n return current[1]\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":10818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"245575640","text":"# test_file.py\n\nimport os.path\nfrom curio import *\n\ndirname = os.path.dirname(__file__)\ntestinput = os.path.join(dirname, 'testdata.txt')\n\n\ndef test_read(kernel):\n async def main():\n async with aopen(testinput, 'r') as f:\n data = await f.read()\n assert data == 'line 1\\nline 2\\nline 3\\n'\n\n kernel.run(main())\n\n\ndef test_read1(kernel):\n async def main():\n async with aopen(testinput, 'rb') as f:\n data = await f.read1(1000)\n assert data == b'line 1\\nline 2\\nline 3\\n'\n\n kernel.run(main())\n\n\ndef test_readinto(kernel):\n async def main():\n async with aopen(testinput, 'rb') as f:\n buf = bytearray(1000)\n n = await f.readinto(buf)\n assert buf[:n] == b'line 1\\nline 2\\nline 3\\n'\n\n kernel.run(main())\n\n\ndef test_readinto1(kernel):\n async def main():\n async with aopen(testinput, 
'rb') as f:\n buf = bytearray(1000)\n n = await f.readinto1(buf)\n assert buf[:n] == b'line 1\\nline 2\\nline 3\\n'\n\n kernel.run(main())\n\n\ndef test_readline(kernel):\n async def main():\n async with aopen(testinput, 'r') as f:\n lines = []\n while True:\n line = await f.readline()\n if not line:\n break\n lines.append(line)\n\n assert lines == ['line 1\\n', 'line 2\\n', 'line 3\\n']\n\n kernel.run(main())\n\n\ndef test_readlines(kernel):\n async def main():\n async with aopen(testinput, 'r') as f:\n lines = await f.readlines()\n\n assert lines == ['line 1\\n', 'line 2\\n', 'line 3\\n']\n\n kernel.run(main())\n\n\ndef test_readiter(kernel):\n async def main():\n async with aopen(testinput, 'r') as f:\n lines = []\n async for line in f:\n lines.append(line)\n\n assert lines == ['line 1\\n', 'line 2\\n', 'line 3\\n']\n\n kernel.run(main())\n\n\nwlines = ['line1\\n', 'line2\\n', 'line3\\n']\n\n\ndef test_write(kernel):\n async def main():\n outname = os.path.join(dirname, 'tmp.txt')\n async with aopen(outname, 'w') as f:\n outdata = ''.join(wlines)\n await f.write(outdata)\n await f.flush()\n\n assert open(outname).read() == outdata\n\n kernel.run(main())\n\n\ndef test_writelines(kernel):\n async def main():\n outname = os.path.join(dirname, 'tmp.txt')\n async with aopen(outname, 'w') as f:\n await f.writelines(wlines)\n\n assert open(outname).readlines() == wlines\n\n kernel.run(main())\n\n\ndef test_seek_tell(kernel):\n async def main():\n async with aopen(testinput, 'rb') as f:\n await f.seek(10)\n n = await f.tell()\n assert n == 10\n data = await f.read()\n\n assert data == b'line 1\\nline 2\\nline 3\\n'[10:]\n\n kernel.run(main())\n\n\ndef test_sync_iter(kernel):\n async def main():\n async with aopen(testinput, 'r') as f:\n try:\n for line in f:\n pass\n\n assert False, 'sync-iteration should have failed'\n except SyncIOError:\n assert True\n\n kernel.run(main())\n\n\ndef test_sync_with(kernel):\n async def main():\n f = aopen(testinput, 'r')\n try:\n with 
f:\n pass\n assert False, 'sync-with should have failed'\n except AsyncOnlyError:\n assert True\n\n kernel.run(main())\n\n\ndef test_blocking(kernel):\n async def main():\n async with aopen(testinput, 'r') as f:\n with f.blocking() as sync_f:\n data = sync_f.read()\n\n assert data == 'line 1\\nline 2\\nline 3\\n'\n\n kernel.run(main())\n","sub_path":"tests/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"587846929","text":"#3.7.X\r\n#loadFiles --> open side of eye or respiratory system\r\n#iterate over file\r\n#print title and code if value is page of eye or respiratory system\r\n#get nodes: search for class\"info\"... and get ID and description over print_leafes is used, otherwise subnodes are printed with loadFiles\r\n\r\n#print_leafes --> get ID and description over div class=\"code_bottom\" read and print ID and description analog to loadFiles\r\nimport urllib.request\r\nimport re\r\nimport html\r\nurl1 = 'http://www.icd-code.de/icd/code/H00-H59.html'\r\nurl2 = 'http://www.icd-code.de/icd/code/J00-J99.html'\r\ndef print_leafes(txt, tab_num):\r\n for j in txt.readlines():\r\n pattern = re.compile(r\"
(.*)\")\r\n result = pattern.search(str(j))\r\n if result != None:\r\n s = str(result.group(1)).split('
')\r\n for i in s:\r\n pattern = re.compile(r\"(.*)
(.*)<.*\")\r\n result2 = pattern.search(str(i))\r\n \r\n if result2 != None:\r\n if result2.group(1).find('.') != -1:\r\n name = result2.group(2)\r\n if(result2.group(2).find('<') != -1):\r\n pattern = re.compile(r\"([A-za-zäöü, ]*).*$\")\r\n name = (pattern.search(html.unescape(result2.group(2)))).group(1)\r\n tabs=''\r\n for x in range(0,tab_num):\r\n tabs+='\\t'\r\n print(tabs + str(result2.group(1))+ \" \" + html.unescape(str(name)))\r\n \r\ndef loadFiles(url, tab_num=1):\r\n printed_title = (tab_num != 1)\r\n f = urllib.request.urlopen(url)\r\n \r\n for j in f.readlines():\r\n if not printed_title:\r\n pattern = re.compile(r\"(.*)\")\r\n title = pattern.search(str(j))\r\n if title != None:\r\n print(title.group(1))\r\n printed_title = True\r\n pattern = re.compile(r\"
Info:
(.*)
(.*)<\")\r\n result = pattern.search(str(i))\r\n if result != None:\r\n name = result.group(2)\r\n if(result.group(2).find('') != -1):\r\n pattern = re.compile(r\"(.*)\")\r\n name = pattern.search(result.group(2)).group(1)\r\n tabs=''\r\n for x in range(0,tab_num):\r\n tabs+='\\t'\r\n print(tabs + str(result.group(1))+ \" \" + html.unescape(str(name)))\r\n if str(result.group(1)).find(\".-\") != -1:\r\n print_leafes(urllib.request.urlopen('http://www.icd-code.de/icd/code/' + str(result.group(1)) + \".html\"), tab_num+1)\r\n else:\r\n loadFiles('http://www.icd-code.de/icd/code/' + str(result.group(1)) + \".html\", tab_num+1)\r\n else: #reached leaf of tree?\r\n print_leafes(f, tab_num)\r\nloadFiles(url1)\r\nloadFiles(url2)\r\n\r\n","sub_path":"3.Semester/Skriptsprachen/Übung/3/3_2.py","file_name":"3_2.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71836577","text":"#### import the simple module from the paraview\nfrom paraview.simple import *\n#### disable automatic camera reset on 'Show'\nparaview.simple._DisableFirstRenderCameraReset()\n# create a new 'OpenFOAMReader'\na0foam = OpenFOAMReader(FileName='/scratch/orca/superran/3rd/refineMesh/0/0.foam')\na0foam.CaseType = 'Reconstructed Case'\na0foam.Expect64bitLabels = 0\na0foam.Createcelltopointfiltereddata = 1\na0foam.Adddimensionalunitstoarraynames = 0\na0foam.MeshRegions = ['CYLINDER']\na0foam.CellArrays = ['p']\na0foam.PointArrays = []\na0foam.Cachemesh = 1\na0foam.Decomposepolyhedra = 1\na0foam.ListtimestepsaccordingtocontrolDict = 0\na0foam.Readzones = 0\n# get animation scene\nanimationScene1 = GetAnimationScene()\n# update animation scene based on data timesteps\nanimationScene1.UpdateAnimationUsingDataTimeSteps()\n# Properties modified on a0foam\na0foam.CellArrays = ['p']\n# create a new 'Temporal Statistics'\ntemporalStatistics1 = TemporalStatistics(Input=a0foam)\ntemporalStatistics1.ComputeAverage = 
1\ntemporalStatistics1.ComputeMinimum = 1\ntemporalStatistics1.ComputeMaximum = 1\ntemporalStatistics1.ComputeStandardDeviation = 1\n# Properties modified on temporalStatistics1\ntemporalStatistics1.ComputeMinimum = 0\ntemporalStatistics1.ComputeMaximum = 0\ntemporalStatistics1.ComputeStandardDeviation = 0\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n","sub_path":"plotOverIntersection/extractVelocity.py","file_name":"extractVelocity.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156644010","text":"from selenium import webdriver\nimport time\n\ntry:\n browser = webdriver.Chrome(\"chromedriver.exe\")\nexcept:\n print(\"Error!\")\n\nbrowser.get(\"https://www.forbes.com/powerful-brands/list/#tab:rank\")\nbrowser.maximize_window()\ntime.sleep(3)\nbrowser.get(\n \"https://www.forbes.com/companies/apple/?list=powerful-brands#2cc564585355\")\ntime.sleep(3)\nbrowser.back()\ntime.sleep(10)\nbrowser.quit()\n","sub_path":"learn/dd/slnm1.py","file_name":"slnm1.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"161955870","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('frame', '0022_auto_20181211_1605'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='scope',\n name='ops_main',\n field=models.ForeignKey(related_name='scopes_ops_main', verbose_name='\\u8fd0\\u7ef4\\u4eba\\u5458\\uff08\\u4e3b\\uff09', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n ),\n migrations.AddField(\n model_name='scope',\n name='ops_stan',\n field=models.ForeignKey(related_name='scopes_ops_stan', 
verbose_name='\\u8fd0\\u7ef4\\u4eba\\u5458\\uff08\\u5907\\uff09', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n ),\n ]\n","sub_path":"WiseEyeIAMService/frame/migrations/0023_auto_20181212_1517.py","file_name":"0023_auto_20181212_1517.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"477608566","text":"__author__ = 'xueyishu'\n\nclass Guest():\n\tdef __init__(self, name, hunger):\n\t\tself.name = name\n\t\tself.hunger = hunger\n\n\tdef eat(self):\n\t\tself.hunger -= 1\n\t\tif self.hunger == 0:\n\t\t\tprint (\"{0}: Burp!\".format(self.name))\n\t\treturn self.hunger\n\nclass Restaurant(list):\n\n\tdef __init__(self, size):\n\t\tself.size = size\n\t\tfor i in range(size):\n\t\t\tself.append(None)\n\n\tdef seat(self, guest):\n\t\tfor i in range(len(self)):\n\t\t\tif not isinstance(self[i], Guest):\n\t\t\t\tself[i] = guest\n\t\t\t\tprint (\"Seating guest {0} at table {1}.\".format(guest.name, i))\n\t\t\t\treturn True\n\t\tprint (\"No free table.\")\n\t\treturn False\n\n\tdef serve(self):\n\t\tfor i in range(len(self)):\n\t\t\tif self[i]:\n\t\t\t\tprint (\"Serving guest {0}.\".format(self[i].name))\n\t\t\t\thunger = self[i].eat()\n\t\t\t\tif not hunger:\n\t\t\t\t\tself[i] = None\n\t\t\t\treturn\n\t\tprint(\"No guest to serve.\")","sub_path":"CUhomework/restaurant.py","file_name":"restaurant.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522612558","text":"# -*- encoding: utf-8 -*-\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom opencart.items import OpencartItem\nfrom scrapy.http import Request\nimport re\nimport datetime\nimport MySQLdb\n\ndef image_name_cleaner(image_url):\n if image_url == 'http://avtoto.com.ua/skin/images/placeholder.png':\n image_name = 'placeholder.png'\n else:\n 
image_name = image_url.split(\"/\")\n image_name = image_name[-1]\n return image_name\n\n\nclass OpencartSpider(CrawlSpider):\n name = \"main\"\n allowed_domains = ['avtoto.com.ua']\n start_urls = ['http://avtoto.com.ua/komplekty/komplekt-tormozov/']\n\n rules = (\n Rule(SgmlLinkExtractor(\\\n restrict_xpaths=\"//a[@class='btn btngrey next']\"),\\\n callback='parse_start_url',\\\n follow=True),)\n\n def parse_start_url(self, response):\n prod_url_list = response.xpath(\"//div[@class='product-name']/h2/a/@href\").extract()\n for url in prod_url_list:\n yield Request(url, callback=self.parse_product)\n\n def parse_product(self, response):\n item = OpencartItem()\n\n item['model'] = \"Prod_\"\n item['quantity'] = 99\n item['stock_status_id'] = 3\n item['price'] = '148300'\n item['minimum'] = 1\n item['status'] = 1\n item['date_added'] = datetime.date.today()\n item['date_modified'] = datetime.date.today()\n item['viewed'] = 1\n\n item['sku'] = \"\"\n item['upc'] = \"\"\n item['ean'] = \"\" \n item['jan'] = \"\"\n item['isbn'] = \"\"\n item['mpn'] = \"\"\n item['location'] = \"\" \n item['manufacturer_id'] = \"1\"\n item['tax_class_id'] = \"1\"\n item['date_available'] = \"2016-01-01\"\n\n item['name'] =response.xpath('//h1/text()').extract()[0]\n item['language_id'] = 1\n item['description'] = 'Длинное описание товара'\n item['meta_description'] = 'МЕТА описание'\n item['meta_keyword'] = 'МЕТА ключевые слова'\n item['seo_title'] = 'Тайтл'\n item['seo_h1'] = 'H1'\n item['tag'] = '1'\n\n item['main_category'] = 1\n\n # images block\n\n image_names_list = []\n image_url_list = []\n try:\n first_image_name = image_name_cleaner(response.xpath(\"//div[@class='product-image']/img/@src\").extract()[0])\n except IndexError:\n first_image_name = image_name_cleaner(response.xpath(\"//div[@class='product-image']/a/img/@src\").extract()[0])\n image_names_list.append(first_image_name)\n try:\n first_image_url = 
response.xpath(\"//div[@class='product-image']/img/@src\").extract()[0]\n except IndexError:\n first_image_url = response.xpath(\"//div[@class='product-image']/a/img/@src\").extract()[0]\n image_url_list.append(first_image_url)\n image_name = image_names_list[0]\n item['images'] = zip(image_url_list, image_names_list)\n item['image'] = (\"data/tovar/\" + image_name)\n item['sort_order'] = 0\n item['category_id'] = 61 \n return item","sub_path":"opencart/spiders/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"587639573","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nclass FermionOccupationNumber:\n \"\"\"A representation of an OccupationNumber state vector\n \"\"\"\n def __init__(self, n_max=0, a=0, occupied=None, scalar=1):\n self.n_max = n_max\n self.scalar = scalar\n self.a = a\n self.occ = occupied\n if self.occ is None:\n self.occ = [-1] + [1] * self.a + [0] * n_max # ground state\n else:\n self.a = len(filter(lambda x: x == 1, self.occ))\n self.occ = self.occ + [0] * (self.n_max + self.a - len(self.occ))\n if self.occ[0] != -1:\n self.occ = [-1] + self.occ\n\n def __len__(self):\n return len(self.occ) - 1\n\n def __getitem__(self, item):\n return self.occ[item]\n\n def __add__(self, other):\n return self.__radd__(other)\n\n def __radd__(self, other):\n if other == 0:\n return self\n elif isinstance(other, FermionOccupationNumber):\n if other.scalar == 0:\n return self\n elif self.scalar == 0:\n return other\n elif self.occ != other.occ:\n raise CannotAddOccupationNumbersException(\n 'Cannot add occupation states {} and {}'\n ''.format(self, other)\n )\n else:\n return FermionOccupationNumber(\n n_max=self.n_max, a=self.a, occupied=self.occ,\n scalar=self.scalar + other.scalar\n )\n else:\n raise CannotAddOccupationNumbersException(\n 'Cannot add occupation 
state {} to {}'\n ''.format(self, other))\n\n def __sub__(self, other):\n return self.__add__(-1 * other)\n\n def __rsub__(self, other):\n return -1 * self.__sub__(other)\n\n def __mul__(self, other):\n return self.__rmul__(other)\n\n def __rmul__(self, other):\n return FermionOccupationNumber(\n n_max=self.n_max, a=self.a, occupied=self.occ,\n scalar=self.scalar * other\n )\n\n def __str__(self):\n s = self.scalar\n if s == 0:\n return '0'\n s_str = '' if s == 1 else str(s)\n sep = '|'\n right = '>'\n occ = str(self.occ[1:]).strip('[]').replace(', ', '')\n return '{}{}{}{}'.format(s_str, sep, occ, right)\n\n def __iter__(self):\n return iter(list(self.occ[1:]))\n\n def __eq__(self, other):\n if isinstance(other, FermionOccupationNumber):\n if self.scalar == 0:\n return 0 == other.scalar\n else:\n return self.scalar == other.scalar and self.occ == other.occ\n else:\n return self.scalar == 0 and other == 0\n\n def _phase_factor(self, i):\n phase = 1\n for j in range(1, i):\n if self[j] == 1:\n phase *= -1\n return phase\n\n def create(self, i):\n if i > len(self):\n raise ModelSpaceTooSmallException(\n 'Cannot create a particle in state i = {}. Model space is '\n 'limited to i in \\{1, 2, ...{}\\}.'.format(i, len(self.occ))\n )\n elif self[i] == 1:\n return FermionOccupationNumber(\n n_max=self.n_max, a=self.a, occupied=self.occ, scalar=0)\n elif self[i] == 0:\n next_occ = list(self.occ)\n next_occ[i] = 1\n return FermionOccupationNumber(\n n_max=self.n_max, a=self.a + 1, occupied=next_occ,\n scalar=self.scalar * self._phase_factor(i)\n )\n\n def annihilate(self, i):\n if i > len(self):\n raise ModelSpaceTooSmallException(\n ('Cannot create a particle in state i = {}.'.format(i) +\n ' Model space is limited to i in {1, 2, ...' 
+\n '{}'.format(len(self))) + '}.'\n )\n elif self[i] == 0:\n return FermionOccupationNumber(\n n_max=self.n_max, a=self.a, occupied=self.occ, scalar=0)\n elif self[i] == 1:\n next_occ = list(self.occ)\n next_occ[i] = 0\n return FermionOccupationNumber(\n n_max=self.n_max, a=self.a - 1, occupied=next_occ,\n scalar=self.scalar * self._phase_factor(i)\n )\n\n\nclass CannotAddOccupationNumbersException(Exception):\n pass\n\n\nclass _FermionCAOperator(object):\n def __init__(self, i, adjoint_type):\n self.i = i\n self._adjoint_type = adjoint_type\n\n def adjoint(self):\n return self._adjoint_type(self.i)\n\n def anticommute(self, other):\n if isinstance(other, self._adjoint_type):\n return 1 if self.i == other.i else 0\n else:\n return 0\n\n\nclass FermionCreationOperator(_FermionCAOperator):\n \"\"\"A creation operator, which may be applied to an occupation state vector\n in an appropriately sized model space\n \"\"\"\n def __init__(self, i):\n super(FermionCreationOperator,\n self).__init__(i, FermionAnnihilationOperator)\n\n def __call__(self, occ_num):\n return occ_num.create(self.i)\n\n\nclass FermionAnnihilationOperator(_FermionCAOperator):\n \"\"\"An annihilation operator, which may be applied to an occupation state vector\n in an appropriately sized model space\n \"\"\"\n def __init__(self, i):\n super(FermionAnnihilationOperator,\n self).__init__(i, FermionCreationOperator)\n\n def __call__(self, occ_num):\n return occ_num.annihilate(self.i)\n\n\nclass ModelSpaceTooSmallException(Exception):\n pass\n","sub_path":"src/occ_rep.py","file_name":"occ_rep.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"213597222","text":"\nimport os\nfrom flask import Flask, request, redirect, url_for, render_template\nfrom werkzeug.utils import secure_filename\n\nUPLOAD_FOLDER = '/home/user/Desktop/Joon/Uploads'\nALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif', 
'txt'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.route('/upload', methods = ['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n f = request.files['file']\n f.save(secure_filename(f.filename))\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], f.filename))\n return 'file uploaded successfully'\n\"\"\"\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return render_template('hello.html')\n\"\"\"\n \n@app.route('/')\ndef welcome():\n return render_template('ImageUpload.html') # render a template\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"Image Upload(Joon)/imageupload(Original).py","file_name":"imageupload(Original).py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357761965","text":"from django.test import TestCase, Client\nfrom django.urls import reverse\nfrom comment.models import Comment\nfrom posts.models import Posts\nimport json\n\n\nclass TestViews(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.postscmadd_url = reverse('posts_cm_add', args=[1])\n self.commentdel_url = reverse('comments_del', args=[2])\n self.comment1 = Comment.objects.create(\n name=\"Ryan\",\n email=\"email@email.com\",\n cm='Comment',\n posts_id=1,\n date=1,\n time=1\n )\n\n\n def test_commentadd_POST(self):\n \n response = self.client.post(self.postscmadd_url, {\n 'name': \"Ryan\",\n 'email': 
\"email@email.com\",\n 'cm': 'Comment',\n 'posts_id': 1,\n 'date':1,\n 'time':1\n })\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(self.comment1.name, 'Ryan')\n\n\n def test_commentdel_DELETE(self):\n \n Comment.objects.create(\n name=\"Ryan\",\n email=\"email@email.com\",\n cm=\"Comment\",\n posts_id=1,\n date=1,\n time=1\n )\n\n response = self.client.delete(self.commentdel_url, json.dumps({\n 'name': \"Ryan\",\n 'email': \"email@email.com\",\n 'cm': 'Comment',\n 'posts_id': 1,\n 'date': 1,\n 'time': 1\n }))\n\n self.assertEquals(response.status_code, 302)\n self.assertEquals(self.comment1.name, 'Ryan')\n\n\n ","sub_path":"rdpproject/comment/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"255618331","text":"import os\nimport re\nfrom subprocess import Popen, PIPE, STDOUT\n\nfrom django.conf import settings\n\ndef get_revision():\n\ttry:\n\t\tcommand = ['svnversion',settings.PROJECT_ROOT]\n\t\tstIO = Popen(command, stdout=PIPE, stderr=STDOUT)\n\t\tstIO.wait()\n\t\toutS = stIO.stdout.read().strip()\n\t\tm = re.match(':?(\\d*).*[MS]?', outS)\n\t\treturn m.group(1) or ' '\n\texcept:\n\t\treturn 'Versioning Unavailable'\n\nREVISION = get_revision()\n","sub_path":"svnrevision/templatetags/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183876798","text":"import hashlib\nimport os\nimport sys\nimport cPickle as pickle\nimport time\nusername=sys.argv[1]\n#global_dict = pickle.load( open( \"/pxeusers/username/save.p\", \"rb\" ) )\nglobal_dict = pickle.load( open( '/pxeusers/%s/save.p' %username, \"rb\" ) )\n\n#Start Time\nprint(time.asctime( time.localtime(time.time())))\n\nfor key,value in global_dict.iteritems():\n\tprint (\"Generating file %s\" % 
key)\n\tfilename=key\n\tif not os.path.exists(os.path.dirname(filename)):\n\t\ttry:\n\t\t\tos.makedirs(os.path.dirname(filename))\n\t\texcept OSError as exc:# Guard against race condition\n\t\t\tif exc.errno != errno.EEXIST:\n\t\t\t\traise\n\n\tdataList = []\n\tfileNames = global_dict[key]\n\tfor fn in fileNames:\n\t\tf = open(os.path.join('/pxeusers/%s/hashedfile/'%username, fn), 'rb')\n\t\tdataList.append(f.read())\n\t\tf.close()\n\n\tf = open(filename, 'wb')\n\tfor data in dataList:\n\t\tf.write(data)\n\tf.close\n#End Time\nprint(time.asctime( time.localtime(time.time())))","sub_path":"Deduplication_DS/reConstruct.py","file_name":"reConstruct.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"641653034","text":"# Os endereços IP versão 4 são divididos em cinco classes: A, B, C, D e E.\n# Os endereços no intervalo de 0 a 127 são classe A, de 128 a 191 são classe B,\n# de 192 a 223 são classe C, de 224 a 239 são classe D e a partir de 240 são classe E.\n# Crie um algoritmo que leia o primeiro octeto, no formato decimal, de um endereço IP e informe a sua classe.\n\nclasses = [\"A\", \"B\", \"C\", \"D\",\"E\"]\nbanda = [127, 191, 223, 239,255 ]\n\nendereco = input('informe o endereco de IP: ')\n\nendereco = endereco[0] + endereco[1] + endereco[2]\nendereco = int(endereco)\n\nif endereco <= banda[0]:\n ip = classes[0]\n\nelif endereco <= banda[1]:\n ip = classes[1]\n\nelif endereco <= banda[2]:\n ip = classes[2]\n\nelif endereco <= banda[3]:\n ip = classes[3]\n\nelse:\n ip = classes[4]\n\nprint(f'A classe do IP informado e: {ip}')\n\n\n","sub_path":"Lista 02/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"270672627","text":"class Student:\r\n def __init__(self):\r\n self.name = ''\r\n self.roll = 0\r\n self.marks = 0\r\n print('Student 
Constructor')\r\n\r\n def getStudentData(self,name,roll,marks):\r\n self.name = name\r\n self.roll = roll\r\n self.marks = marks\r\n\r\n def displayS(self):\r\n print('Student Name: ',self.name)\r\n print('Roll No: ', self.roll)\r\n print('Marks: ', self.marks)\r\n\r\nclass Placements(Student):\r\n def __init__(self):\r\n #super().__init__() #Do without & with\r\n self.company = ''\r\n self.package = ''\r\n print('Placement Constructor')\r\n\r\n def getPlacementData(self,company,package):\r\n self.company = company\r\n self.package = package\r\n\r\n def displayP(self):\r\n print('Company: ',self.company)\r\n print('Package: ', self.package)\r\n\r\nP = Placements()\r\nP.getStudentData('abc',101,78.25)\r\nP.getPlacementData('Dell',48000)\r\nP.displayS()\r\nP.displayP()\r\n\r\n\r\n\r\n\r\n","sub_path":"14. OOPS/112.2.Inheritance.py","file_name":"112.2.Inheritance.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"524133835","text":"import numpy as np\n\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom alexnet import AlexNet\nfrom caffe_classes import class_names\nimport tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimagenet_mean=np.array([104.,117.,124.],dtype=np.float32)\ncurrent=os.getcwd()\nprint(current)\nimage_dir=os.path.join(current,'images')\nimg_files=[os.path.join(image_dir,f)for f in os.listdir(image_dir) if f.endswith('.jpeg')]\nimgs=[]\nfor f in img_files:\n imgs.append(cv2.imread(f))\nfig=plt.figure(figsize=(15,6))\n# for i,img in enumerate(imgs):\n# fig.add_subplot(1,3,i+1)\n# # plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2GRB))\n# plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n# plt.axis('off')\n# plt.show()\n\nx=tf.placeholder(tf.float32,[1,227,227,3])\nkeep_prob=tf.placeholder(tf.float32)\nmodel=AlexNet(x,keep_prob,1000,[])\nscore=model.fc8\nsoftmax=tf.nn.softmax(score)\nconfig = 
tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.allocator_type = 'BFC'\nwith tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n model.load_initial_weights(sess)\n fig2=plt.figure(figsize=(15,6))\n for i,image in enumerate(imgs):\n # cv2.imshow(str(i),image)\n img=cv2.resize(image.astype(np.float32),(227,227))\n img-=imagenet_mean\n img=img.reshape((1,227,227,3))\n probs=sess.run(softmax,feed_dict={x:img,keep_prob:1})\n class_name=class_names[np.argmax(probs)]\n maxVal=probs.max()\n cv2.imwrite('./output/'+class_name+'_'+str(maxVal)+'.jpeg',image)","sub_path":"val.py","file_name":"val.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"189743524","text":"H = {}\n\n\ndef knapsack(W, items):\n if len(items) == 0:\n return 0\n if (W, len(items)) in H:\n return H[(W, len(items))]\n if items[0]['w'] > W:\n H[(W, len(items))] = knapsack(W, items[1:])\n else:\n H[(W, len(items))] = max(knapsack(W, items[1:]), knapsack(W - items[0]['w'], items[1:]) + items[0]['v'])\n return H[(W, len(items))]\n\nH={}\ndef step(array):\n length=len(array)\n if length in H:\n return H[length]\n if length>2:\n H[length]=max(array[0]+step(array[1:]),1+array[1]+step(array[2:]),1+array[2]+step(array[3:]))\n return H[i]\n if length>0:\n return 1\n else:\n return 0\n\nif __name__ == '__main__':\n print(knapsack(50, [{'v': 100, 'w': 20}, {'v': 60, 'w': 10}, {'v': 120, 'w': 30}]))\n","sub_path":"dynamic_programming/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"229417612","text":"# Import libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import date\nfrom sklearn.linear_model import 
LinearRegression as lm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import metrics\n\n# Read in the dataset\ndataset = pd.read_csv(r'../Melbourne_housing_FULL.csv', skip_blank_lines=True)\ndataset[\"Date\"] = pd.to_datetime(dataset[\"Date\"],dayfirst=True)\n\n# Initialize arrays\ndataset_dr = dataset.dropna().sort_values(\"Date\")\ndataset_dr = dataset_dr\nfull_Data = []\n\n#How many days since start\ndays_since_start = [(x - dataset_dr[\"Date\"].min()).days for x in dataset_dr[\"Date\"]]\ndataset_dr[\"Days\"] = days_since_start\n\nsuburb_dummies = pd.get_dummies(dataset_dr[[\"Type\", \"Method\"]])\n\nfull_Data = dataset_dr.drop([\"Address\", \"Price\", \"Date\", \"SellerG\", \"Suburb\", \"Type\", \"Method\", \"CouncilArea\", \"Regionname\"], axis=1).join(suburb_dummies)\n\nX = full_Data\ny = dataset_dr[\"Price\"]\n\n# Split into test data and training data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Train the algorithm\nregressor = lm()\nregressor.fit(X_train, y_train)\nprint(\"Intercept: {}\" .format(regressor.intercept_))\ncoeff_df = pd.DataFrame(regressor.coef_,X.columns,columns=['Coefficient'])\nranked_suburbs = coeff_df.sort_values(\"Coefficient\", ascending = False)\nprint(ranked_suburbs)\n\n# Calculate linear predictions\ny_pred = regressor.predict(X_test)\n\n# Metrics\nprint('MSE:', metrics.mean_squared_error(y_test, y_pred))\nprint(\"MAE:\", metrics.mean_absolute_error(y_test, y_pred))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n\n# Plot\n\nplt.scatter(y_test, y_pred)\nplt.ylim([200000,1000000])\nplt.xlim([200000,1000000])\n\nsns.displot((y_test-y_pred), bins=50, kde=True)\nplt.show()\n","sub_path":"TrainAndTest/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"599887821","text":"\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport re\n\n\nurl='https://leetcode.com/playground/VdgKnqNb/shared'\nres=requests.get(url)\nprint(res.text)\nprint(res.status_code)\n\nsp=bs(res.text,'lxml')\nprett=sp.prettify()\nprint(prett)\n\nfp=open('code.txt','w')\nfp.writelines(prett)\nfp.close()","sub_path":"python/za/ariticle/123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"642924650","text":"def solution(A):\n # write your code in Python 2.7\n seen = [False] * (len(A) + 2)\n for a in A:\n if a > 0 and a <= len(A):\n seen[a] = True\n for i in range(1, len(seen)):\n if not seen[i]:\n return i\n\n\nfor item in (\n [1],\n):\n print(solution(item))\n","sub_path":"python/lesson4/missing_int.py","file_name":"missing_int.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"84957099","text":"######################################################################################################\n#\n# Organization: Asociacion De Investigacion En Inteligencia Artificial Para La Leucemia Peter Moss\n# Project: UP2 NCS1 Realsense F200 Facial Recognition Security System\n#\n# Author: Adam Milton-Barker (AdamMiltonBarker.com)\n#\n# Title: CamRead Class\n# Description: Reads frames from an F200 camera and streams them to a socket stream.\n# License: MIT License\n# Last Modified: 2020-09-29\n#\n######################################################################################################\n\nimport base64\nimport cv2\nimport dlib\nimport os\nimport sys\nimport time\n\nimport numpy as np\n\nfrom datetime import datetime\nfrom imutils import face_utils\nfrom threading import Thread\n\nfrom Classes.Helpers import Helpers\nfrom Classes.iotJumpWay import Device as iot\nfrom Classes.TassAI import TassAI\n\nimport 
pyrealsense as pyrs\nfrom pyrealsense.constants import rs_option\n\nclass CamRead(Thread):\n\t\"\"\" CamRead Class\n\n\tReads frames from a Realsense F200 camera and streams them\n\tto a socket stream.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" Initializes the class. \"\"\"\n\n\t\tself.Helpers = Helpers(\"CamRead\")\n\t\tsuper(CamRead, self).__init__()\n\n\t\tself.Helpers.logger.info(\"CamRead class initialized.\")\n\n\tdef run(self):\n\t\t\"\"\" Runs the module. \"\"\"\n\n\t\tself.identified = 0\n\n\t\t# Starts the TassAI module\n\t\tself.TassAI = TassAI()\n\t\tself.TassAI.cv()\n\t\tself.TassAI.ncs()\n\n\t\t# Starts the socket server\n\t\tsoc = self.Sockets.connect(self.Helpers.confs[\"Socket\"][\"host\"], self.Helpers.confs[\"Socket\"][\"port\"])\n\n\t\tfps = \"\"\n\t\tframecount = 0\n\t\tcount = 0\n\t\ttime1 = 0\n\t\ttime2 = 0\n\n\t\tself.publishes = [None] * (len(self.TassAI.NCS1.encoded) + 1)\n\n\t\twith pyrs.Service() as serv:\n\t\t\twith serv.Device() as dev:\n\n\t\t\t\tdev.apply_ivcam_preset(0)\n\n\t\t\t\twhile True:\n\t\t\t\t\tt1 = time.perf_counter()\n\n\t\t\t\t\tdev.wait_for_frames()\n\t\t\t\t\tframe = dev.color\n\t\t\t\t\tframe = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n\t\t\t\t\t# Processes the frame\n\t\t\t\t\traw, frame = self.TassAI.NCS1.prepareImg(frame)\n\t\t\t\t\twidth = frame.shape[1]\n\n\t\t\t\t\t# Gets faces and coordinates\n\t\t\t\t\tfaces, coords = self.TassAI.NCS1.faces(frame)\n\n\t\t\t\t\t# Writes header to frame\n\t\t\t\t\tcv2.putText(frame, \"TassAI\", (10, 30), self.TassAI.font,\n\t\t\t\t\t\t\t\t\t\t0.7, self.TassAI.color, 2, cv2.LINE_AA)\n\n\t\t\t\t\t# Writes date to frame\n\t\t\t\t\tcv2.putText(frame, str(datetime.now()), (10, 50),\n\t\t\t\t\t\t\t\t\t\t\tself.TassAI.font, 0.5, self.TassAI.color, 2, cv2.LINE_AA)\n\n\t\t\t\t\tif len(coords):\n\t\t\t\t\t\ti = 0\n\t\t\t\t\t\tmesg = \"\"\n\t\t\t\t\t\t# Loops through coordinates\n\t\t\t\t\t\tfor (i, face) in enumerate(coords):\n\n\t\t\t\t\t\t\t# Gets facial landmarks 
coordinates\n\t\t\t\t\t\t\tcoordsi = face_utils.shape_to_np(face)\n\t\t\t\t\t\t\t# Looks for matches/intruders\n\t\t\t\t\t\t\tknown, distance = self.TassAI.NCS1.match(raw, faces[i])\n\n\t\t\t\t\t\t\tif known:\n\t\t\t\t\t\t\t\tmesg = \"TassAI identified User #\" + str(known)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmesg = \"TassAI identified intruder\"\n\n\t\t\t\t\t\t\t# If iotJumpWay publish for user is in past\n\t\t\t\t\t\t\tif (self.publishes[int(known)] is None or (self.publishes[int(known)] + (1 * 20)) < time.time()):\n\t\t\t\t\t\t\t\t# Update publish time for user\n\t\t\t\t\t\t\t\tself.publishes[int(known)] = time.time()\n\n\t\t\t\t\t\t\t\t# Send iotJumpWay notification\n\t\t\t\t\t\t\t\tself.iot.channelPub(\"Sensors\", {\n\t\t\t\t\t\t\t\t\t\"Type\": \"TassAI\",\n\t\t\t\t\t\t\t\t\t\"Sensor\": \"F200 Camera\",\n\t\t\t\t\t\t\t\t\t\"Value\": known,\n\t\t\t\t\t\t\t\t\t\"Message\": mesg\n\t\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\t\t# Send iotJumpWay notification\n\t\t\t\t\t\t\t\tself.iot.channelPub(\"Cameras\", {\n\t\t\t\t\t\t\t\t\t\"Type\": \"TassAI\",\n\t\t\t\t\t\t\t\t\t\"Sensor\": \"F200 Camera\",\n\t\t\t\t\t\t\t\t\t\"Value\": known,\n\t\t\t\t\t\t\t\t\t\"Message\": mesg\n\t\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\t(x, y, w, h) = self.TassAI.NCS1.bounding_box(faces[i])\n\t\t\t\t\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)\n\n\t\t\t\t\t\t\tcx = int(round(+(w/2)))\n\t\t\t\t\t\t\tcy = int(round(y+(h/2)))\n\n\t\t\t\t\t\t\tcv2.putText(frame, \"User ID#\"+str(known), (x, y - 5),\n\t\t\t\t\t\t\t\t\t\tcv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\n\n\t\t\t\t\t\t\tdistance = dev.depth[cy][cx]/1000.0\n\t\t\t\t\t\t\tif(distance != 0.0):\n\t\t\t\t\t\t\t\tcv2.putText(frame, str(distance) + \"cm\", (x + (w - 20), y - 5),\n\t\t\t\t\t\t\t\t\t\tcv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\n\n\n\t\t\t\t\t\t\t# Draws facial landmarks\n\t\t\t\t\t\t\tfor (x, y) in coordsi:\n\t\t\t\t\t\t\t\tcv2.circle(frame, (x, y), 2, (0, 255, 0), -1)\n\t\t\t\t\t\t\t# Adds user name to 
frame\n\t\t\t\t\t\t\ti += 1\n\n\t\t\t\t\tcv2.putText(frame, fps, (width-170, 30), cv2.FONT_HERSHEY_SIMPLEX,\n\t\t\t\t\t\t\t\t0.5, self.TassAI.color, 1, cv2.LINE_AA)\n\n\t\t\t\t\td = dev.depth * dev.depth_scale * 1000\n\t\t\t\t\td = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_RAINBOW)\n\n\t\t\t\t\tcd = np.concatenate((frame, d), axis=1)\n\n\t\t\t\t\t# Streams the modified frame to the socket server\n\t\t\t\t\tencoded, buffer = cv2.imencode('.jpg', cd)\n\t\t\t\t\tsoc.send(base64.b64encode(buffer))\n\n\t\t\t\t\t# FPS calculation\n\t\t\t\t\tframecount += 1\n\t\t\t\t\tif framecount >= 15:\n\t\t\t\t\t\tfps = \"Stream: {:.1f} FPS\".format(time1/15)\n\t\t\t\t\t\tframecount = 0\n\t\t\t\t\t\ttime1 = 0\n\t\t\t\t\t\ttime2 = 0\n\t\t\t\t\tt2 = time.perf_counter()\n\t\t\t\t\telapsedTime = t2-t1\n\t\t\t\t\ttime1 += 1/elapsedTime\n\t\t\t\t\ttime2 += elapsedTime\n\t\t\t\t\ttime.sleep(0.05)\n","sub_path":"UP2/NCS1/Realsense/F200/Classes/CamRead.py","file_name":"CamRead.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"579837309","text":"import numpy as np\nimport glob\nimport threading\nimport time\nimport cv2\n\nread_files = set([])\nsoup_rad = 30 #px\ndt = np.dtype([('w', np.intc),\n ('h', np.intc),\n ('low', np.intc),\n ('high', np.intc),\n ('int_temp', np.intc),\n ('pad', np.intc),\n ('time', 'd'),\n ('img', np.uint16, (160 * 120,))\n ])\n\n\ndef conv_celsius(temps):\n r = 395653\n b = 1428\n f = 1\n o = 156\n t_k = b / np.log(r / (temps - o) + f)\n return t_k - 273.15\n\n\n# Creates a circle mask with some radius and center.\ndef cmask(center, radius, array_like):\n a, b = center\n nx, ny = array_like.shape\n y, x = np.ogrid[-a:nx-a,-b:ny-b]\n mask = x*x + y*y <= radius*radius\n return mask\n\n\nclass ObjectStorer(object):\n def __init__(self, init):\n self.value = init\n self.time = 0\n\n def store(self, init):\n self.value = init\n\n def storet(self, t):\n self.time = 
t\n\n\n# Grabs thermal image frames from the .dat files.\ndef grab_frames(last_time):\n frame_list = np.array([]).reshape(0,19200)\n times_list = np.array([])\n dat_files = set( glob.glob('*.dat') )\n to_read = dat_files - read_files\n for fname in to_read:\n a = np.fromfile(fname, dtype=dt)\n ts = a['time']\n imgs = a['img']\n\n mask = ts>=last_time\n frame_list = np.concatenate((frame_list, imgs[mask]), axis=0)\n times_list = np.concatenate((times_list, ts[mask]))\n\n if ts[mask].shape[0] == 0 and fname != 'thermal.dat':\n read_files.add(fname)\n\n sort_order = np.argsort(times_list, axis=0)\n return frame_list[sort_order], times_list[sort_order]\n\n\ndef get_lapl(img):\n lapl = cv2.Laplacian(img, cv2.CV_64F)\n r, c = img.shape\n cm = cmask((r/2, c/2), soup_rad, img)\n lapl[cm] = np.nan\n # lapl[np.abs(lapl) > 7] = 0\n m = np.nanmean(lapl)\n lapl[cm] = m\n return lapl\n\n\n# Checks the a set of images for splatter.\n# Currently checks the target and previous frame.\n# Takes the Laplacian of both images, and if there is any significant increase greater than 10(?), it marks a splatter.\n# I don't actually know the units, just guessing with this magic number right now.\ndef check_splatter(images):\n assert images.shape[0] == 2\n\n # Strip the first few cols b/c theres some distortions.\n i1 = images[1].reshape(120,160)[5:-5]\n i2 = images[0].reshape(120,160)[5:-5]\n\n l1 = get_lapl(i1)\n l2 = get_lapl(i2)\n # diff = to_check[1] - 0.5 * to_check[0] - 0.5 * to_check[2]\n diff = l2 - l1\n\n score = np.amax(diff) - np.mean(diff)\n\n # if score > 10:\n # print('DEBUG')\n # print(cm.shape)\n # print(images.shape)\n # print(score)\n # print('END DEBUG')\n\n return score > 5\n\n\n# Main thread loop. 
Grabs the latest few frames, and checks them for splatter.\n# When it finds splatter, call on_splatter().\n# Repeat.\ndef thread_loop(latest_img, on_splatter):\n last_time = 0\n while True:\n frames, times = grab_frames(last_time)\n if len(frames) > 0:\n last = np.rot90(conv_celsius(frames[-1].reshape(120,160)))\n latest_img.store(last)\n latest_img.storet(times[-1])\n if len(frames) >= 3:\n frames = conv_celsius(frames)\n for i in range(1, len(frames) - 1):\n if check_splatter(frames[i - 1:i+1]):\n on_splatter(times[i])\n last_time = times[-2]\n\n\ndef start_thread(on_splatter=None):\n latest_img = ObjectStorer( np.zeros((120*160)) )\n if on_splatter:\n th = threading.Thread(target=thread_loop, args=[latest_img, on_splatter])\n else:\n th = threading.Thread(target=thread_loop, args=[latest_img, lambda x: print('Splatter at t={}'.format(x))])\n th.setDaemon(True)\n th.start()\n return latest_img\n\nif __name__ == '__main__':\n img = start_thread()\n time.sleep(3)\n print('Python time is ', time.time())\n","sub_path":"microwave/Pi-side/therm_frame_grabber.py","file_name":"therm_frame_grabber.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597749108","text":"class Solution(object):\n def convert(self, s, numRows):\n \"\"\"\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n if numRows == 1:\n return s\n \n x,y = 0,0\n makeTurn = False\n max_step = numRows -1\n cur_step = 0\n dict = {}\n ans = []\n\n\n \n for i,c in enumerate(s):\n \n dict[str(y)+\"-\"+str(x)] = c\n if cur_step == max_step:\n makeTurn = not makeTurn\n cur_step = 1\n else:\n cur_step += 1\n \n if makeTurn:\n x,y = x+1, y-1\n else:\n x,y = x, y+1\n\n for i in range(numRows):\n for j in range(x+1):\n dict_key = str(i) + \"-\" + str(j)\n if dict_key in dict:\n ans.append(dict[dict_key])\n\n return 
\"\".join(ans)\n\n\n\nprint(Solution().convert(\"AB\",1))","sub_path":"0006_zigzag_conversion/0006_zigzag_conversion.py","file_name":"0006_zigzag_conversion.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"124241010","text":"from time import sleep\n\nfrom selenium.webdriver import ActionChains\n\nfrom day_test08.Base import Base\n\n\nclass TestAlter(Base):\n def test_alter(self):\n self.driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')\n self.driver.switch_to_frame(\"iframeResult\")\n drag=self.driver.find_element_by_id('draggable')\n drop=self.driver.find_element_by_id('droppable')\n action=ActionChains(self.driver)\n action.drag_and_drop(drag,drop).perform()\n sleep(3)\n print(\"点击 alter 确认\")\n self.driver.switch_to.alert.accept()\n self.driver.switch_to.default_content()\n self.driver.find_element_by_id('submitBTN').click()\n\n sleep(4)","sub_path":"day_test08/test_alter.py","file_name":"test_alter.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574023987","text":"#!/usr/env python\n\n# version 0.9\n# developed by Artur Zych\n# 06.2016, updated 10.2016\n\nimport time\nimport sys\nimport pyodbc\nimport os\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n\nMAIN_WINDOW_TITLE = \"CBRE Time Tracker\"\nCOMPANY = 'CBRE'\nTOOL_NAME = 'Time Tracker'\nACTIVITY_SELECTION_CAPTION = 'Select an activity:'\n\nMENU_BUTTONS_STYLE = 'QPushButton {background-color: rgba(0, 125, 0, 0); color: #ffffff;}'\nMAIN_WINDOW_LABEL_STYLE = 'QLabel {color: #ffffff;}'\nSTOP_BUTTON_STYLE = 'QPushButton {border-style: outset; border-width: 1px; background-color: #860927;' \\\n 'border-color: #fdfdfd; color: rgb(252, 252, 252);}'\nSTART_BUTTON_STYLE = 'QPushButton {border-style: outset; border-width: 1px; background-color: 
#50c752;' \\\n 'border-color: #fdfdfd; color: rgb(252, 252, 252);}'\nINACTIVE_STYLE = 'QPushButton {border-style: outset; border-width: 1px; background-color: #959596;' \\\n 'border-color: #fdfdfd; color: rgb(252, 252, 252);}'\nACTIVITIES_STYLE = 'QComboBox {border-style: outset; border-width: 1px; ' \\\n 'border-color: rgb(80, 199, 82); color: rgb(80, 199, 82);}'\n\nBG = 'bg2.jpg'\nICO = '32x32.ico'\n\nSTART_TIME = ''\nSTOP_TIME = ''\nDATE = ''\n\nDB_PATH = r'''Y:\\12 Analyst\\HACKATHON\\MySQL2.accdb'''\nDB_CONN_STRING = \"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)}; \" \\\n \"DBQ=%s; UID=%s;, autocommit=True\" % (DB_PATH, 'azych')\n\nGET_GROUP_ID = 'SELECT * FROM Users WHERE Name_Users=?'\nGET_ACTIVITY_ID = 'SELECT ID_Activities FROM Activities WHERE Name_Activities=?'\nGET_USER_ID = 'SELECT ID_Users FROM Users WHERE Name_Users=?'\nGET_CLIENT_NAMES = 'SELECT Name_Clients FROM Clients WHERE ID_Users=?'\nGET_ACTIVITIES = 'SELECT Name_Activities FROM Activities WHERE ID_Groups=?'\nGET_PROJECT_ID = 'SELECT ID_Projects FROM Project WHERE Name_Projects=?'\nGET_CLIENT_ID = 'SELECT ID_Clients FROM Clients WHERE Name_Clients=?'\n\n\ndef get_sql_single_data(key_value, sql_query):\n db_connection = pyodbc.connect(DB_CONN_STRING)\n db_cursor = db_connection.cursor()\n db_cursor.execute(sql_query, key_value)\n return_value = db_cursor.fetchall()[0][0]\n db_connection.close()\n return return_value\n\n\ndef get_sql_list_data(key_value, sql_query):\n db_connection = pyodbc.connect(DB_CONN_STRING)\n db_cursor = db_connection.cursor()\n db_cursor.execute(sql_query, key_value)\n return_list = [i[0] for i in db_cursor.fetchall()]\n db_connection.close()\n return return_list\n\n\nsaved_flag = True\n\n\nclass WindowLabel(QLabel):\n def __init__(self, window, text, style, fontsize, posX, posY, bold=False, act_label=False):\n super().__init__(text, window)\n self.setStyleSheet(style)\n if act_label:\n self.setMinimumSize(QSize(500, 20))\n if bold:\n self.setFont(QFont(\"\", 
fontsize, QFont.Bold, True))\n else:\n self.setFont(QFont(\"\", fontsize, True))\n self.move(posX, posY)\n\n\nclass WindowButton(QPushButton):\n def __init__(self, window, text, style, qsizeA, qsizeB, posX, posY, func, flat=False, fontsize=0):\n super().__init__(\"\", window)\n self.setText(text)\n self.setStyleSheet(style)\n self.resize(QSize(qsizeA, qsizeB))\n self.move(posX, posY)\n self.clicked.connect(func)\n if flat:\n self.setFlat(True)\n if fontsize:\n self.setFont(QFont(\"\", fontsize, QFont.Bold, True))\n\n\nclass Activities_list(QComboBox):\n def __init__(self, window, list, posX, posY, width):\n super().__init__(window)\n self.addItems(list)\n self.move(posX, posY)\n self.setFixedWidth(width)\n\n\nclass Activity():\n def __init__(self, start_time='', activity_name='', username='', end_time='', days_date='',\n client='', project_name=''):\n self.start_time = start_time\n self.end_time = end_time\n self.activity_name = activity_name\n self.username = username\n self.user_id = 0.0\n self.group_id = 0.0\n self.date = days_date\n self.full_start_date = ''\n self.full_end_date = ''\n self.project = ''\n self.client_id = 0.0\n self.project_name = project_name\n self.client = client\n\n def save(self):\n self.end_time = time.strftime(\"%H\" + \":\" + \"%M\" + \":\" + \"%S\")\n self.date = time.strftime('%d/%m/%Y')\n self.user_id = float(get_sql_single_data(self.username, GET_USER_ID))\n self.activity_name = float(get_sql_single_data(self.activity_name, GET_ACTIVITY_ID))\n self.group_id = float(get_sql_single_data(self.username, GET_GROUP_ID))\n self.full_start_date = str(self.date + ' ' + self.start_time)\n self.full_end_date = str(self.date + ' ' + self.end_time)\n self.project = str(get_sql_single_data(self.project_name, GET_PROJECT_ID))\n self.client_id = float(get_sql_single_data(self.client, GET_CLIENT_ID))\n\n db_connection = pyodbc.connect(DB_CONN_STRING)\n db_cursor = db_connection.cursor()\n db_cursor.execute('INSERT into User_Activities (ID_Users, 
ID_Activities, Start_UA, End_UA, '\n 'ID_Clients, Comments_UA, ID_Group, Project) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n (self.user_id, self.activity_name, self.full_start_date, self.full_end_date,\n self.client_id, '', self.group_id, self.project))\n db_connection.commit()\n db_connection.close()\n\n\nclass ClockR(object):\n def __init__(self, label, totalTime):\n self.label = label\n self.totalTime = totalTime\n self.timer = QTimer(interval=1000)\n self.timer.timeout.connect(self.update_timer)\n\n def update_timer(self):\n self.totalTime += 1\n self.count()\n\n def start(self):\n self.timer.start()\n\n def count(self):\n self.label.setStyleSheet(MAIN_WINDOW_LABEL_STYLE)\n self.label.setText(time.strftime(\"%H\" + \":\" + \"%M\" + \":\" + \"%S\"))\n\n\ndef main():\n\n new_activity = Activity()\n\n\n def save_and_quit():\n global new_activity\n global saved_flag\n app_instance.quit()\n # if saved_flag:\n # app_instance.quit()\n # else:\n # new_activity.save()\n # app_instance.quit()\n\n\n def start_activity():\n global new_activity\n global saved_flag\n new_activity = Activity(start_time=time.strftime(\"%H\" + \":\" + \"%M\" + \":\" + \"%S\"),\n activity_name=activities_list.currentText(),\n username=os.getlogin(),\n client=clients_list.currentText(),\n project_name=project_list.currentText())\n start_button.setEnabled(False)\n start_button.setStyleSheet(INACTIVE_STYLE)\n stop_button.setEnabled(True)\n stop_button.setStyleSheet(STOP_BUTTON_STYLE)\n saved_flag = False\n current_activity_label.move(18, 245)\n current_activity_label.setText('\\tCurrent: {} | {} | '\n '{} started: {}'.format(\n new_activity.client, new_activity.project_name,\n new_activity.activity_name, new_activity.start_time))\n\n\n def stop_activity():\n global new_activity\n start_button.setEnabled(True)\n start_button.setStyleSheet(START_BUTTON_STYLE)\n stop_button.setEnabled(False)\n stop_button.setStyleSheet(INACTIVE_STYLE)\n new_activity.save()\n saved_flag = True\n 
current_activity_label.move(100, 245)\n current_activity_label.setText('Current: {0} | {0} | {0} started: {0}'.format('N/A'))\n\n\n group_id = str(get_sql_single_data(os.getlogin().lower(), GET_GROUP_ID))\n user_id = float(get_sql_single_data(os.getlogin().lower(), GET_USER_ID))\n data = get_sql_list_data(group_id, GET_ACTIVITIES)\n client_names = (get_sql_list_data(user_id, GET_CLIENT_NAMES))\n projects = ['proj ' + str(i) for i in range(1, 16)]\n\n\n app = QApplication(sys.argv)\n app_instance = QCoreApplication.instance()\n main_window = QWidget()\n palette = QPalette()\n\n\n palette.setBrush(QPalette.Background, QBrush(QPixmap(BG)))\n main_window.setPalette(palette)\n main_window.setWindowIcon(QIcon(ICO))\n main_window.setFixedSize(430, 350)\n main_window.setWindowTitle(MAIN_WINDOW_TITLE)\n main_window.setWindowFlags(Qt.FramelessWindowHint)\n main_window.manual_window = None\n\n\n label_company = WindowLabel(main_window, COMPANY, MAIN_WINDOW_LABEL_STYLE, 28, 150, 5, True)\n label_tool_name = WindowLabel(main_window, TOOL_NAME, MAIN_WINDOW_LABEL_STYLE, 20, 127, 45)\n client_label = WindowLabel(main_window, 'Client: ', MAIN_WINDOW_LABEL_STYLE, 10, 90, 144, False)\n proj_label = WindowLabel(main_window, 'Project: ', MAIN_WINDOW_LABEL_STYLE, 10, 90, 168, False)\n activity_label = WindowLabel(main_window, 'Activity: ', MAIN_WINDOW_LABEL_STYLE, 10, 90, 192, False)\n quit_button = WindowButton(main_window, '[X]', MENU_BUTTONS_STYLE,\n 18, 17, 2, 0, save_and_quit, True)\n minimize_button = WindowButton(main_window, '[_]', MENU_BUTTONS_STYLE,\n 18, 17, 412, 0, main_window.showMinimized, True)\n start_button = WindowButton(main_window, 'START', START_BUTTON_STYLE, 14, 30, 20, 285, start_activity, True)\n start_button.setFixedSize(120, 35)\n stop_button = WindowButton(main_window, 'STOP', STOP_BUTTON_STYLE, 14, 30, 290, 285, stop_activity, True)\n stop_button.setFixedSize(120, 35)\n stop_button.setEnabled(False)\n stop_button.setStyleSheet(INACTIVE_STYLE)\n 
activities_list = Activities_list(main_window, data, 20, 180, 120)\n activities_list.move(142, 190)\n clients_list = Activities_list(main_window, client_names, 20, 180, 120)\n clients_list.move(142, 142)\n project_list = Activities_list(main_window, projects, 20, 180, 120)\n project_list.move(142, 166)\n user_label = WindowLabel(main_window, '', MAIN_WINDOW_LABEL_STYLE, 10, 164, 222, act_label=True)\n user_label.setText('User: {}'.format(os.getlogin()))\n current_activity_label = WindowLabel(main_window, '', MAIN_WINDOW_LABEL_STYLE, 10, 100, 245, act_label=True)\n current_activity_label.setText('Current: {0} | {0} | {0} started: {0}'.format('N/A'))\n clock_label = WindowLabel(main_window, '', MAIN_WINDOW_LABEL_STYLE, 20, 150, 90)\n clock_label.setFixedWidth(150)\n\n\n rtm_clock = ClockR(clock_label, 0)\n rtm_clock.start()\n\n\n main_window.show()\n sys.exit(app.exec_())\n\n\nmain()\n","sub_path":"PyQt_doodles/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":10622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"422756099","text":"from time import sleep\n\nfrom robot.moves import *\n\n\nGPIO.setmode(GPIO.BCM)\nmotor1a = 4\nmotor1b = 17\n\nmotor2a = 27\nmotor2b = 22\n\nmotor3a = 24\nmotor3b = 26\n\nmotor4a = 5\nmotor4b = 6\n\nenable_1 = 13\nenable_2 = 19\n\nGPIO.setup(motor1a, GPIO.OUT)\nGPIO.setup(motor1b, GPIO.OUT)\nGPIO.setup(motor2a, GPIO.OUT)\nGPIO.setup(motor2b, GPIO.OUT)\nGPIO.setup(enable_1, GPIO.OUT)\nGPIO.setup(enable_2, GPIO.OUT)\n\nenable_motors(enable_1)\nenable_motors(enable_2)\n\n\nprint(\"up\")\nmove_up(motor1a, motor1b, motor2a, motor2b, motor3a, motor3b, motor4a, motor4b)\nsleep(3)\nprint(\"down\")\nmove_down(motor1a, motor1b, motor2a, motor2b, motor3a, motor3b, motor4a, motor4b)\nsleep(3)\nprint(\"right\")\nmove_right(motor1a, motor1b, motor2a, motor2b, motor3a, motor3b, motor4a, motor4b)\nsleep(3)\nprint(\"left\")\nmove_left(motor1a, motor1b, motor2a, motor2b, motor3a, 
motor3b, motor4a, motor4b)\n\nsleep(3)\nprint(\"stop\")\nstop_motor(motor1a, motor1b, motor2a, motor2b, motor3a, motor3b, motor4a, motor4b)\n","sub_path":"src/safa.py","file_name":"safa.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"37662847","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 21:10:43 2018\n\n@author: dell\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure,output_notebook,show\n\n#所有特征之间的pearson相关系数,用热点图表示\ndef draw_all_features():\n data = pd.read_csv('result.csv')\n pearson = data.corr()\n sns.heatmap(pearson)\n plt.show()\n return\n\n#不同类型的方位与duration之间的关系\ndef show_bearing():\n data = pd.read_csv('result.csv')\n sns.regplot(x = 'bearing', y = 'trip_duration', data = data)\n plt.show()\n \n sns.regplot(x = 'bearing_pick_cent_p', y = 'trip_duration', data = data)\n plt.show()\n sns.regplot(x = 'bearing_drop_cent_p', y = 'trip_duration', data = data)\n plt.show()\n sns.regplot(x = 'bearing_cent_p_cent_d', y = 'trip_duration', data = data)\n plt.show()\n return\n\n#label_pick,label_drop与duration的关系\ndef show_label():\n data = pd.read_csv('result.csv')\n label_pick = pd.DataFrame(data.groupby('label_pick')['trip_duration'].mean())\n label_pick.reset_index(inplace = True)\n label_drop = pd.DataFrame(data.groupby('label_drop')['trip_duration'].mean())\n label_drop.reset_index(inplace = True)\n \n label_pick['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('label_pick')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n label_pick['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('label_drop')\n plt.ylabel('avg_trip_duration')\n plt.show()\n\ndef show_centroid():\n data = pd.read_csv('result.csv')\n centroid_pick_long = pd.DataFrame(data.groupby('centroid_pick_long')['trip_duration'].mean())\n 
centroid_pick_long.reset_index(inplace = True)\n centroid_pick_lat = pd.DataFrame(data.groupby('centroid_pick_lat')['trip_duration'].mean())\n centroid_pick_lat.reset_index(inplace = True)\n centroid_drop_long = pd.DataFrame(data.groupby('centroid_drop_long')['trip_duration'].mean())\n centroid_drop_long.reset_index(inplace = True)\n centroid_drop_lat = pd.DataFrame(data.groupby('centroid_drop_lat')['trip_duration'].mean())\n centroid_drop_lat.reset_index(inplace = True)\n \n centroid_pick_long['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_pick_long')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n centroid_pick_lat['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_pick_lat')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n centroid_drop_long['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_drop_long')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n centroid_drop_lat['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_drop_lat')\n plt.ylabel('avg_trip_duration')\n plt.show()\n return\n\ndef show_hvsine_distance():\n train_cl = pd.read_csv('result.csv')\n \n hvsine_pick_cent_p = train_cl.loc[(train_cl.hvsine_pick_cent_p < 60)]\n sns.regplot(x = 'hvsine_pick_cent_p', y = 'trip_duration', data = hvsine_pick_cent_p)\n plt.show()\n hvsine_drop_cent_d = train_cl.loc[(train_cl.hvsine_drop_cent_d < 100)]\n sns.regplot(x = 'hvsine_drop_cent_d', y = 'trip_duration', data = hvsine_drop_cent_d)\n plt.show()\n hvsine_cent_p_cent_d = train_cl.loc[(train_cl.hvsine_cent_p_cent_d < 50)]\n sns.regplot(x = 'hvsine_cent_p_cent_d', y = 'trip_duration', data = hvsine_cent_p_cent_d)\n plt.show() \n return\n\ndef show_manhtn_distance():\n train_cl = pd.read_csv('result.csv')\n \n manhtn_pick_cent_p = train_cl.loc[(train_cl.manhtn_pick_cent_p < 60)]\n sns.regplot(x = 'manhtn_pick_cent_p', y = 'trip_duration', data = manhtn_pick_cent_p)\n plt.show()\n manhtn_drop_cent_d = 
train_cl.loc[(train_cl.manhtn_drop_cent_d < 100)]\n sns.regplot(x = 'manhtn_drop_cent_d', y = 'trip_duration', data = manhtn_drop_cent_d)\n plt.show()\n manhtn_cent_p_cent_d = train_cl.loc[(train_cl.manhtn_cent_p_cent_d < 50)]\n sns.regplot(x = 'manhtn_cent_p_cent_d', y = 'trip_duration', data = manhtn_cent_p_cent_d)\n plt.show() \n \n \nplt.style.use({'figure.figsize':(12, 8)})\n#show_manhtn_distance()\n ","sub_path":"nyc/JYK/visualizaition.py","file_name":"visualizaition.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324156301","text":"class thod():\r\n\r\n def ad_thod(self):\r\n\r\n print(\"Be ready with your chipher-text!\")\r\n print(\"---------------------------------------------------------\")\r\n print(\"@ NOTE = PLEASE ENTER CAPS LETTERS\")\r\n k = input(\"Enter you chipher-text :\")\r\n print(\"---------------------------------------------------------\")\r\n strplain = \" \"\r\n\r\n for j in range(0, 26):\r\n plain = []\r\n text = \" \"\r\n for i in range(0 , len(k)):\r\n\r\n b = (ord(k[i])-65)\r\n z = (b-j)%26\r\n y = (chr(z + 65))\r\n plain.append(y)\r\n for x in plain:\r\n text += x\r\n\r\n print(f'Encrypted Cipher-text:{text}')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nprint(\"welcome to Cipher Breaking Algorithem\")\r\nad = thod()\r\nprint(\"Are you here to Break the Security ? \\n1.yes \\n2.No\")\r\n\r\np = int(input())\r\n\r\nif(p==1):\r\n ad.ad_thod()\r\nelse:\r\n print(\"Thank-you , you choose to Quit.\")\r\n\r\n\r\n","sub_path":"addchipher.py","file_name":"addchipher.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"240862021","text":"\"\"\"\nThis module contains the Sanic routes, as well as functions necessary to retrieve video files. 
This module also\ncontains functions that will search the file structure for video files, as well as cleanup the DB video records.\n\nAll paths in the DB are relative. A Channel's directory is relative to the video_root_directory. A Video's path (as\nwell as its meta files) is relative to its Channel's directory.\n\n Example:\n Real Paths:\n video_root_directory = '/media/something'\n channel['directory'] = '/media/something/the channel'\n video['video_path'] = '/media/something/the channel/foo.mp4'\n video['poster_path'] = '/media/something/the channel/foo.jpg'\n video['video_path'] = '/media/something/the channel/subdir/bar.mp4'\n\n The same paths in the DB:\n channel['directory'] = 'the channel'\n video['video_path'] = 'foo.mp4'\n video['poster_path'] = 'foo.jpg'\n video['video_path'] = 'subdir/bar.mp4'\n\nRelative DB paths allow files to be moved without having to rebuild the entire collection. It also ensures that when\na file is moved, it will not be duplicated in the DB.\n\"\"\"\nimport asyncio\nimport pathlib\nfrom functools import wraps\nfrom http import HTTPStatus\nfrom uuid import uuid1\n\nfrom dictorm import DictDB\nfrom sanic import Blueprint, response\nfrom sanic.exceptions import abort\nfrom sanic.request import Request\n\nfrom lib.common import sanitize_link, boolean_arg, load_schema, env, attach_websocket_with_queue, get_sanic_url\nfrom lib.db import get_db_context\nfrom lib.plugins.videos.captions import process_captions\nfrom lib.plugins.videos.common import get_conflicting_channels, get_absolute_video_path, UnknownFile\nfrom lib.plugins.videos.downloader import insert_video, update_channels, download_all_missing_videos\nfrom lib.plugins.videos.main import logger\nfrom lib.plugins.videos.schema import downloader_config_schema, channel_schema\nfrom .common import generate_video_paths, save_settings_config, get_downloader_config, \\\n get_absolute_channel_directory, UnknownDirectory\n\nPLUGIN_ROOT = 'videos'\n\n\ndef set_plugins(plugins):\n global 
PLUGIN_ROOT\n PLUGIN_ROOT = plugins\n\n\napi_bp = Blueprint('api_video', url_prefix='/videos')\n\n\n@api_bp.put('/settings')\n@load_schema(downloader_config_schema)\ndef settings(request: Request, data: dict):\n downloader_config = get_downloader_config()\n downloader_config['video_root_directory'] = data['video_root_directory']\n downloader_config['file_name_format'] = data['file_name_format']\n save_settings_config(downloader_config)\n return response.json({'success': 'Settings saved'})\n\n\n@api_bp.get('/channels')\ndef get_channels(request: Request):\n db: DictDB = request.ctx.get_db()\n Channel = db['channel']\n channels = Channel.get_where().order_by('name DESC')\n channels = list(channels)\n return response.json({'channels': channels})\n\n\nrefresh_queue, refresh_event = attach_websocket_with_queue('/feeds/refresh', 1000, api_bp)\n\n\n@api_bp.post('/settings/refresh')\nasync def refresh(_):\n \"\"\"\n Search for videos that have previously been downloaded and stored.\n \"\"\"\n refresh_logger = logger.getChild('refresh')\n\n # Only one refresh can run at a time\n if refresh_event.is_set():\n return response.json({'error': 'Refresh already running'}, HTTPStatus.BAD_REQUEST)\n\n refresh_event.set()\n refresh_queue.put('refresh-started')\n\n async def do_refresh():\n refresh_logger.info('refresh started')\n\n with get_db_context(commit=True) as (db_conn, db):\n for msg in _refresh_videos(db):\n refresh_queue.put(msg)\n\n refresh_queue.put('refresh-complete')\n refresh_logger.info('refresh complete')\n\n refresh_event.clear()\n\n coro = do_refresh()\n asyncio.ensure_future(coro)\n refresh_logger.debug('do_refresh scheduled')\n stream_url = get_sanic_url(scheme='ws', path='/api/videos/feeds/refresh')\n return response.json({'success': 'stream-started', 'stream-url': stream_url})\n\n\ndownload_queue, download_event = attach_websocket_with_queue('/feeds/download', 1000, api_bp)\n\n\n@api_bp.post('/settings/download')\nasync def download(_):\n \"\"\"\n Compare 
previously downloaded videos with newly updated catalogs. If any videos are missing, download them.\n :return:\n \"\"\"\n download_logger = logger.getChild('download')\n\n # Only one download can run at a time\n if download_event.is_set():\n return response.json({'error': 'download already running'}, HTTPStatus.BAD_REQUEST)\n\n download_event.set()\n download_queue.put('download-started')\n\n async def do_download():\n download_logger.info('download started')\n\n with get_db_context(commit=True) as (db_conn, db):\n for msg in update_channels(db_conn, db):\n download_queue.put(msg)\n for msg in download_all_missing_videos(db_conn, db):\n download_queue.put(msg)\n\n download_queue.put('download-complete')\n download_logger.info('download complete')\n\n download_event.clear()\n\n coro = do_download()\n asyncio.ensure_future(coro)\n download_logger.debug('do_download scheduled')\n stream_url = get_sanic_url(scheme='ws', path='/api/videos/feeds/download')\n return response.json({'success': 'stream-started', 'stream-url': stream_url})\n\n\n@api_bp.get('/channel/')\ndef channel_get(request: Request, link: str):\n db: DictDB = request.ctx.get_db()\n Channel = db['channel']\n channel = Channel.get_one(link=link)\n if not channel:\n return response.json({'error': 'Unknown channel'}, HTTPStatus.NOT_FOUND)\n return response.json({'channel': channel})\n\n\n@api_bp.post('/channel')\n@load_schema(channel_schema)\ndef channel_post(request: Request, data: dict):\n \"\"\"Create a new channel\"\"\"\n try:\n data['directory'] = get_absolute_channel_directory(data['directory'])\n except UnknownDirectory:\n return response.json({'error': 'Unknown directory'}, HTTPStatus.BAD_REQUEST)\n\n db: DictDB = request.ctx.get_db()\n Channel = db['channel']\n\n # Verify that the URL/Name/Link aren't taken\n conflicting_channels = get_conflicting_channels(\n db,\n url=data['url'],\n name_=data['name'],\n link=sanitize_link(data['name']),\n )\n if conflicting_channels:\n return 
response.json({'error': 'Channel Name or URL already taken'}, HTTPStatus.BAD_REQUEST)\n\n with db.transaction(commit=True):\n channel = Channel(\n name=data['name'],\n url=data['url'],\n match=data['match_regex'],\n link=sanitize_link(data['name']),\n )\n channel.flush()\n\n return response.json({'success': 'Channel created successfully'}, HTTPStatus.CREATED,\n {'Location': f'/api/videos/channel/{channel[\"link\"]}'})\n\n\n@api_bp.put('/channel/')\n@load_schema(channel_schema)\ndef channel_put(request: Request, link: str, data: dict):\n \"\"\"Update an existing channel\"\"\"\n db: DictDB = request.ctx.get_db()\n Channel = db['channel']\n\n with db.transaction(commit=True):\n existing_channel = Channel.get_one(link=link)\n\n if not existing_channel:\n return response.json({'error': 'Unknown channel'}, 404)\n\n # Only update directory if it was empty\n if data['directory'] and not existing_channel['directory']:\n try:\n data['directory'] = get_absolute_channel_directory(data['directory'])\n except UnknownDirectory:\n return response.json({'error': 'Unknown directory'}, 404)\n else:\n data['directory'] = existing_channel['directory']\n data['directory'] = str(data['directory'])\n\n # Verify that the URL/Name/Link aren't taken\n conflicting_channels = get_conflicting_channels(\n db=db,\n id=existing_channel['id'],\n url=data['url'],\n name_=data['name'],\n link=data['link'],\n directory=data['directory'],\n )\n if list(conflicting_channels):\n return response.json({'error': 'Channel Name or URL already taken'}, 400)\n\n existing_channel['url'] = data['url']\n existing_channel['name'] = data['name']\n existing_channel['directory'] = data['directory']\n existing_channel['match_regex'] = data['match_regex']\n existing_channel.flush()\n\n return response.json({'success': 'The channel was updated successfully.'})\n\n\n@api_bp.delete('/channel/')\ndef channel_delete(request, link: str):\n db: DictDB = request.ctx.get_db()\n Channel = db['channel']\n channel = 
Channel.get_one(link=link)\n if not channel:\n return response.json({'error': 'Unknown channel'}, HTTPStatus.NOT_FOUND)\n with db.transaction(commit=True):\n channel.delete()\n return response.json({'success': 'Channel deleted'})\n\n\n@api_bp.get('/channel//videos')\ndef channel_videos(request, link: str):\n db: DictDB = request.ctx.get_db()\n Channel = db['channel']\n channel = Channel.get_one(link=link)\n if not channel:\n return response.json({'error': 'Unknown channel'}, HTTPStatus.NOT_FOUND)\n return response.json({'videos': list(channel['videos'])})\n\n\n@api_bp.route('/video/')\n@api_bp.route('/poster/')\n@api_bp.route('/caption/')\nasync def media_file(request: Request, hash: str):\n db: DictDB = request.ctx.get_db()\n download = boolean_arg(request, 'download')\n Video = db['video']\n kind = str(request.path).split('/')[3]\n\n try:\n video = Video.get_one(video_path_hash=hash)\n path = get_absolute_video_path(video, kind=kind)\n if download:\n return await response.file_stream(str(path), filename=path.name)\n else:\n return await response.file_stream(str(path))\n except TypeError or KeyError or UnknownFile:\n abort(404, f\"Can't find {kind} by that ID.\")\n\n\ndef get_channel_form(form_data: dict):\n channel = dict(\n url=form_data.get('url'),\n name=form_data['name'],\n match_regex=form_data.get('match_regex'),\n link=sanitize_link(form_data['name']),\n directory=form_data.get('directory'),\n )\n return channel\n\n\ndef refresh_channel_videos(db, channel):\n \"\"\"\n Find all video files in a channel's directory. 
Add any videos not in the DB to the DB.\n \"\"\"\n # Set the idempotency key so we can remove any videos not touched during this search\n curs = db.get_cursor()\n curs.execute('UPDATE video SET idempotency=NULL WHERE channel_id=%s', (channel['id'],))\n idempotency = str(uuid1())\n\n directory = get_absolute_channel_directory(channel['directory'])\n\n # A set of absolute paths that exist in the file system\n possible_new_paths = set(generate_video_paths(directory))\n\n # Update all videos that match the current video paths\n query = 'UPDATE video SET idempotency = %s WHERE channel_id = %s AND video_path = ANY(%s) RETURNING video_path'\n relative_new_paths = [str(i.relative_to(directory)) for i in possible_new_paths]\n curs.execute(query, (idempotency, channel['id'], relative_new_paths))\n existing_paths = {i for (i,) in curs.fetchall()}\n\n # Get the paths for any video not yet in the DB\n # (paths in DB are relative, but we need to pass an absolute path)\n new_videos = {p for p in possible_new_paths if str(p.relative_to(directory)) not in existing_paths}\n\n for video_path in new_videos:\n logger.debug(f'{channel[\"name\"]}: Added {video_path}')\n insert_video(db, pathlib.Path(video_path), channel, idempotency=idempotency)\n\n curs.execute('DELETE FROM video WHERE channel_id=%s AND idempotency IS NULL RETURNING id', (channel['id'],))\n deleted_count = curs.fetchall()\n if deleted_count:\n deleted_count = len(deleted_count)\n deleted_status = f'Deleted {deleted_count} video records from channel {channel[\"name\"]}'\n logger.info(deleted_status)\n yield deleted_status\n\n status = f'{channel[\"name\"]}: {len(new_videos)} new videos, {len(existing_paths)} already existed. 
'\n logger.info(status)\n yield status\n\n # Fill in any missing captions\n query = 'SELECT id FROM video WHERE channel_id=%s AND caption IS NULL AND caption_path IS NOT NULL'\n curs.execute(query, (channel['id'],))\n missing_captions = [i for (i,) in curs.fetchall()]\n Video = db['video']\n for video_id in missing_captions:\n video = Video.get_one(id=video_id)\n process_captions(video)\n yield f'Processed captions for video {video_id}'\n\n status = f'Processed {len(missing_captions)} missing captions.'\n logger.info(status)\n yield status\n\n\ndef _refresh_videos(db: DictDB):\n \"\"\"\n Find any videos in the channel directories and add them to the DB. Delete DB records of any videos not in the\n file system.\n\n Yields status updates to be passed to the UI.\n\n :param db:\n :return:\n \"\"\"\n logger.info('Refreshing video files')\n Channel = db['channel']\n\n total_channels = Channel.count()\n\n for idx, channel in enumerate(Channel.get_where()):\n progress = int((idx / total_channels) * 100)\n yield {'progress': progress, 'message': f'Checking {channel[\"name\"]} directory for new videos'}\n with db.transaction(commit=True):\n for msg in refresh_channel_videos(db, channel):\n yield {'message': msg}\n yield {'progress': 100, 'message': 'All videos refreshed.'}\n\n\n@wraps(_refresh_videos)\ndef refresh_videos(db: DictDB):\n return list(_refresh_videos(db))\n\n\n@wraps(_refresh_videos)\ndef refresh_videos_with_db():\n with get_db_context(commit=True) as (db_conn, db):\n return refresh_videos(db)\n\n\ndef video_search(db: DictDB, search_str, offset, link):\n db_conn = db.conn\n template = env.get_template('lib/plugins/videos/templates/search_video.html')\n curs = db_conn.cursor()\n\n # Get the match count per channel\n query = 'SELECT channel_id, COUNT(*) FROM video WHERE textsearch @@ to_tsquery(%s) GROUP BY channel_id'\n curs.execute(query, (search_str,))\n channel_totals = {i: j for (i, j) in curs.fetchall()}\n\n # Get the names of each channel, add the counts 
respectively\n query = 'SELECT id, name, link FROM channel ORDER BY LOWER(name)'\n curs.execute(query)\n channels = []\n for (id_, name, link_) in curs.fetchall():\n channel_total = channel_totals[id_] if id_ in channel_totals else 0\n d = {\n 'id': id_,\n 'name': f'{name} ({channel_total})',\n 'link': link_,\n 'search_link': f'/{PLUGIN_ROOT}/search?link={link_}&search={search_str}',\n }\n channels.append(d)\n\n # Get the search results\n if link:\n # The results are restricted to a single channel\n curs.execute('SELECT id FROM channel WHERE link = %s', (link,))\n (channel_id,) = curs.fetchone()\n query = 'SELECT id, ts_rank_cd(textsearch, to_tsquery(%s)) FROM video WHERE ' \\\n 'textsearch @@ to_tsquery(%s) AND channel_id=%s ORDER BY 2 OFFSET %s LIMIT 20'\n curs.execute(query, (search_str, search_str, channel_id, offset))\n total = channel_totals[channel_id]\n else:\n # The results are for all channels\n query = 'SELECT id, ts_rank_cd(textsearch, to_tsquery(%s)) FROM video WHERE ' \\\n 'textsearch @@ to_tsquery(%s) ORDER BY 2 OFFSET %s LIMIT 20'\n curs.execute(query, (search_str, search_str, offset))\n # Sum up all the matches for paging\n total = sum(channel_totals.values())\n\n results = list(curs.fetchall())\n\n videos = []\n Video = db['video']\n if results:\n videos = [dict(i) for i in Video.get_where(Video['id'].In([i[0] for i in results]))]\n\n results = {\n 'template': template,\n 'videos': videos,\n 'total': total,\n 'channels': channels,\n }\n return results\n","sub_path":"lib/plugins/videos/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":15983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"167063079","text":"from django.conf.urls import *\nfrom quests.models import *\nfrom django.contrib import admin\nfrom django.conf import settings\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', 
include('django.contrib.admindocs.urls')),\n url(r'^quests/', include('quests.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n\n#To handle static html problems\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n )\n","sub_path":"gamify/gamify/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"95293419","text":"import datetime\nfrom dateutil.relativedelta import *\nimport time\nimport logging\n\nfrom django.shortcuts import render_to_response, get_object_or_404, get_list_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth.models import User\nfrom django.template import RequestContext\nfrom django.utils.dates import MONTHS_3, MONTHS_3_REV\nfrom django.utils.encoding import force_unicode\n\nfrom calloway.models import Calendar, Event\nfrom calloway.forms import CalendarForm, EventForm\n\ndef get_monday():\n \"\"\"\n Function to return the most useful Monday.\n During the weekend return next Monday, otherwise return this weeks Monday.\n \"\"\"\n now = datetime.datetime.now()\n # Get next Monday date\n monday = now+relativedelta(weekday=MO)\n # If before Saturday, use previous Monday (-1 week)\n if now.weekday() < 5:\n monday = monday+relativedelta(weeks=-1)\n return monday\n\ndef add_css_positioning_to_events(events):\n minutes_in_day = float(1440) # 60 * 24\n \n def calculate_horizontal_position(j):\n if hasattr(events[j], 'horizontal_position_percentage'):\n #logging.debug('already positioned')\n return\n else:\n k = j\n l = events[j].total_number_of_horizontal_events\n #logging.debug('total events ' + str(len(events)))\n #logging.debug('clashing events ' + str(l))\n m = 0\n 
width = float(1) / float(l) * 100\n #logging.debug('the width of ' + events[k].summary + ' is ' + str(width))\n while m < l and k < len(events):\n #logging.debug('k is ' + str(k))\n events[k].horizontal_position_percentage = float(m) / float(l) * 100\n events[k].width_percentage = width\n k = k + 1\n m = m + 1 \n #logging.debug('yep')\n \n def calculate_start_and_end_points(j):\n # Calculate start and end points\n event = events[j]\n start = (event.dtstart.hour * 60) + event.dtstart.minute\n end = (event.dtend.hour * 60) + event.dtend.minute\n event.dtstart_pixels = start\n event.dtend_pixels = end\n #event.dtstart_percentage = (start / minutes_in_day) * 1440\n #event.dtend_percentage = (end / minutes_in_day) * 1440\n duration = event.dtend_pixels - event.dtstart_pixels\n if duration > 10: # If less than 10 minutes ( 10 pixels )\n event.duration_time_pixels = event.dtend_pixels - event.dtstart_pixels\n else:\n event.duration_time_pixels = 10\n \n i = 0\n for event in events:\n calculate_start_and_end_points(i)\n \n # Calculate clashes manually, to avoid multiple db queries.\n event.total_number_of_horizontal_events = 0\n for e in events:\n if e.dtstart < event.dtend and e.dtend > event.dtstart:\n event.total_number_of_horizontal_events = event.total_number_of_horizontal_events + 1\n \n #event.total_number_of_horizontal_events = events.filter(dtstart__lt=event.dtend, dtend__gt=event.dtstart).count()\n calculate_horizontal_position(i)\n \n i = i + 1\n \n return events\n\ndef redirect(post_save_redirect, obj):\n if post_save_redirect:\n return HttpResponseRedirect(post_save_redirect % obj.__dict__)\n elif hasattr(obj, 'get_absolute_url'):\n return HttpResponseRedirect(obj.get_absolute_url())\n else:\n raise ImproperlyConfigured(\n \"No URL to redirect to. 
Either pass a post_save_redirect\"\n \" parameter to the generic view or define a get_absolute_url\"\n \" method on the Model.\")\n\n@login_required\ndef event_detail(request, owner_username, calendar_slug, event_id, template='calloway/event_detail.html'):\n try:\n event = Event.objects.select_related().get(pk=event_id)\n except Event.DoesNotExist:\n raise Http404()\n event.primary_calendar = event.calendars.filter(slug=event.primary_calendar_slug, owner=event.owner)[0]\n if event.owner.username != owner_username:\n raise Http404()\n if event.primary_calendar.slug != calendar_slug:\n raise Http404()\n \n context = {\n 'event': event\n }\n \n if 'output' in request.GET and request.GET['output'] == 'json':\n from django.core import serializers\n json = serializers.serialize('json', [event])\n return HttpResponse(json, mimetype='text/javascript')\n \n return render_to_response(template, context,\n context_instance=RequestContext(request))\n \n@login_required \ndef create_event(request, owner_username, calendar_slug, template='calloway/create_event.html', post_save_redirect=None):\n if request.method == 'POST':\n from django.template.defaultfilters import slugify\n form = EventForm(request.POST)\n if form.is_valid():\n event = form.save(commit=False)\n event.calendar = get_object_or_404(Calendar, owner=request.user, slug='lessons')\n event.confirmed = False\n event.save()\n \n # ManyToMany relationship can only be updated once the instance is saved.\n # http://docs.djangoproject.com/en/dev/topics/forms/modelforms/#the-save-method\n event.users_with_read_access.add(request.user)\n form.save_m2m()\n \n return redirect(post_save_redirect, event)\n \n else:\n form = EventForm()\n context = {\n 'form': form\n }\n\n return render_to_response(template, context,\n context_instance=RequestContext(request))\n\ndef calendar_year(request, owner_username, calendar_slug, year, template='calloway/calendar_year.html'):\n \n return render_to_response(template, { 'year': year },\n 
context_instance=RequestContext(request))\n\n@login_required\ndef calendar_month(request, owner_username, calendar_slug, year, month, template='calloway/calendar_month.html'):\n # Get calendar by name for that user. Will only return calendars for the logged in username.\n owner = get_object_or_404(User, username=owner_username)\n calendar = get_object_or_404(Calendar, slug=calendar_slug, owner=owner)\n \n # Do some date work.\n month = int(MONTHS_3_REV[month])\n year = int(year)\n date = datetime.datetime(year, month, 1)\n next_month = date + relativedelta(months=+1)\n previous_month = date + relativedelta(months=-1)\n \n # Get events for that month (Could this be done in the template tag?)\n events = calendar.get_events_for_month(date)\n \n # Get availability_blocks for the month\n availability_blocks = calendar.get_availability_for_month(date)\n \n context = {\n 'calendar': calendar,\n 'year': date.year,\n 'month': date.month,\n 'date': date,\n 'next_month':next_month,\n 'next_month_url': \"/calendars/%s/%s/%s/%s/\" % (owner, calendar.slug, next_month.year, force_unicode(MONTHS_3[next_month.month])),\n 'previous_month': previous_month,\n 'previous_month_url': \"/calendars/%s/%s/%s/%s/\" % (owner, calendar.slug, previous_month.year, force_unicode(MONTHS_3[previous_month.month])),\n 'events': events,\n 'availability_blocks': availability_blocks\n }\n \n return render_to_response(template, context,\n context_instance=RequestContext(request))\n \ndef calendar_week(request, app, model, object_id, calendar_slug, year, month, day, model_instance=None, template='calloway/calendar_week.html'):\n \n # If the optional model_instance arg is passed in, this saves two db queries.\n if model_instance is not None:\n ct = ContentType.objects.get_for_model(model_instance)\n owner = model_instance\n # Otherwise lookup the ContentType and the object_id.\n else:\n ct = ContentType.objects.get(app_label=app, model=model)\n owner = ct.get_object_for_this_type(pk=object_id)\n \n 
calendar = get_object_or_404(Calendar, slug=calendar_slug, content_type=ct, object_id=owner.pk)\n\n # Do some date work.\n date = datetime.datetime(int(year), int(MONTHS_3_REV[month]), int(day))\n # Should be 'week starting', therefore day must be a Monday (settings.CALENDAR_WEEK_START_DAY?).\n if date.weekday() != 0:\n date = date + datetime.timedelta(days=-date.weekday())\n next_week = date + datetime.timedelta(days=+7)\n previous_week = date + datetime.timedelta(days=-7)\n \n events = calendar.get_events_for_week(date)\n add_css_positioning_to_events(events)\n context = {\n 'calendar': calendar,\n 'year': date.year,\n 'month': date.month,\n 'day': date.day,\n 'date': date,\n 'next_week': next_week,\n 'next_week_url': '/calendars/%s/%s/%s/%s/week/%s/%s/%s/' % (app, model, object_id, calendar_slug, next_week.year, force_unicode(MONTHS_3[next_week.month]), next_week.day),\n 'previous_week': previous_week,\n 'previous_week_url': '/calendars/%s/%s/%s/%s/week/%s/%s/%s/' % (app, model, object_id, calendar.slug, previous_week.year, force_unicode(MONTHS_3[previous_week.month]), previous_week.day),\n 'events': events\n }\n return render_to_response(template, context,\n context_instance=RequestContext(request))\n \ndef calendar_day(request, owner_username, calendar_slug, year, month, day, template='calloway/calendar_month.html'):\n try:\n date = datetime.date(*time.strptime(year+month+day, '%Y%b%d')[:3])\n except ValueError:\n raise Http404\n\n return render_to_response(template, { 'year':year, 'month': date.month, 'day': day, 'date': date },\n context_instance=RequestContext(request))\n \ndef list_calendars(request, app, model, template='calloway/list_calendars.html'):\n ct = ContentType.objects.filter(app_label=app, model=model)\n calendars = Calendar.objects.filter(content_type=ct)\n context = {\n 'calendars': calendars\n }\n \n return render_to_response(template, context,\n context_instance=RequestContext(request))\n\n\ndef list_users_in_group_calendars(request, group, 
template='calloway/list_users_in_group_calendars.html'):\n # Getting profile direct so it's not a separate query for each one.\n # TODO: Can't do this, it references ellis_manager!!!\n user_profiles = UserProfile.objects.filter(user__groups__name=group)\n context = {\n 'users': user_profiles\n }\n\n return render_to_response(template, context,\n context_instance=RequestContext(request))","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450226401","text":"import os.path\nimport json\nfrom models.delegate_info import DelegateInfoStatus\n##\n# Notification filter. Filter delegates for notifications\n##\n\nDELEGATE_STATUS_LOG_FILE = \"delegateinfo.log\"\n## 30 minutes on telegram notifications\nTIME_TELEGRAM_NOTIFICATION = (1000*60)*60\n\n## Return the last config file stored\ndef read_last_log():\n if os.path.exists(DELEGATE_STATUS_LOG_FILE):\n logFile = open(DELEGATE_STATUS_LOG_FILE, 'r')\n fileContent = logFile.read()\n logFile.close()\n return json.loads(fileContent)\n\n else:\n return None\n\n## Write the file\ndef write_last_log(delegateList, currentTime):\n logFileJson = {}\n logFileJson['lastTime'] = currentTime\n\n for delegate in delegateList:\n logFileJson[delegateList[delegate]['name']] = 'Forging'\n\n logFile = open(DELEGATE_STATUS_LOG_FILE, 'w')\n logFile.write(json.dumps(logFileJson))\n logFile.close()\n\n\ndef checkTelegramNotification (timestamp, delegateName, currentStatus, lastLog):\n ## If there is no log, return true\n if lastLog is None:\n return True\n ## Log exists\n else:\n ## Status red or orange -> notify\n if currentStatus is DelegateInfoStatus.STATUS_NOT_FORGING or currentStatus is DelegateInfoStatus.STATUS_CYCLE_LOST:\n return True\n ## If 30 min elapsed after the last notification and the status is forging or not found, send another notification\n if (currentStatus is 
DelegateInfoStatus.STATUS_FORGING or currentStatus is DelegateInfoStatus.STATUS_NOT_FOUND) and timestamp > (lastLog['lastTime'] + TIME_TELEGRAM_NOTIFICATION):\n return True\n ## time elapsed < 30 min and status is forging or not found -> not notify again\n else:\n return False\n","sub_path":"notification_filter.py","file_name":"notification_filter.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"544017495","text":"# ===== PROBLEM1 =====\n# Exercise 1 - Introduction - Say \"Hello, World!\" With Python\nprint(\"Hello, World!\")\n# Exercise 2 - Introduction - Python If-Else\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\nif __name__ == '__main__':\n n = int(input().strip())\n if 1<=n<=100:\n if n%2!=0:\n print('Weird')\n elif n%2==0:\n if 220:\n print('Not Weird')\n \n# Exercise 3 - Introduction - Arithmetic Operators\nif __name__ == '__main__':\n a = int(input())\n b = int(input())\n print(a+b)\n print(a-b)\n print(a*b)\n \n# Exercise 4 - Introduction - Python: Division\nif __name__ == '__main__':\n a = int(input())\n b = int(input())\n print(a//b)\n print(float(a/b))\n \n# Exercise 5 - Introduction - Loops\nif __name__ == '__main__':\n n = int(input())\n for i in range(n):\n print(i**2)\n \n# Exercise 6 - Introduction - Write a function\ndef is_leap(year):\n leap = False\n if year%4==0:\n leap=True\n if year%100==0:\n leap=False\n if year%400==0:\n leap=True\n \n return leap\n\n# Exercise 7 - Introduction - Print Function\nif __name__ == '__main__':\n n = int(input())\n for i in range(1,n+1):\n print(i,sep='',end='')\n \n# Exercise 8 - Basic data types - List Comprehensions\nif __name__ == '__main__':\n x = int(input())\n y = int(input())\n z = int(input())\n n = int(input())\n lista = [[i, j, k] for i in range(x+1) for j in range(y+1) for k in range(z+1) if i+ j+k!=n]\n print(lista)\n \n# Exercise 9 - Basic data types - Find the Runner-Up 
Score!\nif __name__ == '__main__':\n n = int(input())\n arr = map(int, input().split())\n l=list(arr)\n l1=[]\n newmax=list(filter(lambda x:x2:\n# for i in range(2,n+1):\n# t1=(l[i],l[i+1])\n# t=t+t1\n# else:\n# t\n# elif n%2!=0:\n# t=(l[0],)\n# for i in range(1,n+1):\n# t1=(l[i],l[i+1])\n# t=t+t1\n# print(hash(t))\n t=tuple(l)\n print(hash(t))\n\n# Exercise 14 - Strings - sWAP cASE\ndef swap_case(s):\n s2=\"\"\n for i in range(len(s)):\n if s[i].islower()==True:\n s2+=s[i].upper()\n else:\n s2+=s[i].lower()\n return s2\n\n# Exercise 15 - Strings - String Split and Join\ndef split_and_join(line):\n s=line.split(\" \")\n return '-'.join(s)\n\n# Exercise 16 - Strings - What's Your Name?\ndef print_full_name(a, b):\n print((\"Hello\"+' '+a+' '+b)+('! You just delved into python.'))\n \n# Exercise 17 - Strings - Mutations\ndef mutate_string(string, position, character):\n lista=list(string)\n lista[position]=character\n string=''.join(lista)\n\n return string\n\n# Exercise 18 - Strings - Find a string\ndef count_substring(string, sub_string):\n #conto=string.count(sub_string) perchè non da il risultato corretto??\n conto=0\n for k in range((len(string)-len(sub_string))+1):\n if string[k:k+len(sub_string)]==sub_string:\n conto+=1\n return conto\n\n# Exercise 19 - Strings - String Validators\n\n# Exercise 20 - Strings - Text Alignment\nthickness = int(input()) #This must be an odd number\nc = 'H'\n\n#Top Cone\nfor i in range(thickness):\n print((c*i).rjust(thickness-1)+c+(c*i).ljust(thickness-1))\n\n#Top Pillars\nfor i in range(thickness+1):\n print((c*thickness).center(thickness*2)+(c*thickness).center(thickness*6))\n\n#Middle Belt\nfor i in range((thickness+1)//2):\n print((c*thickness*5).center(thickness*6)) \n\n#Bottom Pillars\nfor i in range(thickness+1):\n print((c*thickness).center(thickness*2)+(c*thickness).center(thickness*6))\n#helped by discussion\n\n#Bottom Cone\nfor i in range(thickness):\n 
print(((c*(thickness-i-1)).rjust(thickness)+c+(c*(thickness-i-1)).ljust(thickness)).rjust(thickness*6))\n# Exercise 21 - Strings - Text Wrap\ndef wrap(string, max_width):\n a=textwrap.fill(string,max_width)\n return a\n\n# Exercise 22 - Strings - Designer Door Mat\n#flag is simmmetrical\n\nn, m =map(int,input().split())\nword='WELCOME'\npattern='.|.'\n\nl=[]\nfor i in range(n//2):\n door=[(pattern*(2*i + 1)).center(3*n, '-')] #i=0 i have .|. then i=1 .|..|..|. ecc.\n l.append(door[0])\n print(door[0])\nprint('WELCOME'.center(m, '-'))\n\nprint('\\n'.join(l[::-1]))\n\n# Exercise 23 - Strings - String Formatting\ndef print_formatted(number):\n for i in range(1, number+1):\n print(\"{0:>{w}d} {0:>{w}o} {0:>{w}X} {0:>{w}b}\".format(i, w=len(bin(number)[2:])))\n\n#I founded on internet .format(), but i've done the exercise only thanks to the guys in comment section. \n# Exercise 24 - Strings - Alphabet Rangoli\ndef print_rangoli(n):\n Alph='abcdefghijklmnopqrstuvwxyz'[0:n] #i take all the letter i need for the rangoli\n \n for i in range(n-1,-n,-1): #because num of lines is: n+(n//2) ex:n=3 I have 5 lines\n i=i\n if i>0:\n line = Alph[n:i:-1]+Alph[i:n]\n print('-'.join(line).center(4*(n-1)+1,'-'))\n elif i<0:\n i=-i\n line = Alph[n:i:-1]+Alph[i:n]\n print('-'.join(line).center(4*(n-1)+1,'-'))\n elif i==0:\n line = Alph[n:i:-1]+Alph[i:n]\n print('-'.join(line).center(4*(n-1)+1,'-'))\n \n#tried to do my best alone, but i saw some pages on interner,so the idea for index in center is not mine\n\n# Exercise 25 - Strings - Capitalize!\ndef solve(s):\n z=''\n m=' '\n i=0\n for c in s:\n if (m==' '):\n z+=c.capitalize()\n else:\n z+=c\n #++i = i++ = i+=1 = i=i+1\n m=c\n ++i\n\n return z\n\n# Exercise 26 - Strings - The Minion Game\ndef minion_game(string):\n contoS=0\n contoK=0\n cons='QWRTPLKJGFDSZXCVBNMYH'\n vocs='AEIUO'\n\n n=len(string)\n for i in range(n):\n if string[i] in cons:\n contoS+=n-i #incremento conto\n elif string[i] in vocs:\n contoK+=n-i\n if contoS>contoK:\n 
print ('Stuart',contoS)\n elif contoS0:\n if lista[0] in size and size[lista[0]]>0:\n conto=conto+int(lista[1])\n replace(lista[0],-1,size)\n #print(lista)\n #print(size)\n\n #print(conto)\n #elif lista[1]<=0:\n #conto=conto\nprint(conto)\n\n# Exercise 42 - Collections - DefaultDict Tutorial\n# Enter your code here. Read input from STDIN. Print output to STDOUT\n\nfrom collections import defaultdict\nd=defaultdict(list)\ninter=list(map(int,input().split()))\nlistaB=[]\n#print(inter[1])\n\n#for i in range(inter[0]):\n #d[input()].append(i+1) #ottengo dizionario con k:lettera e v:pos\n#for k in range(0,inter[1]):\n #listaB=listaB+[(input())]\n#right, but this 2 cilces gave me error in runtime, so i changed helped by the discussion\n\nfor i in range(inter[0]):\n d[input()].append(i+1)\n\nfor i in range(inter[1]):\n print(' '.join(map(str, d[input()])) or -1)\n\n\n#for j in range(len(listaB)): \n# if listaB[j] in d:\n# print(\" \".join(map(str,d[listaB[j]]))) \n# else:\n# print -1\n\n# Exercise 43 - Collections - Collections.namedtuple()\nfrom collections import namedtuple\ntotale=0\nstud=int(input())\n\nnames=list(map(str,input().split()))\nS=namedtuple('S',names)\n#print(S)\n\n\nfor i in range(stud):\n l_tab=input().split() #riga iesima della tabella\n S=S._make(l_tab) #._make() mi rida la tupla con i valori l_tab inseriti\n totale=totale+int(S.MARKS)\n\nprint('%.2f'%(totale/stud))\n\n# Exercise 44 - Collections - Collections.OrderedDict()\nfrom collections import OrderedDict\n\n\nitems=int(input())\nd=OrderedDict()\ns=''\n\nfor i in range(items):\n line = list(input().split())\n items, price = ' '.join(line[:-1]), int(line[-1])\n d[items] = d.get(items, 0) + int(price) #mi rida il valore di item se ci sta, altrimenti mi da 0\nfor key in d:\n print(key,d[key])\n \n# Exercise 45 - Collections - Word Order\nfrom collections import Counter,OrderedDict\n\nclass orderedcount(Counter, OrderedDict):\n pass #tried without class, then found why I have to use on 
internet\n\nn=int(input())\nd=orderedcount(input() for i in range(n))\n\nprint(len(d))\nprint(*d.values())\n\n# Exercise 46 - Collections - Collections.deque()\n\n\nfrom collections import deque\n\nd=deque()\nops=int(input())\nfor i in range(ops):\n s=input().split()\n\n if s[0]=='append':\n d.append(s[1])\n if s[0]=='appendleft':\n d.appendleft(s[1])\n if s[0]=='extend':\n d.extend(s[1])\n if s[0]=='extendleft':\n d.extendleft(s[1])\n if s[0]=='remove':\n d.remove(s[1])\n if s[0]=='reverse':\n d.reverse()\n if s[0]=='rotate':\n d.rotate(s[1])\n if s[0]=='pop':\n d.pop()\n if s[0]=='popleft':\n d.popleft()\n if s[0]=='clear':\n d.clear(s[1])\n \nprint(' '.join(d))\n# Exercise 47 - Collections - Company Logo\n# Exercise 48 - Collections - Piling Up!\n# Exercise 49 - Date time - Calendar Module\nimport calendar\n\ndate=list(map(int,input().split()))\ncalendar.TextCalendar(firstweekday=0).formatyear(date[2]) #crea calendario\n#print(date)\nday=calendar.weekday(date[2], date[0],date[1]) #cerca il giorno corr\ndayname=calendar.day_name[day] #nome del giorno\nprint(dayname.upper())\n\n# Exercise 50 - Date time - Time Delta\n# Exercise 51 - Exceptions -\n#def divEx(a,b):\n# try:\n# a/b\n# except ZeroDivisionError as (integer division or modulo by zero):\n# print(\"Error Code:\",(integer division or modulo by zero))\n# except ValueError as f:\n# print(\"Error Code:\",f)\n\n\nn=int(input())\n\nfor i in range(n):\n num=list(map(str,input().split()))\n try:\n print(int(num[0])//int(num[1]))\n \n except ValueError as e:\n print('Error Code:', e)\n except ZeroDivisionError as e:\n print(\"Error Code:\",e)\n\n# Exercise 52 - Built-ins - Zipped!\nn_x=list(map(int,input().split()))\n\nz=[]\nfor i in range(0,n_x[1]):\n z=z+[list(map(float,input().split()))] #ora ho una matrice\n\nz=(zip(*z))\nl=list(z) #per lavorae devo creare una lista di z\n\nfor i in range(len(l)):\n print(sum(l[i])/n_x[1])\n\n# Exercise 53 - Built-ins - Athlete Sort\n# Exercise 54 - Built-ins - 
Ginorts\ns=list(input())\n\nUp=[]\nlow=[]\nodd=[]\ndig=[]\nl=[]\n\nfor i in range(len(s)):\n if s[i].isalpha()==True:\n if s[i].isupper()==True:\n Up.append(s[i])\n else:\n low.append(s[i])\n elif s[i].isnumeric()==True:\n if int(s[i])%2!=0:\n odd.append(s[i])\n else:\n dig.append(s[i])\n\nl=''.join(sorted(low)+sorted(Up)+sorted(odd)+sorted(dig))\nprint(l)\n\n# Exercise 55 - Map and lambda function\ncube = lambda x: pow(x,3)\n\ndef fibonacci(n):\n l=[0,1]\n if n==0:\n return([])\n elif n==1:\n return([0])\n else:\n for i in range(2,n):\n i=l[i-1]+l[i-2]\n l.append(i)\n return l\n \n# Exercise 56 - Regex - Detect Floating Point Number\nimport re\n\nntest=int(input())\nfor i in range(ntest):\n m=bool(re.search(r'^[+-]?[0-9]*\\.[0-9]+$',input())) #? means 0 or 1, \n print(m)\n #if m==True:\n #print(m)\n #else:\n #print(False)\n \n# Exercise 57 - Regex - Re.split()\nregex_pattern = r\"[.,+]\"\t# Do not delete 'r'.\n\n# Exercise 58 - Regex - Group(), Groups() & Groupdict()\nimport re\n\nm = re.search(r'([a-zA-Z0-9])\\1+', input().strip()) #([a-zA-Z0-9])\\1+ : cerca, da a a z, maiuscolo e min e 0 a 9, quando c'è una ripetizione e mi da la posizione\nif m:\n print(m.group(1))\nelse:\n print(-1)\n\n\n#done thanks the contribute of discussion session\n \n#Exercise 59 - Regex - Re.findall() & Re.finditer()\nimport re\n\ncons='[qwrtypsdfghjklzxcvbnm]'\ns=str(input())\n\nif bool(re.findall(r'(?<='+cons+')([aeiou]{2,})'+cons, s ,re.IGNORECASE))==True:\n m=re.findall(r'(?<='+cons+')([aeiou]{2,})'+cons, s ,re.IGNORECASE)\n for i in range(len(m)):\n print(m[i])\nelse:\n print(-1)\n# Exercise 60 - Regex - Re.start() & Re.end()\ns= input()\nk= input()\nimport re\npattern = re.compile(k)\nr = pattern.search(s)\nif not r: print(\"(-1, -1)\")\nwhile r:\n print((\"({0}, {1})\").format(r.start(), r.end() - 1))\n r = pattern.search(s,r.start() + 1)\n\n#saw ho to do in solutions\n\n# Exercise 61 - Regex - Regex Substitution\n# Exercise 62 - Regex - Validating Roman Numerals\nregex_pattern 
= r\"^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$\"\t# Do not delete 'r'.\n\n# Exercise 63 - Regex - Validating phone numbers\nimport re\n\nn=int(input())\n\nfor i in range(n):\n if bool(re.match(r'[789]{1}\\d{9}$',input(),flags=0))==True:\n print('YES')\n else:\n print('NO')\n\n\n#re.match('^[789]\\d{\\d\\d\\d\\d\\d\\d\\d\\d}\n \n# Exercise 64 - Regex - Validating and Parsing Email Addresses\nimport re\nimport email.utils\n\nn=int(input())\n\nfor i in range(n):\n name, address = input().split(' ')\n if bool(re.match(r'<[A-Za-z](\\w|-|\\.|_)+@[A-Za-z]+\\.[A-Za-z]{1,3}>', address))==True:\n print(name, address)\n\n# Exercise 65 - Regex - Hex Color Code\nimport re\n\nN=int(input())\nfor i in range(N):\n s=input()\n res = re.findall(r'[\\s:](#[a-f0-9]{6}|#[a-f0-9]{3})',s,re.IGNORECASE)\n if len(res) > 0:\n for r in res:\n print(r)\n# Exercise 66 - Regex - HTML Parser - Part 1\n\nfrom html.parser import HTMLParser\n\nclass MyHTMLParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n print(\"Start :\", tag)\n for attr in attrs:\n print(\"-> %s >\" % attr[0], attr[1])\n def handle_endtag(self, tag):\n print(\"End :\", tag)\n def handle_startendtag(self, tag, attrs):\n print(\"Empty :\", tag)\n for attr in attrs:\n print(\"-> %s >\" % attr[0], attr[1])\n def handle_comment(self, data):\n pass\n\nparser = MyHTMLParser()\nN = int(input())\nfor i in range(N):\n s = input()\n parser.feed(s)\n# Exercise 67 - Regex - HTML Parser - Part 2\nfrom html.parser import HTMLParser\n\nclass MyHTMLParser(HTMLParser):\n def handle_data(self, data):\n if data != '\\n':\n print(\">>> Data\")\n print(data)\n\n def handle_comment(self, data):\n if \"\\n\" in data: #to match multi line\n print(\">>> Multi-line Comment\")\n else:\n print(\">>> Single-line Comment\")\n print(data.strip()) \n \nhtml = \"\" \nfor i in range(int(input())):\n html += input().rstrip()\n html += '\\n'\n \nparser = MyHTMLParser()\nparser.feed(html)\nparser.close()\n# Exercise 68 - Regex - Detect 
HTML Tags, Attributes and Attribute Values\n# Exercise 69 - Regex - Validating UID\nimport re\n\nN = int(input())\n\nfor i in range(N):\n s = input()\n if not re.search(r'.*[A-Z].*[A-Z].*', s):\n print('Invalid')\n elif not re.search(r'.*[0-9].*[0-9].*[0-9].*', s):\n print('Invalid')\n elif re.search(r'(.).*\\1', s):\n print('Invalid')\n elif len(s) != 10:\n print('Invalid')\n else:\n print('Valid')\n# re.match(r'(?!.*(.).*\\1)([A-Z]{2}|[0-9]{3}|[a-zA-Z0-9]{5})', s):\n\n# Exercise 70 - Regex - Validating Credit Card Numbers\nimport re\n\nN = int(input())\nfor i in range(N):\n s = input()\n if '-' not in s and len(s) == 16:\n s = '-'.join([s[i:i+4] for i in range(0, len(s), 4)]) \n if len(s) != 19:\n print('Invalid')\n elif not re.match(r'[0-9]{4}-[0-9]{4}-[0-9]{4}', s):\n print('Invalid')\n elif s[0] not in ['4', '5', '6']:\n print('Invalid')\n elif '_' in s or ' ' in s:\n print('Invalid')\n elif re.search(r'[a-zA-Z]', s):\n print('Invalid')\n elif re.search(r'0000|1111|2222|3333|4444|5555|6666|7777|8888|9999', s.replace('-','')):\n print('Invalid')\n else:\n print('Valid')\n\n# Exercise 71 - Regex - Validating Postal Codes\nregex_integer_in_range = r\"^[1-9][0-9][0-9][0-9][0-9][0-9]$\"\t# Do not delete 'r'.\nregex_alternating_repetitive_digit_pair = r\"(\\d)(?=(\\d\\1))\"\t# Do not delete 'r'.\n\n# Exercise 72 - Regex - Matrix Script\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\n\nfirst_multiple_input = input().rstrip().split()\n\nn = int(first_multiple_input[0])\n\nm = int(first_multiple_input[1])\n\nmatrix = []\n\nfor _ in range(n):\n matrix_item = input()\n matrix.append(matrix_item)\n\ns = ''\nfor j in range(m):\n for i in range(n):\n s += matrix[i][j]\n\ns = re.sub(r'(?<=(\\w))[\\W]+(?=(\\w))', ' ', s)\nprint(s)\n\n# Exercise 73 - Xml - XML 1 - Find the Score\ndef get_attr_number(node):\n # your code goes here\n s = len(node.attrib)\n for el in node:\n s += len(el.attrib)\n for e in el:\n s += len(e.attrib)\n for x in e:\n s += 
len(x.attrib)\n for y in x:\n s += len(y.attrib)\n return s\n# Exercise 74 - Xml - XML 2 - Find the Maximum Depth\n# Exercise 75 - Closures and decorators - Standardize Mobile Number Using Decorators\n# Exercise 76 - Closures and decorators - Decorators 2 - Name Directory\n# Exercise 77 - Numpy - Arrays\ndef arrays(arr):\n a=arr[::-1]\n b=numpy.array(a, float)\n \n return b\n\n# Exercise 78 - Numpy - Shape and Reshape\nimport numpy\n\narr=list(map(int,input().split()))\n\nchange_array = numpy.array(arr)\nchange_array.shape = (3, 3)\nprint(change_array)\n\n# Exercise 79 - Numpy - Transpose and Flatten\nimport numpy\n\ndim=input().split()\n\nl=[]\nfor i in range((int(dim[0]))):\n l+=[list(map(int,input().split()))]\n#print(l)\nprint (numpy.transpose(l)) #transpodes array\nlnumpy=numpy.array(l) #beacuse list doesn't have attr flatten()\nprint(lnumpy.flatten())\n\n# Exercise 80 - Numpy - Concatenate\nimport numpy\n\nN, M, P = map(int,input().split())\n\nMatA = numpy.array([input().split() for i in range(N)],int)\nMatB = numpy.array([input().split() for j in range(M)],int)\n\nprint(numpy.concatenate((MatA, MatB), axis = 0))\n\n# Exercise 81 - Numpy - Zeros and Ones\nimport numpy\n\nl=list(map(int,input().split())) #shape into a tuple\n\n\nprint (numpy.zeros(l, dtype = numpy.int)) #cause in the example we declare a tuple for the shape of the array\nprint (numpy.ones(l, dtype = numpy.int)) #same process\n\n# Exercise 82 - Numpy - Eye and Identity\nimport numpy\n\nnumpy.set_printoptions(legacy='1.13') #seen in discussion session because the code was ok but it diden't match\n\nsize=(list(map(int,input().split())))\n\nprint(numpy.eye(size[0],size[1],k=0))\n\n# Exercise 83 - Numpy - Array Mathematics\nimport numpy\n\nn=list(map(int,input().split()))\n\nl=[]\nfor i in range(n[0]):\n l+=[list(map(int,input().split()))]\nmatA=numpy.array(l)\n\nl1=[]\nfor i in range(n[0]):\n 
l1+=[list(map(int,input().split()))]\nmatB=numpy.array(l1)\n\n\nprint(matA+matB)\nprint(matA-matB)\nprint(matA*matB)\nprint(matA//matB)\nprint(matA%matB)\nprint(matA**matB)\n\n# Exercise 84 - Numpy - Floor, Ceil and Rint\nimport numpy\n\nnumpy.set_printoptions(legacy='1.13') #for the same output format of the tests\n\nA=list(map(float,input().split()))\n\narr = numpy.array(A)\n\nprint (numpy.floor(arr))\nprint (numpy.ceil(arr))\nprint (numpy.rint(arr))\n\n# Exercise 85 - Numpy - Sum and Prod\nimport numpy\n\nsize=list(map(int,input().split()))\nl=[]\nfor i in range(int(size[0])):\n l+=[input().split()]\n\n arr=numpy.array(l, dtype=int)\n\n\nsomma=numpy.sum(arr, axis = 0)\n\nprint (numpy.prod(somma, axis= None))\n\n# Exercise 86 - Numpy - Min and Max\nimport numpy\n\nsize=list(map(int,input().split()))\n\nl=[]\nfor i in range(size[0]):\n l+=[input().split()]\n arr=numpy.array(l,dtype=int)\n\nMIN=numpy.min(arr, axis = 1)\n\nprint(numpy.max(MIN)) #max\n# Exercise 87 - Numpy - Mean, Var, and Std\nimport numpy\n\nsize=list(map(int,input().split()))\nl=[]\nfor i in range(size[0]):\n l+=[input().split()]\narr=numpy.array(l, dtype=int)\n\n\nnumpy.set_printoptions(legacy='1.13')\n\nprint (numpy.mean(arr, axis = 1))\nprint (numpy.var(arr, axis = 0))\nprint (numpy.std(arr)) \n#print mean\n#print var\n#print std\n\n# Exercise 88 - Numpy - Dot and Cross\nimport numpy\n\nsize=list(map(int,input().split()))\n\nl=[]\nfor i in range(size[0]):\n l+=[input().split()]\nl1=[]\nfor j in range(size[0]):\n l1+=[input().split()]\n\narr1=numpy.array(l, dtype=int)\narr2=numpy.array(l1, dtype=int)\n\nprint (numpy.dot(arr1, arr2))\n\n# Exercise 89 - Numpy - Inner and Outer\nimport numpy\n\narrA=numpy.array(list(map(int,input().split())))\narrB=numpy.array(list(map(int,input().split())))\n\nprint(numpy.inner(arrA,arrB))\nprint(numpy.outer(arrA,arrB))\n\n# Exercise 90 - Numpy - Polynomials\nimport 
numpy\n\nvalP=list(map(float,input().split()))\nx=float(input())\n\nprint(numpy.polyval(valP,x))\n\n# Exercise 91 - Numpy - Linear Algebra\nimport numpy\n\nnumpy.set_printoptions(legacy='1.13')\n\nsize=int(input())\n\nl=[]\nfor i in range(size):\n l+=[input().split()]\n\narr=numpy.array(l,dtype=float)\n#print(arr)\n\nprint((numpy.linalg.det(arr)))\n\n# ===== PROBLEM2 =====\n\n# Exercise 92 - Challenges - Birthday Cake Candles\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the birthdayCakeCandles function below.\ndef birthdayCakeCandles(ar):\n count=ar.count(max(ar))\n return count\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n ar_count = int(input())\n\n ar = list(map(int, input().rstrip().split()))\n\n result = birthdayCakeCandles(ar)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n# Exercise 93 - Challenges - Kangaroo\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the kangaroo function below.\ndef kangaroo(x1, v1, x2, v2):\n if v2>=v1:\n s='NO'\n else:\n if (x1-x2)%(v2-v1)==0:\n s='YES'\n else:\n s='NO'\n return s\n\n\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n x1V1X2V2 = input().split()\n\n x1 = int(x1V1X2V2[0])\n\n v1 = int(x1V1X2V2[1])\n\n x2 = int(x1V1X2V2[2])\n\n v2 = int(x1V1X2V2[3])\n\n result = kangaroo(x1, v1, x2, v2)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n\n# Exercise 94 - Challenges - Viral Advertising\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the viralAdvertising function below.\ndef viralAdvertising(n):\n people=5\n likes=0\n for i in range(n):\n likes+=people//2\n people=(people//2)*3\n return likes\n\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n result = viralAdvertising(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n# Exercise 95 - Challenges - Recursive Digit Sum\nimport math\nimport os\nimport 
random\nimport re\nimport sys\nimport re\n\n\n# Complete the superDigit function below.\ndef superDigit(n,k):\n conto=0\n if len(n)==1:\n return n\n if len(n)>0:\n for i in range(len(n)):\n conto+=int(n[i])\n return superDigit(str(conto),k=1)\n \n \n \n\n\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nk = input().split()\n\n n = nk[0]\n\n k = int(nk[1]) #if i have 9600000 or 96 the result is alway 6, so for the running time i have to change this part of code\n if k==100000:\n k=1\n\n result = superDigit(n*k,k)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n# Exercise 96 - Challenges - Insertion Sort - Part 1\n#EVEN IF I HAVE THE SAME OUTPUT HACKERRANK TELL ME IS WRONG!!\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the insertionSort1 function below.\ndef insertionSort1(n, arr):\n x=arr[n-1]\n l=arr\n for i in range(n-1):\n j=n-1-i #scandisce la lista al contrario\n if x float(current):\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef preprocess_df(df):\r\n df = df.drop(\"target\", 1)\r\n df.dropna(inplace=True)\r\n #\r\n # for col in df.columns:\r\n # if col != \"future\":\r\n # df[col] = df[col].pct_change(fill_method='ffill') #normalizing the data\r\n #\r\n # df[col] = preprocessing.scale(df[col].values) #scaling the data\r\n #\r\n # df.dropna(inplace=True)\r\n\r\n sequential_data = []\r\n prev_days = deque(maxlen=SEQ_LEN)\r\n for i in df.values:\r\n prev_days.append([n for n in i[:-1]])\r\n if len(prev_days) == SEQ_LEN:\r\n sequential_data.append([np.array(prev_days), i[-1]])\r\n\r\n random.shuffle(sequential_data)\r\n\r\n buys = []\r\n sells = []\r\n\r\n for seq, target in sequential_data:\r\n if target == 0:\r\n sells.append([seq, target])\r\n elif target == 1:\r\n buys.append([seq, target])\r\n\r\n random.shuffle(buys)\r\n random.shuffle(sells)\r\n\r\n lower = min(len(buys), len(sells))\r\n\r\n buys = buys[:lower]\r\n sells = sells[:lower]\r\n\r\n sequential_data = buys+sells\r\n\r\n 
random.shuffle(sequential_data)\r\n\r\n X = []\r\n y = []\r\n\r\n for seq, target in sequential_data:\r\n X.append(seq)\r\n y.append(target)\r\n\r\n return np.array(X), y\r\n\r\n\r\ndf = pd.read_csv(f\"G:\\Programming\\Projects\\Index_price_movement\\All_Stock_Data\\{RATIO_TO_PREDICT}.csv\")\r\ndf.rename(columns = {\"Adj Close\": \"AdjClose\"}, inplace=True)\r\ndf[\"Date\"] = pd.to_datetime(df[\"Date\"], format=\"%Y-%m-%d\")\r\ndf.set_index(\"Date\", inplace = True)\r\ndf = df[[\"AdjClose\"]]\r\n\r\n# df.plot()\r\n# plt.show()\r\n\r\n# df['future'] = df[\"AdjClose\"].shift(-FUTURE_PERIOD_PREDICT)\r\n#\r\n# df['target'] = list(map(classify, df[\"AdjClose\"], df[\"future\"]))\r\n#\r\n#\r\n# main_df = df\r\n#\r\n# times = sorted(main_df.index.values)\r\n# last_10pct = times[-int(0.10*len(times))]\r\n#\r\n# validation_main_df = main_df[main_df.index >= last_10pct]\r\n# main_df = main_df[main_df.index < last_10pct]\r\n#\r\n# train_x, train_y = preprocess_df(main_df)\r\n#\r\n# validation_x, validation_y = preprocess_df(validation_main_df)\r\n#\r\n# print (f\"train_data: {len(train_x)} validation: {len(validation_x)}\")\r\n# print (f\"Dont buys: {train_y.count(0)}, buys: {train_y.count(1)}\")\r\n# print (f\"VALIDATION Dont buys: {validation_y.count(0)}, buys: {validation_y.count(1)}\")\r\n#\r\n# #define model\r\n# model = Sequential()\r\n# model.add(LSTM(50, activation='relu', input_shape=(SEQ_LEN, FUTURE_PERIOD_PREDICT)))\r\n# model.add(Dense(1))\r\n# model.compile(optimizer='adam', loss='mse')\r\n#\r\n# #fit model\r\n# model.fit(train_x, train_y, epochs=EPOCHS, verbose=1)\r\n#\r\n# # demonstrate prediction\r\n# tboard = TensorBoard(log_dir=f\"new_logs/{NAME}\")\r\n#\r\n# filepath = \"new_models/RNN_Final-{epoch:02d}-{val_acc}.hdf5\"\r\n# checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') # saves only the best ones\r\n# callbacks_list = [tboard, checkpoint]\r\n#\r\n# history = model.fit(train_x, train_y, 
batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(validation_x, validation_y), callbacks=callbacks_list, verbose=1)\r\n#\r\n# #history = model.fit(train_x, train_y, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(validation_x, validation_y), verbose=1)\r\n#\r\n# #history = model.fit(train_x, train_y, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1)\r\n\r\n","sub_path":"univariate_analysis.py","file_name":"univariate_analysis.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582283132","text":"from pymongo import MongoClient\nimport pandas as pd\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport gc\n\n#Artists = db.music_artists\n#Albums = db.music_albums\n\n\ndef get_data_track(query, start, end):\n client = MongoClient('mongodb://localhost:27017')\n db = client.spotify\n \n Tracks = db.music_tracks\n Albums = db.music_albums\n\n cursor = Tracks.find(query, {'_id': False}).skip(start).limit(end)\n list_cur = list(cursor)\n for track in list_cur:\n try:\n track['album'] = Albums.find_one({'id': track['album_id']}, {'_id': False})\n except:\n track['album'] = None\n client.close()\n return list_cur\n\ndef get_data_album(query, start, end):\n client = MongoClient('mongodb://localhost:27017')\n db = client.spotify\n\n Albums = db.music_albums\n\n cursor = Albums.find(query, {'_id': False}).skip(start).limit(end)\n list_cur = list(cursor)\n client.close()\n return list_cur\n\ndef make_audio_feature(data):\n client = MongoClient('mongodb://localhost:27017')\n db = client.spotify\n\n Tracks = db.music_tracks\n AF = db.tracks_af_clustered\n cluster_features = [\n 'acousticness',\n 'danceability',\n 'energy',\n 'valence'\n ]\n\n ctemp = [{\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}, \n {\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}, \n {\"valence\": 0,\"energy\": 0,\"danceability\": 
0,\"acousticness\": 0,}, \n {\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}, \n {\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}]\n clen = [0, 0, 0, 0, 0]\n for track in data:\n af = AF.find_one({'id': track['songid']}, {'_id': False})\n \n ctemp[af['cluster']]['valence'] += af['valence']\n ctemp[af['cluster']]['energy'] += af['energy']\n ctemp[af['cluster']]['danceability'] += af['danceability']\n ctemp[af['cluster']]['acousticness'] += af['acousticness']\n clen[af['cluster']]+=1\n\n result = []\n\n for i in range(0, 5):\n if clen[i] == 0:\n continue\n ctemp[i]['valence'] = ctemp[i]['valence']/clen[i]\n ctemp[i]['energy'] = ctemp[i]['energy']/clen[i]\n ctemp[i]['danceability'] = ctemp[i]['danceability']/clen[i]\n ctemp[i]['acousticness'] = ctemp[i]['acousticness']/clen[i]\n \n audio_feature_df = pd.DataFrame(columns=[\n \"valence\",\n \"energy\",\n \"danceability\",\n \"acousticness\"\n ])\n \n\n audio_feature = AF.find({'cluster': i}, {'_id': False})\n \n #idx = 1\n #for af in audio_feature:\n # data_info = pd.DataFrame({\n # \"valence\": af['valence'],\n # \"energy\": af['energy'],\n # \"danceability\": af['danceability'],\n # \"acousticness\": af['acousticness'],\n # }, index=[idx])\n # idx+=1\n # audio_feature_df = audio_feature_df.append(data_info, sort=True)\n #audio_feature_df = audio_feature_df.append(pd.DataFrame(ctemp[i], index=[0]), sort=True)\n\n afList = list(audio_feature)\n #afList.insert(0, ctemp[i])\n audio_feature_df = pd.DataFrame(afList)\n audio_feature_df = audio_feature_df[cluster_features]\n audio_feature_df.loc[:,['valence']] = audio_feature_df.loc[:,['valence']].astype('float32')\n audio_feature_df.loc[:,['energy']] = audio_feature_df.loc[:,['energy']].astype('float32')\n audio_feature_df.loc[:,['danceability']] = audio_feature_df.loc[:,['danceability']].astype('float32')\n audio_feature_df.loc[:,['acousticness']] = audio_feature_df.loc[:,['acousticness']].astype('float32')\n \n te = 
[ctemp[i]['acousticness'],ctemp[i]['danceability'],ctemp[i]['energy'],ctemp[i]['valence']]\n dists = euclidean_distances(audio_feature_df, pd.DataFrame(ctemp[i], index=[0]))\n \n six = [1, 1, 1, 1, 1, 1]\n li = []\n for dis in dists:\n for j in range(0, 6):\n if six[j] > dis[0]:\n six[j] = dis[0]\n break\n li.append(dis[0])\n \n re = []\n for val in six:\n re.append(afList[li.index(val)]['id'])\n\n result.append({'cluster' : i, 'data' : re})\n del audio_feature_df\n del dists\n gc.collect()\n \n client.close()\n return result\n","sub_path":"backend/ReMu/connectdb.py","file_name":"connectdb.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"624001786","text":"# MixedFraction Class\n\nfrom fraction import *\n\nclass MixedFraction(Fraction):\n\n def __init__(self, *args):\n \n if len(args) == 2:\n Fraction.__init__(self, args[0], args[1]) \n elif len(args) == 3:\n Fraction.__init__(self, args[1] + args[0] * args[2], args[2])\n else:\n raise TypeError('MixedFraction takes 2 or 3 arguments ' + \\\n '(' + str(len(args)) + ' given)')\n \n \n def __str__(self):\n\n empty_str = ''\n blank = ' '\n \n displayFrac = Fraction.copy(self)\n displayFrac.reduce()\n\n whole_num = 0\n numer = displayFrac.getNumerator()\n denom = displayFrac.getDenominator()\n\n if numer == 0:\n return '0'\n\n if denom == 1:\n return str(numer)\n\n if numer < 0:\n numer = abs(numer)\n sign = '-'\n else:\n sign = empty_str\n \n if abs(numer) > abs(denom):\n whole_num = abs(numer) // abs(denom)\n numer = abs(numer) % abs(denom)\n \n if whole_num == 0:\n return sign + str(numer) + '/' + str(denom)\n else:\n return sign + str(whole_num) + blank + \\\n str(numer) + '/' + str(denom) \n\n def __repr__(self):\n \n return self.__str__()\n \n\n def getWholeNum(self):\n \n return self.getNumerator() // self.getDenominator()\n \n\n def setWholeNum(self, value):\n \n self.setNumerator(self.getNumerator() + \\\n value 
* self.getDenominator())\n \n\n def set(self, whole_num, numer, denom):\n \n Fraction.set(self, numer + whole_num * denom, denom)\n \n\n def __neg__(self):\n \n return MixedFraction(-Fraction.getNumerator(self),\n Fraction.getDenominator(self))\n\n def __sub__(self, rfraction):\n \n tempFrac = Fraction.__sub__(self, rfraction)\n \n return self.__createMixedFraction(tempFrac)\n \n\n def __add__(self, rfraction):\n\n tempFrac = Fraction.__add__(self, rfraction)\n \n return self.__createMixedFraction(tempFrac)\n \n\n def __mul__(self, rfraction):\n \n tempFrac = Fraction.__mul__(self, rfraction)\n \n return self.__createMixedFraction(tempFrac)\n \n\n def __createMixedFraction(self, frac):\n \n numer = frac.getNumerator()\n denom = frac.getDenominator()\n\n return MixedFraction(numer, denom)\n \n","sub_path":"vehicle reservation/MixedFraction.py","file_name":"MixedFraction.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"45742473","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom listings.models import Listing\nfrom realtors.models import Realtor\nfrom listings.choices import bedroom_choices, price_choices, province_choices\n\n\nclass HomeView(generic.ListView):\n model = Listing\n queryset = Listing.objects.all().order_by(\"-list_date\")[:3]\n template_name = \"index.html\"\n context_object_name = \"listings\"\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super(HomeView, self).get_context_data()\n context['bedroom_choices'] = bedroom_choices\n context['price_choices'] = price_choices\n context['province_choices'] = province_choices\n return context\n\n\ndef about(request):\n realtors = Realtor.objects.all()\n context = {\n \"realtors\": realtors\n }\n return render(request, template_name=\"about.html\", context=context)\n\n\ndef search(request):\n queryset_listing = 
Listing.objects.all().order_by(\"-list_date\")\n\n #keywords\n\n if \"keywords\" in request.GET:\n keywords = request.GET['keywords']\n if keywords:\n queryset_listing = queryset_listing.filter(description__icontains=keywords)\n\n #city\n\n if \"city\" in request.GET:\n city = request.GET['city']\n if city:\n queryset_listing = queryset_listing.filter(city__iexact=city)\n\n #Province\n\n if \"province\" in request.GET:\n province = request.GET['province']\n if province:\n queryset_listing = queryset_listing.filter(province__iexact=province)\n\n # Bedrooms\n if \"bedrooms\" in request.GET:\n bedrooms = request.GET['bedrooms']\n if bedrooms:\n queryset_listing = queryset_listing.filter(bedrooms__lte=bedrooms)\n\n # Price\n if \"price\" in request.GET:\n price = request.GET['price']\n if price:\n queryset_listing = queryset_listing.filter(price__lte=price)\n\n context = {\n \"bedroom_choices\": bedroom_choices,\n \"price_choices\": price_choices,\n \"province_choices\": province_choices,\n \"listings\": queryset_listing,\n \"values\": request.GET\n }\n return render(request, template_name=\"search.html\", context=context)\n","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357829084","text":"import threading, time, pandas, sqlite3, platform\nfrom concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor,as_completed\nfrom functools import wraps\n\ndb_lock = threading.Lock()\n\nif platform.system() == \"Windows\":\n database_name = \"D:/shopee.db\"\n driver_path = 'D:/chromedriver_win32/chromedriver.exe'\nelse:\n database_name = \"/root/shopee.db\"\n driver_path = \"/root/chromedriver.exe\"\n\ndef mydb(sql, values=(), many=False):\n with sqlite3.connect(database_name) as db:\n if 'select' in sql:\n cur = db.execute(sql, values)\n rv = cur.fetchall()\n else:\n with db_lock:\n if many:\n db.executemany(sql, values)\n 
else:\n db.execute(sql, values)\n db.commit()\n rv = None\n return rv\n\ndef snow(tsp=None):\n tsp = int(tsp) if tsp else None\n t = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(tsp))\n return t\n\ndef unsnow(s):\n st = time.strptime(s,\"%Y-%m-%d %H:%M:%S\")\n tp = int(time.mktime(st))\n return tp\n\ndef data2book(data, name):\n path = './static/{}.xlsx'.format(name)\n book = pandas.ExcelWriter(path)\n df = pandas.DataFrame(data)\n df.to_excel(book, sheet_name='Sheet1', index=False, header=False)\n book.save()\n return path\n\n#新线程伪异步装饰器\ndef decor_async(func):\n @wraps(func)\n def wrapped_function(*args, **kwargs):\n print('one new thread started for ', func.__name__)\n mission = threading.Thread(target=func, args=args, kwargs=kwargs)\n mission.start()\n return wrapped_function\n\n#失败重试装饰器\ndef decor_retry(func):\n @wraps(func)\n def wrapped_function(*args, **kwargs):\n try:\n result = func(*args, **kwargs)\n except:\n print(func.__name__, \" failed , try again later\")\n time.sleep(5)\n result = func(*args, **kwargs)\n return result\n return wrapped_function\n\n#多任务并发, 线程版\ndef multiple_mission(func, args_list, max_number=16):\n num = len(args_list)\n print('total mission number is ', num)\n for i in range(num):\n args = args_list[i]\n while threading.active_count() > max_number + 1:\n print('reach max mission number, waiting...')\n time.sleep(1)\n mission = threading.Thread(target=func, args = args)\n mission.start()\n print('start mission NO.', i)\n return\n\n#多任务并发, 线程版, 加线程池\ndef multiple_mission_pool(func, args_list, max_workers=16, debug=False):\n if debug:\n arg = args_list[0]\n func(*arg)\n return \n count, num = 0, len(args_list)\n print('total mission number is ', num)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n future_list = [executor.submit(func, *args) for args in args_list]\n # for future in as_completed(future_list):\n # result = future.result()\n # count += 1\n # rate = round(count/num, 2)\n # msg = 'total mission {}, 
completed {}, {}%'.format(num, count, rate)\n print('all missions done')\n\n# #多任务并发,协程版,慢\n# def multiple_mission_gevent(func, args_list, max_workers=32):\n# num = len(args_list)\n# print('total mission number is ', num)\n# jobs = [gevent.spawn(func, args) for args in args_list]\n# gevent.wait(jobs)\n# print('all missions done')\n# return","sub_path":"api_tools.py","file_name":"api_tools.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"218814936","text":"import sys\nfrom termcolor import colored\nfrom optparse import OptionParser\nfrom os import path\nfrom time import sleep\nfrom IOBluetooth import *\nfrom IOBluetoothUI import *\nfrom ctypes import CDLL\n\ndef logoff():\n print(\"Logging Off\")\n loginPF = CDLL('/System/Library/PrivateFrameworks/login.framework/Versions/Current/login')\n result = loginPF.SACLockScreenImmediate()\n\n\nopts = OptionParser()\nopts.add_option('-d', '--device', dest='device', help='ask for device', default=False, action='store_true')\n(options, args) = opts.parse_args()\n\nif path.exists('device.conf') and not options.device:\n with open('device.conf', 'r') as devFile:\n devAddr = devFile.read()\n dev = IOBluetoothDevice.deviceWithAddressString_(devAddr)\nelse:\n with open('device.conf', 'w') as devFile:\n selector = IOBluetoothDeviceSelectorController.deviceSelector()\n selector.runModal()\n results = selector.getResults()\n dev = results[0]\n devAddr = dev.getAddressString()\n devFile.write(devAddr)\n\ndev.openConnection()\nif dev.isConnected():\n print(\"Device Connected\")\n while True:\n if not dev.isConnected():\n dev.openConnection()\n devSignal = dev.rawRSSI()\n print(devSignal)\n #if devSignal < 0 and devSignal > -60:\n # color = 'green'\n #else:\n # color = 'red'\n if devSignal < -50:\n logoff()\n sys.stdout.write(\"%s\\r\" % devSignal)\n sys.stdout.flush()\n 
sleep(1)\n\n","sub_path":"proxcheck.py","file_name":"proxcheck.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552695322","text":"import sys\n# will be called like this `python finalproject.py input_file`\n# input_file: name of one of the 10 provided test files\n\n# must output file named prediction.txt\n# prediction.txt must contain next 60 predictions (x,y)\n# predictions must be same format as input file\n# 1 minute time limit to output all 10 predictions (won't matter b/c our answers will be pre-computed)\n\n\ninfile = sys.argv[1]\n\nwith open(infile) as inf:\n with open('prediction.txt', 'w+') as outf:\n for line in inf: outf.writelines(line)\n","sub_path":"finalproject/finalproject.py","file_name":"finalproject.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"351870590","text":"from pymongo import MongoClient\r\nfrom pymongo.errors import ConnectionFailure\r\n\r\nprint(\"Establishing connection with the database\".center(100, \"=\"))\r\nmyclient = MongoClient(\"mongodb://%s:%s@127.0.0.1\" % (\"myUserAdmin\", \"abc123\"))\r\nprint(\"Connection established successfully: \", myclient)\r\n\r\nmydatabase = myclient['database']\r\n\r\nmycollection = mydatabase['test3']\r\n\r\nmycollection.delete_many({})\r\n\r\nsample_data = [{\"x\": 1, \"tags\": [\"dog\", \"cat\"]},\r\n {\"x\": 2, \"tags\": [\"cat\"]},\r\n {\"x\": 2, \"tags\": [\"mouse\", \"cat\", \"dog\"]},\r\n {\"x\": 3, \"tags\": []}]\r\n\r\nresult = mycollection.insert_many(sample_data)\r\n\r\nfrom bson.son import SON\r\n\r\npipeline = [\r\n { \"$unwind\": \"$tags\" }, #1. Make a flat hierarchy\r\n { \"$group\": { \"_id\": \"$tags\", \"count\": { \"$sum\": 1 } } }, #2. Actual Aggregation\r\n { \"$sort\": SON( [(\"count\", -1) , (\"_id\", -1) ] ) } #3. 
Display\r\n]\r\n\r\nimport pprint\r\nprint(\"Aggregation Pipeline\".center(100, \"=\"))\r\npprint.pprint(list(mycollection.aggregate(pipeline)))\r\nprint(\"\".center(100, \"=\"))\r\n\r\n\r\n","sub_path":"prime-agg.py","file_name":"prime-agg.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"651196149","text":"import scrapy\nfrom scrapy import Selector\nfrom spiders.items import MaoyanItem\n\n\nclass MaoyanSpider(scrapy.Spider):\n name = 'maoyan'\n allowed_domains = ['maoyan.com']\n start_urls = ['http://maoyan.com/']\n\n # 注释默认的parse函数\n # def parse(self, response):\n # pass\n\n\n # 爬虫启动时,引擎自动调用该方法,并且只会被调用一次,用于生成初始的请求对象(Request)。\n # start_requests()方法读取start_urls列表中的URL并生成Request对象,发送给引擎。\n # 引擎再指挥其他组件向网站服务器发送请求,下载网页\n def start_requests(self):\n url = 'https://maoyan.com/films?showType=3'\n yield scrapy.Request(url=url, callback=self.parse)\n # url 请求访问的网址\n # callback 回调函数,引擎回将下载好的页面(Response对象)发给该方法,执行数据解析\n # 这里可以使用callback指定新的函数,不是用parse作为默认的回调参数\n # 解析函数\n def parse(self, response):\n movies = Selector(response=response).xpath('//div[@class=\"movie-hover-info\"]')[:11]\n print(movies)\n for movie in movies:\n item = MaoyanItem()\n name = movie.xpath('./div[1]/span[1]/text()')\n type = movie.xpath('./div[2]/text()')[-1]\n date = movie.xpath('./div[4]/text()')[-1]\n item['name'] = name.extract_first().strip()\n item['type'] = type.extract().strip()\n item['date'] = date.extract().strip()\n yield item\n","sub_path":"week02/spiders/spiders/spiders/maoyan.py","file_name":"maoyan.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568673962","text":"# Finish 2D convolution/filtering by your self.\n# What you are supposed to do can be described as \"median blur\", which means by using a sliding window\n# on an image, your task is not going to do a normal convolution, but 
to find the median value within\n# that crop.\n#\n# You can assume your input has only one channel. (a.k.a a normal 2D list/vector)\n# And you do need to consider the padding method and size. There are 2 padding ways: REPLICA & ZERO. When\n# \"REPLICA\" are given to you, the padded pixels are the same with the border pixels. E.g is [1 2 3] is your\n# image, the padded version will be [(...1 1) 1 2 3 (3 3...)] where how many 1 & 3 in the parenthesis\n# depends on your padding size. When \"ZERO\", the padded version will be [(...0 0) 1 2 3 (0 0...)]\n#\n# Assume your input's size of the image is W x H, kernel size's m x n. You may first complete a version\n# with O(W·H·m·n log(m·n)) to O(W·H·m·n·m·n)).\n# Follow up 1: Can it be completed in a shorter time complexity?\n# Follow up 2: Can it be completed in O(W·H·m·n)?\n#\n# Python version:\n# def medianBlur(img, kernel, padding_way):\n# img & kernel is List of List; padding_way a string\n# Please finish your code under this blank\n#\nimport numpy as np\nimport cv2\n\ndef medianBlur(img, kernel, padding_way,padding_size=2):\n img_padded = []\n h,w = img.shape\n kh,kw = kernel.shape\n if padding_way == 'REPLICA':\n img_padded=np.pad(img,padding_size,'edge')\n elif padding_way == 'ZERO':\n img_padded = np.pad(img,padding_size,'constant')\n\n out_h,out_w = (h+2*padding_size+1-kh),(w+2*padding_size+1-kw)\n img_pooling=np.zeros((out_h,out_w))\n for i in range(out_h):\n for j in range(out_w):\n img_pooling[i][j]=np.median(img_padded[i:i+kh,j:j+kw])\n\n return img_pooling\n\nif __name__=='__main__':\n img_gray=cv2.imread('lena.jpg',0)\n\n kernel = cv2.getGaussianKernel(5,1);\n kernel2D = kernel*kernel.T\n\n print(img_gray.shape)\n print(kernel2D.shape)\n img_padding_REPLICA = medianBlur(img_gray, kernel2D, 'REPLICA')\n img_padding_ZERO = medianBlur(img_gray, kernel2D, 'ZERO')\n img_padding_ZERO = img_padding_ZERO.astype(np.uint8)\n img_padding_REPLICA = img_padding_REPLICA.astype(np.uint8)\n\n 
cv2.imshow('img_padding_REPLICA',img_padding_REPLICA)\n cv2.imshow('img_padding_ZERO',img_padding_ZERO)\n key=cv2.waitKey()\n if key==27:\n cv2.destroyAllWindows()\n\n\n\n\n","sub_path":"week2/medianblur.py","file_name":"medianblur.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593338735","text":"from FirebaseCredentials.Firebase import db\n\ndef get_admin(project_id):\n all_users=db.child(\"Users\").get()\n a=[]\n for user in all_users.each():\n employee_id=user.key()\n status=db.child(\"Users/\"+employee_id+\"/profile/status\").get().val()\n if status == \"admin\":\n a.append(employee_id)\n for admin in a:\n projects= db.child(\"Users/\"+admin+\"/Projects\").get().each()\n for project in projects:\n admin_project_id=project.key()\n if admin_project_id==project_id:\n admin_id=admin\n break\n return(admin_id)\n\n\n","sub_path":"projects/getProjectAdmin.py","file_name":"getProjectAdmin.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"473596836","text":"\"\"\"classic Acrobot task\"\"\"\nfrom rlpy.Tools import wrap, bound, lines, fromAtoB, rk4\nfrom rlpy.Domains import Acrobot\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n__copyright__ = \"Copyright 2013, RLPy http://acl.mit.edu/RLPy\"\n__credits__ = [\"Alborz Geramifard\", \"Robert H. Klein\", \"Christoph Dann\",\n \"William Dabney\", \"Jonathan P. 
How\"]\n__license__ = \"BSD 3-Clause\"\n__author__ = \"Christoph Dann \"\n\nclass ModifiedAcrobot(Acrobot):\n episodeCap = 500\n\n def __init__(self, **kwargs):\n self.counter = 0\n super(ModifiedAcrobot, self).__init__()\n\n def step(self, a):\n s = self.state\n torque = self.AVAIL_TORQUE[a]\n\n # Add noise to the force action\n if self.torque_noise_max > 0:\n torque += self.random_state.uniform(-\n self.torque_noise_max, self.torque_noise_max)\n\n # Now, augment the state with our force action so it can be passed to\n # _dsdt\n s_augmented = np.append(s, torque)\n\n ns = rk4(self._dsdt, s_augmented, [0, self.dt])\n # only care about final timestep of integration returned by integrator\n ns = ns[-1]\n ns = ns[:4] # omit action\n # ODEINT IS TOO SLOW!\n # ns_continuous = integrate.odeint(self._dsdt, self.s_continuous, [0, self.dt])\n # self.s_continuous = ns_continuous[-1] # We only care about the state\n # at the ''final timestep'', self.dt\n\n ns[0] = wrap(ns[0], -np.pi, np.pi)\n ns[1] = wrap(ns[1], -np.pi, np.pi)\n ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1)\n ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2)\n self.state = ns.copy()\n terminal = self.isTerminal()\n reward = self._reward_function(terminal)\n return reward, ns, terminal, self.possibleActions()\n\n def _reward_function(self, terminal):\n return -1. if not terminal else 0.\n\n def showDomain(self, a=0):\n self.counter += 1\n if self.counter % 2:\n return\n super(ModifiedAcrobot, self).showDomain(a)\n plt.pause(0.001)\n\n\nclass Acrobot_Mass1(ModifiedAcrobot):\n LINK_LENGTH_1 = 1. # [m]\n LINK_LENGTH_2 = 1. # [m]\n LINK_MASS_1 = 3. #: [kg] mass of link 1\n LINK_MASS_2 = 1.\n\nclass Acrobot_Mass2(ModifiedAcrobot):\n LINK_LENGTH_1 = 1. # [m]\n LINK_LENGTH_2 = 1. # [m]\n LINK_MASS_1 = 1. 
#: [kg] mass of link 1\n LINK_MASS_2 = 0.1","sub_path":"rlgym/models/Acrobot/Mass1_Heavy/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568303449","text":"import pytest\r\n\r\nfrom lib import consts\r\nfrom lib.log_parser import LogParser\r\nfrom lib.environment import Env\r\nfrom tests.tests_old.install.installbase import InstallBase\r\n\r\ndesired_backup_mode = None\r\n\r\n\r\nclass TestGICreateAccount(InstallBase):\r\n\r\n @pytest.mark.skipif(InstallBase.SERVER_EDITION, reason='Personal not supported on server editions')\r\n def test_create_account_through_service(self):\r\n \"\"\"\r\n @TITLE\r\n Create an account from the service\r\n\r\n @DESCRIPTION\r\n Create an account from the service after the Generic Install has laid down the bits and the service and UI are up\r\n\r\n @PRECONDITIONS\r\n Carbonite is not installed on PC\r\n\r\n @PROCEDURE\r\n 1. Install the generic installer(version=latest)\r\n 2. Hit the Service to create an account\r\n 3. Verify that the account is created and reguid is received\r\n\r\n @PASS FAIL CRITERIA\r\n PASS criteria: 1. Account created\r\n 2. 
Reguid received\r\n\r\n FAIL criteria: Any of the above PASS criteria condition fails\r\n \"\"\"\r\n self.log.info(\"Installing the Latest version of the Generic Installer with NO REGUID.\")\r\n self.install(account=consts.personal_account, generic=True, cluster=consts.endpoint_stabilityGI_build)\r\n #if the Policy text exist in the registry (means missed account creation page)\r\n #if the string \"Change to page 2 phase 500\" not exist in CarboniteUI.log (means missed account creation page)\r\n if (self.check_policy_text_exist() or\r\n not LogParser(consts.win_ui_log_file_path).text_exists(consts.create_account_page_flag)):\r\n raise RuntimeError(\"Generic Installer missed account creation page\")\r\n home_server, membership_server, portal_server, registration_guid, creds = self.create_account_from_service(Env().portal)\r\n\r\n # these are the same until the account logs in using the sign-on page\r\n assert home_server == \"{0}.{1}\".format(Env().download_host, Env().download_domain), \\\r\n 'HomeServer is {0}, should be {1}.{2}'.format(home_server, Env().download_host, Env().download_domain)\r\n assert membership_server == \"{0}.{1}\".format(Env().download_host, Env().download_domain),\\\r\n 'MembershipServer is {0}, should be {1}.{2}'.format(membership_server, Env().download_host, Env().download_domain)\r\n assert portal_server == \"{0}.{1}\".format(Env().download_host, Env().download_domain),\\\r\n 'Portal server is {0}, should be {1}.{2}'.format(portal_server, Env().download_host, Env().download_domain)\r\n assert len(registration_guid) > 0, 'Registration GUID is 0 length string'\r\n #just log this for now, will use these credentials to login when sign-on page is implemented\r\n self.log.info(creds)\r\n","sub_path":"Chandan 
Singh/headless-automation__14sep/tests/tests_old/install/test_GI_create_account.py","file_name":"test_GI_create_account.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"39686924","text":" \n# Import the heap functions from python library \nfrom heapq import heappush, heappop \n \n# heappop - pop and return the smallest element from heap \n# heappush - push the value item onto the heap, maintaining \n# heap invarient \n \n# A class for Min Heap \nclass MinHeap: \n \n # Constructor to initialize a heap \n def __init__(self): \n self.heap = [] \n \n # Inserts a new key 'k' \n def insertKey(self, k): \n heappush(self.heap, k) \n \n # Method to remove minium element from min heap \n def extractMin(self): \n return heappop(self.heap) \n \n # Get the minimum element from the heap \n def getMin(self): \n return self.heap[0] \n \n# Driver program to test above function \nheapObj = MinHeap() \nheapObj.insertKey(3) \nheapObj.insertKey(2) \nheapObj.insertKey(15) \nheapObj.insertKey(5) \nheapObj.insertKey(4) \nheapObj.insertKey(45) \n# 2,3,15,5,4,45\n# 2\n# / \\\n# 3 15\n# / \\ |\n# 5 4 45\n \nprint (heapObj.extractMin())\nprint (heapObj.extractMin())\nprint (heapObj.getMin())","sub_path":"minHeap.py","file_name":"minHeap.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634986957","text":"import RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM);\n\ndef pinIt(pin,art,value,name):\n\tprint(\"Pin {0:2}; Art {1:3}; Value: {2}; Name: {3}; \".format(pin,art,value,name));\ndef checkInput(pin,name):\n\tGPIO.setup(pin, GPIO.IN);\n\tpinIt(pin,\"IN\",GPIO.input(pin),name);\ndef output(pin,name):\n\tGPIO.setup(pin, GPIO.OUT)\n\tGPIO.output(pin, 0)\n\tpinIt(pin,'OUT','0',name);\nprint('--- Allgemeine Sensoren ---')\ncheckInput(19,\"PIR\")\ncheckInput(26,\"DHT11\")\nprint('--- Linetrackingsensoren 
(Erster Sensor-Fahrtrichtung-Rechts) ---');\ncheckInput(21,\"IR-LT-1\")\ncheckInput(20,\"IR-LT-2\")\ncheckInput(16,\"IR-LT-3\")\ncheckInput(12,\"IR-LT-4\")\nprint('--- Ultraschall-Abstanssensoren (Erster Sensor-Fahrtrichtung-Rechts) ---');\noutput(13,\"UA-1-TRIGGER\")\ncheckInput(22,\"UA-1-ECHO\")\noutput(6,\"UA-2-TRIGGER\")\ncheckInput(27,\"UA-2-IN\")\noutput(5,\"UA-3-TRIGGER\")\ncheckInput(17,\"UA-3-IN\")\nprint('--- Motorenbelegung ---');\noutput(23,\"A-1A\") #Grau\noutput(18,\"A-1B\") #Weiss\noutput(25,\"B-1A\") #Blau\noutput(24,\"B-1B\") #Lila\nGPIO.cleanup();\n\n","sub_path":"gpio_mapping.py","file_name":"gpio_mapping.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"518794114","text":"### THIS MODULE RENDERS THE TEMPLATES FROM THE JINJA2 FILES\n### AND PACKAGES THEM INTO A LIST OF LISTS. IT ONLY LOOKS AT THE \n### SELECTED INDEXES (INIITIALIZE.ELEMENT) OF THE NODE_OBJECT. \n### THE CONFIGURATIONS ARE STORED IN THE GLOBAL VARIABLE CALL \n### INITIALIZE.CONFIGURATION.\n\nfrom jinja2 import Environment, FileSystemLoader\nfrom ciscoconfparse import CiscoConfParse\nfrom collections import Counter\nfrom multithread import multithread_engine\nfrom get_property import get_template_directory\nfrom get_property import get_updated_list\nfrom get_property import get_syntax\nfrom get_property import get_sorted_juniper_template_list \nimport re\nimport initialize\n\ndef auditdiff_engine(template_list,node_object,auditcreeper,output,remediation):\n\n\tredirect = [] \n\tcommand = [] \n\t### RENDERED_CONFIG IS TO ACCOMODATE JUNIPER PLATFORM BY APPENDING A 'LOAD REPLACE TERMINAL' TO GET THE DIFF OUTPUT\n\trendered_config = []\n\trendered_config.append('load replace terminal')\n\tedit_list = []\n\tno_diff = 0\n\n\t### PUSH_CONFIGS IS A LIST OF THE FINAL CONFIGS TO BE PUSHED\n#\tpush_configs = []\n\n\t### INDEX_POSITION IS THE INDEX OF ALL THE MATCHED FILTER_CONFIG AGAINST THE 
BACKUP_CONFIGS. THE INDEX IS COMING FROM THE BACKUP_CONFIG\n\tindex_position = 0\n\n\t### NODE_INDEX KEEPS TRACK OF THE INDEX IN INITIALIZE.NTW_DEVICE. IF REMEDIATION IS NOT REQUIRED (CONFIGS MATCHES TEMPLATE), THEN THE NODE IS POPPED OFF\n\t### INITIALIZE.NTW_DEVICE AND NOTHING IS CHANGED ON THAT DEVICE\n\tnode_index = 0 \n\n\t### AUDIT_FILTER_RE IS THE REGULAR EXPRESSION TO FILTER OUT THE AUDIT FILTER IN EVERY TEMPLATE\n\tAUDIT_FILTER_RE = r\"\\[.*\\]\"\n\n\t### TEMPLATE_LIST_COPY TAKE A COPY OF THE CURRENT TEMPLATE_LIST\n\ttemplate_list_original = template_list[:]\n\ttemplate_list_copy = template_list\n\n\tif(auditcreeper):\n\t\ttemplate_list = template_list_copy[0]\n\n#\tprint \"TEMPLATE_LIST: {}\".format(template_list)\n\n\t### THIS SECTION OF CODE WILL GATHER ALL RENDERED CONFIGS FIRST AS IT'S REQUIRED FOR ALL PLATFORMS (CISCO & JUNIPER)\n\t### JUNIPER DOES NOT REQUIRE BACKUP-CONFIGS IN ORDER TO BE DIFFED SO INSTEAD IT WILL JUST PUSH (PUSH_CFGS) THE TEMPLATE AND PERFORM THE DIFF ON THE DEVICE ITSELF.\n\t### CISCO WILL REQUIRE BACKUP-CONFIGS (GET_CONFIG)\n\tfor index in initialize.element:\n\n\t\tif(node_object[index]['platform'] == 'juniper'):\n\n\t\t\t### THIS WILL RETURN A SORTED JUNIPER TEMPLATE LIST BASED ON JUNIPER'S 'SHOW CONFIGURATION' OUTPUT\n\t\t\ttemplate_list = get_sorted_juniper_template_list(template_list)\n#\t\t\tprint(\"TEMPLATE_LIST FIRST PHASE: {}\".format(template_list))\n\n\t\tfor template in template_list:\n\n\t\t\t### THIS SECTION OF CODE WILL PROCESS THE TEMPLATE AND OUTPUT TO A *.CONF FILE\n\t\t\tdirectory = get_template_directory(node_object[index]['platform'],node_object[index]['opersys'],node_object[index]['type'])\n\t\t\tenv = Environment(loader=FileSystemLoader(\"{}\".format(directory)))\n\t\t\tbaseline = env.get_template(template)\n\t\t\tf = open(\"/rendered-configs/{}.{}\".format(node_object[index]['hostname'],template.split('.')[0]) + \".conf\", \"w\") \n\n\t\t\t### GENERATING TEMPLATE BASED ON NODE OBJECT\n\t\t\tconfig = 
baseline.render(nodes = node_object[index])\n\n\t\t\tf.write(config) \n\t\t\tf.close \n\t\t\tif(node_object[index]['platform'] == 'cisco'):\n\n\t\t\t\t### THIS SECTION OF CODE WILL OPEN THE RENDERED-CONFIG *.CONF FILE AND STORE IN RENDERED_CONFIG AS A LIST\n\t\t\t\tf = open(\"/rendered-configs/{}.{}\".format(node_object[index]['hostname'],template.split('.')[0]) + \".conf\", \"r\")\n\t\t\t\tinit_config = f.readlines()\n\t\t\t\t### RENDERED_CONFIG IS A LIST OF ALL THE CONFIGS THAT WAS RENDERED FROM THE TEMPLATES (SOURCE OF TRUTH)\n\n\t\t\tif(node_object[index]['platform'] == 'juniper'):\n\t\n\t\t\t\t### THIS SECTION OF CODE WILL OPEN THE RENDERED-CONFIG *.CONF FILE AND STORE IN RENDERED_CONFIG AS A LIST\n\t\t\t\tf = open(\"/rendered-configs/{}.{}\".format(node_object[index]['hostname'],template.split('.')[0]) + \".conf\", \"r\")\n\t\t\t\tinit_config = f.readlines()\n\t\t\t\t### RENDERED_CONFIG IS A LIST OF ALL THE CONFIGS THAT WAS RENDERED FROM THE TEMPLATES (SOURCE OF TRUTH)\n\t\n\t\t\t\tfor config_line in init_config:\n\t\t\t\t\tstrip_config = config_line.strip('\\n')\n\t\t\t\t\t### THIS WILL REMOVE ANY LINES THAT ARE EMPTY OR HAS A '!' 
MARK\n\t\t\t\t\tif(strip_config == '' or strip_config == \"!\"):\n\t\t\t\t\t\tcontinue\t\n\t\t\t\t\telse:\n\t\t\t\t\t\trendered_config.append(strip_config)\t\n\t\n\t\t\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\t\t\t\tprint (\"RENDERED CONFIG: {}\".format(rendered_config))\n\n\t\ttemplate_list = get_updated_list(template_list_copy)\n\n\t\tif(node_object[index]['platform'] == 'cisco'):\n\t\t\tredirect.append('get_config')\n\t\t\tcommand.append([''])\n\t\t### JUNIPER DEVICES WILL RECEIVE A DIFFERENT REDIRECT THAN CISCO PLATFORM\n\t\t### THREE ADDITIONAL COMMANDS ARE APPENEDED AT THE END, ^D, SHOW | COMPARE AND ROLLBACK 0\n\t\t### ALL TEMPLATES MATCHING ARE EXECUTED AT ONCE PER DEVICE\n\t\telif(node_object[index]['platform'] == 'juniper'):\n\t\t\tredirect.append('get_diff')\n\t\t\trendered_config.append('\\x04')\n\t\t\trendered_config.append('show | compare')\n\t\t\trendered_config.append('rollback 0')\n\t\t\tcommand.append(rendered_config)\n\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint\"REDIRECT: {}\".format(redirect)\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint\"COMMAND: {}\".format(command)\n#\tprint(\"[+] [COMPUTING DIFF. 
STANDBY...]\")\n\tmultithread_engine(initialize.ntw_device,redirect,command)\n\t\n\t### RESETING TEMPLATE_LIST TO ORIGINAL LIST\n\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint(\"ORIGINAL_LIST: {}\".format(template_list_original))\n\ttemplate_list = template_list_original\n\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint(\"TEMPLATE_LIST: {}\".format(template_list))\n\n\t### REINITIALIZING TEMPLATE_LIST TO THE ORIGINAL LIST OF TEMPLATES\n\tif(auditcreeper):\n\t\ttemplate_list = template_list_original[0]\n\n\t### THIS FOR LOOP WILL LOOP THROUGH ALL THE MATCHED ELEMENTS FROM THE USER SEARCH AND AUDIT ON SPECIFIC TEMPLATE OR IF NO ARGUMENT IS GIVEN, ALL TEMPLATES\n\t\n\tfor index in initialize.element:\n\n\t\t### NODE_CONFIG IS THE FINALIZED CONFIG TO PUSH TO THE NODE FOR REMEDIATION\n\t\tnode_configs = []\n\t\tntw_device_pop = True \n\t\t### TEMPLATE_NAME IS SET TO TRUE IN ORDER TO PRINT OUT THE TEMPLATE HEADING WHEN RECURSING\n\t\ttemplate_name = True\n\n\t\tif(not remediation):\n\t\t\tprint(\"Only in the device: -\")\n\t\t\tprint(\"Only in the generated config: +\")\n\t\t\tprint (\"{}\".format(node_object[index]['hostname']))\n\n\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n\t\ttemplate_list_juniper = template_list[:]\n\t\tif(node_object[index]['platform'] == 'juniper'):\n\n\t\t\t### THIS WILL RETURN A SORTED JUNIPER TEMPLATE LIST BASED ON JUNIPER'S 'SHOW CONFIGURATION' OUTPUT\n\t\t\ttemplate_list = get_sorted_juniper_template_list(template_list)\n\n\t\t### THIS WILL LOOP THROUGH ALL THE TEMPLATES SPECIFIED FOR THE PARTICULAR HOST IN NODES.YAML\n\t\tfor template in template_list:\n\n\t\t\t### THIS SECTION IS FOR CISCO SYSTEMS PLATFORM ###\n\t\t\tif(node_object[index]['platform'] == 'cisco'):\n\n\t\t\t\tcisco_audit_diff(node_object,index,template,AUDIT_FILTER_RE,output,remediation)\n\t\n\t\t\t### THIS SECTION IS FOR JUNIPER NETWORKS PLATFORM ###\n\t\t\tif(node_object[index]['platform'] == 
'juniper'):\n\n\t\t\t\tdirectory = get_template_directory(node_object[index]['platform'],node_object[index]['opersys'],node_object[index]['type'])\n\t\t\t\t### THIS SECTION OF CODE WILL OPEN DIFF-CONFIG *.CONF FILE AND STORE IN DIFF_CONFIG AS A LIST\n\t\t\t\tf = open(\"/diff-configs/{}\".format(node_object[index]['hostname']) + \".conf\", \"r\")\n\t\t\t\tinit_config = f.readlines()\n\t\t\t\t### DIFF_CONFIG ARE THE DIFFERENTIAL CONFIGS GENERATED BY THE /DIFF-CONFIGS/*.CONF FILE \n\t\t\t\tdiff_config = []\n\t\n\t\t\t\tfor config_line in init_config:\n\t\t\t\t\tstrip_config = config_line.strip('\\n')\n\t\t\t\t\tdiff_config.append(strip_config)\t\n\t\n\t\t\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\t\t\t\tprint (\"DIFF CONFIG: {}\".format(diff_config))\n\n\t\t\t\tRE = re.compile(r'\\[edit\\s({})'.format(template.split('.')[0]))\n\t\t\t\tsearch = list(filter(RE.match,diff_config))\n\n\t\t\t\tif(len(search) == 0):\n\t\t\t\t\tprint(\"{}{} (none)\".format(directory,template))\n\t\t\t\t\tprint('')\n\t\t\t\t\tno_diff = no_diff + 1\n\t\t\t\t\tif(no_diff == len(template_list)):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif(len(template_list) > 1):\t\n\t\t\t\t\t\tjuniper_audit_diff(directory,template,template_list,diff_config,edit_list,search)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t### THIS FIRST SECTION WILL FIND ALL THE INDEXES WITH THE '[edit